author     Klaus Jensen <k.jensen@samsung.com>       2021-06-17 21:06:54 +0200
committer  Klaus Jensen <k.jensen@samsung.com>       2021-06-29 07:16:25 +0200
commit     796d20681d9bef4f863565f1a2f2dbe28e2322c7 (patch)
tree       9efd91615ef93385710941badfab04984ecb6364 /hw/nvme
parent     f1c97407c5728e284b4e2b331e08c0d88ba568da (diff)
hw/nvme: reimplement the copy command to allow aio cancellation
Before this patch, the code would issue several AIOs simultaneously
without saving a reference to the aiocb. Without the aiocb reference,
the individual copies cannot be canceled.
Fix this by issuing the copy of each range one after another, keeping a
reference to the single in-flight aiocb.
Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
Reviewed-by: Keith Busch <kbusch@kernel.org>
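
The shape of the fix is worth spelling out before the patch body. Instead of
firing one read per range and waiting for a reference counter to hit zero, the
new code drives a single state machine (NvmeCopyAIOCB) that has exactly one
AIO in flight at any time and always stores its handle, so a cancel request
has something to point at. Below is a minimal, self-contained C sketch of that
pattern -- not the QEMU code itself: CopyAIOCB, fake_aio_read() and friends
are invented stand-ins for QEMU's BlockAIOCB machinery.

/*
 * Illustrative sketch only: a sequential, cancellable AIO chain.
 * fake_aio_read() stands in for blk_aio_preadv() and "completes"
 * synchronously; a real implementation would complete from an event
 * loop and save the returned aiocb so it can be cancelled.
 */
#include <errno.h>
#include <stdio.h>

typedef void CopyCompletionFunc(void *opaque, int ret);

typedef struct CopyAIOCB {
    int ret;                /* sticky error; -ECANCELED once cancelled */
    int idx, nr;            /* next range to copy / number of ranges   */
    int in_flight;          /* stand-in for the saved inner aiocb      */
    CopyCompletionFunc *cb; /* overall completion                      */
    void *opaque;
} CopyAIOCB;

static void copy_next_cb(void *opaque, int ret);

static void fake_aio_read(int range, CopyCompletionFunc *cb, void *opaque)
{
    printf("copying range %d\n", range);
    cb(opaque, 0);
}

/* Completion of one range: issue the next one or finish the command. */
static void copy_next_cb(void *opaque, int ret)
{
    CopyAIOCB *iocb = opaque;

    if (ret < 0) {
        iocb->ret = ret;
    }
    if (iocb->ret < 0 || iocb->idx == iocb->nr) {
        iocb->in_flight = 0;
        iocb->cb(iocb->opaque, iocb->ret);
        return;
    }

    /* exactly one AIO in flight at any time */
    iocb->in_flight = 1;
    fake_aio_read(iocb->idx++, copy_next_cb, iocb);
}

/* Cancellation: poison the chain, then cancel the one in-flight AIO. */
void copy_cancel(CopyAIOCB *iocb)
{
    iocb->ret = -ECANCELED;
    if (iocb->in_flight) {
        /* in the real patch: blk_aio_cancel_async(iocb->aiocb); */
    }
}

static void copy_done(void *opaque, int ret)
{
    (void)opaque;
    printf("copy finished, ret=%d\n", ret);
}

int main(void)
{
    CopyAIOCB iocb = { .nr = 3, .cb = copy_done };

    copy_next_cb(&iocb, 0); /* kick off the chain */
    return 0;
}

In the patch itself the same roles are played by iocb->aiocb (the saved
handle), nvme_copy_cb() (the re-entered step function) and nvme_copy_cancel()
(registered via AIOCBInfo.cancel_async); a bottom half (iocb->bh) defers the
final completion back to the proper context.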
Diffstat (limited to 'hw/nvme')
-rw-r--r--  hw/nvme/ctrl.c        | 678
-rw-r--r--  hw/nvme/trace-events  |   3
2 files changed, 370 insertions, 311 deletions
diff --git a/hw/nvme/ctrl.c b/hw/nvme/ctrl.c
index c1d95e98cb..b0cc8c44d2 100644
--- a/hw/nvme/ctrl.c
+++ b/hw/nvme/ctrl.c
@@ -2093,226 +2093,6 @@ out:
     nvme_aio_zone_reset_complete_cb(opaque, ret);
 }

-struct nvme_copy_ctx {
-    int copies;
-    uint8_t *bounce;
-    uint8_t *mbounce;
-    uint32_t nlb;
-    NvmeCopySourceRange *ranges;
-};
-
-struct nvme_copy_in_ctx {
-    NvmeRequest *req;
-    QEMUIOVector iov;
-    NvmeCopySourceRange *range;
-};
-
-static void nvme_copy_complete_cb(void *opaque, int ret)
-{
-    NvmeRequest *req = opaque;
-    NvmeNamespace *ns = req->ns;
-    struct nvme_copy_ctx *ctx = req->opaque;
-
-    if (ret) {
-        block_acct_failed(blk_get_stats(ns->blkconf.blk), &req->acct);
-        nvme_aio_err(req, ret);
-        goto out;
-    }
-
-    block_acct_done(blk_get_stats(ns->blkconf.blk), &req->acct);
-
-out:
-    if (ns->params.zoned) {
-        NvmeCopyCmd *copy = (NvmeCopyCmd *)&req->cmd;
-        uint64_t sdlba = le64_to_cpu(copy->sdlba);
-        NvmeZone *zone = nvme_get_zone_by_slba(ns, sdlba);
-
-        nvme_advance_zone_wp(ns, zone, ctx->nlb);
-    }
-
-    g_free(ctx->bounce);
-    g_free(ctx->mbounce);
-    g_free(ctx);
-
-    nvme_enqueue_req_completion(nvme_cq(req), req);
-}
-
-static void nvme_copy_cb(void *opaque, int ret)
-{
-    NvmeRequest *req = opaque;
-    NvmeNamespace *ns = req->ns;
-    struct nvme_copy_ctx *ctx = req->opaque;
-
-    trace_pci_nvme_copy_cb(nvme_cid(req));
-
-    if (ret) {
-        goto out;
-    }
-
-    if (ns->lbaf.ms) {
-        NvmeCopyCmd *copy = (NvmeCopyCmd *)&req->cmd;
-        uint64_t sdlba = le64_to_cpu(copy->sdlba);
-        int64_t offset = nvme_moff(ns, sdlba);
-
-        qemu_iovec_reset(&req->sg.iov);
-        qemu_iovec_add(&req->sg.iov, ctx->mbounce, nvme_m2b(ns, ctx->nlb));
-
-        req->aiocb = blk_aio_pwritev(ns->blkconf.blk, offset, &req->sg.iov, 0,
-                                     nvme_copy_complete_cb, req);
-        return;
-    }
-
-out:
-    nvme_copy_complete_cb(opaque, ret);
-}
-
-static void nvme_copy_in_complete(NvmeRequest *req)
-{
-    NvmeNamespace *ns = req->ns;
-    NvmeCopyCmd *copy = (NvmeCopyCmd *)&req->cmd;
-    struct nvme_copy_ctx *ctx = req->opaque;
-    uint64_t sdlba = le64_to_cpu(copy->sdlba);
-    uint16_t status;
-
-    trace_pci_nvme_copy_in_complete(nvme_cid(req));
-
-    block_acct_done(blk_get_stats(ns->blkconf.blk), &req->acct);
-
-    if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) {
-        uint8_t prinfor = (copy->control[0] >> 4) & 0xf;
-        uint8_t prinfow = (copy->control[2] >> 2) & 0xf;
-        uint16_t nr = copy->nr + 1;
-        NvmeCopySourceRange *range;
-        uint64_t slba;
-        uint32_t nlb;
-        uint16_t apptag, appmask;
-        uint32_t reftag;
-        uint8_t *buf = ctx->bounce, *mbuf = ctx->mbounce;
-        size_t len, mlen;
-        int i;
-
-        for (i = 0; i < nr; i++) {
-            range = &ctx->ranges[i];
-            slba = le64_to_cpu(range->slba);
-            nlb = le16_to_cpu(range->nlb) + 1;
-            len = nvme_l2b(ns, nlb);
-            mlen = nvme_m2b(ns, nlb);
-            apptag = le16_to_cpu(range->apptag);
-            appmask = le16_to_cpu(range->appmask);
-            reftag = le32_to_cpu(range->reftag);
-
-            status = nvme_dif_check(ns, buf, len, mbuf, mlen, prinfor, slba,
-                                    apptag, appmask, &reftag);
-            if (status) {
-                goto invalid;
-            }
-
-            buf += len;
-            mbuf += mlen;
-        }
-
-        apptag = le16_to_cpu(copy->apptag);
-        appmask = le16_to_cpu(copy->appmask);
-        reftag = le32_to_cpu(copy->reftag);
-
-        if (prinfow & NVME_RW_PRINFO_PRACT) {
-            size_t len = nvme_l2b(ns, ctx->nlb);
-            size_t mlen = nvme_m2b(ns, ctx->nlb);
-
-            status = nvme_check_prinfo(ns, prinfow, sdlba, reftag);
-            if (status) {
-                goto invalid;
-            }
-
-            nvme_dif_pract_generate_dif(ns, ctx->bounce, len, ctx->mbounce,
-                                        mlen, apptag, &reftag);
-        } else {
-            status = nvme_dif_check(ns, ctx->bounce, len, ctx->mbounce, mlen,
-                                    prinfow, sdlba, apptag, appmask, &reftag);
-            if (status) {
-                goto invalid;
-            }
-        }
-    }
-
-    status = nvme_check_bounds(ns, sdlba, ctx->nlb);
-    if (status) {
-        goto invalid;
-    }
-
-    if (ns->params.zoned) {
-        NvmeZone *zone = nvme_get_zone_by_slba(ns, sdlba);
-
-        status = nvme_check_zone_write(ns, zone, sdlba, ctx->nlb);
-        if (status) {
-            goto invalid;
-        }
-
-        status = nvme_zrm_auto(nvme_ctrl(req), ns, zone);
-        if (status) {
-            goto invalid;
-        }
-
-        zone->w_ptr += ctx->nlb;
-    }
-
-    qemu_iovec_init(&req->sg.iov, 1);
-    qemu_iovec_add(&req->sg.iov, ctx->bounce, nvme_l2b(ns, ctx->nlb));
-
-    block_acct_start(blk_get_stats(ns->blkconf.blk), &req->acct, 0,
-                     BLOCK_ACCT_WRITE);
-
-    req->aiocb = blk_aio_pwritev(ns->blkconf.blk, nvme_l2b(ns, sdlba),
-                                 &req->sg.iov, 0, nvme_copy_cb, req);
-
-    return;
-
-invalid:
-    req->status = status;
-
-    g_free(ctx->bounce);
-    g_free(ctx);
-
-    nvme_enqueue_req_completion(nvme_cq(req), req);
-}
-
-static void nvme_aio_copy_in_cb(void *opaque, int ret)
-{
-    struct nvme_copy_in_ctx *in_ctx = opaque;
-    NvmeRequest *req = in_ctx->req;
-    NvmeNamespace *ns = req->ns;
-    struct nvme_copy_ctx *ctx = req->opaque;
-
-    qemu_iovec_destroy(&in_ctx->iov);
-    g_free(in_ctx);
-
-    trace_pci_nvme_aio_copy_in_cb(nvme_cid(req));
-
-    if (ret) {
-        nvme_aio_err(req, ret);
-    }
-
-    ctx->copies--;
-
-    if (ctx->copies) {
-        return;
-    }
-
-    if (req->status) {
-        block_acct_failed(blk_get_stats(ns->blkconf.blk), &req->acct);
-
-        g_free(ctx->bounce);
-        g_free(ctx->mbounce);
-        g_free(ctx);
-
-        nvme_enqueue_req_completion(nvme_cq(req), req);
-
-        return;
-    }
-
-    nvme_copy_in_complete(req);
-}
-
 struct nvme_compare_ctx {
     struct {
         QEMUIOVector iov;
@@ -2713,153 +2493,433 @@ static uint16_t nvme_verify(NvmeCtrl *n, NvmeRequest *req)
     return NVME_NO_COMPLETE;
 }

-static uint16_t nvme_copy(NvmeCtrl *n, NvmeRequest *req)
+typedef struct NvmeCopyAIOCB {
+    BlockAIOCB common;
+    BlockAIOCB *aiocb;
+    NvmeRequest *req;
+    QEMUBH *bh;
+    int ret;
+
+    NvmeCopySourceRange *ranges;
+    int nr;
+    int idx;
+
+    uint8_t *bounce;
+    QEMUIOVector iov;
+    struct {
+        BlockAcctCookie read;
+        BlockAcctCookie write;
+    } acct;
+
+    uint32_t reftag;
+    uint64_t slba;
+
+    NvmeZone *zone;
+} NvmeCopyAIOCB;
+
+static void nvme_copy_cancel(BlockAIOCB *aiocb)
 {
+    NvmeCopyAIOCB *iocb = container_of(aiocb, NvmeCopyAIOCB, common);
+
+    iocb->ret = -ECANCELED;
+
+    if (iocb->aiocb) {
+        blk_aio_cancel_async(iocb->aiocb);
+        iocb->aiocb = NULL;
+    }
+}
+
+static const AIOCBInfo nvme_copy_aiocb_info = {
+    .aiocb_size   = sizeof(NvmeCopyAIOCB),
+    .cancel_async = nvme_copy_cancel,
+};
+
+static void nvme_copy_bh(void *opaque)
+{
+    NvmeCopyAIOCB *iocb = opaque;
+    NvmeRequest *req = iocb->req;
     NvmeNamespace *ns = req->ns;
-    NvmeCopyCmd *copy = (NvmeCopyCmd *)&req->cmd;
+    BlockAcctStats *stats = blk_get_stats(ns->blkconf.blk);

-    uint16_t nr = copy->nr + 1;
-    uint8_t format = copy->control[0] & 0xf;
-    uint8_t prinfor = (copy->control[0] >> 4) & 0xf;
-    uint8_t prinfow = (copy->control[2] >> 2) & 0xf;
+    if (iocb->idx != iocb->nr) {
+        req->cqe.result = cpu_to_le32(iocb->idx);
+    }

-    uint32_t nlb = 0;
-    uint8_t *bounce = NULL, *bouncep = NULL;
-    uint8_t *mbounce = NULL, *mbouncep = NULL;
-    struct nvme_copy_ctx *ctx;
-    uint16_t status;
-    int i;
+    qemu_iovec_destroy(&iocb->iov);
+    g_free(iocb->bounce);

-    trace_pci_nvme_copy(nvme_cid(req), nvme_nsid(ns), nr, format);
+    qemu_bh_delete(iocb->bh);
+    iocb->bh = NULL;

-    if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps) &&
-        ((prinfor & NVME_PRINFO_PRACT) != (prinfow & NVME_PRINFO_PRACT))) {
-        return NVME_INVALID_FIELD | NVME_DNR;
+    if (iocb->ret < 0) {
+        block_acct_failed(stats, &iocb->acct.read);
+        block_acct_failed(stats, &iocb->acct.write);
+    } else {
+        block_acct_done(stats, &iocb->acct.read);
+        block_acct_done(stats, &iocb->acct.write);
     }

-    if (!(n->id_ctrl.ocfs & (1 << format))) {
-        trace_pci_nvme_err_copy_invalid_format(format);
-        return NVME_INVALID_FIELD | NVME_DNR;
+    iocb->common.cb(iocb->common.opaque, iocb->ret);
+    qemu_aio_unref(iocb);
+}
+
+static void nvme_copy_cb(void *opaque, int ret);
+
+static void nvme_copy_out_completed_cb(void *opaque, int ret)
+{
+    NvmeCopyAIOCB *iocb = opaque;
+    NvmeRequest *req = iocb->req;
+    NvmeNamespace *ns = req->ns;
+    NvmeCopySourceRange *range = &iocb->ranges[iocb->idx];
+    uint32_t nlb = le32_to_cpu(range->nlb) + 1;
+
+    if (ret < 0) {
+        iocb->ret = ret;
+        goto out;
+    } else if (iocb->ret < 0) {
+        goto out;
     }

-    if (nr > ns->id_ns.msrc + 1) {
-        return NVME_CMD_SIZE_LIMIT | NVME_DNR;
+    if (ns->params.zoned) {
+        nvme_advance_zone_wp(ns, iocb->zone, nlb);
     }

-    ctx = g_new(struct nvme_copy_ctx, 1);
-    ctx->ranges = g_new(NvmeCopySourceRange, nr);
+    iocb->idx++;
+    iocb->slba += nlb;
+out:
+    nvme_copy_cb(iocb, iocb->ret);
+}

-    status = nvme_h2c(n, (uint8_t *)ctx->ranges,
-                      nr * sizeof(NvmeCopySourceRange), req);
-    if (status) {
+static void nvme_copy_out_cb(void *opaque, int ret)
+{
+    NvmeCopyAIOCB *iocb = opaque;
+    NvmeRequest *req = iocb->req;
+    NvmeNamespace *ns = req->ns;
+    NvmeCopySourceRange *range;
+    uint32_t nlb;
+    size_t mlen;
+    uint8_t *mbounce;
+
+    if (ret < 0) {
+        iocb->ret = ret;
+        goto out;
+    } else if (iocb->ret < 0) {
         goto out;
     }

-    for (i = 0; i < nr; i++) {
-        uint64_t slba = le64_to_cpu(ctx->ranges[i].slba);
-        uint32_t _nlb = le16_to_cpu(ctx->ranges[i].nlb) + 1;
+    if (!ns->lbaf.ms) {
+        nvme_copy_out_completed_cb(iocb, 0);
+        return;
+    }
+
+    range = &iocb->ranges[iocb->idx];
+    nlb = le32_to_cpu(range->nlb) + 1;

-        if (_nlb > le16_to_cpu(ns->id_ns.mssrl)) {
-            status = NVME_CMD_SIZE_LIMIT | NVME_DNR;
-            goto out;
-        }
+    mlen = nvme_m2b(ns, nlb);
+    mbounce = iocb->bounce + nvme_l2b(ns, nlb);
+
+    qemu_iovec_reset(&iocb->iov);
+    qemu_iovec_add(&iocb->iov, mbounce, mlen);
+
+    iocb->aiocb = blk_aio_pwritev(ns->blkconf.blk, nvme_moff(ns, iocb->slba),
+                                  &iocb->iov, 0, nvme_copy_out_completed_cb,
+                                  iocb);
+
+    return;

-        status = nvme_check_bounds(ns, slba, _nlb);
+out:
+    nvme_copy_cb(iocb, ret);
+}
+
+static void nvme_copy_in_completed_cb(void *opaque, int ret)
+{
+    NvmeCopyAIOCB *iocb = opaque;
+    NvmeRequest *req = iocb->req;
+    NvmeNamespace *ns = req->ns;
+    NvmeCopySourceRange *range;
+    uint32_t nlb;
+    size_t len;
+    uint16_t status;
+
+    if (ret < 0) {
+        iocb->ret = ret;
+        goto out;
+    } else if (iocb->ret < 0) {
+        goto out;
+    }
+
+    range = &iocb->ranges[iocb->idx];
+    nlb = le32_to_cpu(range->nlb) + 1;
+    len = nvme_l2b(ns, nlb);
+
+    trace_pci_nvme_copy_out(iocb->slba, nlb);
+
+    if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) {
+        NvmeCopyCmd *copy = (NvmeCopyCmd *)&req->cmd;
+
+        uint16_t prinfor = ((copy->control[0] >> 4) & 0xf);
+        uint16_t prinfow = ((copy->control[2] >> 2) & 0xf);
+
+        uint16_t apptag = le16_to_cpu(range->apptag);
+        uint16_t appmask = le16_to_cpu(range->appmask);
+        uint32_t reftag = le32_to_cpu(range->reftag);
+
+        uint64_t slba = le64_to_cpu(range->slba);
+        size_t mlen = nvme_m2b(ns, nlb);
+        uint8_t *mbounce = iocb->bounce + nvme_l2b(ns, nlb);
+
+        status = nvme_dif_check(ns, iocb->bounce, len, mbounce, mlen, prinfor,
+                                slba, apptag, appmask, &reftag);
         if (status) {
-            goto out;
+            goto invalid;
         }

-        if (NVME_ERR_REC_DULBE(ns->features.err_rec)) {
-            status = nvme_check_dulbe(ns, slba, _nlb);
+        apptag = le16_to_cpu(copy->apptag);
+        appmask = le16_to_cpu(copy->appmask);
+
+        if (prinfow & NVME_PRINFO_PRACT) {
+            status = nvme_check_prinfo(ns, prinfow, iocb->slba, iocb->reftag);
             if (status) {
-                goto out;
+                goto invalid;
             }
-        }

-        if (ns->params.zoned) {
-            status = nvme_check_zone_read(ns, slba, _nlb);
+            nvme_dif_pract_generate_dif(ns, iocb->bounce, len, mbounce, mlen,
+                                        apptag, &iocb->reftag);
+        } else {
+            status = nvme_dif_check(ns, iocb->bounce, len, mbounce, mlen,
+                                    prinfow, iocb->slba, apptag, appmask,
+                                    &iocb->reftag);
             if (status) {
-                goto out;
+                goto invalid;
             }
         }
+    }

-        nlb += _nlb;
+    status = nvme_check_bounds(ns, iocb->slba, nlb);
+    if (status) {
+        goto invalid;
     }

-    if (nlb > le32_to_cpu(ns->id_ns.mcl)) {
-        status = NVME_CMD_SIZE_LIMIT | NVME_DNR;
-        goto out;
+    if (ns->params.zoned) {
+        status = nvme_check_zone_write(ns, iocb->zone, iocb->slba, nlb);
+        if (status) {
+            goto invalid;
+        }
+
+        iocb->zone->w_ptr += nlb;
     }

-    bounce = bouncep = g_malloc(nvme_l2b(ns, nlb));
-    if (ns->lbaf.ms) {
-        mbounce = mbouncep = g_malloc(nvme_m2b(ns, nlb));
+    qemu_iovec_reset(&iocb->iov);
+    qemu_iovec_add(&iocb->iov, iocb->bounce, len);
+
+    iocb->aiocb = blk_aio_pwritev(ns->blkconf.blk, nvme_l2b(ns, iocb->slba),
+                                  &iocb->iov, 0, nvme_copy_out_cb, iocb);
+
+    return;
+
+invalid:
+    req->status = status;
+    iocb->aiocb = NULL;
+    if (iocb->bh) {
+        qemu_bh_schedule(iocb->bh);
     }

-    block_acct_start(blk_get_stats(ns->blkconf.blk), &req->acct, 0,
-                     BLOCK_ACCT_READ);
+    return;

-    ctx->bounce = bounce;
-    ctx->mbounce = mbounce;
-    ctx->nlb = nlb;
-    ctx->copies = 1;
+out:
+    nvme_copy_cb(iocb, ret);
+}

-    req->opaque = ctx;
+static void nvme_copy_in_cb(void *opaque, int ret)
+{
+    NvmeCopyAIOCB *iocb = opaque;
+    NvmeRequest *req = iocb->req;
+    NvmeNamespace *ns = req->ns;
+    NvmeCopySourceRange *range;
+    uint64_t slba;
+    uint32_t nlb;

-    for (i = 0; i < nr; i++) {
-        uint64_t slba = le64_to_cpu(ctx->ranges[i].slba);
-        uint32_t nlb = le16_to_cpu(ctx->ranges[i].nlb) + 1;
+    if (ret < 0) {
+        iocb->ret = ret;
+        goto out;
+    } else if (iocb->ret < 0) {
+        goto out;
+    }

-        size_t len = nvme_l2b(ns, nlb);
-        int64_t offset = nvme_l2b(ns, slba);
+    if (!ns->lbaf.ms) {
+        nvme_copy_in_completed_cb(iocb, 0);
+        return;
+    }

-        trace_pci_nvme_copy_source_range(slba, nlb);
+    range = &iocb->ranges[iocb->idx];
+    slba = le64_to_cpu(range->slba);
+    nlb = le32_to_cpu(range->nlb) + 1;

-        struct nvme_copy_in_ctx *in_ctx = g_new(struct nvme_copy_in_ctx, 1);
-        in_ctx->req = req;
+    qemu_iovec_reset(&iocb->iov);
+    qemu_iovec_add(&iocb->iov, iocb->bounce + nvme_l2b(ns, nlb),
+                   nvme_m2b(ns, nlb));

-        qemu_iovec_init(&in_ctx->iov, 1);
-        qemu_iovec_add(&in_ctx->iov, bouncep, len);
+    iocb->aiocb = blk_aio_preadv(ns->blkconf.blk, nvme_moff(ns, slba),
+                                 &iocb->iov, 0, nvme_copy_in_completed_cb,
+                                 iocb);
+    return;

-        ctx->copies++;
+out:
+    nvme_copy_cb(iocb, iocb->ret);
+}

-        blk_aio_preadv(ns->blkconf.blk, offset, &in_ctx->iov, 0,
-                       nvme_aio_copy_in_cb, in_ctx);
+static void nvme_copy_cb(void *opaque, int ret)
+{
+    NvmeCopyAIOCB *iocb = opaque;
+    NvmeRequest *req = iocb->req;
+    NvmeNamespace *ns = req->ns;
+    NvmeCopySourceRange *range;
+    uint64_t slba;
+    uint32_t nlb;
+    size_t len;
+    uint16_t status;

-        bouncep += len;
+    if (ret < 0) {
+        iocb->ret = ret;
+        goto done;
+    } else if (iocb->ret < 0) {
+        goto done;
+    }

-        if (ns->lbaf.ms) {
-            len = nvme_m2b(ns, nlb);
-            offset = nvme_moff(ns, slba);
+    if (iocb->idx == iocb->nr) {
+        goto done;
+    }

-            in_ctx = g_new(struct nvme_copy_in_ctx, 1);
-            in_ctx->req = req;
+    range = &iocb->ranges[iocb->idx];
+    slba = le64_to_cpu(range->slba);
+    nlb = le32_to_cpu(range->nlb) + 1;
+    len = nvme_l2b(ns, nlb);
+
+    trace_pci_nvme_copy_source_range(slba, nlb);

-            qemu_iovec_init(&in_ctx->iov, 1);
-            qemu_iovec_add(&in_ctx->iov, mbouncep, len);
+    if (nlb > le16_to_cpu(ns->id_ns.mssrl)) {
+        status = NVME_CMD_SIZE_LIMIT | NVME_DNR;
+        goto invalid;
+    }

-            ctx->copies++;
+    status = nvme_check_bounds(ns, slba, nlb);
+    if (status) {
+        goto invalid;
+    }

-            blk_aio_preadv(ns->blkconf.blk, offset, &in_ctx->iov, 0,
-                           nvme_aio_copy_in_cb, in_ctx);
+    if (NVME_ERR_REC_DULBE(ns->features.err_rec)) {
+        status = nvme_check_dulbe(ns, slba, nlb);
+        if (status) {
+            goto invalid;
+        }
+    }

-            mbouncep += len;
+    if (ns->params.zoned) {
+        status = nvme_check_zone_read(ns, slba, nlb);
+        if (status) {
+            goto invalid;
         }
     }

-    /* account for the 1-initialization */
-    ctx->copies--;
+    qemu_iovec_reset(&iocb->iov);
+    qemu_iovec_add(&iocb->iov, iocb->bounce, len);
+
+    iocb->aiocb = blk_aio_preadv(ns->blkconf.blk, nvme_l2b(ns, slba),
+                                 &iocb->iov, 0, nvme_copy_in_cb, iocb);
+    return;

-    if (!ctx->copies) {
-        nvme_copy_in_complete(req);
+invalid:
+    req->status = status;
+done:
+    iocb->aiocb = NULL;
+    if (iocb->bh) {
+        qemu_bh_schedule(iocb->bh);
     }
+}

-    return NVME_NO_COMPLETE;
+static uint16_t nvme_copy(NvmeCtrl *n, NvmeRequest *req)
+{
+    NvmeNamespace *ns = req->ns;
+    NvmeCopyCmd *copy = (NvmeCopyCmd *)&req->cmd;
+    NvmeCopyAIOCB *iocb = blk_aio_get(&nvme_copy_aiocb_info, ns->blkconf.blk,
+                                      nvme_misc_cb, req);
+    uint16_t nr = copy->nr + 1;
+    uint8_t format = copy->control[0] & 0xf;
+    uint16_t prinfor = ((copy->control[0] >> 4) & 0xf);
+    uint16_t prinfow = ((copy->control[2] >> 2) & 0xf);
+
+    uint16_t status;
+
+    trace_pci_nvme_copy(nvme_cid(req), nvme_nsid(ns), nr, format);

-out:
-    g_free(ctx->ranges);
-    g_free(ctx);
+    iocb->ranges = NULL;
+    iocb->zone = NULL;
+
+    if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps) &&
+        ((prinfor & NVME_PRINFO_PRACT) != (prinfow & NVME_PRINFO_PRACT))) {
+        status = NVME_INVALID_FIELD | NVME_DNR;
+        goto invalid;
+    }
+
+    if (!(n->id_ctrl.ocfs & (1 << format))) {
+        trace_pci_nvme_err_copy_invalid_format(format);
+        status = NVME_INVALID_FIELD | NVME_DNR;
+        goto invalid;
+    }
+
+    if (nr > ns->id_ns.msrc + 1) {
+        status = NVME_CMD_SIZE_LIMIT | NVME_DNR;
+        goto invalid;
+    }
+
+    iocb->ranges = g_new(NvmeCopySourceRange, nr);
+
+    status = nvme_h2c(n, (uint8_t *)iocb->ranges,
+                      sizeof(NvmeCopySourceRange) * nr, req);
+    if (status) {
+        goto invalid;
+    }
+
+    iocb->slba = le64_to_cpu(copy->sdlba);
+
+    if (ns->params.zoned) {
+        iocb->zone = nvme_get_zone_by_slba(ns, iocb->slba);
+        if (!iocb->zone) {
+            status = NVME_LBA_RANGE | NVME_DNR;
+            goto invalid;
+        }
+
+        status = nvme_zrm_auto(n, ns, iocb->zone);
+        if (status) {
+            goto invalid;
+        }
+    }
+
+    iocb->req = req;
+    iocb->bh = qemu_bh_new(nvme_copy_bh, iocb);
+    iocb->ret = 0;
+    iocb->nr = nr;
+    iocb->idx = 0;
+    iocb->reftag = le32_to_cpu(copy->reftag);
+    iocb->bounce = g_malloc_n(le16_to_cpu(ns->id_ns.mssrl),
+                              ns->lbasz + ns->lbaf.ms);
+
+    qemu_iovec_init(&iocb->iov, 1);
+
+    block_acct_start(blk_get_stats(ns->blkconf.blk), &iocb->acct.read, 0,
+                     BLOCK_ACCT_READ);
+    block_acct_start(blk_get_stats(ns->blkconf.blk), &iocb->acct.write, 0,
+                     BLOCK_ACCT_WRITE);
+
+    req->aiocb = &iocb->common;
+    nvme_copy_cb(iocb, 0);
+
+    return NVME_NO_COMPLETE;
+
+invalid:
+    g_free(iocb->ranges);
+    qemu_aio_unref(iocb);

     return status;
 }
diff --git a/hw/nvme/trace-events b/hw/nvme/trace-events
index a3e1134686..cd65f8b288 100644
--- a/hw/nvme/trace-events
+++ b/hw/nvme/trace-events
@@ -30,8 +30,7 @@ pci_nvme_dif_prchk_apptag(uint16_t apptag, uint16_t elbat, uint16_t elbatm) "app
 pci_nvme_dif_prchk_reftag(uint32_t reftag, uint32_t elbrt) "reftag 0x%"PRIx32" elbrt 0x%"PRIx32""
 pci_nvme_copy(uint16_t cid, uint32_t nsid, uint16_t nr, uint8_t format) "cid %"PRIu16" nsid %"PRIu32" nr %"PRIu16" format 0x%"PRIx8""
 pci_nvme_copy_source_range(uint64_t slba, uint32_t nlb) "slba 0x%"PRIx64" nlb %"PRIu32""
-pci_nvme_copy_in_complete(uint16_t cid) "cid %"PRIu16""
-pci_nvme_copy_cb(uint16_t cid) "cid %"PRIu16""
+pci_nvme_copy_out(uint64_t slba, uint32_t nlb) "slba 0x%"PRIx64" nlb %"PRIu32""
 pci_nvme_verify(uint16_t cid, uint32_t nsid, uint64_t slba, uint32_t nlb) "cid %"PRIu16" nsid %"PRIu32" slba 0x%"PRIx64" nlb %"PRIu32""
 pci_nvme_verify_mdata_in_cb(uint16_t cid, const char *blkname) "cid %"PRIu16" blk '%s'"
 pci_nvme_verify_cb(uint16_t cid, uint8_t prinfo, uint16_t apptag, uint16_t appmask, uint32_t reftag) "cid %"PRIu16" prinfo 0x%"PRIx8" apptag 0x%"PRIx16" appmask 0x%"PRIx16" reftag 0x%"PRIx32""
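
One design consequence of the per-range loop is visible in the allocation in
the new nvme_copy(): the old code allocated bounce buffers for the whole
command (up to MCL logical blocks, plus a separate metadata buffer), while the
new code allocates a single buffer sized for the largest possible single range
-- MSSRL blocks of data plus per-block metadata -- and reuses it for every
range, bounding memory per outstanding copy command regardless of range count.
A toy computation of that footprint follows; the values for mssrl, lbasz and
ms are illustrative assumptions, not taken from the patch.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* illustrative values; the real ones come from the namespace */
    uint16_t mssrl = 128;  /* Maximum Single Source Range Length (blocks) */
    size_t lbasz   = 4096; /* logical block size in bytes                 */
    size_t ms      = 16;   /* metadata bytes per logical block            */

    /* mirrors g_malloc_n(mssrl, lbasz + ms): data for the range comes
     * first, with the metadata region appended after the data bytes */
    size_t bounce = (size_t)mssrl * (lbasz + ms);

    printf("per-command bounce buffer: %zu bytes\n", bounce);
    return 0;
}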