author    | Klaus Jensen <k.jensen@samsung.com> | 2021-04-14 22:14:30 +0200
committer | Klaus Jensen <k.jensen@samsung.com> | 2021-05-17 09:19:00 +0200
commit    | 88eea45c536470cd3c43440cbb1cd4d3b9fa519c (patch)
tree      | b5b4d753aa955348af07777518cd0a60b06e5e56 /hw/block
parent    | 49ad39c55a2086637bbde4616491dfee17a142e7 (diff)
hw/nvme: move nvme emulation out of hw/block
With the introduction of the nvme-subsystem device, we are really
cluttering up the hw/block directory.
As suggested by Philippe previously, move the nvme emulation to hw/nvme.
Suggested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Diffstat (limited to 'hw/block')
-rw-r--r-- | hw/block/Kconfig       |    5
-rw-r--r-- | hw/block/meson.build   |    1
-rw-r--r-- | hw/block/nvme-dif.c    |  509
-rw-r--r-- | hw/block/nvme-ns.c     |  580
-rw-r--r-- | hw/block/nvme-subsys.c |   80
-rw-r--r-- | hw/block/nvme.c        | 6365
-rw-r--r-- | hw/block/nvme.h        |  547
-rw-r--r-- | hw/block/trace-events  |  206
8 files changed, 0 insertions, 8293 deletions
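Note that this diffstat is limited to hw/block and therefore shows only the deletions; the matching additions land in the new hw/nvme directory and are not part of this listing. As a rough sketch of what the receiving side plausibly looks like (the file names under hw/nvme are an assumption here, carried over unchanged from the files deleted below; the commit may also rename them), the build glue moves along these lines:

    # hw/nvme/meson.build (sketch, not shown by this hw/block-limited diffstat;
    # file names under hw/nvme are assumed to match the deleted hw/block ones)
    softmmu_ss.add(when: 'CONFIG_NVME_PCI',
                   if_true: files('nvme.c', 'nvme-ns.c', 'nvme-subsys.c', 'nvme-dif.c'))

with the CONFIG_NVME_PCI Kconfig entry removed below presumably reappearing under hw/nvme/Kconfig.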
diff --git a/hw/block/Kconfig b/hw/block/Kconfig
index 4fcd152166..295441e64a 100644
--- a/hw/block/Kconfig
+++ b/hw/block/Kconfig
@@ -25,11 +25,6 @@ config ONENAND
 config TC58128
     bool
 
-config NVME_PCI
-    bool
-    default y if PCI_DEVICES
-    depends on PCI
-
 config VIRTIO_BLK
     bool
     default y
diff --git a/hw/block/meson.build b/hw/block/meson.build
index 5b4a7699f9..8b0de54db1 100644
--- a/hw/block/meson.build
+++ b/hw/block/meson.build
@@ -13,7 +13,6 @@ softmmu_ss.add(when: 'CONFIG_SSI_M25P80', if_true: files('m25p80.c'))
 softmmu_ss.add(when: 'CONFIG_SWIM', if_true: files('swim.c'))
 softmmu_ss.add(when: 'CONFIG_XEN', if_true: files('xen-block.c'))
 softmmu_ss.add(when: 'CONFIG_TC58128', if_true: files('tc58128.c'))
-softmmu_ss.add(when: 'CONFIG_NVME_PCI', if_true: files('nvme.c', 'nvme-ns.c', 'nvme-subsys.c', 'nvme-dif.c'))
 
 specific_ss.add(when: 'CONFIG_VIRTIO_BLK', if_true: files('virtio-blk.c'))
 specific_ss.add(when: 'CONFIG_VHOST_USER_BLK', if_true: files('vhost-user-blk.c'))
diff --git a/hw/block/nvme-dif.c b/hw/block/nvme-dif.c
deleted file mode 100644
index 88efcbe9bd..0000000000
--- a/hw/block/nvme-dif.c
+++ /dev/null
@@ -1,509 +0,0 @@
-/*
- * QEMU NVM Express End-to-End Data Protection support
- *
- * Copyright (c) 2021 Samsung Electronics Co., Ltd.
- *
- * Authors:
- *   Klaus Jensen <k.jensen@samsung.com>
- *   Gollu Appalanaidu <anaidu.gollu@samsung.com>
- */
-
-#include "qemu/osdep.h"
-#include "qapi/error.h"
-#include "sysemu/block-backend.h"
-
-#include "nvme.h"
-#include "trace.h"
-
-uint16_t nvme_check_prinfo(NvmeNamespace *ns, uint16_t ctrl, uint64_t slba,
-                           uint32_t reftag)
-{
-    if ((NVME_ID_NS_DPS_TYPE(ns->id_ns.dps) == NVME_ID_NS_DPS_TYPE_1) &&
-        (ctrl & NVME_RW_PRINFO_PRCHK_REF) && (slba & 0xffffffff) != reftag) {
-        return NVME_INVALID_PROT_INFO | NVME_DNR;
-    }
-
-    return NVME_SUCCESS;
-}
-
-/* from Linux kernel (crypto/crct10dif_common.c) */
-static uint16_t crc_t10dif(uint16_t crc, const unsigned char *buffer,
-                           size_t len)
-{
-    unsigned int i;
-
-    for (i = 0; i < len; i++) {
-        crc = (crc << 8) ^ t10_dif_crc_table[((crc >> 8) ^ buffer[i]) & 0xff];
-    }
-
-    return crc;
-}
-
-void nvme_dif_pract_generate_dif(NvmeNamespace *ns, uint8_t *buf, size_t len,
-                                 uint8_t *mbuf, size_t mlen, uint16_t apptag,
-                                 uint32_t reftag)
-{
-    uint8_t *end = buf + len;
-    int16_t pil = 0;
-
-    if (!(ns->id_ns.dps & NVME_ID_NS_DPS_FIRST_EIGHT)) {
-        pil = ns->lbaf.ms - sizeof(NvmeDifTuple);
-    }
-
-    trace_pci_nvme_dif_pract_generate_dif(len, ns->lbasz, ns->lbasz + pil,
-                                          apptag, reftag);
-
-    for (; buf < end; buf += ns->lbasz, mbuf += ns->lbaf.ms) {
-        NvmeDifTuple *dif = (NvmeDifTuple *)(mbuf + pil);
-        uint16_t crc = crc_t10dif(0x0, buf, ns->lbasz);
-
-        if (pil) {
-            crc = crc_t10dif(crc, mbuf, pil);
-        }
-
-        dif->guard = cpu_to_be16(crc);
-        dif->apptag = cpu_to_be16(apptag);
-        dif->reftag = cpu_to_be32(reftag);
-
-        if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps) != NVME_ID_NS_DPS_TYPE_3) {
-            reftag++;
-        }
-    }
-}
-
-static uint16_t nvme_dif_prchk(NvmeNamespace *ns, NvmeDifTuple *dif,
-                               uint8_t *buf, uint8_t *mbuf, size_t pil,
-                               uint16_t ctrl, uint16_t apptag,
-                               uint16_t appmask, uint32_t reftag)
-{
-    switch (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) {
-    case NVME_ID_NS_DPS_TYPE_3:
-        if (be32_to_cpu(dif->reftag) != 0xffffffff) {
-            break;
-        }
-
-        /* fallthrough */
-    case NVME_ID_NS_DPS_TYPE_1:
-    case NVME_ID_NS_DPS_TYPE_2:
-        if (be16_to_cpu(dif->apptag) != 0xffff) {
-            break;
-        }
-
-        trace_pci_nvme_dif_prchk_disabled(be16_to_cpu(dif->apptag),
-                                          be32_to_cpu(dif->reftag));
-
-        return NVME_SUCCESS;
-    }
-
-    if (ctrl &
NVME_RW_PRINFO_PRCHK_GUARD) { - uint16_t crc = crc_t10dif(0x0, buf, ns->lbasz); - - if (pil) { - crc = crc_t10dif(crc, mbuf, pil); - } - - trace_pci_nvme_dif_prchk_guard(be16_to_cpu(dif->guard), crc); - - if (be16_to_cpu(dif->guard) != crc) { - return NVME_E2E_GUARD_ERROR; - } - } - - if (ctrl & NVME_RW_PRINFO_PRCHK_APP) { - trace_pci_nvme_dif_prchk_apptag(be16_to_cpu(dif->apptag), apptag, - appmask); - - if ((be16_to_cpu(dif->apptag) & appmask) != (apptag & appmask)) { - return NVME_E2E_APP_ERROR; - } - } - - if (ctrl & NVME_RW_PRINFO_PRCHK_REF) { - trace_pci_nvme_dif_prchk_reftag(be32_to_cpu(dif->reftag), reftag); - - if (be32_to_cpu(dif->reftag) != reftag) { - return NVME_E2E_REF_ERROR; - } - } - - return NVME_SUCCESS; -} - -uint16_t nvme_dif_check(NvmeNamespace *ns, uint8_t *buf, size_t len, - uint8_t *mbuf, size_t mlen, uint16_t ctrl, - uint64_t slba, uint16_t apptag, - uint16_t appmask, uint32_t reftag) -{ - uint8_t *end = buf + len; - int16_t pil = 0; - uint16_t status; - - status = nvme_check_prinfo(ns, ctrl, slba, reftag); - if (status) { - return status; - } - - if (!(ns->id_ns.dps & NVME_ID_NS_DPS_FIRST_EIGHT)) { - pil = ns->lbaf.ms - sizeof(NvmeDifTuple); - } - - trace_pci_nvme_dif_check(NVME_RW_PRINFO(ctrl), ns->lbasz + pil); - - for (; buf < end; buf += ns->lbasz, mbuf += ns->lbaf.ms) { - NvmeDifTuple *dif = (NvmeDifTuple *)(mbuf + pil); - - status = nvme_dif_prchk(ns, dif, buf, mbuf, pil, ctrl, apptag, - appmask, reftag); - if (status) { - return status; - } - - if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps) != NVME_ID_NS_DPS_TYPE_3) { - reftag++; - } - } - - return NVME_SUCCESS; -} - -uint16_t nvme_dif_mangle_mdata(NvmeNamespace *ns, uint8_t *mbuf, size_t mlen, - uint64_t slba) -{ - BlockBackend *blk = ns->blkconf.blk; - BlockDriverState *bs = blk_bs(blk); - - int64_t moffset = 0, offset = nvme_l2b(ns, slba); - uint8_t *mbufp, *end; - bool zeroed; - int16_t pil = 0; - int64_t bytes = (mlen / ns->lbaf.ms) << ns->lbaf.ds; - int64_t pnum = 0; - - Error *err = NULL; - - - if (!(ns->id_ns.dps & NVME_ID_NS_DPS_FIRST_EIGHT)) { - pil = ns->lbaf.ms - sizeof(NvmeDifTuple); - } - - do { - int ret; - - bytes -= pnum; - - ret = bdrv_block_status(bs, offset, bytes, &pnum, NULL, NULL); - if (ret < 0) { - error_setg_errno(&err, -ret, "unable to get block status"); - error_report_err(err); - - return NVME_INTERNAL_DEV_ERROR; - } - - zeroed = !!(ret & BDRV_BLOCK_ZERO); - - trace_pci_nvme_block_status(offset, bytes, pnum, ret, zeroed); - - if (zeroed) { - mbufp = mbuf + moffset; - mlen = (pnum >> ns->lbaf.ds) * ns->lbaf.ms; - end = mbufp + mlen; - - for (; mbufp < end; mbufp += ns->lbaf.ms) { - memset(mbufp + pil, 0xff, sizeof(NvmeDifTuple)); - } - } - - moffset += (pnum >> ns->lbaf.ds) * ns->lbaf.ms; - offset += pnum; - } while (pnum != bytes); - - return NVME_SUCCESS; -} - -static void nvme_dif_rw_cb(void *opaque, int ret) -{ - NvmeBounceContext *ctx = opaque; - NvmeRequest *req = ctx->req; - NvmeNamespace *ns = req->ns; - BlockBackend *blk = ns->blkconf.blk; - - trace_pci_nvme_dif_rw_cb(nvme_cid(req), blk_name(blk)); - - qemu_iovec_destroy(&ctx->data.iov); - g_free(ctx->data.bounce); - - qemu_iovec_destroy(&ctx->mdata.iov); - g_free(ctx->mdata.bounce); - - g_free(ctx); - - nvme_rw_complete_cb(req, ret); -} - -static void nvme_dif_rw_check_cb(void *opaque, int ret) -{ - NvmeBounceContext *ctx = opaque; - NvmeRequest *req = ctx->req; - NvmeNamespace *ns = req->ns; - NvmeCtrl *n = nvme_ctrl(req); - NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; - uint64_t slba = le64_to_cpu(rw->slba); - uint16_t ctrl = 
le16_to_cpu(rw->control); - uint16_t apptag = le16_to_cpu(rw->apptag); - uint16_t appmask = le16_to_cpu(rw->appmask); - uint32_t reftag = le32_to_cpu(rw->reftag); - uint16_t status; - - trace_pci_nvme_dif_rw_check_cb(nvme_cid(req), NVME_RW_PRINFO(ctrl), apptag, - appmask, reftag); - - if (ret) { - goto out; - } - - status = nvme_dif_mangle_mdata(ns, ctx->mdata.bounce, ctx->mdata.iov.size, - slba); - if (status) { - req->status = status; - goto out; - } - - status = nvme_dif_check(ns, ctx->data.bounce, ctx->data.iov.size, - ctx->mdata.bounce, ctx->mdata.iov.size, ctrl, - slba, apptag, appmask, reftag); - if (status) { - req->status = status; - goto out; - } - - status = nvme_bounce_data(n, ctx->data.bounce, ctx->data.iov.size, - NVME_TX_DIRECTION_FROM_DEVICE, req); - if (status) { - req->status = status; - goto out; - } - - if (ctrl & NVME_RW_PRINFO_PRACT && ns->lbaf.ms == 8) { - goto out; - } - - status = nvme_bounce_mdata(n, ctx->mdata.bounce, ctx->mdata.iov.size, - NVME_TX_DIRECTION_FROM_DEVICE, req); - if (status) { - req->status = status; - } - -out: - nvme_dif_rw_cb(ctx, ret); -} - -static void nvme_dif_rw_mdata_in_cb(void *opaque, int ret) -{ - NvmeBounceContext *ctx = opaque; - NvmeRequest *req = ctx->req; - NvmeNamespace *ns = req->ns; - NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; - uint64_t slba = le64_to_cpu(rw->slba); - uint32_t nlb = le16_to_cpu(rw->nlb) + 1; - size_t mlen = nvme_m2b(ns, nlb); - uint64_t offset = nvme_moff(ns, slba); - BlockBackend *blk = ns->blkconf.blk; - - trace_pci_nvme_dif_rw_mdata_in_cb(nvme_cid(req), blk_name(blk)); - - if (ret) { - goto out; - } - - ctx->mdata.bounce = g_malloc(mlen); - - qemu_iovec_reset(&ctx->mdata.iov); - qemu_iovec_add(&ctx->mdata.iov, ctx->mdata.bounce, mlen); - - req->aiocb = blk_aio_preadv(blk, offset, &ctx->mdata.iov, 0, - nvme_dif_rw_check_cb, ctx); - return; - -out: - nvme_dif_rw_cb(ctx, ret); -} - -static void nvme_dif_rw_mdata_out_cb(void *opaque, int ret) -{ - NvmeBounceContext *ctx = opaque; - NvmeRequest *req = ctx->req; - NvmeNamespace *ns = req->ns; - NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; - uint64_t slba = le64_to_cpu(rw->slba); - uint64_t offset = nvme_moff(ns, slba); - BlockBackend *blk = ns->blkconf.blk; - - trace_pci_nvme_dif_rw_mdata_out_cb(nvme_cid(req), blk_name(blk)); - - if (ret) { - goto out; - } - - req->aiocb = blk_aio_pwritev(blk, offset, &ctx->mdata.iov, 0, - nvme_dif_rw_cb, ctx); - return; - -out: - nvme_dif_rw_cb(ctx, ret); -} - -uint16_t nvme_dif_rw(NvmeCtrl *n, NvmeRequest *req) -{ - NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; - NvmeNamespace *ns = req->ns; - BlockBackend *blk = ns->blkconf.blk; - bool wrz = rw->opcode == NVME_CMD_WRITE_ZEROES; - uint32_t nlb = le16_to_cpu(rw->nlb) + 1; - uint64_t slba = le64_to_cpu(rw->slba); - size_t len = nvme_l2b(ns, nlb); - size_t mlen = nvme_m2b(ns, nlb); - size_t mapped_len = len; - int64_t offset = nvme_l2b(ns, slba); - uint16_t ctrl = le16_to_cpu(rw->control); - uint16_t apptag = le16_to_cpu(rw->apptag); - uint16_t appmask = le16_to_cpu(rw->appmask); - uint32_t reftag = le32_to_cpu(rw->reftag); - bool pract = !!(ctrl & NVME_RW_PRINFO_PRACT); - NvmeBounceContext *ctx; - uint16_t status; - - trace_pci_nvme_dif_rw(pract, NVME_RW_PRINFO(ctrl)); - - ctx = g_new0(NvmeBounceContext, 1); - ctx->req = req; - - if (wrz) { - BdrvRequestFlags flags = BDRV_REQ_MAY_UNMAP; - - if (ctrl & NVME_RW_PRINFO_PRCHK_MASK) { - status = NVME_INVALID_PROT_INFO | NVME_DNR; - goto err; - } - - if (pract) { - uint8_t *mbuf, *end; - int16_t pil = ns->lbaf.ms - sizeof(NvmeDifTuple); - - status = 
nvme_check_prinfo(ns, ctrl, slba, reftag); - if (status) { - goto err; - } - - flags = 0; - - ctx->mdata.bounce = g_malloc0(mlen); - - qemu_iovec_init(&ctx->mdata.iov, 1); - qemu_iovec_add(&ctx->mdata.iov, ctx->mdata.bounce, mlen); - - mbuf = ctx->mdata.bounce; - end = mbuf + mlen; - - if (ns->id_ns.dps & NVME_ID_NS_DPS_FIRST_EIGHT) { - pil = 0; - } - - for (; mbuf < end; mbuf += ns->lbaf.ms) { - NvmeDifTuple *dif = (NvmeDifTuple *)(mbuf + pil); - - dif->apptag = cpu_to_be16(apptag); - dif->reftag = cpu_to_be32(reftag); - - switch (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) { - case NVME_ID_NS_DPS_TYPE_1: - case NVME_ID_NS_DPS_TYPE_2: - reftag++; - } - } - } - - req->aiocb = blk_aio_pwrite_zeroes(blk, offset, len, flags, - nvme_dif_rw_mdata_out_cb, ctx); - return NVME_NO_COMPLETE; - } - - if (nvme_ns_ext(ns) && !(pract && ns->lbaf.ms == 8)) { - mapped_len += mlen; - } - - status = nvme_map_dptr(n, &req->sg, mapped_len, &req->cmd); - if (status) { - goto err; - } - - ctx->data.bounce = g_malloc(len); - - qemu_iovec_init(&ctx->data.iov, 1); - qemu_iovec_add(&ctx->data.iov, ctx->data.bounce, len); - - if (req->cmd.opcode == NVME_CMD_READ) { - block_acct_start(blk_get_stats(blk), &req->acct, ctx->data.iov.size, - BLOCK_ACCT_READ); - - req->aiocb = blk_aio_preadv(ns->blkconf.blk, offset, &ctx->data.iov, 0, - nvme_dif_rw_mdata_in_cb, ctx); - return NVME_NO_COMPLETE; - } - - status = nvme_bounce_data(n, ctx->data.bounce, ctx->data.iov.size, - NVME_TX_DIRECTION_TO_DEVICE, req); - if (status) { - goto err; - } - - ctx->mdata.bounce = g_malloc(mlen); - - qemu_iovec_init(&ctx->mdata.iov, 1); - qemu_iovec_add(&ctx->mdata.iov, ctx->mdata.bounce, mlen); - - if (!(pract && ns->lbaf.ms == 8)) { - status = nvme_bounce_mdata(n, ctx->mdata.bounce, ctx->mdata.iov.size, - NVME_TX_DIRECTION_TO_DEVICE, req); - if (status) { - goto err; - } - } - - status = nvme_check_prinfo(ns, ctrl, slba, reftag); - if (status) { - goto err; - } - - if (pract) { - /* splice generated protection information into the buffer */ - nvme_dif_pract_generate_dif(ns, ctx->data.bounce, ctx->data.iov.size, - ctx->mdata.bounce, ctx->mdata.iov.size, - apptag, reftag); - } else { - status = nvme_dif_check(ns, ctx->data.bounce, ctx->data.iov.size, - ctx->mdata.bounce, ctx->mdata.iov.size, ctrl, - slba, apptag, appmask, reftag); - if (status) { - goto err; - } - } - - block_acct_start(blk_get_stats(blk), &req->acct, ctx->data.iov.size, - BLOCK_ACCT_WRITE); - - req->aiocb = blk_aio_pwritev(ns->blkconf.blk, offset, &ctx->data.iov, 0, - nvme_dif_rw_mdata_out_cb, ctx); - - return NVME_NO_COMPLETE; - -err: - qemu_iovec_destroy(&ctx->data.iov); - g_free(ctx->data.bounce); - - qemu_iovec_destroy(&ctx->mdata.iov); - g_free(ctx->mdata.bounce); - - g_free(ctx); - - return status; -} diff --git a/hw/block/nvme-ns.c b/hw/block/nvme-ns.c deleted file mode 100644 index 992e5a13f5..0000000000 --- a/hw/block/nvme-ns.c +++ /dev/null @@ -1,580 +0,0 @@ -/* - * QEMU NVM Express Virtual Namespace - * - * Copyright (c) 2019 CNEX Labs - * Copyright (c) 2020 Samsung Electronics - * - * Authors: - * Klaus Jensen <k.jensen@samsung.com> - * - * This work is licensed under the terms of the GNU GPL, version 2. See the - * COPYING file in the top-level directory. 
- * - */ - -#include "qemu/osdep.h" -#include "qemu/units.h" -#include "qemu/error-report.h" -#include "qapi/error.h" -#include "sysemu/sysemu.h" -#include "sysemu/block-backend.h" - -#include "nvme.h" -#include "trace.h" - -#define MIN_DISCARD_GRANULARITY (4 * KiB) -#define NVME_DEFAULT_ZONE_SIZE (128 * MiB) - -void nvme_ns_init_format(NvmeNamespace *ns) -{ - NvmeIdNs *id_ns = &ns->id_ns; - BlockDriverInfo bdi; - int npdg, nlbas, ret; - - ns->lbaf = id_ns->lbaf[NVME_ID_NS_FLBAS_INDEX(id_ns->flbas)]; - ns->lbasz = 1 << ns->lbaf.ds; - - nlbas = ns->size / (ns->lbasz + ns->lbaf.ms); - - id_ns->nsze = cpu_to_le64(nlbas); - - /* no thin provisioning */ - id_ns->ncap = id_ns->nsze; - id_ns->nuse = id_ns->ncap; - - ns->moff = (int64_t)nlbas << ns->lbaf.ds; - - npdg = ns->blkconf.discard_granularity / ns->lbasz; - - ret = bdrv_get_info(blk_bs(ns->blkconf.blk), &bdi); - if (ret >= 0 && bdi.cluster_size > ns->blkconf.discard_granularity) { - npdg = bdi.cluster_size / ns->lbasz; - } - - id_ns->npda = id_ns->npdg = npdg - 1; -} - -static int nvme_ns_init(NvmeNamespace *ns, Error **errp) -{ - NvmeIdNs *id_ns = &ns->id_ns; - uint8_t ds; - uint16_t ms; - int i; - - ns->csi = NVME_CSI_NVM; - ns->status = 0x0; - - ns->id_ns.dlfeat = 0x1; - - /* support DULBE and I/O optimization fields */ - id_ns->nsfeat |= (0x4 | 0x10); - - if (ns->params.shared) { - id_ns->nmic |= NVME_NMIC_NS_SHARED; - } - - /* simple copy */ - id_ns->mssrl = cpu_to_le16(ns->params.mssrl); - id_ns->mcl = cpu_to_le32(ns->params.mcl); - id_ns->msrc = ns->params.msrc; - - ds = 31 - clz32(ns->blkconf.logical_block_size); - ms = ns->params.ms; - - if (ns->params.ms) { - id_ns->mc = 0x3; - - if (ns->params.mset) { - id_ns->flbas |= 0x10; - } - - id_ns->dpc = 0x1f; - id_ns->dps = ((ns->params.pil & 0x1) << 3) | ns->params.pi; - - NvmeLBAF lbaf[16] = { - [0] = { .ds = 9 }, - [1] = { .ds = 9, .ms = 8 }, - [2] = { .ds = 9, .ms = 16 }, - [3] = { .ds = 9, .ms = 64 }, - [4] = { .ds = 12 }, - [5] = { .ds = 12, .ms = 8 }, - [6] = { .ds = 12, .ms = 16 }, - [7] = { .ds = 12, .ms = 64 }, - }; - - memcpy(&id_ns->lbaf, &lbaf, sizeof(lbaf)); - id_ns->nlbaf = 7; - } else { - NvmeLBAF lbaf[16] = { - [0] = { .ds = 9 }, - [1] = { .ds = 12 }, - }; - - memcpy(&id_ns->lbaf, &lbaf, sizeof(lbaf)); - id_ns->nlbaf = 1; - } - - for (i = 0; i <= id_ns->nlbaf; i++) { - NvmeLBAF *lbaf = &id_ns->lbaf[i]; - if (lbaf->ds == ds) { - if (lbaf->ms == ms) { - id_ns->flbas |= i; - goto lbaf_found; - } - } - } - - /* add non-standard lba format */ - id_ns->nlbaf++; - id_ns->lbaf[id_ns->nlbaf].ds = ds; - id_ns->lbaf[id_ns->nlbaf].ms = ms; - id_ns->flbas |= id_ns->nlbaf; - -lbaf_found: - nvme_ns_init_format(ns); - - return 0; -} - -static int nvme_ns_init_blk(NvmeNamespace *ns, Error **errp) -{ - bool read_only; - - if (!blkconf_blocksizes(&ns->blkconf, errp)) { - return -1; - } - - read_only = !blk_supports_write_perm(ns->blkconf.blk); - if (!blkconf_apply_backend_options(&ns->blkconf, read_only, false, errp)) { - return -1; - } - - if (ns->blkconf.discard_granularity == -1) { - ns->blkconf.discard_granularity = - MAX(ns->blkconf.logical_block_size, MIN_DISCARD_GRANULARITY); - } - - ns->size = blk_getlength(ns->blkconf.blk); - if (ns->size < 0) { - error_setg_errno(errp, -ns->size, "could not get blockdev size"); - return -1; - } - - return 0; -} - -static int nvme_ns_zoned_check_calc_geometry(NvmeNamespace *ns, Error **errp) -{ - uint64_t zone_size, zone_cap; - - /* Make sure that the values of ZNS properties are sane */ - if (ns->params.zone_size_bs) { - zone_size = 
ns->params.zone_size_bs;
-    } else {
-        zone_size = NVME_DEFAULT_ZONE_SIZE;
-    }
-    if (ns->params.zone_cap_bs) {
-        zone_cap = ns->params.zone_cap_bs;
-    } else {
-        zone_cap = zone_size;
-    }
-    if (zone_cap > zone_size) {
-        error_setg(errp, "zone capacity %"PRIu64"B exceeds "
-                   "zone size %"PRIu64"B", zone_cap, zone_size);
-        return -1;
-    }
-    if (zone_size < ns->lbasz) {
-        error_setg(errp, "zone size %"PRIu64"B too small, "
-                   "must be at least %zuB", zone_size, ns->lbasz);
-        return -1;
-    }
-    if (zone_cap < ns->lbasz) {
-        error_setg(errp, "zone capacity %"PRIu64"B too small, "
-                   "must be at least %zuB", zone_cap, ns->lbasz);
-        return -1;
-    }
-
-    /*
-     * Save the main zone geometry values to avoid
-     * recalculating them later.
-     */
-    ns->zone_size = zone_size / ns->lbasz;
-    ns->zone_capacity = zone_cap / ns->lbasz;
-    ns->num_zones = le64_to_cpu(ns->id_ns.nsze) / ns->zone_size;
-
-    /* Do a few more sanity checks of ZNS properties */
-    if (!ns->num_zones) {
-        error_setg(errp,
-                   "insufficient drive capacity, must be at least the size "
-                   "of one zone (%"PRIu64"B)", zone_size);
-        return -1;
-    }
-
-    return 0;
-}
-
-static void nvme_ns_zoned_init_state(NvmeNamespace *ns)
-{
-    uint64_t start = 0, zone_size = ns->zone_size;
-    uint64_t capacity = ns->num_zones * zone_size;
-    NvmeZone *zone;
-    int i;
-
-    ns->zone_array = g_new0(NvmeZone, ns->num_zones);
-    if (ns->params.zd_extension_size) {
-        ns->zd_extensions = g_malloc0(ns->params.zd_extension_size *
-                                      ns->num_zones);
-    }
-
-    QTAILQ_INIT(&ns->exp_open_zones);
-    QTAILQ_INIT(&ns->imp_open_zones);
-    QTAILQ_INIT(&ns->closed_zones);
-    QTAILQ_INIT(&ns->full_zones);
-
-    zone = ns->zone_array;
-    for (i = 0; i < ns->num_zones; i++, zone++) {
-        if (start + zone_size > capacity) {
-            zone_size = capacity - start;
-        }
-        zone->d.zt = NVME_ZONE_TYPE_SEQ_WRITE;
-        nvme_set_zone_state(zone, NVME_ZONE_STATE_EMPTY);
-        zone->d.za = 0;
-        zone->d.zcap = ns->zone_capacity;
-        zone->d.zslba = start;
-        zone->d.wp = start;
-        zone->w_ptr = start;
-        start += zone_size;
-    }
-
-    ns->zone_size_log2 = 0;
-    if (is_power_of_2(ns->zone_size)) {
-        ns->zone_size_log2 = 63 - clz64(ns->zone_size);
-    }
-}
-
-static void nvme_ns_init_zoned(NvmeNamespace *ns)
-{
-    NvmeIdNsZoned *id_ns_z;
-    int i;
-
-    nvme_ns_zoned_init_state(ns);
-
-    id_ns_z = g_malloc0(sizeof(NvmeIdNsZoned));
-
-    /* MAR/MOR are zeroes-based, FFFFFFFFh means no limit */
-    id_ns_z->mar = cpu_to_le32(ns->params.max_active_zones - 1);
-    id_ns_z->mor = cpu_to_le32(ns->params.max_open_zones - 1);
-    id_ns_z->zoc = 0;
-    id_ns_z->ozcs = ns->params.cross_zone_read ? 0x01 : 0x00;
-
-    for (i = 0; i <= ns->id_ns.nlbaf; i++) {
-        id_ns_z->lbafe[i].zsze = cpu_to_le64(ns->zone_size);
-        id_ns_z->lbafe[i].zdes =
-            ns->params.zd_extension_size >> 6; /* Units of 64B */
-    }
-
-    ns->csi = NVME_CSI_ZONED;
-    ns->id_ns.nsze = cpu_to_le64(ns->num_zones * ns->zone_size);
-    ns->id_ns.ncap = ns->id_ns.nsze;
-    ns->id_ns.nuse = ns->id_ns.ncap;
-
-    /*
-     * The device uses the BDRV_BLOCK_ZERO flag to determine the "deallocated"
-     * status of logical blocks. Since the spec defines that logical blocks
-     * SHALL be deallocated when the zone is in the Empty or Offline states,
-     * we can only support DULBE if the zone size is a multiple of the
-     * calculated NPDG.
- */ - if (ns->zone_size % (ns->id_ns.npdg + 1)) { - warn_report("the zone size (%"PRIu64" blocks) is not a multiple of " - "the calculated deallocation granularity (%d blocks); " - "DULBE support disabled", - ns->zone_size, ns->id_ns.npdg + 1); - - ns->id_ns.nsfeat &= ~0x4; - } - - ns->id_ns_zoned = id_ns_z; -} - -static void nvme_clear_zone(NvmeNamespace *ns, NvmeZone *zone) -{ - uint8_t state; - - zone->w_ptr = zone->d.wp; - state = nvme_get_zone_state(zone); - if (zone->d.wp != zone->d.zslba || - (zone->d.za & NVME_ZA_ZD_EXT_VALID)) { - if (state != NVME_ZONE_STATE_CLOSED) { - trace_pci_nvme_clear_ns_close(state, zone->d.zslba); - nvme_set_zone_state(zone, NVME_ZONE_STATE_CLOSED); - } - nvme_aor_inc_active(ns); - QTAILQ_INSERT_HEAD(&ns->closed_zones, zone, entry); - } else { - trace_pci_nvme_clear_ns_reset(state, zone->d.zslba); - nvme_set_zone_state(zone, NVME_ZONE_STATE_EMPTY); - } -} - -/* - * Close all the zones that are currently open. - */ -static void nvme_zoned_ns_shutdown(NvmeNamespace *ns) -{ - NvmeZone *zone, *next; - - QTAILQ_FOREACH_SAFE(zone, &ns->closed_zones, entry, next) { - QTAILQ_REMOVE(&ns->closed_zones, zone, entry); - nvme_aor_dec_active(ns); - nvme_clear_zone(ns, zone); - } - QTAILQ_FOREACH_SAFE(zone, &ns->imp_open_zones, entry, next) { - QTAILQ_REMOVE(&ns->imp_open_zones, zone, entry); - nvme_aor_dec_open(ns); - nvme_aor_dec_active(ns); - nvme_clear_zone(ns, zone); - } - QTAILQ_FOREACH_SAFE(zone, &ns->exp_open_zones, entry, next) { - QTAILQ_REMOVE(&ns->exp_open_zones, zone, entry); - nvme_aor_dec_open(ns); - nvme_aor_dec_active(ns); - nvme_clear_zone(ns, zone); - } - - assert(ns->nr_open_zones == 0); -} - -static int nvme_ns_check_constraints(NvmeCtrl *n, NvmeNamespace *ns, - Error **errp) -{ - if (!ns->blkconf.blk) { - error_setg(errp, "block backend not configured"); - return -1; - } - - if (ns->params.pi && ns->params.ms < 8) { - error_setg(errp, "at least 8 bytes of metadata required to enable " - "protection information"); - return -1; - } - - if (ns->params.nsid > NVME_MAX_NAMESPACES) { - error_setg(errp, "invalid namespace id (must be between 0 and %d)", - NVME_MAX_NAMESPACES); - return -1; - } - - if (!n->subsys) { - if (ns->params.detached) { - error_setg(errp, "detached requires that the nvme device is " - "linked to an nvme-subsys device"); - return -1; - } - - if (ns->params.shared) { - error_setg(errp, "shared requires that the nvme device is " - "linked to an nvme-subsys device"); - return -1; - } - } - - if (ns->params.zoned) { - if (ns->params.max_active_zones) { - if (ns->params.max_open_zones > ns->params.max_active_zones) { - error_setg(errp, "max_open_zones (%u) exceeds " - "max_active_zones (%u)", ns->params.max_open_zones, - ns->params.max_active_zones); - return -1; - } - - if (!ns->params.max_open_zones) { - ns->params.max_open_zones = ns->params.max_active_zones; - } - } - - if (ns->params.zd_extension_size) { - if (ns->params.zd_extension_size & 0x3f) { - error_setg(errp, "zone descriptor extension size must be a " - "multiple of 64B"); - return -1; - } - if ((ns->params.zd_extension_size >> 6) > 0xff) { - error_setg(errp, - "zone descriptor extension size is too large"); - return -1; - } - } - } - - return 0; -} - -int nvme_ns_setup(NvmeCtrl *n, NvmeNamespace *ns, Error **errp) -{ - if (nvme_ns_check_constraints(n, ns, errp)) { - return -1; - } - - if (nvme_ns_init_blk(ns, errp)) { - return -1; - } - - if (nvme_ns_init(ns, errp)) { - return -1; - } - if (ns->params.zoned) { - if (nvme_ns_zoned_check_calc_geometry(ns, errp) != 0) { - 
return -1; - } - nvme_ns_init_zoned(ns); - } - - return 0; -} - -void nvme_ns_drain(NvmeNamespace *ns) -{ - blk_drain(ns->blkconf.blk); -} - -void nvme_ns_shutdown(NvmeNamespace *ns) -{ - blk_flush(ns->blkconf.blk); - if (ns->params.zoned) { - nvme_zoned_ns_shutdown(ns); - } -} - -void nvme_ns_cleanup(NvmeNamespace *ns) -{ - if (ns->params.zoned) { - g_free(ns->id_ns_zoned); - g_free(ns->zone_array); - g_free(ns->zd_extensions); - } -} - -static void nvme_ns_realize(DeviceState *dev, Error **errp) -{ - NvmeNamespace *ns = NVME_NS(dev); - BusState *s = qdev_get_parent_bus(dev); - NvmeCtrl *n = NVME(s->parent); - NvmeSubsystem *subsys = n->subsys; - uint32_t nsid = ns->params.nsid; - int i; - - if (nvme_ns_setup(n, ns, errp)) { - return; - } - - if (!nsid) { - for (i = 1; i <= NVME_MAX_NAMESPACES; i++) { - if (nvme_ns(n, i) || nvme_subsys_ns(subsys, i)) { - continue; - } - - nsid = ns->params.nsid = i; - break; - } - - if (!nsid) { - error_setg(errp, "no free namespace id"); - return; - } - } else { - if (nvme_ns(n, nsid) || nvme_subsys_ns(subsys, nsid)) { - error_setg(errp, "namespace id '%d' already allocated", nsid); - return; - } - } - - if (subsys) { - subsys->namespaces[nsid] = ns; - - if (ns->params.detached) { - return; - } - - if (ns->params.shared) { - for (i = 0; i < ARRAY_SIZE(subsys->ctrls); i++) { - NvmeCtrl *ctrl = subsys->ctrls[i]; - - if (ctrl) { - nvme_attach_ns(ctrl, ns); - } - } - - return; - } - } - - nvme_attach_ns(n, ns); -} - -static Property nvme_ns_props[] = { - DEFINE_BLOCK_PROPERTIES(NvmeNamespace, blkconf), - DEFINE_PROP_BOOL("detached", NvmeNamespace, params.detached, false), - DEFINE_PROP_BOOL("shared", NvmeNamespace, params.shared, false), - DEFINE_PROP_UINT32("nsid", NvmeNamespace, params.nsid, 0), - DEFINE_PROP_UUID("uuid", NvmeNamespace, params.uuid), - DEFINE_PROP_UINT16("ms", NvmeNamespace, params.ms, 0), - DEFINE_PROP_UINT8("mset", NvmeNamespace, params.mset, 0), - DEFINE_PROP_UINT8("pi", NvmeNamespace, params.pi, 0), - DEFINE_PROP_UINT8("pil", NvmeNamespace, params.pil, 0), - DEFINE_PROP_UINT16("mssrl", NvmeNamespace, params.mssrl, 128), - DEFINE_PROP_UINT32("mcl", NvmeNamespace, params.mcl, 128), - DEFINE_PROP_UINT8("msrc", NvmeNamespace, params.msrc, 127), - DEFINE_PROP_BOOL("zoned", NvmeNamespace, params.zoned, false), - DEFINE_PROP_SIZE("zoned.zone_size", NvmeNamespace, params.zone_size_bs, - NVME_DEFAULT_ZONE_SIZE), - DEFINE_PROP_SIZE("zoned.zone_capacity", NvmeNamespace, params.zone_cap_bs, - 0), - DEFINE_PROP_BOOL("zoned.cross_read", NvmeNamespace, - params.cross_zone_read, false), - DEFINE_PROP_UINT32("zoned.max_active", NvmeNamespace, - params.max_active_zones, 0), - DEFINE_PROP_UINT32("zoned.max_open", NvmeNamespace, - params.max_open_zones, 0), - DEFINE_PROP_UINT32("zoned.descr_ext_size", NvmeNamespace, - params.zd_extension_size, 0), - DEFINE_PROP_END_OF_LIST(), -}; - -static void nvme_ns_class_init(ObjectClass *oc, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(oc); - - set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); - - dc->bus_type = TYPE_NVME_BUS; - dc->realize = nvme_ns_realize; - device_class_set_props(dc, nvme_ns_props); - dc->desc = "Virtual NVMe namespace"; -} - -static void nvme_ns_instance_init(Object *obj) -{ - NvmeNamespace *ns = NVME_NS(obj); - char *bootindex = g_strdup_printf("/namespace@%d,0", ns->params.nsid); - - device_add_bootindex_property(obj, &ns->bootindex, "bootindex", - bootindex, DEVICE(obj)); - - g_free(bootindex); -} - -static const TypeInfo nvme_ns_info = { - .name = TYPE_NVME_NS, - .parent = TYPE_DEVICE, - 
.class_init = nvme_ns_class_init,
-    .instance_size = sizeof(NvmeNamespace),
-    .instance_init = nvme_ns_instance_init,
-};
-
-static void nvme_ns_register_types(void)
-{
-    type_register_static(&nvme_ns_info);
-}
-
-type_init(nvme_ns_register_types)
diff --git a/hw/block/nvme-subsys.c b/hw/block/nvme-subsys.c
deleted file mode 100644
index 192223d17c..0000000000
--- a/hw/block/nvme-subsys.c
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * QEMU NVM Express Subsystem: nvme-subsys
- *
- * Copyright (c) 2021 Minwoo Im <minwoo.im.dev@gmail.com>
- *
- * This code is licensed under the GNU GPL v2. Refer to COPYING.
- */
-
-#include "qemu/osdep.h"
-#include "qapi/error.h"
-
-#include "nvme.h"
-
-int nvme_subsys_register_ctrl(NvmeCtrl *n, Error **errp)
-{
-    NvmeSubsystem *subsys = n->subsys;
-    int cntlid;
-
-    for (cntlid = 0; cntlid < ARRAY_SIZE(subsys->ctrls); cntlid++) {
-        if (!subsys->ctrls[cntlid]) {
-            break;
-        }
-    }
-
-    if (cntlid == ARRAY_SIZE(subsys->ctrls)) {
-        error_setg(errp, "no more free controller id");
-        return -1;
-    }
-
-    subsys->ctrls[cntlid] = n;
-
-    return cntlid;
-}
-
-static void nvme_subsys_setup(NvmeSubsystem *subsys)
-{
-    const char *nqn = subsys->params.nqn ?
-        subsys->params.nqn : subsys->parent_obj.id;
-
-    snprintf((char *)subsys->subnqn, sizeof(subsys->subnqn),
-             "nqn.2019-08.org.qemu:%s", nqn);
-}
-
-static void nvme_subsys_realize(DeviceState *dev, Error **errp)
-{
-    NvmeSubsystem *subsys = NVME_SUBSYS(dev);
-
-    nvme_subsys_setup(subsys);
-}
-
-static Property nvme_subsystem_props[] = {
-    DEFINE_PROP_STRING("nqn", NvmeSubsystem, params.nqn),
-    DEFINE_PROP_END_OF_LIST(),
-};
-
-static void nvme_subsys_class_init(ObjectClass *oc, void *data)
-{
-    DeviceClass *dc = DEVICE_CLASS(oc);
-
-    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
-
-    dc->realize = nvme_subsys_realize;
-    dc->desc = "Virtual NVMe subsystem";
-
-    device_class_set_props(dc, nvme_subsystem_props);
-}
-
-static const TypeInfo nvme_subsys_info = {
-    .name = TYPE_NVME_SUBSYS,
-    .parent = TYPE_DEVICE,
-    .class_init = nvme_subsys_class_init,
-    .instance_size = sizeof(NvmeSubsystem),
-};
-
-static void nvme_subsys_register_types(void)
-{
-    type_register_static(&nvme_subsys_info);
-}
-
-type_init(nvme_subsys_register_types)
diff --git a/hw/block/nvme.c b/hw/block/nvme.c
deleted file mode 100644
index 0bcaf7192f..0000000000
--- a/hw/block/nvme.c
+++ /dev/null
@@ -1,6365 +0,0 @@
-/*
- * QEMU NVM Express Controller
- *
- * Copyright (c) 2012, Intel Corporation
- *
- * Written by Keith Busch <keith.busch@intel.com>
- *
- * This code is licensed under the GNU GPL v2 or later.
- */
-
-/**
- * Reference Specs: http://www.nvmexpress.org, 1.4, 1.3, 1.2, 1.1, 1.0e
- *
- *   https://nvmexpress.org/developers/nvme-specification/
- *
- *
- * Notes on coding style
- * ---------------------
- * While QEMU coding style prefers lowercase hexadecimals in constants, the
- * NVMe subsystem uses the format from the NVMe specifications in the comments
- * (i.e. 'h' suffix instead of '0x' prefix).
- *
- * Usage
- * -----
- * See docs/system/nvme.rst for extensive documentation.
- *
- * Add options:
- *      -drive file=<file>,if=none,id=<drive_id>
- *      -device nvme-subsys,id=<subsys_id>,nqn=<nqn_id>
- *      -device nvme,serial=<serial>,id=<bus_name>, \
- *              cmb_size_mb=<cmb_size_mb[optional]>, \
- *              [pmrdev=<mem_backend_file_id>,] \
- *              max_ioqpairs=<N[optional]>, \
- *              aerl=<N[optional]>,aer_max_queued=<N[optional]>, \
- *              mdts=<N[optional]>,vsl=<N[optional]>, \
- *              zoned.zasl=<N[optional]>, \
- *              subsys=<subsys_id>
- *      -device nvme-ns,drive=<drive_id>,bus=<bus_name>,nsid=<nsid>,\
- *              zoned=<true|false[optional]>, \
- *              subsys=<subsys_id>,detached=<true|false[optional]>
- *
- * Note that cmb_size_mb denotes the size of the CMB in MB. The CMB is assumed
- * to be at offset 0 in BAR2 and supports only WDS, RDS and SQS for now. By
- * default, the device will use the "v1.4 CMB scheme" - use the `legacy-cmb`
- * parameter to always enable the CMBLOC and CMBSZ registers (v1.3 behavior).
- *
- * Enabling pmr emulation can be achieved by pointing to memory-backend-file.
- * For example:
- * -object memory-backend-file,id=<mem_id>,share=on,mem-path=<file_path>, \
- *  size=<size> .... -device nvme,...,pmrdev=<mem_id>
- *
- * The PMR will use BAR 4/5 exclusively.
- *
- * To place controller(s) and namespace(s) in a subsystem, provide the
- * nvme-subsys device as above.
- *
- * nvme subsystem device parameters
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * - `nqn`
- *   This parameter provides the `<nqn_id>` part of the string
- *   `nqn.2019-08.org.qemu:<nqn_id>` which will be reported in the SUBNQN field
- *   of subsystem controllers. Note that `<nqn_id>` should be unique per
- *   subsystem, but this is not enforced by QEMU. If not specified, it will
- *   default to the value of the `id` parameter (`<subsys_id>`).
- *
- * nvme device parameters
- * ~~~~~~~~~~~~~~~~~~~~~~
- * - `subsys`
- *   Specifying this parameter attaches the controller to the subsystem and
- *   the SUBNQN field in the controller will report the NQN of the subsystem
- *   device. This also enables multi-controller capability represented in the
- *   Identify Controller data structure in CMIC (Controller Multi-path I/O and
- *   Namespace Sharing Capabilities).
- *
- * - `aerl`
- *   The Asynchronous Event Request Limit (AERL). Indicates the maximum number
- *   of concurrently outstanding Asynchronous Event Request commands supported
- *   by the controller. This is a 0's based value.
- *
- * - `aer_max_queued`
- *   This is the maximum number of events that the device will enqueue for
- *   completion when there are no outstanding AERs. When the maximum number of
- *   enqueued events is reached, subsequent events will be dropped.
- *
- * - `mdts`
- *   Indicates the maximum data transfer size for a command that transfers data
- *   between host-accessible memory and the controller. The value is specified
- *   as a power of two (2^n) and is in units of the minimum memory page size
- *   (CAP.MPSMIN). The default value is 7 (i.e. 512 KiB).
- *
- * - `vsl`
- *   Indicates the maximum data size limit for the Verify command. Like `mdts`,
- *   this value is specified as a power of two (2^n) and is in units of the
- *   minimum memory page size (CAP.MPSMIN). The default value is 7 (i.e. 512
- *   KiB).
- *
- * - `zoned.zasl`
- *   Indicates the maximum data transfer size for the Zone Append command. Like
- *   `mdts`, the value is specified as a power of two (2^n) and is in units of
- *   the minimum memory page size (CAP.MPSMIN). The default value is 0 (i.e.
- *   defaulting to the value of `mdts`).
- *
- * nvme namespace device parameters
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * - `shared`
- *   When the parent nvme device (as defined explicitly by the 'bus' parameter
- *   or implicitly by the most recently defined NvmeBus) is linked to an
- *   nvme-subsys device, the namespace will be attached to all controllers in
- *   the subsystem. If set to 'off' (the default), the namespace will remain a
- *   private namespace and may only be attached to a single controller at a
- *   time.
- *
- * - `detached`
- *   This parameter is only valid together with the `subsys` parameter. If left
- *   at the default value (`false/off`), the namespace will be attached to all
- *   controllers in the NVMe subsystem at boot-up. If set to `true/on`, the
- *   namespace will be available in the subsystem but not attached to any
- *   controllers.
- *
- * Setting `zoned` to true selects the Zoned Command Set for the namespace.
- * In this case, the following namespace properties are available to configure
- * zoned operation:
- *     zoned.zone_size=<zone size in bytes, default: 128MiB>
- *         The number may be followed by K, M, G as in kilo-, mega- or giga-.
- *
- *     zoned.zone_capacity=<zone capacity in bytes, default: zone size>
- *         The value 0 (default) forces zone capacity to be the same as zone
- *         size. The value of this property may not exceed zone size.
- *
- *     zoned.descr_ext_size=<zone descriptor extension size, default 0>
- *         This value needs to be specified in 64B units. If it is zero,
- *         namespace(s) will not support zone descriptor extensions.
- *
- *     zoned.max_active=<Maximum Active Resources (zones), default: 0>
- *         The default value means there is no limit to the number of
- *         concurrently active zones.
- *
- *     zoned.max_open=<Maximum Open Resources (zones), default: 0>
- *         The default value means there is no limit to the number of
- *         concurrently open zones.
- *
- *     zoned.cross_read=<enable RAZB, default: false>
- *         Setting this property to true enables Read Across Zone Boundaries.
- */
-
-#include "qemu/osdep.h"
-#include "qemu/cutils.h"
-#include "qemu/error-report.h"
-#include "qemu/log.h"
-#include "qemu/units.h"
-#include "qapi/error.h"
-#include "qapi/visitor.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/block-backend.h"
-#include "sysemu/hostmem.h"
-#include "hw/pci/msix.h"
-#include "migration/vmstate.h"
-
-#include "nvme.h"
-#include "trace.h"
-
-#define NVME_MAX_IOQPAIRS 0xffff
-#define NVME_DB_SIZE 4
-#define NVME_SPEC_VER 0x00010400
-#define NVME_CMB_BIR 2
-#define NVME_PMR_BIR 4
-#define NVME_TEMPERATURE 0x143
-#define NVME_TEMPERATURE_WARNING 0x157
-#define NVME_TEMPERATURE_CRITICAL 0x175
-#define NVME_NUM_FW_SLOTS 1
-#define NVME_DEFAULT_MAX_ZA_SIZE (128 * KiB)
-
-#define NVME_GUEST_ERR(trace, fmt, ...)
\ - do { \ - (trace_##trace)(__VA_ARGS__); \ - qemu_log_mask(LOG_GUEST_ERROR, #trace \ - " in %s: " fmt "\n", __func__, ## __VA_ARGS__); \ - } while (0) - -static const bool nvme_feature_support[NVME_FID_MAX] = { - [NVME_ARBITRATION] = true, - [NVME_POWER_MANAGEMENT] = true, - [NVME_TEMPERATURE_THRESHOLD] = true, - [NVME_ERROR_RECOVERY] = true, - [NVME_VOLATILE_WRITE_CACHE] = true, - [NVME_NUMBER_OF_QUEUES] = true, - [NVME_INTERRUPT_COALESCING] = true, - [NVME_INTERRUPT_VECTOR_CONF] = true, - [NVME_WRITE_ATOMICITY] = true, - [NVME_ASYNCHRONOUS_EVENT_CONF] = true, - [NVME_TIMESTAMP] = true, - [NVME_COMMAND_SET_PROFILE] = true, -}; - -static const uint32_t nvme_feature_cap[NVME_FID_MAX] = { - [NVME_TEMPERATURE_THRESHOLD] = NVME_FEAT_CAP_CHANGE, - [NVME_ERROR_RECOVERY] = NVME_FEAT_CAP_CHANGE | NVME_FEAT_CAP_NS, - [NVME_VOLATILE_WRITE_CACHE] = NVME_FEAT_CAP_CHANGE, - [NVME_NUMBER_OF_QUEUES] = NVME_FEAT_CAP_CHANGE, - [NVME_ASYNCHRONOUS_EVENT_CONF] = NVME_FEAT_CAP_CHANGE, - [NVME_TIMESTAMP] = NVME_FEAT_CAP_CHANGE, - [NVME_COMMAND_SET_PROFILE] = NVME_FEAT_CAP_CHANGE, -}; - -static const uint32_t nvme_cse_acs[256] = { - [NVME_ADM_CMD_DELETE_SQ] = NVME_CMD_EFF_CSUPP, - [NVME_ADM_CMD_CREATE_SQ] = NVME_CMD_EFF_CSUPP, - [NVME_ADM_CMD_GET_LOG_PAGE] = NVME_CMD_EFF_CSUPP, - [NVME_ADM_CMD_DELETE_CQ] = NVME_CMD_EFF_CSUPP, - [NVME_ADM_CMD_CREATE_CQ] = NVME_CMD_EFF_CSUPP, - [NVME_ADM_CMD_IDENTIFY] = NVME_CMD_EFF_CSUPP, - [NVME_ADM_CMD_ABORT] = NVME_CMD_EFF_CSUPP, - [NVME_ADM_CMD_SET_FEATURES] = NVME_CMD_EFF_CSUPP, - [NVME_ADM_CMD_GET_FEATURES] = NVME_CMD_EFF_CSUPP, - [NVME_ADM_CMD_ASYNC_EV_REQ] = NVME_CMD_EFF_CSUPP, - [NVME_ADM_CMD_NS_ATTACHMENT] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_NIC, - [NVME_ADM_CMD_FORMAT_NVM] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, -}; - -static const uint32_t nvme_cse_iocs_none[256]; - -static const uint32_t nvme_cse_iocs_nvm[256] = { - [NVME_CMD_FLUSH] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, - [NVME_CMD_WRITE_ZEROES] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, - [NVME_CMD_WRITE] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, - [NVME_CMD_READ] = NVME_CMD_EFF_CSUPP, - [NVME_CMD_DSM] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, - [NVME_CMD_VERIFY] = NVME_CMD_EFF_CSUPP, - [NVME_CMD_COPY] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, - [NVME_CMD_COMPARE] = NVME_CMD_EFF_CSUPP, -}; - -static const uint32_t nvme_cse_iocs_zoned[256] = { - [NVME_CMD_FLUSH] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, - [NVME_CMD_WRITE_ZEROES] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, - [NVME_CMD_WRITE] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, - [NVME_CMD_READ] = NVME_CMD_EFF_CSUPP, - [NVME_CMD_DSM] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, - [NVME_CMD_VERIFY] = NVME_CMD_EFF_CSUPP, - [NVME_CMD_COPY] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, - [NVME_CMD_COMPARE] = NVME_CMD_EFF_CSUPP, - [NVME_CMD_ZONE_APPEND] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, - [NVME_CMD_ZONE_MGMT_SEND] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, - [NVME_CMD_ZONE_MGMT_RECV] = NVME_CMD_EFF_CSUPP, -}; - -static void nvme_process_sq(void *opaque); - -static uint16_t nvme_sqid(NvmeRequest *req) -{ - return le16_to_cpu(req->sq->sqid); -} - -static void nvme_assign_zone_state(NvmeNamespace *ns, NvmeZone *zone, - NvmeZoneState state) -{ - if (QTAILQ_IN_USE(zone, entry)) { - switch (nvme_get_zone_state(zone)) { - case NVME_ZONE_STATE_EXPLICITLY_OPEN: - QTAILQ_REMOVE(&ns->exp_open_zones, zone, entry); - break; - case NVME_ZONE_STATE_IMPLICITLY_OPEN: - QTAILQ_REMOVE(&ns->imp_open_zones, zone, entry); - break; - case NVME_ZONE_STATE_CLOSED: - 
QTAILQ_REMOVE(&ns->closed_zones, zone, entry); - break; - case NVME_ZONE_STATE_FULL: - QTAILQ_REMOVE(&ns->full_zones, zone, entry); - default: - ; - } - } - - nvme_set_zone_state(zone, state); - - switch (state) { - case NVME_ZONE_STATE_EXPLICITLY_OPEN: - QTAILQ_INSERT_TAIL(&ns->exp_open_zones, zone, entry); - break; - case NVME_ZONE_STATE_IMPLICITLY_OPEN: - QTAILQ_INSERT_TAIL(&ns->imp_open_zones, zone, entry); - break; - case NVME_ZONE_STATE_CLOSED: - QTAILQ_INSERT_TAIL(&ns->closed_zones, zone, entry); - break; - case NVME_ZONE_STATE_FULL: - QTAILQ_INSERT_TAIL(&ns->full_zones, zone, entry); - case NVME_ZONE_STATE_READ_ONLY: - break; - default: - zone->d.za = 0; - } -} - -/* - * Check if we can open a zone without exceeding open/active limits. - * AOR stands for "Active and Open Resources" (see TP 4053 section 2.5). - */ -static int nvme_aor_check(NvmeNamespace *ns, uint32_t act, uint32_t opn) -{ - if (ns->params.max_active_zones != 0 && - ns->nr_active_zones + act > ns->params.max_active_zones) { - trace_pci_nvme_err_insuff_active_res(ns->params.max_active_zones); - return NVME_ZONE_TOO_MANY_ACTIVE | NVME_DNR; - } - if (ns->params.max_open_zones != 0 && - ns->nr_open_zones + opn > ns->params.max_open_zones) { - trace_pci_nvme_err_insuff_open_res(ns->params.max_open_zones); - return NVME_ZONE_TOO_MANY_OPEN | NVME_DNR; - } - - return NVME_SUCCESS; -} - -static bool nvme_addr_is_cmb(NvmeCtrl *n, hwaddr addr) -{ - hwaddr hi, lo; - - if (!n->cmb.cmse) { - return false; - } - - lo = n->params.legacy_cmb ? n->cmb.mem.addr : n->cmb.cba; - hi = lo + int128_get64(n->cmb.mem.size); - - return addr >= lo && addr < hi; -} - -static inline void *nvme_addr_to_cmb(NvmeCtrl *n, hwaddr addr) -{ - hwaddr base = n->params.legacy_cmb ? n->cmb.mem.addr : n->cmb.cba; - return &n->cmb.buf[addr - base]; -} - -static bool nvme_addr_is_pmr(NvmeCtrl *n, hwaddr addr) -{ - hwaddr hi; - - if (!n->pmr.cmse) { - return false; - } - - hi = n->pmr.cba + int128_get64(n->pmr.dev->mr.size); - - return addr >= n->pmr.cba && addr < hi; -} - -static inline void *nvme_addr_to_pmr(NvmeCtrl *n, hwaddr addr) -{ - return memory_region_get_ram_ptr(&n->pmr.dev->mr) + (addr - n->pmr.cba); -} - -static int nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size) -{ - hwaddr hi = addr + size - 1; - if (hi < addr) { - return 1; - } - - if (n->bar.cmbsz && nvme_addr_is_cmb(n, addr) && nvme_addr_is_cmb(n, hi)) { - memcpy(buf, nvme_addr_to_cmb(n, addr), size); - return 0; - } - - if (nvme_addr_is_pmr(n, addr) && nvme_addr_is_pmr(n, hi)) { - memcpy(buf, nvme_addr_to_pmr(n, addr), size); - return 0; - } - - return pci_dma_read(&n->parent_obj, addr, buf, size); -} - -static int nvme_addr_write(NvmeCtrl *n, hwaddr addr, void *buf, int size) -{ - hwaddr hi = addr + size - 1; - if (hi < addr) { - return 1; - } - - if (n->bar.cmbsz && nvme_addr_is_cmb(n, addr) && nvme_addr_is_cmb(n, hi)) { - memcpy(nvme_addr_to_cmb(n, addr), buf, size); - return 0; - } - - if (nvme_addr_is_pmr(n, addr) && nvme_addr_is_pmr(n, hi)) { - memcpy(nvme_addr_to_pmr(n, addr), buf, size); - return 0; - } - - return pci_dma_write(&n->parent_obj, addr, buf, size); -} - -static bool nvme_nsid_valid(NvmeCtrl *n, uint32_t nsid) -{ - return nsid && - (nsid == NVME_NSID_BROADCAST || nsid <= NVME_MAX_NAMESPACES); -} - -static int nvme_check_sqid(NvmeCtrl *n, uint16_t sqid) -{ - return sqid < n->params.max_ioqpairs + 1 && n->sq[sqid] != NULL ? 
0 : -1;
-}
-
-static int nvme_check_cqid(NvmeCtrl *n, uint16_t cqid)
-{
-    return cqid < n->params.max_ioqpairs + 1 && n->cq[cqid] != NULL ? 0 : -1;
-}
-
-static void nvme_inc_cq_tail(NvmeCQueue *cq)
-{
-    cq->tail++;
-    if (cq->tail >= cq->size) {
-        cq->tail = 0;
-        cq->phase = !cq->phase;
-    }
-}
-
-static void nvme_inc_sq_head(NvmeSQueue *sq)
-{
-    sq->head = (sq->head + 1) % sq->size;
-}
-
-static uint8_t nvme_cq_full(NvmeCQueue *cq)
-{
-    return (cq->tail + 1) % cq->size == cq->head;
-}
-
-static uint8_t nvme_sq_empty(NvmeSQueue *sq)
-{
-    return sq->head == sq->tail;
-}
-
-static void nvme_irq_check(NvmeCtrl *n)
-{
-    if (msix_enabled(&(n->parent_obj))) {
-        return;
-    }
-    if (~n->bar.intms & n->irq_status) {
-        pci_irq_assert(&n->parent_obj);
-    } else {
-        pci_irq_deassert(&n->parent_obj);
-    }
-}
-
-static void nvme_irq_assert(NvmeCtrl *n, NvmeCQueue *cq)
-{
-    if (cq->irq_enabled) {
-        if (msix_enabled(&(n->parent_obj))) {
-            trace_pci_nvme_irq_msix(cq->vector);
-            msix_notify(&(n->parent_obj), cq->vector);
-        } else {
-            trace_pci_nvme_irq_pin();
-            assert(cq->vector < 32);
-            n->irq_status |= 1 << cq->vector;
-            nvme_irq_check(n);
-        }
-    } else {
-        trace_pci_nvme_irq_masked();
-    }
-}
-
-static void nvme_irq_deassert(NvmeCtrl *n, NvmeCQueue *cq)
-{
-    if (cq->irq_enabled) {
-        if (msix_enabled(&(n->parent_obj))) {
-            return;
-        } else {
-            assert(cq->vector < 32);
-            n->irq_status &= ~(1 << cq->vector);
-            nvme_irq_check(n);
-        }
-    }
-}
-
-static void nvme_req_clear(NvmeRequest *req)
-{
-    req->ns = NULL;
-    req->opaque = NULL;
-    req->aiocb = NULL;
-    memset(&req->cqe, 0x0, sizeof(req->cqe));
-    req->status = NVME_SUCCESS;
-}
-
-static inline void nvme_sg_init(NvmeCtrl *n, NvmeSg *sg, bool dma)
-{
-    if (dma) {
-        pci_dma_sglist_init(&sg->qsg, &n->parent_obj, 0);
-        sg->flags = NVME_SG_DMA;
-    } else {
-        qemu_iovec_init(&sg->iov, 0);
-    }
-
-    sg->flags |= NVME_SG_ALLOC;
-}
-
-static inline void nvme_sg_unmap(NvmeSg *sg)
-{
-    if (!(sg->flags & NVME_SG_ALLOC)) {
-        return;
-    }
-
-    if (sg->flags & NVME_SG_DMA) {
-        qemu_sglist_destroy(&sg->qsg);
-    } else {
-        qemu_iovec_destroy(&sg->iov);
-    }
-
-    memset(sg, 0x0, sizeof(*sg));
-}
-
-/*
- * When metadata is transferred as extended LBAs, the DPTR mapped into `sg`
- * holds both data and metadata. This function splits the data and metadata
- * into two separate QSG/IOVs.
- */
-static void nvme_sg_split(NvmeSg *sg, NvmeNamespace *ns, NvmeSg *data,
-                          NvmeSg *mdata)
-{
-    NvmeSg *dst = data;
-    uint32_t trans_len, count = ns->lbasz;
-    uint64_t offset = 0;
-    bool dma = sg->flags & NVME_SG_DMA;
-    size_t sge_len;
-    size_t sg_len = dma ? sg->qsg.size : sg->iov.size;
-    int sg_idx = 0;
-
-    assert(sg->flags & NVME_SG_ALLOC);
-
-    while (sg_len) {
-        sge_len = dma ? sg->qsg.sg[sg_idx].len : sg->iov.iov[sg_idx].iov_len;
-
-        trans_len = MIN(sg_len, count);
-        trans_len = MIN(trans_len, sge_len - offset);
-
-        if (dst) {
-            if (dma) {
-                qemu_sglist_add(&dst->qsg, sg->qsg.sg[sg_idx].base + offset,
-                                trans_len);
-            } else {
-                qemu_iovec_add(&dst->iov,
-                               sg->iov.iov[sg_idx].iov_base + offset,
-                               trans_len);
-            }
-        }
-
-        sg_len -= trans_len;
-        count -= trans_len;
-        offset += trans_len;
-
-        if (count == 0) {
-            dst = (dst == data) ? mdata : data;
-            count = (dst == data) ? ns->lbasz : ns->lbaf.ms;
-        }
-
-        if (sge_len == offset) {
-            offset = 0;
-            sg_idx++;
-        }
-    }
-}
-
-static uint16_t nvme_map_addr_cmb(NvmeCtrl *n, QEMUIOVector *iov, hwaddr addr,
-                                  size_t len)
-{
-    if (!len) {
-        return NVME_SUCCESS;
-    }
-
-    trace_pci_nvme_map_addr_cmb(addr, len);
-
-    if (!nvme_addr_is_cmb(n, addr) || !nvme_addr_is_cmb(n, addr + len - 1)) {
-        return NVME_DATA_TRAS_ERROR;
-    }
-
-    qemu_iovec_add(iov, nvme_addr_to_cmb(n, addr), len);
-
-    return NVME_SUCCESS;
-}
-
-static uint16_t nvme_map_addr_pmr(NvmeCtrl *n, QEMUIOVector *iov, hwaddr addr,
-                                  size_t len)
-{
-    if (!len) {
-        return NVME_SUCCESS;
-    }
-
-    if (!nvme_addr_is_pmr(n, addr) || !nvme_addr_is_pmr(n, addr + len - 1)) {
-        return NVME_DATA_TRAS_ERROR;
-    }
-
-    qemu_iovec_add(iov, nvme_addr_to_pmr(n, addr), len);
-
-    return NVME_SUCCESS;
-}
-
-static uint16_t nvme_map_addr(NvmeCtrl *n, NvmeSg *sg, hwaddr addr, size_t len)
-{
-    bool cmb = false, pmr = false;
-
-    if (!len) {
-        return NVME_SUCCESS;
-    }
-
-    trace_pci_nvme_map_addr(addr, len);
-
-    if (nvme_addr_is_cmb(n, addr)) {
-        cmb = true;
-    } else if (nvme_addr_is_pmr(n, addr)) {
-        pmr = true;
-    }
-
-    if (cmb || pmr) {
-        if (sg->flags & NVME_SG_DMA) {
-            return NVME_INVALID_USE_OF_CMB | NVME_DNR;
-        }
-
-        if (cmb) {
-            return nvme_map_addr_cmb(n, &sg->iov, addr, len);
-        } else {
-            return nvme_map_addr_pmr(n, &sg->iov, addr, len);
-        }
-    }
-
-    if (!(sg->flags & NVME_SG_DMA)) {
-        return NVME_INVALID_USE_OF_CMB | NVME_DNR;
-    }
-
-    qemu_sglist_add(&sg->qsg, addr, len);
-
-    return NVME_SUCCESS;
-}
-
-static inline bool nvme_addr_is_dma(NvmeCtrl *n, hwaddr addr)
-{
-    return !(nvme_addr_is_cmb(n, addr) || nvme_addr_is_pmr(n, addr));
-}
-
-static uint16_t nvme_map_prp(NvmeCtrl *n, NvmeSg *sg, uint64_t prp1,
-                             uint64_t prp2, uint32_t len)
-{
-    hwaddr trans_len = n->page_size - (prp1 % n->page_size);
-    trans_len = MIN(len, trans_len);
-    int num_prps = (len >> n->page_bits) + 1;
-    uint16_t status;
-    int ret;
-
-    trace_pci_nvme_map_prp(trans_len, len, prp1, prp2, num_prps);
-
-    nvme_sg_init(n, sg, nvme_addr_is_dma(n, prp1));
-
-    status = nvme_map_addr(n, sg, prp1, trans_len);
-    if (status) {
-        goto unmap;
-    }
-
-    len -= trans_len;
-    if (len) {
-        if (len > n->page_size) {
-            uint64_t prp_list[n->max_prp_ents];
-            uint32_t nents, prp_trans;
-            int i = 0;
-
-            /*
-             * The first PRP list entry, pointed to by PRP2, may contain an
-             * offset. Hence, we need to calculate the number of entries
-             * based on that offset.
- */ - nents = (n->page_size - (prp2 & (n->page_size - 1))) >> 3; - prp_trans = MIN(n->max_prp_ents, nents) * sizeof(uint64_t); - ret = nvme_addr_read(n, prp2, (void *)prp_list, prp_trans); - if (ret) { - trace_pci_nvme_err_addr_read(prp2); - status = NVME_DATA_TRAS_ERROR; - goto unmap; - } - while (len != 0) { - uint64_t prp_ent = le64_to_cpu(prp_list[i]); - - if (i == nents - 1 && len > n->page_size) { - if (unlikely(prp_ent & (n->page_size - 1))) { - trace_pci_nvme_err_invalid_prplist_ent(prp_ent); - status = NVME_INVALID_PRP_OFFSET | NVME_DNR; - goto unmap; - } - - i = 0; - nents = (len + n->page_size - 1) >> n->page_bits; - nents = MIN(nents, n->max_prp_ents); - prp_trans = nents * sizeof(uint64_t); - ret = nvme_addr_read(n, prp_ent, (void *)prp_list, - prp_trans); - if (ret) { - trace_pci_nvme_err_addr_read(prp_ent); - status = NVME_DATA_TRAS_ERROR; - goto unmap; - } - prp_ent = le64_to_cpu(prp_list[i]); - } - - if (unlikely(prp_ent & (n->page_size - 1))) { - trace_pci_nvme_err_invalid_prplist_ent(prp_ent); - status = NVME_INVALID_PRP_OFFSET | NVME_DNR; - goto unmap; - } - - trans_len = MIN(len, n->page_size); - status = nvme_map_addr(n, sg, prp_ent, trans_len); - if (status) { - goto unmap; - } - - len -= trans_len; - i++; - } - } else { - if (unlikely(prp2 & (n->page_size - 1))) { - trace_pci_nvme_err_invalid_prp2_align(prp2); - status = NVME_INVALID_PRP_OFFSET | NVME_DNR; - goto unmap; - } - status = nvme_map_addr(n, sg, prp2, len); - if (status) { - goto unmap; - } - } - } - - return NVME_SUCCESS; - -unmap: - nvme_sg_unmap(sg); - return status; -} - -/* - * Map 'nsgld' data descriptors from 'segment'. The function will subtract the - * number of bytes mapped in len. - */ -static uint16_t nvme_map_sgl_data(NvmeCtrl *n, NvmeSg *sg, - NvmeSglDescriptor *segment, uint64_t nsgld, - size_t *len, NvmeCmd *cmd) -{ - dma_addr_t addr, trans_len; - uint32_t dlen; - uint16_t status; - - for (int i = 0; i < nsgld; i++) { - uint8_t type = NVME_SGL_TYPE(segment[i].type); - - switch (type) { - case NVME_SGL_DESCR_TYPE_BIT_BUCKET: - if (cmd->opcode == NVME_CMD_WRITE) { - continue; - } - case NVME_SGL_DESCR_TYPE_DATA_BLOCK: - break; - case NVME_SGL_DESCR_TYPE_SEGMENT: - case NVME_SGL_DESCR_TYPE_LAST_SEGMENT: - return NVME_INVALID_NUM_SGL_DESCRS | NVME_DNR; - default: - return NVME_SGL_DESCR_TYPE_INVALID | NVME_DNR; - } - - dlen = le32_to_cpu(segment[i].len); - - if (!dlen) { - continue; - } - - if (*len == 0) { - /* - * All data has been mapped, but the SGL contains additional - * segments and/or descriptors. The controller might accept - * ignoring the rest of the SGL. - */ - uint32_t sgls = le32_to_cpu(n->id_ctrl.sgls); - if (sgls & NVME_CTRL_SGLS_EXCESS_LENGTH) { - break; - } - - trace_pci_nvme_err_invalid_sgl_excess_length(dlen); - return NVME_DATA_SGL_LEN_INVALID | NVME_DNR; - } - - trans_len = MIN(*len, dlen); - - if (type == NVME_SGL_DESCR_TYPE_BIT_BUCKET) { - goto next; - } - - addr = le64_to_cpu(segment[i].addr); - - if (UINT64_MAX - addr < dlen) { - return NVME_DATA_SGL_LEN_INVALID | NVME_DNR; - } - - status = nvme_map_addr(n, sg, addr, trans_len); - if (status) { - return status; - } - -next: - *len -= trans_len; - } - - return NVME_SUCCESS; -} - -static uint16_t nvme_map_sgl(NvmeCtrl *n, NvmeSg *sg, NvmeSglDescriptor sgl, - size_t len, NvmeCmd *cmd) -{ - /* - * Read the segment in chunks of 256 descriptors (one 4k page) to avoid - * dynamically allocating a potentially huge SGL. 
The spec allows the SGL - * to be larger (as in number of bytes required to describe the SGL - * descriptors and segment chain) than the command transfer size, so it is - * not bounded by MDTS. - */ - const int SEG_CHUNK_SIZE = 256; - - NvmeSglDescriptor segment[SEG_CHUNK_SIZE], *sgld, *last_sgld; - uint64_t nsgld; - uint32_t seg_len; - uint16_t status; - hwaddr addr; - int ret; - - sgld = &sgl; - addr = le64_to_cpu(sgl.addr); - - trace_pci_nvme_map_sgl(NVME_SGL_TYPE(sgl.type), len); - - nvme_sg_init(n, sg, nvme_addr_is_dma(n, addr)); - - /* - * If the entire transfer can be described with a single data block it can - * be mapped directly. - */ - if (NVME_SGL_TYPE(sgl.type) == NVME_SGL_DESCR_TYPE_DATA_BLOCK) { - status = nvme_map_sgl_data(n, sg, sgld, 1, &len, cmd); - if (status) { - goto unmap; - } - - goto out; - } - - for (;;) { - switch (NVME_SGL_TYPE(sgld->type)) { - case NVME_SGL_DESCR_TYPE_SEGMENT: - case NVME_SGL_DESCR_TYPE_LAST_SEGMENT: - break; - default: - return NVME_INVALID_SGL_SEG_DESCR | NVME_DNR; - } - - seg_len = le32_to_cpu(sgld->len); - - /* check the length of the (Last) Segment descriptor */ - if ((!seg_len || seg_len & 0xf) && - (NVME_SGL_TYPE(sgld->type) != NVME_SGL_DESCR_TYPE_BIT_BUCKET)) { - return NVME_INVALID_SGL_SEG_DESCR | NVME_DNR; - } - - if (UINT64_MAX - addr < seg_len) { - return NVME_DATA_SGL_LEN_INVALID | NVME_DNR; - } - - nsgld = seg_len / sizeof(NvmeSglDescriptor); - - while (nsgld > SEG_CHUNK_SIZE) { - if (nvme_addr_read(n, addr, segment, sizeof(segment))) { - trace_pci_nvme_err_addr_read(addr); - status = NVME_DATA_TRAS_ERROR; - goto unmap; - } - - status = nvme_map_sgl_data(n, sg, segment, SEG_CHUNK_SIZE, - &len, cmd); - if (status) { - goto unmap; - } - - nsgld -= SEG_CHUNK_SIZE; - addr += SEG_CHUNK_SIZE * sizeof(NvmeSglDescriptor); - } - - ret = nvme_addr_read(n, addr, segment, nsgld * - sizeof(NvmeSglDescriptor)); - if (ret) { - trace_pci_nvme_err_addr_read(addr); - status = NVME_DATA_TRAS_ERROR; - goto unmap; - } - - last_sgld = &segment[nsgld - 1]; - - /* - * If the segment ends with a Data Block or Bit Bucket Descriptor Type, - * then we are done. - */ - switch (NVME_SGL_TYPE(last_sgld->type)) { - case NVME_SGL_DESCR_TYPE_DATA_BLOCK: - case NVME_SGL_DESCR_TYPE_BIT_BUCKET: - status = nvme_map_sgl_data(n, sg, segment, nsgld, &len, cmd); - if (status) { - goto unmap; - } - - goto out; - - default: - break; - } - - /* - * If the last descriptor was not a Data Block or Bit Bucket, then the - * current segment must not be a Last Segment. - */ - if (NVME_SGL_TYPE(sgld->type) == NVME_SGL_DESCR_TYPE_LAST_SEGMENT) { - status = NVME_INVALID_SGL_SEG_DESCR | NVME_DNR; - goto unmap; - } - - sgld = last_sgld; - addr = le64_to_cpu(sgld->addr); - - /* - * Do not map the last descriptor; it will be a Segment or Last Segment - * descriptor and is handled by the next iteration. 
- */ - status = nvme_map_sgl_data(n, sg, segment, nsgld - 1, &len, cmd); - if (status) { - goto unmap; - } - } - -out: - /* if there is any residual left in len, the SGL was too short */ - if (len) { - status = NVME_DATA_SGL_LEN_INVALID | NVME_DNR; - goto unmap; - } - - return NVME_SUCCESS; - -unmap: - nvme_sg_unmap(sg); - return status; -} - -uint16_t nvme_map_dptr(NvmeCtrl *n, NvmeSg *sg, size_t len, - NvmeCmd *cmd) -{ - uint64_t prp1, prp2; - - switch (NVME_CMD_FLAGS_PSDT(cmd->flags)) { - case NVME_PSDT_PRP: - prp1 = le64_to_cpu(cmd->dptr.prp1); - prp2 = le64_to_cpu(cmd->dptr.prp2); - - return nvme_map_prp(n, sg, prp1, prp2, len); - case NVME_PSDT_SGL_MPTR_CONTIGUOUS: - case NVME_PSDT_SGL_MPTR_SGL: - return nvme_map_sgl(n, sg, cmd->dptr.sgl, len, cmd); - default: - return NVME_INVALID_FIELD; - } -} - -static uint16_t nvme_map_mptr(NvmeCtrl *n, NvmeSg *sg, size_t len, - NvmeCmd *cmd) -{ - int psdt = NVME_CMD_FLAGS_PSDT(cmd->flags); - hwaddr mptr = le64_to_cpu(cmd->mptr); - uint16_t status; - - if (psdt == NVME_PSDT_SGL_MPTR_SGL) { - NvmeSglDescriptor sgl; - - if (nvme_addr_read(n, mptr, &sgl, sizeof(sgl))) { - return NVME_DATA_TRAS_ERROR; - } - - status = nvme_map_sgl(n, sg, sgl, len, cmd); - if (status && (status & 0x7ff) == NVME_DATA_SGL_LEN_INVALID) { - status = NVME_MD_SGL_LEN_INVALID | NVME_DNR; - } - - return status; - } - - nvme_sg_init(n, sg, nvme_addr_is_dma(n, mptr)); - status = nvme_map_addr(n, sg, mptr, len); - if (status) { - nvme_sg_unmap(sg); - } - - return status; -} - -static uint16_t nvme_map_data(NvmeCtrl *n, uint32_t nlb, NvmeRequest *req) -{ - NvmeNamespace *ns = req->ns; - NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; - uint16_t ctrl = le16_to_cpu(rw->control); - size_t len = nvme_l2b(ns, nlb); - uint16_t status; - - if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps) && - (ctrl & NVME_RW_PRINFO_PRACT && ns->lbaf.ms == 8)) { - goto out; - } - - if (nvme_ns_ext(ns)) { - NvmeSg sg; - - len += nvme_m2b(ns, nlb); - - status = nvme_map_dptr(n, &sg, len, &req->cmd); - if (status) { - return status; - } - - nvme_sg_init(n, &req->sg, sg.flags & NVME_SG_DMA); - nvme_sg_split(&sg, ns, &req->sg, NULL); - nvme_sg_unmap(&sg); - - return NVME_SUCCESS; - } - -out: - return nvme_map_dptr(n, &req->sg, len, &req->cmd); -} - -static uint16_t nvme_map_mdata(NvmeCtrl *n, uint32_t nlb, NvmeRequest *req) -{ - NvmeNamespace *ns = req->ns; - size_t len = nvme_m2b(ns, nlb); - uint16_t status; - - if (nvme_ns_ext(ns)) { - NvmeSg sg; - - len += nvme_l2b(ns, nlb); - - status = nvme_map_dptr(n, &sg, len, &req->cmd); - if (status) { - return status; - } - - nvme_sg_init(n, &req->sg, sg.flags & NVME_SG_DMA); - nvme_sg_split(&sg, ns, NULL, &req->sg); - nvme_sg_unmap(&sg); - - return NVME_SUCCESS; - } - - return nvme_map_mptr(n, &req->sg, len, &req->cmd); -} - -static uint16_t nvme_tx_interleaved(NvmeCtrl *n, NvmeSg *sg, uint8_t *ptr, - uint32_t len, uint32_t bytes, - int32_t skip_bytes, int64_t offset, - NvmeTxDirection dir) -{ - hwaddr addr; - uint32_t trans_len, count = bytes; - bool dma = sg->flags & NVME_SG_DMA; - int64_t sge_len; - int sg_idx = 0; - int ret; - - assert(sg->flags & NVME_SG_ALLOC); - - while (len) { - sge_len = dma ? 
sg->qsg.sg[sg_idx].len : sg->iov.iov[sg_idx].iov_len; - - if (sge_len - offset < 0) { - offset -= sge_len; - sg_idx++; - continue; - } - - if (sge_len == offset) { - offset = 0; - sg_idx++; - continue; - } - - trans_len = MIN(len, count); - trans_len = MIN(trans_len, sge_len - offset); - - if (dma) { - addr = sg->qsg.sg[sg_idx].base + offset; - } else { - addr = (hwaddr)(uintptr_t)sg->iov.iov[sg_idx].iov_base + offset; - } - - if (dir == NVME_TX_DIRECTION_TO_DEVICE) { - ret = nvme_addr_read(n, addr, ptr, trans_len); - } else { - ret = nvme_addr_write(n, addr, ptr, trans_len); - } - - if (ret) { - return NVME_DATA_TRAS_ERROR; - } - - ptr += trans_len; - len -= trans_len; - count -= trans_len; - offset += trans_len; - - if (count == 0) { - count = bytes; - offset += skip_bytes; - } - } - - return NVME_SUCCESS; -} - -static uint16_t nvme_tx(NvmeCtrl *n, NvmeSg *sg, uint8_t *ptr, uint32_t len, - NvmeTxDirection dir) -{ - assert(sg->flags & NVME_SG_ALLOC); - - if (sg->flags & NVME_SG_DMA) { - uint64_t residual; - - if (dir == NVME_TX_DIRECTION_TO_DEVICE) { - residual = dma_buf_write(ptr, len, &sg->qsg); - } else { - residual = dma_buf_read(ptr, len, &sg->qsg); - } - - if (unlikely(residual)) { - trace_pci_nvme_err_invalid_dma(); - return NVME_INVALID_FIELD | NVME_DNR; - } - } else { - size_t bytes; - - if (dir == NVME_TX_DIRECTION_TO_DEVICE) { - bytes = qemu_iovec_to_buf(&sg->iov, 0, ptr, len); - } else { - bytes = qemu_iovec_from_buf(&sg->iov, 0, ptr, len); - } - - if (unlikely(bytes != len)) { - trace_pci_nvme_err_invalid_dma(); - return NVME_INVALID_FIELD | NVME_DNR; - } - } - - return NVME_SUCCESS; -} - -static inline uint16_t nvme_c2h(NvmeCtrl *n, uint8_t *ptr, uint32_t len, - NvmeRequest *req) -{ - uint16_t status; - - status = nvme_map_dptr(n, &req->sg, len, &req->cmd); - if (status) { - return status; - } - - return nvme_tx(n, &req->sg, ptr, len, NVME_TX_DIRECTION_FROM_DEVICE); -} - -static inline uint16_t nvme_h2c(NvmeCtrl *n, uint8_t *ptr, uint32_t len, - NvmeRequest *req) -{ - uint16_t status; - - status = nvme_map_dptr(n, &req->sg, len, &req->cmd); - if (status) { - return status; - } - - return nvme_tx(n, &req->sg, ptr, len, NVME_TX_DIRECTION_TO_DEVICE); -} - -uint16_t nvme_bounce_data(NvmeCtrl *n, uint8_t *ptr, uint32_t len, - NvmeTxDirection dir, NvmeRequest *req) -{ - NvmeNamespace *ns = req->ns; - NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; - uint16_t ctrl = le16_to_cpu(rw->control); - - if (nvme_ns_ext(ns) && - !(ctrl & NVME_RW_PRINFO_PRACT && ns->lbaf.ms == 8)) { - return nvme_tx_interleaved(n, &req->sg, ptr, len, ns->lbasz, - ns->lbaf.ms, 0, dir); - } - - return nvme_tx(n, &req->sg, ptr, len, dir); -} - -uint16_t nvme_bounce_mdata(NvmeCtrl *n, uint8_t *ptr, uint32_t len, - NvmeTxDirection dir, NvmeRequest *req) -{ - NvmeNamespace *ns = req->ns; - uint16_t status; - - if (nvme_ns_ext(ns)) { - return nvme_tx_interleaved(n, &req->sg, ptr, len, ns->lbaf.ms, - ns->lbasz, ns->lbasz, dir); - } - - nvme_sg_unmap(&req->sg); - - status = nvme_map_mptr(n, &req->sg, len, &req->cmd); - if (status) { - return status; - } - - return nvme_tx(n, &req->sg, ptr, len, dir); -} - -static inline void nvme_blk_read(BlockBackend *blk, int64_t offset, - BlockCompletionFunc *cb, NvmeRequest *req) -{ - assert(req->sg.flags & NVME_SG_ALLOC); - - if (req->sg.flags & NVME_SG_DMA) { - req->aiocb = dma_blk_read(blk, &req->sg.qsg, offset, BDRV_SECTOR_SIZE, - cb, req); - } else { - req->aiocb = blk_aio_preadv(blk, offset, &req->sg.iov, 0, cb, req); - } -} - -static inline void nvme_blk_write(BlockBackend *blk, 
int64_t offset, - BlockCompletionFunc *cb, NvmeRequest *req) -{ - assert(req->sg.flags & NVME_SG_ALLOC); - - if (req->sg.flags & NVME_SG_DMA) { - req->aiocb = dma_blk_write(blk, &req->sg.qsg, offset, BDRV_SECTOR_SIZE, - cb, req); - } else { - req->aiocb = blk_aio_pwritev(blk, offset, &req->sg.iov, 0, cb, req); - } -} - -static void nvme_post_cqes(void *opaque) -{ - NvmeCQueue *cq = opaque; - NvmeCtrl *n = cq->ctrl; - NvmeRequest *req, *next; - int ret; - - QTAILQ_FOREACH_SAFE(req, &cq->req_list, entry, next) { - NvmeSQueue *sq; - hwaddr addr; - - if (nvme_cq_full(cq)) { - break; - } - - sq = req->sq; - req->cqe.status = cpu_to_le16((req->status << 1) | cq->phase); - req->cqe.sq_id = cpu_to_le16(sq->sqid); - req->cqe.sq_head = cpu_to_le16(sq->head); - addr = cq->dma_addr + cq->tail * n->cqe_size; - ret = pci_dma_write(&n->parent_obj, addr, (void *)&req->cqe, - sizeof(req->cqe)); - if (ret) { - trace_pci_nvme_err_addr_write(addr); - trace_pci_nvme_err_cfs(); - n->bar.csts = NVME_CSTS_FAILED; - break; - } - QTAILQ_REMOVE(&cq->req_list, req, entry); - nvme_inc_cq_tail(cq); - nvme_sg_unmap(&req->sg); - QTAILQ_INSERT_TAIL(&sq->req_list, req, entry); - } - if (cq->tail != cq->head) { - nvme_irq_assert(n, cq); - } -} - -static void nvme_enqueue_req_completion(NvmeCQueue *cq, NvmeRequest *req) -{ - assert(cq->cqid == req->sq->cqid); - trace_pci_nvme_enqueue_req_completion(nvme_cid(req), cq->cqid, - req->status); - - if (req->status) { - trace_pci_nvme_err_req_status(nvme_cid(req), nvme_nsid(req->ns), - req->status, req->cmd.opcode); - } - - QTAILQ_REMOVE(&req->sq->out_req_list, req, entry); - QTAILQ_INSERT_TAIL(&cq->req_list, req, entry); - timer_mod(cq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500); -} - -static void nvme_process_aers(void *opaque) -{ - NvmeCtrl *n = opaque; - NvmeAsyncEvent *event, *next; - - trace_pci_nvme_process_aers(n->aer_queued); - - QTAILQ_FOREACH_SAFE(event, &n->aer_queue, entry, next) { - NvmeRequest *req; - NvmeAerResult *result; - - /* can't post cqe if there is nothing to complete */ - if (!n->outstanding_aers) { - trace_pci_nvme_no_outstanding_aers(); - break; - } - - /* ignore if masked (cqe posted, but event not cleared) */ - if (n->aer_mask & (1 << event->result.event_type)) { - trace_pci_nvme_aer_masked(event->result.event_type, n->aer_mask); - continue; - } - - QTAILQ_REMOVE(&n->aer_queue, event, entry); - n->aer_queued--; - - n->aer_mask |= 1 << event->result.event_type; - n->outstanding_aers--; - - req = n->aer_reqs[n->outstanding_aers]; - - result = (NvmeAerResult *) &req->cqe.result; - result->event_type = event->result.event_type; - result->event_info = event->result.event_info; - result->log_page = event->result.log_page; - g_free(event); - - trace_pci_nvme_aer_post_cqe(result->event_type, result->event_info, - result->log_page); - - nvme_enqueue_req_completion(&n->admin_cq, req); - } -} - -static void nvme_enqueue_event(NvmeCtrl *n, uint8_t event_type, - uint8_t event_info, uint8_t log_page) -{ - NvmeAsyncEvent *event; - - trace_pci_nvme_enqueue_event(event_type, event_info, log_page); - - if (n->aer_queued == n->params.aer_max_queued) { - trace_pci_nvme_enqueue_event_noqueue(n->aer_queued); - return; - } - - event = g_new(NvmeAsyncEvent, 1); - event->result = (NvmeAerResult) { - .event_type = event_type, - .event_info = event_info, - .log_page = log_page, - }; - - QTAILQ_INSERT_TAIL(&n->aer_queue, event, entry); - n->aer_queued++; - - nvme_process_aers(n); -} - -static void nvme_smart_event(NvmeCtrl *n, uint8_t event) -{ - uint8_t aer_info; - - /* 
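 * Map an internal SMART event onto its Asynchronous Event Information
 * code and queue a SMART AER, provided the event is enabled in the
 * Asynchronous Event Configuration feature.
 *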
Ref SPEC <Asynchronous Event Information – SMART / Health Status> */ - if (!(NVME_AEC_SMART(n->features.async_config) & event)) { - return; - } - - switch (event) { - case NVME_SMART_SPARE: - aer_info = NVME_AER_INFO_SMART_SPARE_THRESH; - break; - case NVME_SMART_TEMPERATURE: - aer_info = NVME_AER_INFO_SMART_TEMP_THRESH; - break; - case NVME_SMART_RELIABILITY: - case NVME_SMART_MEDIA_READ_ONLY: - case NVME_SMART_FAILED_VOLATILE_MEDIA: - case NVME_SMART_PMR_UNRELIABLE: - aer_info = NVME_AER_INFO_SMART_RELIABILITY; - break; - default: - return; - } - - nvme_enqueue_event(n, NVME_AER_TYPE_SMART, aer_info, NVME_LOG_SMART_INFO); -} - -static void nvme_clear_events(NvmeCtrl *n, uint8_t event_type) -{ - n->aer_mask &= ~(1 << event_type); - if (!QTAILQ_EMPTY(&n->aer_queue)) { - nvme_process_aers(n); - } -} - -static inline uint16_t nvme_check_mdts(NvmeCtrl *n, size_t len) -{ - uint8_t mdts = n->params.mdts; - - if (mdts && len > n->page_size << mdts) { - trace_pci_nvme_err_mdts(len); - return NVME_INVALID_FIELD | NVME_DNR; - } - - return NVME_SUCCESS; -} - -static inline uint16_t nvme_check_bounds(NvmeNamespace *ns, uint64_t slba, - uint32_t nlb) -{ - uint64_t nsze = le64_to_cpu(ns->id_ns.nsze); - - if (unlikely(UINT64_MAX - slba < nlb || slba + nlb > nsze)) { - trace_pci_nvme_err_invalid_lba_range(slba, nlb, nsze); - return NVME_LBA_RANGE | NVME_DNR; - } - - return NVME_SUCCESS; -} - -static uint16_t nvme_check_dulbe(NvmeNamespace *ns, uint64_t slba, - uint32_t nlb) -{ - BlockDriverState *bs = blk_bs(ns->blkconf.blk); - - int64_t pnum = 0, bytes = nvme_l2b(ns, nlb); - int64_t offset = nvme_l2b(ns, slba); - bool zeroed; - int ret; - - Error *local_err = NULL; - - /* - * `pnum` holds the number of bytes after offset that share the same - * allocation status as the byte at offset. If `pnum` is different from - * `bytes`, we should check the allocation status of the next range and - * continue this until all bytes have been checked. - */ - do { - bytes -= pnum; - - ret = bdrv_block_status(bs, offset, bytes, &pnum, NULL, NULL); - if (ret < 0) { - error_setg_errno(&local_err, -ret, "unable to get block status"); - error_report_err(local_err); - - return NVME_INTERNAL_DEV_ERROR; - } - - zeroed = !!(ret & BDRV_BLOCK_ZERO); - - trace_pci_nvme_block_status(offset, bytes, pnum, ret, zeroed); - - if (zeroed) { - return NVME_DULB; - } - - offset += pnum; - } while (pnum != bytes); - - return NVME_SUCCESS; -} - -static void nvme_aio_err(NvmeRequest *req, int ret) -{ - uint16_t status = NVME_SUCCESS; - Error *local_err = NULL; - - switch (req->cmd.opcode) { - case NVME_CMD_READ: - status = NVME_UNRECOVERED_READ; - break; - case NVME_CMD_FLUSH: - case NVME_CMD_WRITE: - case NVME_CMD_WRITE_ZEROES: - case NVME_CMD_ZONE_APPEND: - status = NVME_WRITE_FAULT; - break; - default: - status = NVME_INTERNAL_DEV_ERROR; - break; - } - - trace_pci_nvme_err_aio(nvme_cid(req), strerror(-ret), status); - - error_setg_errno(&local_err, -ret, "aio failed"); - error_report_err(local_err); - - /* - * Set the command status code to the first encountered error but allow a - * subsequent Internal Device Error to trump it. - */ - if (req->status && status != NVME_INTERNAL_DEV_ERROR) { - return; - } - - req->status = status; -} - -static inline uint32_t nvme_zone_idx(NvmeNamespace *ns, uint64_t slba) -{ - return ns->zone_size_log2 > 0 ? 
slba >> ns->zone_size_log2 : - slba / ns->zone_size; -} - -static inline NvmeZone *nvme_get_zone_by_slba(NvmeNamespace *ns, uint64_t slba) -{ - uint32_t zone_idx = nvme_zone_idx(ns, slba); - - assert(zone_idx < ns->num_zones); - return &ns->zone_array[zone_idx]; -} - -static uint16_t nvme_check_zone_state_for_write(NvmeZone *zone) -{ - uint64_t zslba = zone->d.zslba; - - switch (nvme_get_zone_state(zone)) { - case NVME_ZONE_STATE_EMPTY: - case NVME_ZONE_STATE_IMPLICITLY_OPEN: - case NVME_ZONE_STATE_EXPLICITLY_OPEN: - case NVME_ZONE_STATE_CLOSED: - return NVME_SUCCESS; - case NVME_ZONE_STATE_FULL: - trace_pci_nvme_err_zone_is_full(zslba); - return NVME_ZONE_FULL; - case NVME_ZONE_STATE_OFFLINE: - trace_pci_nvme_err_zone_is_offline(zslba); - return NVME_ZONE_OFFLINE; - case NVME_ZONE_STATE_READ_ONLY: - trace_pci_nvme_err_zone_is_read_only(zslba); - return NVME_ZONE_READ_ONLY; - default: - assert(false); - } - - return NVME_INTERNAL_DEV_ERROR; -} - -static uint16_t nvme_check_zone_write(NvmeNamespace *ns, NvmeZone *zone, - uint64_t slba, uint32_t nlb) -{ - uint64_t zcap = nvme_zone_wr_boundary(zone); - uint16_t status; - - status = nvme_check_zone_state_for_write(zone); - if (status) { - return status; - } - - if (unlikely(slba != zone->w_ptr)) { - trace_pci_nvme_err_write_not_at_wp(slba, zone->d.zslba, zone->w_ptr); - return NVME_ZONE_INVALID_WRITE; - } - - if (unlikely((slba + nlb) > zcap)) { - trace_pci_nvme_err_zone_boundary(slba, nlb, zcap); - return NVME_ZONE_BOUNDARY_ERROR; - } - - return NVME_SUCCESS; -} - -static uint16_t nvme_check_zone_state_for_read(NvmeZone *zone) -{ - switch (nvme_get_zone_state(zone)) { - case NVME_ZONE_STATE_EMPTY: - case NVME_ZONE_STATE_IMPLICITLY_OPEN: - case NVME_ZONE_STATE_EXPLICITLY_OPEN: - case NVME_ZONE_STATE_FULL: - case NVME_ZONE_STATE_CLOSED: - case NVME_ZONE_STATE_READ_ONLY: - return NVME_SUCCESS; - case NVME_ZONE_STATE_OFFLINE: - trace_pci_nvme_err_zone_is_offline(zone->d.zslba); - return NVME_ZONE_OFFLINE; - default: - assert(false); - } - - return NVME_INTERNAL_DEV_ERROR; -} - -static uint16_t nvme_check_zone_read(NvmeNamespace *ns, uint64_t slba, - uint32_t nlb) -{ - NvmeZone *zone = nvme_get_zone_by_slba(ns, slba); - uint64_t bndry = nvme_zone_rd_boundary(ns, zone); - uint64_t end = slba + nlb; - uint16_t status; - - status = nvme_check_zone_state_for_read(zone); - if (status) { - ; - } else if (unlikely(end > bndry)) { - if (!ns->params.cross_zone_read) { - status = NVME_ZONE_BOUNDARY_ERROR; - } else { - /* - * Read across zone boundary - check that all subsequent - * zones that are being read have an appropriate state. 
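 *
 * E.g. (illustrative): a read that starts in zone 3 and ends in zone 5
 * succeeds only if zones 4 and 5 are readable too; if any zone along the
 * way is Offline, the whole read fails.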
- */ - do { - zone++; - status = nvme_check_zone_state_for_read(zone); - if (status) { - break; - } - } while (end > nvme_zone_rd_boundary(ns, zone)); - } - } - - return status; -} - -static uint16_t nvme_zrm_finish(NvmeNamespace *ns, NvmeZone *zone) -{ - switch (nvme_get_zone_state(zone)) { - case NVME_ZONE_STATE_FULL: - return NVME_SUCCESS; - - case NVME_ZONE_STATE_IMPLICITLY_OPEN: - case NVME_ZONE_STATE_EXPLICITLY_OPEN: - nvme_aor_dec_open(ns); - /* fallthrough */ - case NVME_ZONE_STATE_CLOSED: - nvme_aor_dec_active(ns); - /* fallthrough */ - case NVME_ZONE_STATE_EMPTY: - nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_FULL); - return NVME_SUCCESS; - - default: - return NVME_ZONE_INVAL_TRANSITION; - } -} - -static uint16_t nvme_zrm_close(NvmeNamespace *ns, NvmeZone *zone) -{ - switch (nvme_get_zone_state(zone)) { - case NVME_ZONE_STATE_EXPLICITLY_OPEN: - case NVME_ZONE_STATE_IMPLICITLY_OPEN: - nvme_aor_dec_open(ns); - nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_CLOSED); - /* fall through */ - case NVME_ZONE_STATE_CLOSED: - return NVME_SUCCESS; - - default: - return NVME_ZONE_INVAL_TRANSITION; - } -} - -static void nvme_zrm_auto_transition_zone(NvmeNamespace *ns) -{ - NvmeZone *zone; - - if (ns->params.max_open_zones && - ns->nr_open_zones == ns->params.max_open_zones) { - zone = QTAILQ_FIRST(&ns->imp_open_zones); - if (zone) { - /* - * Automatically close this implicitly open zone. - */ - QTAILQ_REMOVE(&ns->imp_open_zones, zone, entry); - nvme_zrm_close(ns, zone); - } - } -} - -enum { - NVME_ZRM_AUTO = 1 << 0, -}; - -static uint16_t nvme_zrm_open_flags(NvmeNamespace *ns, NvmeZone *zone, - int flags) -{ - int act = 0; - uint16_t status; - - switch (nvme_get_zone_state(zone)) { - case NVME_ZONE_STATE_EMPTY: - act = 1; - - /* fallthrough */ - - case NVME_ZONE_STATE_CLOSED: - nvme_zrm_auto_transition_zone(ns); - status = nvme_aor_check(ns, act, 1); - if (status) { - return status; - } - - if (act) { - nvme_aor_inc_active(ns); - } - - nvme_aor_inc_open(ns); - - if (flags & NVME_ZRM_AUTO) { - nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_IMPLICITLY_OPEN); - return NVME_SUCCESS; - } - - /* fallthrough */ - - case NVME_ZONE_STATE_IMPLICITLY_OPEN: - if (flags & NVME_ZRM_AUTO) { - return NVME_SUCCESS; - } - - nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_EXPLICITLY_OPEN); - - /* fallthrough */ - - case NVME_ZONE_STATE_EXPLICITLY_OPEN: - return NVME_SUCCESS; - - default: - return NVME_ZONE_INVAL_TRANSITION; - } -} - -static inline uint16_t nvme_zrm_auto(NvmeNamespace *ns, NvmeZone *zone) -{ - return nvme_zrm_open_flags(ns, zone, NVME_ZRM_AUTO); -} - -static inline uint16_t nvme_zrm_open(NvmeNamespace *ns, NvmeZone *zone) -{ - return nvme_zrm_open_flags(ns, zone, 0); -} - -static void nvme_advance_zone_wp(NvmeNamespace *ns, NvmeZone *zone, - uint32_t nlb) -{ - zone->d.wp += nlb; - - if (zone->d.wp == nvme_zone_wr_boundary(zone)) { - nvme_zrm_finish(ns, zone); - } -} - -static void nvme_finalize_zoned_write(NvmeNamespace *ns, NvmeRequest *req) -{ - NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; - NvmeZone *zone; - uint64_t slba; - uint32_t nlb; - - slba = le64_to_cpu(rw->slba); - nlb = le16_to_cpu(rw->nlb) + 1; - zone = nvme_get_zone_by_slba(ns, slba); - - nvme_advance_zone_wp(ns, zone, nlb); -} - -static inline bool nvme_is_write(NvmeRequest *req) -{ - NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; - - return rw->opcode == NVME_CMD_WRITE || - rw->opcode == NVME_CMD_ZONE_APPEND || - rw->opcode == NVME_CMD_WRITE_ZEROES; -} - -static void nvme_misc_cb(void *opaque, int ret) -{ - NvmeRequest *req = 
opaque; - NvmeNamespace *ns = req->ns; - - BlockBackend *blk = ns->blkconf.blk; - BlockAcctCookie *acct = &req->acct; - BlockAcctStats *stats = blk_get_stats(blk); - - trace_pci_nvme_misc_cb(nvme_cid(req), blk_name(blk)); - - if (ret) { - block_acct_failed(stats, acct); - nvme_aio_err(req, ret); - } else { - block_acct_done(stats, acct); - } - - nvme_enqueue_req_completion(nvme_cq(req), req); -} - -void nvme_rw_complete_cb(void *opaque, int ret) -{ - NvmeRequest *req = opaque; - NvmeNamespace *ns = req->ns; - BlockBackend *blk = ns->blkconf.blk; - BlockAcctCookie *acct = &req->acct; - BlockAcctStats *stats = blk_get_stats(blk); - - trace_pci_nvme_rw_complete_cb(nvme_cid(req), blk_name(blk)); - - if (ret) { - block_acct_failed(stats, acct); - nvme_aio_err(req, ret); - } else { - block_acct_done(stats, acct); - } - - if (ns->params.zoned && nvme_is_write(req)) { - nvme_finalize_zoned_write(ns, req); - } - - nvme_enqueue_req_completion(nvme_cq(req), req); -} - -static void nvme_rw_cb(void *opaque, int ret) -{ - NvmeRequest *req = opaque; - NvmeNamespace *ns = req->ns; - - BlockBackend *blk = ns->blkconf.blk; - - trace_pci_nvme_rw_cb(nvme_cid(req), blk_name(blk)); - - if (ret) { - goto out; - } - - if (ns->lbaf.ms) { - NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; - uint64_t slba = le64_to_cpu(rw->slba); - uint32_t nlb = (uint32_t)le16_to_cpu(rw->nlb) + 1; - uint64_t offset = nvme_moff(ns, slba); - - if (req->cmd.opcode == NVME_CMD_WRITE_ZEROES) { - size_t mlen = nvme_m2b(ns, nlb); - - req->aiocb = blk_aio_pwrite_zeroes(blk, offset, mlen, - BDRV_REQ_MAY_UNMAP, - nvme_rw_complete_cb, req); - return; - } - - if (nvme_ns_ext(ns) || req->cmd.mptr) { - uint16_t status; - - nvme_sg_unmap(&req->sg); - status = nvme_map_mdata(nvme_ctrl(req), nlb, req); - if (status) { - ret = -EFAULT; - goto out; - } - - if (req->cmd.opcode == NVME_CMD_READ) { - return nvme_blk_read(blk, offset, nvme_rw_complete_cb, req); - } - - return nvme_blk_write(blk, offset, nvme_rw_complete_cb, req); - } - } - -out: - nvme_rw_complete_cb(req, ret); -} - -struct nvme_aio_format_ctx { - NvmeRequest *req; - NvmeNamespace *ns; - - /* number of outstanding write zeroes for this namespace */ - int *count; -}; - -static void nvme_aio_format_cb(void *opaque, int ret) -{ - struct nvme_aio_format_ctx *ctx = opaque; - NvmeRequest *req = ctx->req; - NvmeNamespace *ns = ctx->ns; - uintptr_t *num_formats = (uintptr_t *)&req->opaque; - int *count = ctx->count; - - g_free(ctx); - - if (ret) { - nvme_aio_err(req, ret); - } - - if (--(*count)) { - return; - } - - g_free(count); - ns->status = 0x0; - - if (--(*num_formats)) { - return; - } - - nvme_enqueue_req_completion(nvme_cq(req), req); -} - -struct nvme_aio_flush_ctx { - NvmeRequest *req; - NvmeNamespace *ns; - BlockAcctCookie acct; -}; - -static void nvme_aio_flush_cb(void *opaque, int ret) -{ - struct nvme_aio_flush_ctx *ctx = opaque; - NvmeRequest *req = ctx->req; - uintptr_t *num_flushes = (uintptr_t *)&req->opaque; - - BlockBackend *blk = ctx->ns->blkconf.blk; - BlockAcctCookie *acct = &ctx->acct; - BlockAcctStats *stats = blk_get_stats(blk); - - trace_pci_nvme_aio_flush_cb(nvme_cid(req), blk_name(blk)); - - if (!ret) { - block_acct_done(stats, acct); - } else { - block_acct_failed(stats, acct); - nvme_aio_err(req, ret); - } - - (*num_flushes)--; - g_free(ctx); - - if (*num_flushes) { - return; - } - - nvme_enqueue_req_completion(nvme_cq(req), req); -} - -static void nvme_verify_cb(void *opaque, int ret) -{ - NvmeBounceContext *ctx = opaque; - NvmeRequest *req = ctx->req; - NvmeNamespace *ns 
= req->ns; - BlockBackend *blk = ns->blkconf.blk; - BlockAcctCookie *acct = &req->acct; - BlockAcctStats *stats = blk_get_stats(blk); - NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; - uint64_t slba = le64_to_cpu(rw->slba); - uint16_t ctrl = le16_to_cpu(rw->control); - uint16_t apptag = le16_to_cpu(rw->apptag); - uint16_t appmask = le16_to_cpu(rw->appmask); - uint32_t reftag = le32_to_cpu(rw->reftag); - uint16_t status; - - trace_pci_nvme_verify_cb(nvme_cid(req), NVME_RW_PRINFO(ctrl), apptag, - appmask, reftag); - - if (ret) { - block_acct_failed(stats, acct); - nvme_aio_err(req, ret); - goto out; - } - - block_acct_done(stats, acct); - - if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) { - status = nvme_dif_mangle_mdata(ns, ctx->mdata.bounce, - ctx->mdata.iov.size, slba); - if (status) { - req->status = status; - goto out; - } - - req->status = nvme_dif_check(ns, ctx->data.bounce, ctx->data.iov.size, - ctx->mdata.bounce, ctx->mdata.iov.size, - ctrl, slba, apptag, appmask, reftag); - } - -out: - qemu_iovec_destroy(&ctx->data.iov); - g_free(ctx->data.bounce); - - qemu_iovec_destroy(&ctx->mdata.iov); - g_free(ctx->mdata.bounce); - - g_free(ctx); - - nvme_enqueue_req_completion(nvme_cq(req), req); -} - - -static void nvme_verify_mdata_in_cb(void *opaque, int ret) -{ - NvmeBounceContext *ctx = opaque; - NvmeRequest *req = ctx->req; - NvmeNamespace *ns = req->ns; - NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; - uint64_t slba = le64_to_cpu(rw->slba); - uint32_t nlb = le16_to_cpu(rw->nlb) + 1; - size_t mlen = nvme_m2b(ns, nlb); - uint64_t offset = nvme_moff(ns, slba); - BlockBackend *blk = ns->blkconf.blk; - - trace_pci_nvme_verify_mdata_in_cb(nvme_cid(req), blk_name(blk)); - - if (ret) { - goto out; - } - - ctx->mdata.bounce = g_malloc(mlen); - - qemu_iovec_reset(&ctx->mdata.iov); - qemu_iovec_add(&ctx->mdata.iov, ctx->mdata.bounce, mlen); - - req->aiocb = blk_aio_preadv(blk, offset, &ctx->mdata.iov, 0, - nvme_verify_cb, ctx); - return; - -out: - nvme_verify_cb(ctx, ret); -} - -static void nvme_aio_discard_cb(void *opaque, int ret) -{ - NvmeRequest *req = opaque; - uintptr_t *discards = (uintptr_t *)&req->opaque; - - trace_pci_nvme_aio_discard_cb(nvme_cid(req)); - - if (ret) { - nvme_aio_err(req, ret); - } - - (*discards)--; - - if (*discards) { - return; - } - - nvme_enqueue_req_completion(nvme_cq(req), req); -} - -struct nvme_zone_reset_ctx { - NvmeRequest *req; - NvmeZone *zone; -}; - -static void nvme_aio_zone_reset_complete_cb(void *opaque, int ret) -{ - struct nvme_zone_reset_ctx *ctx = opaque; - NvmeRequest *req = ctx->req; - NvmeNamespace *ns = req->ns; - NvmeZone *zone = ctx->zone; - uintptr_t *resets = (uintptr_t *)&req->opaque; - - if (ret) { - nvme_aio_err(req, ret); - goto out; - } - - switch (nvme_get_zone_state(zone)) { - case NVME_ZONE_STATE_EXPLICITLY_OPEN: - case NVME_ZONE_STATE_IMPLICITLY_OPEN: - nvme_aor_dec_open(ns); - /* fall through */ - case NVME_ZONE_STATE_CLOSED: - nvme_aor_dec_active(ns); - /* fall through */ - case NVME_ZONE_STATE_FULL: - zone->w_ptr = zone->d.zslba; - zone->d.wp = zone->w_ptr; - nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_EMPTY); - /* fall through */ - default: - break; - } - -out: - g_free(ctx); - - (*resets)--; - - if (*resets) { - return; - } - - nvme_enqueue_req_completion(nvme_cq(req), req); -} - -static void nvme_aio_zone_reset_cb(void *opaque, int ret) -{ - struct nvme_zone_reset_ctx *ctx = opaque; - NvmeRequest *req = ctx->req; - NvmeNamespace *ns = req->ns; - NvmeZone *zone = ctx->zone; - - trace_pci_nvme_aio_zone_reset_cb(nvme_cid(req), zone->d.zslba); - - 
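    /*
     * The data blocks of the zone have been zeroed at this point; if the
     * namespace carries per-block metadata, that region is zeroed below as
     * well before the completion callback transitions the zone to Empty.
     */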
if (ret) { - goto out; - } - - if (ns->lbaf.ms) { - int64_t offset = nvme_moff(ns, zone->d.zslba); - - blk_aio_pwrite_zeroes(ns->blkconf.blk, offset, - nvme_m2b(ns, ns->zone_size), BDRV_REQ_MAY_UNMAP, - nvme_aio_zone_reset_complete_cb, ctx); - return; - } - -out: - nvme_aio_zone_reset_complete_cb(opaque, ret); -} - -struct nvme_copy_ctx { - int copies; - uint8_t *bounce; - uint8_t *mbounce; - uint32_t nlb; - NvmeCopySourceRange *ranges; -}; - -struct nvme_copy_in_ctx { - NvmeRequest *req; - QEMUIOVector iov; - NvmeCopySourceRange *range; -}; - -static void nvme_copy_complete_cb(void *opaque, int ret) -{ - NvmeRequest *req = opaque; - NvmeNamespace *ns = req->ns; - struct nvme_copy_ctx *ctx = req->opaque; - - if (ret) { - block_acct_failed(blk_get_stats(ns->blkconf.blk), &req->acct); - nvme_aio_err(req, ret); - goto out; - } - - block_acct_done(blk_get_stats(ns->blkconf.blk), &req->acct); - -out: - if (ns->params.zoned) { - NvmeCopyCmd *copy = (NvmeCopyCmd *)&req->cmd; - uint64_t sdlba = le64_to_cpu(copy->sdlba); - NvmeZone *zone = nvme_get_zone_by_slba(ns, sdlba); - - nvme_advance_zone_wp(ns, zone, ctx->nlb); - } - - g_free(ctx->bounce); - g_free(ctx->mbounce); - g_free(ctx); - - nvme_enqueue_req_completion(nvme_cq(req), req); -} - -static void nvme_copy_cb(void *opaque, int ret) -{ - NvmeRequest *req = opaque; - NvmeNamespace *ns = req->ns; - struct nvme_copy_ctx *ctx = req->opaque; - - trace_pci_nvme_copy_cb(nvme_cid(req)); - - if (ret) { - goto out; - } - - if (ns->lbaf.ms) { - NvmeCopyCmd *copy = (NvmeCopyCmd *)&req->cmd; - uint64_t sdlba = le64_to_cpu(copy->sdlba); - int64_t offset = nvme_moff(ns, sdlba); - - qemu_iovec_reset(&req->sg.iov); - qemu_iovec_add(&req->sg.iov, ctx->mbounce, nvme_m2b(ns, ctx->nlb)); - - req->aiocb = blk_aio_pwritev(ns->blkconf.blk, offset, &req->sg.iov, 0, - nvme_copy_complete_cb, req); - return; - } - -out: - nvme_copy_complete_cb(opaque, ret); -} - -static void nvme_copy_in_complete(NvmeRequest *req) -{ - NvmeNamespace *ns = req->ns; - NvmeCopyCmd *copy = (NvmeCopyCmd *)&req->cmd; - struct nvme_copy_ctx *ctx = req->opaque; - uint64_t sdlba = le64_to_cpu(copy->sdlba); - uint16_t status; - - trace_pci_nvme_copy_in_complete(nvme_cid(req)); - - block_acct_done(blk_get_stats(ns->blkconf.blk), &req->acct); - - if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) { - uint16_t prinfor = (copy->control[0] >> 4) & 0xf; - uint16_t prinfow = (copy->control[2] >> 2) & 0xf; - uint16_t nr = copy->nr + 1; - NvmeCopySourceRange *range; - uint64_t slba; - uint32_t nlb; - uint16_t apptag, appmask; - uint32_t reftag; - uint8_t *buf = ctx->bounce, *mbuf = ctx->mbounce; - size_t len, mlen; - int i; - - /* - * The dif helpers expect prinfo to be similar to the control field of - * the NvmeRwCmd, so shift by 10 to fake it. 
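 *
 * (PRINFO occupies bits 13:10 of the rw control field - PRACT in bit 13,
 * the PRCHK mask in bits 12:10 - whereas the Copy command carries
 * PRINFOR/PRINFOW as bare 4-bit values, hence the shift.)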
- */ - prinfor = prinfor << 10; - prinfow = prinfow << 10; - - for (i = 0; i < nr; i++) { - range = &ctx->ranges[i]; - slba = le64_to_cpu(range->slba); - nlb = le16_to_cpu(range->nlb) + 1; - len = nvme_l2b(ns, nlb); - mlen = nvme_m2b(ns, nlb); - apptag = le16_to_cpu(range->apptag); - appmask = le16_to_cpu(range->appmask); - reftag = le32_to_cpu(range->reftag); - - status = nvme_dif_check(ns, buf, len, mbuf, mlen, prinfor, slba, - apptag, appmask, reftag); - if (status) { - goto invalid; - } - - buf += len; - mbuf += mlen; - } - - apptag = le16_to_cpu(copy->apptag); - appmask = le16_to_cpu(copy->appmask); - reftag = le32_to_cpu(copy->reftag); - - if (prinfow & NVME_RW_PRINFO_PRACT) { - size_t len = nvme_l2b(ns, ctx->nlb); - size_t mlen = nvme_m2b(ns, ctx->nlb); - - status = nvme_check_prinfo(ns, prinfow, sdlba, reftag); - if (status) { - goto invalid; - } - - nvme_dif_pract_generate_dif(ns, ctx->bounce, len, ctx->mbounce, - mlen, apptag, reftag); - } else { - status = nvme_dif_check(ns, ctx->bounce, len, ctx->mbounce, mlen, - prinfow, sdlba, apptag, appmask, reftag); - if (status) { - goto invalid; - } - } - } - - status = nvme_check_bounds(ns, sdlba, ctx->nlb); - if (status) { - goto invalid; - } - - if (ns->params.zoned) { - NvmeZone *zone = nvme_get_zone_by_slba(ns, sdlba); - - status = nvme_check_zone_write(ns, zone, sdlba, ctx->nlb); - if (status) { - goto invalid; - } - - status = nvme_zrm_auto(ns, zone); - if (status) { - goto invalid; - } - - zone->w_ptr += ctx->nlb; - } - - qemu_iovec_init(&req->sg.iov, 1); - qemu_iovec_add(&req->sg.iov, ctx->bounce, nvme_l2b(ns, ctx->nlb)); - - block_acct_start(blk_get_stats(ns->blkconf.blk), &req->acct, 0, - BLOCK_ACCT_WRITE); - - req->aiocb = blk_aio_pwritev(ns->blkconf.blk, nvme_l2b(ns, sdlba), - &req->sg.iov, 0, nvme_copy_cb, req); - - return; - -invalid: - req->status = status; - - g_free(ctx->bounce); - g_free(ctx); - - nvme_enqueue_req_completion(nvme_cq(req), req); -} - -static void nvme_aio_copy_in_cb(void *opaque, int ret) -{ - struct nvme_copy_in_ctx *in_ctx = opaque; - NvmeRequest *req = in_ctx->req; - NvmeNamespace *ns = req->ns; - struct nvme_copy_ctx *ctx = req->opaque; - - qemu_iovec_destroy(&in_ctx->iov); - g_free(in_ctx); - - trace_pci_nvme_aio_copy_in_cb(nvme_cid(req)); - - if (ret) { - nvme_aio_err(req, ret); - } - - ctx->copies--; - - if (ctx->copies) { - return; - } - - if (req->status) { - block_acct_failed(blk_get_stats(ns->blkconf.blk), &req->acct); - - g_free(ctx->bounce); - g_free(ctx->mbounce); - g_free(ctx); - - nvme_enqueue_req_completion(nvme_cq(req), req); - - return; - } - - nvme_copy_in_complete(req); -} - -struct nvme_compare_ctx { - struct { - QEMUIOVector iov; - uint8_t *bounce; - } data; - - struct { - QEMUIOVector iov; - uint8_t *bounce; - } mdata; -}; - -static void nvme_compare_mdata_cb(void *opaque, int ret) -{ - NvmeRequest *req = opaque; - NvmeNamespace *ns = req->ns; - NvmeCtrl *n = nvme_ctrl(req); - NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; - uint16_t ctrl = le16_to_cpu(rw->control); - uint16_t apptag = le16_to_cpu(rw->apptag); - uint16_t appmask = le16_to_cpu(rw->appmask); - uint32_t reftag = le32_to_cpu(rw->reftag); - struct nvme_compare_ctx *ctx = req->opaque; - g_autofree uint8_t *buf = NULL; - BlockBackend *blk = ns->blkconf.blk; - BlockAcctCookie *acct = &req->acct; - BlockAcctStats *stats = blk_get_stats(blk); - uint16_t status = NVME_SUCCESS; - - trace_pci_nvme_compare_mdata_cb(nvme_cid(req)); - - if (ret) { - block_acct_failed(stats, acct); - nvme_aio_err(req, ret); - goto out; - } - - buf = 
g_malloc(ctx->mdata.iov.size); - - status = nvme_bounce_mdata(n, buf, ctx->mdata.iov.size, - NVME_TX_DIRECTION_TO_DEVICE, req); - if (status) { - req->status = status; - goto out; - } - - if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) { - uint64_t slba = le64_to_cpu(rw->slba); - uint8_t *bufp; - uint8_t *mbufp = ctx->mdata.bounce; - uint8_t *end = mbufp + ctx->mdata.iov.size; - int16_t pil = 0; - - status = nvme_dif_check(ns, ctx->data.bounce, ctx->data.iov.size, - ctx->mdata.bounce, ctx->mdata.iov.size, ctrl, - slba, apptag, appmask, reftag); - if (status) { - req->status = status; - goto out; - } - - /* - * When formatted with protection information, do not compare the DIF - * tuple. - */ - if (!(ns->id_ns.dps & NVME_ID_NS_DPS_FIRST_EIGHT)) { - pil = ns->lbaf.ms - sizeof(NvmeDifTuple); - } - - for (bufp = buf; mbufp < end; bufp += ns->lbaf.ms, mbufp += ns->lbaf.ms) { - if (memcmp(bufp + pil, mbufp + pil, ns->lbaf.ms - pil)) { - req->status = NVME_CMP_FAILURE; - goto out; - } - } - - goto out; - } - - if (memcmp(buf, ctx->mdata.bounce, ctx->mdata.iov.size)) { - req->status = NVME_CMP_FAILURE; - goto out; - } - - block_acct_done(stats, acct); - -out: - qemu_iovec_destroy(&ctx->data.iov); - g_free(ctx->data.bounce); - - qemu_iovec_destroy(&ctx->mdata.iov); - g_free(ctx->mdata.bounce); - - g_free(ctx); - - nvme_enqueue_req_completion(nvme_cq(req), req); -} - -static void nvme_compare_data_cb(void *opaque, int ret) -{ - NvmeRequest *req = opaque; - NvmeCtrl *n = nvme_ctrl(req); - NvmeNamespace *ns = req->ns; - BlockBackend *blk = ns->blkconf.blk; - BlockAcctCookie *acct = &req->acct; - BlockAcctStats *stats = blk_get_stats(blk); - - struct nvme_compare_ctx *ctx = req->opaque; - g_autofree uint8_t *buf = NULL; - uint16_t status; - - trace_pci_nvme_compare_data_cb(nvme_cid(req)); - - if (ret) { - block_acct_failed(stats, acct); - nvme_aio_err(req, ret); - goto out; - } - - buf = g_malloc(ctx->data.iov.size); - - status = nvme_bounce_data(n, buf, ctx->data.iov.size, - NVME_TX_DIRECTION_TO_DEVICE, req); - if (status) { - req->status = status; - goto out; - } - - if (memcmp(buf, ctx->data.bounce, ctx->data.iov.size)) { - req->status = NVME_CMP_FAILURE; - goto out; - } - - if (ns->lbaf.ms) { - NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; - uint64_t slba = le64_to_cpu(rw->slba); - uint32_t nlb = le16_to_cpu(rw->nlb) + 1; - size_t mlen = nvme_m2b(ns, nlb); - uint64_t offset = nvme_moff(ns, slba); - - ctx->mdata.bounce = g_malloc(mlen); - - qemu_iovec_init(&ctx->mdata.iov, 1); - qemu_iovec_add(&ctx->mdata.iov, ctx->mdata.bounce, mlen); - - req->aiocb = blk_aio_preadv(blk, offset, &ctx->mdata.iov, 0, - nvme_compare_mdata_cb, req); - return; - } - - block_acct_done(stats, acct); - -out: - qemu_iovec_destroy(&ctx->data.iov); - g_free(ctx->data.bounce); - g_free(ctx); - - nvme_enqueue_req_completion(nvme_cq(req), req); -} - -static uint16_t nvme_dsm(NvmeCtrl *n, NvmeRequest *req) -{ - NvmeNamespace *ns = req->ns; - NvmeDsmCmd *dsm = (NvmeDsmCmd *) &req->cmd; - - uint32_t attr = le32_to_cpu(dsm->attributes); - uint32_t nr = (le32_to_cpu(dsm->nr) & 0xff) + 1; - - uint16_t status = NVME_SUCCESS; - - trace_pci_nvme_dsm(nvme_cid(req), nvme_nsid(ns), nr, attr); - - if (attr & NVME_DSMGMT_AD) { - int64_t offset; - size_t len; - NvmeDsmRange range[nr]; - uintptr_t *discards = (uintptr_t *)&req->opaque; - - status = nvme_h2c(n, (uint8_t *)range, sizeof(range), req); - if (status) { - return status; - } - - /* - * AIO callbacks may be called immediately, so initialize discards to 1 - * to make sure the callback does not 
complete the request before - * all discards have been issued. - */ - *discards = 1; - - for (int i = 0; i < nr; i++) { - uint64_t slba = le64_to_cpu(range[i].slba); - uint32_t nlb = le32_to_cpu(range[i].nlb); - - if (nvme_check_bounds(ns, slba, nlb)) { - continue; - } - - trace_pci_nvme_dsm_deallocate(nvme_cid(req), nvme_nsid(ns), slba, - nlb); - - if (nlb > n->dmrsl) { - trace_pci_nvme_dsm_single_range_limit_exceeded(nlb, n->dmrsl); - } - - offset = nvme_l2b(ns, slba); - len = nvme_l2b(ns, nlb); - - while (len) { - size_t bytes = MIN(BDRV_REQUEST_MAX_BYTES, len); - - (*discards)++; - - blk_aio_pdiscard(ns->blkconf.blk, offset, bytes, - nvme_aio_discard_cb, req); - - offset += bytes; - len -= bytes; - } - } - - /* account for the 1-initialization */ - (*discards)--; - - if (*discards) { - status = NVME_NO_COMPLETE; - } else { - status = req->status; - } - } - - return status; -} - -static uint16_t nvme_verify(NvmeCtrl *n, NvmeRequest *req) -{ - NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; - NvmeNamespace *ns = req->ns; - BlockBackend *blk = ns->blkconf.blk; - uint64_t slba = le64_to_cpu(rw->slba); - uint32_t nlb = le16_to_cpu(rw->nlb) + 1; - size_t len = nvme_l2b(ns, nlb); - int64_t offset = nvme_l2b(ns, slba); - uint16_t ctrl = le16_to_cpu(rw->control); - uint32_t reftag = le32_to_cpu(rw->reftag); - NvmeBounceContext *ctx = NULL; - uint16_t status; - - trace_pci_nvme_verify(nvme_cid(req), nvme_nsid(ns), slba, nlb); - - if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) { - status = nvme_check_prinfo(ns, ctrl, slba, reftag); - if (status) { - return status; - } - - if (ctrl & NVME_RW_PRINFO_PRACT) { - return NVME_INVALID_PROT_INFO | NVME_DNR; - } - } - - if (len > n->page_size << n->params.vsl) { - return NVME_INVALID_FIELD | NVME_DNR; - } - - status = nvme_check_bounds(ns, slba, nlb); - if (status) { - return status; - } - - if (NVME_ERR_REC_DULBE(ns->features.err_rec)) { - status = nvme_check_dulbe(ns, slba, nlb); - if (status) { - return status; - } - } - - ctx = g_new0(NvmeBounceContext, 1); - ctx->req = req; - - ctx->data.bounce = g_malloc(len); - - qemu_iovec_init(&ctx->data.iov, 1); - qemu_iovec_add(&ctx->data.iov, ctx->data.bounce, len); - - block_acct_start(blk_get_stats(blk), &req->acct, ctx->data.iov.size, - BLOCK_ACCT_READ); - - req->aiocb = blk_aio_preadv(ns->blkconf.blk, offset, &ctx->data.iov, 0, - nvme_verify_mdata_in_cb, ctx); - return NVME_NO_COMPLETE; -} - -static uint16_t nvme_copy(NvmeCtrl *n, NvmeRequest *req) -{ - NvmeNamespace *ns = req->ns; - NvmeCopyCmd *copy = (NvmeCopyCmd *)&req->cmd; - - uint16_t nr = copy->nr + 1; - uint8_t format = copy->control[0] & 0xf; - - /* - * Shift the PRINFOR/PRINFOW values by 10 to allow reusing the - * NVME_RW_PRINFO constants. 
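 *
 * E.g. a PRINFOR value of 0x8 (PRACT set) becomes 0x8 << 10 == 0x2000,
 * which is exactly NVME_RW_PRINFO_PRACT.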
- */ - uint16_t prinfor = ((copy->control[0] >> 4) & 0xf) << 10; - uint16_t prinfow = ((copy->control[2] >> 2) & 0xf) << 10; - - uint32_t nlb = 0; - uint8_t *bounce = NULL, *bouncep = NULL; - uint8_t *mbounce = NULL, *mbouncep = NULL; - struct nvme_copy_ctx *ctx; - uint16_t status; - int i; - - trace_pci_nvme_copy(nvme_cid(req), nvme_nsid(ns), nr, format); - - if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps) && - ((prinfor & NVME_RW_PRINFO_PRACT) != (prinfow & NVME_RW_PRINFO_PRACT))) { - return NVME_INVALID_FIELD | NVME_DNR; - } - - if (!(n->id_ctrl.ocfs & (1 << format))) { - trace_pci_nvme_err_copy_invalid_format(format); - return NVME_INVALID_FIELD | NVME_DNR; - } - - if (nr > ns->id_ns.msrc + 1) { - return NVME_CMD_SIZE_LIMIT | NVME_DNR; - } - - ctx = g_new(struct nvme_copy_ctx, 1); - ctx->ranges = g_new(NvmeCopySourceRange, nr); - - status = nvme_h2c(n, (uint8_t *)ctx->ranges, - nr * sizeof(NvmeCopySourceRange), req); - if (status) { - goto out; - } - - for (i = 0; i < nr; i++) { - uint64_t slba = le64_to_cpu(ctx->ranges[i].slba); - uint32_t _nlb = le16_to_cpu(ctx->ranges[i].nlb) + 1; - - if (_nlb > le16_to_cpu(ns->id_ns.mssrl)) { - status = NVME_CMD_SIZE_LIMIT | NVME_DNR; - goto out; - } - - status = nvme_check_bounds(ns, slba, _nlb); - if (status) { - goto out; - } - - if (NVME_ERR_REC_DULBE(ns->features.err_rec)) { - status = nvme_check_dulbe(ns, slba, _nlb); - if (status) { - goto out; - } - } - - if (ns->params.zoned) { - status = nvme_check_zone_read(ns, slba, _nlb); - if (status) { - goto out; - } - } - - nlb += _nlb; - } - - if (nlb > le32_to_cpu(ns->id_ns.mcl)) { - status = NVME_CMD_SIZE_LIMIT | NVME_DNR; - goto out; - } - - bounce = bouncep = g_malloc(nvme_l2b(ns, nlb)); - if (ns->lbaf.ms) { - mbounce = mbouncep = g_malloc(nvme_m2b(ns, nlb)); - } - - block_acct_start(blk_get_stats(ns->blkconf.blk), &req->acct, 0, - BLOCK_ACCT_READ); - - ctx->bounce = bounce; - ctx->mbounce = mbounce; - ctx->nlb = nlb; - ctx->copies = 1; - - req->opaque = ctx; - - for (i = 0; i < nr; i++) { - uint64_t slba = le64_to_cpu(ctx->ranges[i].slba); - uint32_t nlb = le16_to_cpu(ctx->ranges[i].nlb) + 1; - - size_t len = nvme_l2b(ns, nlb); - int64_t offset = nvme_l2b(ns, slba); - - trace_pci_nvme_copy_source_range(slba, nlb); - - struct nvme_copy_in_ctx *in_ctx = g_new(struct nvme_copy_in_ctx, 1); - in_ctx->req = req; - - qemu_iovec_init(&in_ctx->iov, 1); - qemu_iovec_add(&in_ctx->iov, bouncep, len); - - ctx->copies++; - - blk_aio_preadv(ns->blkconf.blk, offset, &in_ctx->iov, 0, - nvme_aio_copy_in_cb, in_ctx); - - bouncep += len; - - if (ns->lbaf.ms) { - len = nvme_m2b(ns, nlb); - offset = nvme_moff(ns, slba); - - in_ctx = g_new(struct nvme_copy_in_ctx, 1); - in_ctx->req = req; - - qemu_iovec_init(&in_ctx->iov, 1); - qemu_iovec_add(&in_ctx->iov, mbouncep, len); - - ctx->copies++; - - blk_aio_preadv(ns->blkconf.blk, offset, &in_ctx->iov, 0, - nvme_aio_copy_in_cb, in_ctx); - - mbouncep += len; - } - } - - /* account for the 1-initialization */ - ctx->copies--; - - if (!ctx->copies) { - nvme_copy_in_complete(req); - } - - return NVME_NO_COMPLETE; - -out: - g_free(ctx->ranges); - g_free(ctx); - - return status; -} - -static uint16_t nvme_compare(NvmeCtrl *n, NvmeRequest *req) -{ - NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; - NvmeNamespace *ns = req->ns; - BlockBackend *blk = ns->blkconf.blk; - uint64_t slba = le64_to_cpu(rw->slba); - uint32_t nlb = le16_to_cpu(rw->nlb) + 1; - uint16_t ctrl = le16_to_cpu(rw->control); - size_t data_len = nvme_l2b(ns, nlb); - size_t len = data_len; - int64_t offset = nvme_l2b(ns, slba); - 
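    /*
     * Bounce buffers for the read-back media data and, in a second pass,
     * the metadata; the host data is only fetched and compared once the
     * read from the backing device completes.
     */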
struct nvme_compare_ctx *ctx = NULL; - uint16_t status; - - trace_pci_nvme_compare(nvme_cid(req), nvme_nsid(ns), slba, nlb); - - if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps) && (ctrl & NVME_RW_PRINFO_PRACT)) { - return NVME_INVALID_PROT_INFO | NVME_DNR; - } - - if (nvme_ns_ext(ns)) { - len += nvme_m2b(ns, nlb); - } - - status = nvme_check_mdts(n, len); - if (status) { - return status; - } - - status = nvme_check_bounds(ns, slba, nlb); - if (status) { - return status; - } - - if (NVME_ERR_REC_DULBE(ns->features.err_rec)) { - status = nvme_check_dulbe(ns, slba, nlb); - if (status) { - return status; - } - } - - status = nvme_map_dptr(n, &req->sg, len, &req->cmd); - if (status) { - return status; - } - - ctx = g_new(struct nvme_compare_ctx, 1); - ctx->data.bounce = g_malloc(data_len); - - req->opaque = ctx; - - qemu_iovec_init(&ctx->data.iov, 1); - qemu_iovec_add(&ctx->data.iov, ctx->data.bounce, data_len); - - block_acct_start(blk_get_stats(blk), &req->acct, data_len, - BLOCK_ACCT_READ); - req->aiocb = blk_aio_preadv(blk, offset, &ctx->data.iov, 0, - nvme_compare_data_cb, req); - - return NVME_NO_COMPLETE; -} - -static uint16_t nvme_flush(NvmeCtrl *n, NvmeRequest *req) -{ - uint32_t nsid = le32_to_cpu(req->cmd.nsid); - uintptr_t *num_flushes = (uintptr_t *)&req->opaque; - uint16_t status; - struct nvme_aio_flush_ctx *ctx; - NvmeNamespace *ns; - - trace_pci_nvme_flush(nvme_cid(req), nsid); - - if (nsid != NVME_NSID_BROADCAST) { - req->ns = nvme_ns(n, nsid); - if (unlikely(!req->ns)) { - return NVME_INVALID_FIELD | NVME_DNR; - } - - block_acct_start(blk_get_stats(req->ns->blkconf.blk), &req->acct, 0, - BLOCK_ACCT_FLUSH); - req->aiocb = blk_aio_flush(req->ns->blkconf.blk, nvme_misc_cb, req); - return NVME_NO_COMPLETE; - } - - /* 1-initialize; see comment in nvme_dsm */ - *num_flushes = 1; - - for (int i = 1; i <= NVME_MAX_NAMESPACES; i++) { - ns = nvme_ns(n, i); - if (!ns) { - continue; - } - - ctx = g_new(struct nvme_aio_flush_ctx, 1); - ctx->req = req; - ctx->ns = ns; - - (*num_flushes)++; - - block_acct_start(blk_get_stats(ns->blkconf.blk), &ctx->acct, 0, - BLOCK_ACCT_FLUSH); - blk_aio_flush(ns->blkconf.blk, nvme_aio_flush_cb, ctx); - } - - /* account for the 1-initialization */ - (*num_flushes)--; - - if (*num_flushes) { - status = NVME_NO_COMPLETE; - } else { - status = req->status; - } - - return status; -} - -static uint16_t nvme_read(NvmeCtrl *n, NvmeRequest *req) -{ - NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; - NvmeNamespace *ns = req->ns; - uint64_t slba = le64_to_cpu(rw->slba); - uint32_t nlb = (uint32_t)le16_to_cpu(rw->nlb) + 1; - uint16_t ctrl = le16_to_cpu(rw->control); - uint64_t data_size = nvme_l2b(ns, nlb); - uint64_t mapped_size = data_size; - uint64_t data_offset; - BlockBackend *blk = ns->blkconf.blk; - uint16_t status; - - if (nvme_ns_ext(ns)) { - mapped_size += nvme_m2b(ns, nlb); - - if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) { - bool pract = ctrl & NVME_RW_PRINFO_PRACT; - - if (pract && ns->lbaf.ms == 8) { - mapped_size = data_size; - } - } - } - - trace_pci_nvme_read(nvme_cid(req), nvme_nsid(ns), nlb, mapped_size, slba); - - status = nvme_check_mdts(n, mapped_size); - if (status) { - goto invalid; - } - - status = nvme_check_bounds(ns, slba, nlb); - if (status) { - goto invalid; - } - - if (ns->params.zoned) { - status = nvme_check_zone_read(ns, slba, nlb); - if (status) { - trace_pci_nvme_err_zone_read_not_ok(slba, nlb, status); - goto invalid; - } - } - - if (NVME_ERR_REC_DULBE(ns->features.err_rec)) { - status = nvme_check_dulbe(ns, slba, nlb); - if (status) { - goto invalid; - 
} - } - - if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) { - return nvme_dif_rw(n, req); - } - - status = nvme_map_data(n, nlb, req); - if (status) { - goto invalid; - } - - data_offset = nvme_l2b(ns, slba); - - block_acct_start(blk_get_stats(blk), &req->acct, data_size, - BLOCK_ACCT_READ); - nvme_blk_read(blk, data_offset, nvme_rw_cb, req); - return NVME_NO_COMPLETE; - -invalid: - block_acct_invalid(blk_get_stats(blk), BLOCK_ACCT_READ); - return status | NVME_DNR; -} - -static uint16_t nvme_do_write(NvmeCtrl *n, NvmeRequest *req, bool append, - bool wrz) -{ - NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; - NvmeNamespace *ns = req->ns; - uint64_t slba = le64_to_cpu(rw->slba); - uint32_t nlb = (uint32_t)le16_to_cpu(rw->nlb) + 1; - uint16_t ctrl = le16_to_cpu(rw->control); - uint64_t data_size = nvme_l2b(ns, nlb); - uint64_t mapped_size = data_size; - uint64_t data_offset; - NvmeZone *zone; - NvmeZonedResult *res = (NvmeZonedResult *)&req->cqe; - BlockBackend *blk = ns->blkconf.blk; - uint16_t status; - - if (nvme_ns_ext(ns)) { - mapped_size += nvme_m2b(ns, nlb); - - if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) { - bool pract = ctrl & NVME_RW_PRINFO_PRACT; - - if (pract && ns->lbaf.ms == 8) { - mapped_size -= nvme_m2b(ns, nlb); - } - } - } - - trace_pci_nvme_write(nvme_cid(req), nvme_io_opc_str(rw->opcode), - nvme_nsid(ns), nlb, mapped_size, slba); - - if (!wrz) { - status = nvme_check_mdts(n, mapped_size); - if (status) { - goto invalid; - } - } - - status = nvme_check_bounds(ns, slba, nlb); - if (status) { - goto invalid; - } - - if (ns->params.zoned) { - zone = nvme_get_zone_by_slba(ns, slba); - - if (append) { - bool piremap = !!(ctrl & NVME_RW_PIREMAP); - - if (unlikely(slba != zone->d.zslba)) { - trace_pci_nvme_err_append_not_at_start(slba, zone->d.zslba); - status = NVME_INVALID_FIELD; - goto invalid; - } - - if (n->params.zasl && - data_size > (uint64_t)n->page_size << n->params.zasl) { - trace_pci_nvme_err_zasl(data_size); - return NVME_INVALID_FIELD | NVME_DNR; - } - - slba = zone->w_ptr; - rw->slba = cpu_to_le64(slba); - res->slba = cpu_to_le64(slba); - - switch (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) { - case NVME_ID_NS_DPS_TYPE_1: - if (!piremap) { - return NVME_INVALID_PROT_INFO | NVME_DNR; - } - - /* fallthrough */ - - case NVME_ID_NS_DPS_TYPE_2: - if (piremap) { - uint32_t reftag = le32_to_cpu(rw->reftag); - rw->reftag = cpu_to_le32(reftag + (slba - zone->d.zslba)); - } - - break; - - case NVME_ID_NS_DPS_TYPE_3: - if (piremap) { - return NVME_INVALID_PROT_INFO | NVME_DNR; - } - - break; - } - } - - status = nvme_check_zone_write(ns, zone, slba, nlb); - if (status) { - goto invalid; - } - - status = nvme_zrm_auto(ns, zone); - if (status) { - goto invalid; - } - - zone->w_ptr += nlb; - } - - data_offset = nvme_l2b(ns, slba); - - if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) { - return nvme_dif_rw(n, req); - } - - if (!wrz) { - status = nvme_map_data(n, nlb, req); - if (status) { - goto invalid; - } - - block_acct_start(blk_get_stats(blk), &req->acct, data_size, - BLOCK_ACCT_WRITE); - nvme_blk_write(blk, data_offset, nvme_rw_cb, req); - } else { - req->aiocb = blk_aio_pwrite_zeroes(blk, data_offset, data_size, - BDRV_REQ_MAY_UNMAP, nvme_rw_cb, - req); - } - - return NVME_NO_COMPLETE; - -invalid: - block_acct_invalid(blk_get_stats(blk), BLOCK_ACCT_WRITE); - return status | NVME_DNR; -} - -static inline uint16_t nvme_write(NvmeCtrl *n, NvmeRequest *req) -{ - return nvme_do_write(n, req, false, false); -} - -static inline uint16_t nvme_write_zeroes(NvmeCtrl *n, NvmeRequest *req) -{ - return nvme_do_write(n, 
req, false, true); -} - -static inline uint16_t nvme_zone_append(NvmeCtrl *n, NvmeRequest *req) -{ - return nvme_do_write(n, req, true, false); -} - -static uint16_t nvme_get_mgmt_zone_slba_idx(NvmeNamespace *ns, NvmeCmd *c, - uint64_t *slba, uint32_t *zone_idx) -{ - uint32_t dw10 = le32_to_cpu(c->cdw10); - uint32_t dw11 = le32_to_cpu(c->cdw11); - - if (!ns->params.zoned) { - trace_pci_nvme_err_invalid_opc(c->opcode); - return NVME_INVALID_OPCODE | NVME_DNR; - } - - *slba = ((uint64_t)dw11) << 32 | dw10; - if (unlikely(*slba >= ns->id_ns.nsze)) { - trace_pci_nvme_err_invalid_lba_range(*slba, 0, ns->id_ns.nsze); - *slba = 0; - return NVME_LBA_RANGE | NVME_DNR; - } - - *zone_idx = nvme_zone_idx(ns, *slba); - assert(*zone_idx < ns->num_zones); - - return NVME_SUCCESS; -} - -typedef uint16_t (*op_handler_t)(NvmeNamespace *, NvmeZone *, NvmeZoneState, - NvmeRequest *); - -enum NvmeZoneProcessingMask { - NVME_PROC_CURRENT_ZONE = 0, - NVME_PROC_OPENED_ZONES = 1 << 0, - NVME_PROC_CLOSED_ZONES = 1 << 1, - NVME_PROC_READ_ONLY_ZONES = 1 << 2, - NVME_PROC_FULL_ZONES = 1 << 3, -}; - -static uint16_t nvme_open_zone(NvmeNamespace *ns, NvmeZone *zone, - NvmeZoneState state, NvmeRequest *req) -{ - return nvme_zrm_open(ns, zone); -} - -static uint16_t nvme_close_zone(NvmeNamespace *ns, NvmeZone *zone, - NvmeZoneState state, NvmeRequest *req) -{ - return nvme_zrm_close(ns, zone); -} - -static uint16_t nvme_finish_zone(NvmeNamespace *ns, NvmeZone *zone, - NvmeZoneState state, NvmeRequest *req) -{ - return nvme_zrm_finish(ns, zone); -} - -static uint16_t nvme_reset_zone(NvmeNamespace *ns, NvmeZone *zone, - NvmeZoneState state, NvmeRequest *req) -{ - uintptr_t *resets = (uintptr_t *)&req->opaque; - struct nvme_zone_reset_ctx *ctx; - - switch (state) { - case NVME_ZONE_STATE_EMPTY: - return NVME_SUCCESS; - case NVME_ZONE_STATE_EXPLICITLY_OPEN: - case NVME_ZONE_STATE_IMPLICITLY_OPEN: - case NVME_ZONE_STATE_CLOSED: - case NVME_ZONE_STATE_FULL: - break; - default: - return NVME_ZONE_INVAL_TRANSITION; - } - - /* - * The zone reset aio callback needs to know the zone that is being reset - * in order to transition the zone on completion. 
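 *
 * (The chain is: blk_aio_pwrite_zeroes() over the zone's data, then
 * nvme_aio_zone_reset_cb(), which zeroes any per-block metadata, then
 * nvme_aio_zone_reset_complete_cb(), which moves the zone to Empty and
 * drops the outstanding reset count.)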
- */ - ctx = g_new(struct nvme_zone_reset_ctx, 1); - ctx->req = req; - ctx->zone = zone; - - (*resets)++; - - blk_aio_pwrite_zeroes(ns->blkconf.blk, nvme_l2b(ns, zone->d.zslba), - nvme_l2b(ns, ns->zone_size), BDRV_REQ_MAY_UNMAP, - nvme_aio_zone_reset_cb, ctx); - - return NVME_NO_COMPLETE; -} - -static uint16_t nvme_offline_zone(NvmeNamespace *ns, NvmeZone *zone, - NvmeZoneState state, NvmeRequest *req) -{ - switch (state) { - case NVME_ZONE_STATE_READ_ONLY: - nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_OFFLINE); - /* fall through */ - case NVME_ZONE_STATE_OFFLINE: - return NVME_SUCCESS; - default: - return NVME_ZONE_INVAL_TRANSITION; - } -} - -static uint16_t nvme_set_zd_ext(NvmeNamespace *ns, NvmeZone *zone) -{ - uint16_t status; - uint8_t state = nvme_get_zone_state(zone); - - if (state == NVME_ZONE_STATE_EMPTY) { - status = nvme_aor_check(ns, 1, 0); - if (status) { - return status; - } - nvme_aor_inc_active(ns); - zone->d.za |= NVME_ZA_ZD_EXT_VALID; - nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_CLOSED); - return NVME_SUCCESS; - } - - return NVME_ZONE_INVAL_TRANSITION; -} - -static uint16_t nvme_bulk_proc_zone(NvmeNamespace *ns, NvmeZone *zone, - enum NvmeZoneProcessingMask proc_mask, - op_handler_t op_hndlr, NvmeRequest *req) -{ - uint16_t status = NVME_SUCCESS; - NvmeZoneState zs = nvme_get_zone_state(zone); - bool proc_zone; - - switch (zs) { - case NVME_ZONE_STATE_IMPLICITLY_OPEN: - case NVME_ZONE_STATE_EXPLICITLY_OPEN: - proc_zone = proc_mask & NVME_PROC_OPENED_ZONES; - break; - case NVME_ZONE_STATE_CLOSED: - proc_zone = proc_mask & NVME_PROC_CLOSED_ZONES; - break; - case NVME_ZONE_STATE_READ_ONLY: - proc_zone = proc_mask & NVME_PROC_READ_ONLY_ZONES; - break; - case NVME_ZONE_STATE_FULL: - proc_zone = proc_mask & NVME_PROC_FULL_ZONES; - break; - default: - proc_zone = false; - } - - if (proc_zone) { - status = op_hndlr(ns, zone, zs, req); - } - - return status; -} - -static uint16_t nvme_do_zone_op(NvmeNamespace *ns, NvmeZone *zone, - enum NvmeZoneProcessingMask proc_mask, - op_handler_t op_hndlr, NvmeRequest *req) -{ - NvmeZone *next; - uint16_t status = NVME_SUCCESS; - int i; - - if (!proc_mask) { - status = op_hndlr(ns, zone, nvme_get_zone_state(zone), req); - } else { - if (proc_mask & NVME_PROC_CLOSED_ZONES) { - QTAILQ_FOREACH_SAFE(zone, &ns->closed_zones, entry, next) { - status = nvme_bulk_proc_zone(ns, zone, proc_mask, op_hndlr, - req); - if (status && status != NVME_NO_COMPLETE) { - goto out; - } - } - } - if (proc_mask & NVME_PROC_OPENED_ZONES) { - QTAILQ_FOREACH_SAFE(zone, &ns->imp_open_zones, entry, next) { - status = nvme_bulk_proc_zone(ns, zone, proc_mask, op_hndlr, - req); - if (status && status != NVME_NO_COMPLETE) { - goto out; - } - } - - QTAILQ_FOREACH_SAFE(zone, &ns->exp_open_zones, entry, next) { - status = nvme_bulk_proc_zone(ns, zone, proc_mask, op_hndlr, - req); - if (status && status != NVME_NO_COMPLETE) { - goto out; - } - } - } - if (proc_mask & NVME_PROC_FULL_ZONES) { - QTAILQ_FOREACH_SAFE(zone, &ns->full_zones, entry, next) { - status = nvme_bulk_proc_zone(ns, zone, proc_mask, op_hndlr, - req); - if (status && status != NVME_NO_COMPLETE) { - goto out; - } - } - } - - if (proc_mask & NVME_PROC_READ_ONLY_ZONES) { - for (i = 0; i < ns->num_zones; i++, zone++) { - status = nvme_bulk_proc_zone(ns, zone, proc_mask, op_hndlr, - req); - if (status && status != NVME_NO_COMPLETE) { - goto out; - } - } - } - } - -out: - return status; -} - -static uint16_t nvme_zone_mgmt_send(NvmeCtrl *n, NvmeRequest *req) -{ - NvmeCmd *cmd = (NvmeCmd *)&req->cmd; - 
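    /*
     * CDW13 carries the Zone Send Action in bits 7:0 and the Select All
     * flag in bit 8; when Select All is set, the SLBA in CDW10/11 is
     * ignored and the action is applied to every zone in an eligible
     * state.
     */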
NvmeNamespace *ns = req->ns; - NvmeZone *zone; - uintptr_t *resets; - uint8_t *zd_ext; - uint32_t dw13 = le32_to_cpu(cmd->cdw13); - uint64_t slba = 0; - uint32_t zone_idx = 0; - uint16_t status; - uint8_t action; - bool all; - enum NvmeZoneProcessingMask proc_mask = NVME_PROC_CURRENT_ZONE; - - action = dw13 & 0xff; - all = dw13 & 0x100; - - req->status = NVME_SUCCESS; - - if (!all) { - status = nvme_get_mgmt_zone_slba_idx(ns, cmd, &slba, &zone_idx); - if (status) { - return status; - } - } - - zone = &ns->zone_array[zone_idx]; - if (slba != zone->d.zslba) { - trace_pci_nvme_err_unaligned_zone_cmd(action, slba, zone->d.zslba); - return NVME_INVALID_FIELD | NVME_DNR; - } - - switch (action) { - - case NVME_ZONE_ACTION_OPEN: - if (all) { - proc_mask = NVME_PROC_CLOSED_ZONES; - } - trace_pci_nvme_open_zone(slba, zone_idx, all); - status = nvme_do_zone_op(ns, zone, proc_mask, nvme_open_zone, req); - break; - - case NVME_ZONE_ACTION_CLOSE: - if (all) { - proc_mask = NVME_PROC_OPENED_ZONES; - } - trace_pci_nvme_close_zone(slba, zone_idx, all); - status = nvme_do_zone_op(ns, zone, proc_mask, nvme_close_zone, req); - break; - - case NVME_ZONE_ACTION_FINISH: - if (all) { - proc_mask = NVME_PROC_OPENED_ZONES | NVME_PROC_CLOSED_ZONES; - } - trace_pci_nvme_finish_zone(slba, zone_idx, all); - status = nvme_do_zone_op(ns, zone, proc_mask, nvme_finish_zone, req); - break; - - case NVME_ZONE_ACTION_RESET: - resets = (uintptr_t *)&req->opaque; - - if (all) { - proc_mask = NVME_PROC_OPENED_ZONES | NVME_PROC_CLOSED_ZONES | - NVME_PROC_FULL_ZONES; - } - trace_pci_nvme_reset_zone(slba, zone_idx, all); - - *resets = 1; - - status = nvme_do_zone_op(ns, zone, proc_mask, nvme_reset_zone, req); - - (*resets)--; - - return *resets ? NVME_NO_COMPLETE : req->status; - - case NVME_ZONE_ACTION_OFFLINE: - if (all) { - proc_mask = NVME_PROC_READ_ONLY_ZONES; - } - trace_pci_nvme_offline_zone(slba, zone_idx, all); - status = nvme_do_zone_op(ns, zone, proc_mask, nvme_offline_zone, req); - break; - - case NVME_ZONE_ACTION_SET_ZD_EXT: - trace_pci_nvme_set_descriptor_extension(slba, zone_idx); - if (all || !ns->params.zd_extension_size) { - return NVME_INVALID_FIELD | NVME_DNR; - } - zd_ext = nvme_get_zd_extension(ns, zone_idx); - status = nvme_h2c(n, zd_ext, ns->params.zd_extension_size, req); - if (status) { - trace_pci_nvme_err_zd_extension_map_error(zone_idx); - return status; - } - - status = nvme_set_zd_ext(ns, zone); - if (status == NVME_SUCCESS) { - trace_pci_nvme_zd_extension_set(zone_idx); - return status; - } - break; - - default: - trace_pci_nvme_err_invalid_mgmt_action(action); - status = NVME_INVALID_FIELD; - } - - if (status == NVME_ZONE_INVAL_TRANSITION) { - trace_pci_nvme_err_invalid_zone_state_transition(action, slba, - zone->d.za); - } - if (status) { - status |= NVME_DNR; - } - - return status; -} - -static bool nvme_zone_matches_filter(uint32_t zafs, NvmeZone *zl) -{ - NvmeZoneState zs = nvme_get_zone_state(zl); - - switch (zafs) { - case NVME_ZONE_REPORT_ALL: - return true; - case NVME_ZONE_REPORT_EMPTY: - return zs == NVME_ZONE_STATE_EMPTY; - case NVME_ZONE_REPORT_IMPLICITLY_OPEN: - return zs == NVME_ZONE_STATE_IMPLICITLY_OPEN; - case NVME_ZONE_REPORT_EXPLICITLY_OPEN: - return zs == NVME_ZONE_STATE_EXPLICITLY_OPEN; - case NVME_ZONE_REPORT_CLOSED: - return zs == NVME_ZONE_STATE_CLOSED; - case NVME_ZONE_REPORT_FULL: - return zs == NVME_ZONE_STATE_FULL; - case NVME_ZONE_REPORT_READ_ONLY: - return zs == NVME_ZONE_STATE_READ_ONLY; - case NVME_ZONE_REPORT_OFFLINE: - return zs == NVME_ZONE_STATE_OFFLINE; - 
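    /* an unrecognized filter value matches no zones */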
default: - return false; - } -} - -static uint16_t nvme_zone_mgmt_recv(NvmeCtrl *n, NvmeRequest *req) -{ - NvmeCmd *cmd = (NvmeCmd *)&req->cmd; - NvmeNamespace *ns = req->ns; - /* cdw12 is zero-based number of dwords to return. Convert to bytes */ - uint32_t data_size = (le32_to_cpu(cmd->cdw12) + 1) << 2; - uint32_t dw13 = le32_to_cpu(cmd->cdw13); - uint32_t zone_idx, zra, zrasf, partial; - uint64_t max_zones, nr_zones = 0; - uint16_t status; - uint64_t slba; - NvmeZoneDescr *z; - NvmeZone *zone; - NvmeZoneReportHeader *header; - void *buf, *buf_p; - size_t zone_entry_sz; - int i; - - req->status = NVME_SUCCESS; - - status = nvme_get_mgmt_zone_slba_idx(ns, cmd, &slba, &zone_idx); - if (status) { - return status; - } - - zra = dw13 & 0xff; - if (zra != NVME_ZONE_REPORT && zra != NVME_ZONE_REPORT_EXTENDED) { - return NVME_INVALID_FIELD | NVME_DNR; - } - if (zra == NVME_ZONE_REPORT_EXTENDED && !ns->params.zd_extension_size) { - return NVME_INVALID_FIELD | NVME_DNR; - } - - zrasf = (dw13 >> 8) & 0xff; - if (zrasf > NVME_ZONE_REPORT_OFFLINE) { - return NVME_INVALID_FIELD | NVME_DNR; - } - - if (data_size < sizeof(NvmeZoneReportHeader)) { - return NVME_INVALID_FIELD | NVME_DNR; - } - - status = nvme_check_mdts(n, data_size); - if (status) { - return status; - } - - partial = (dw13 >> 16) & 0x01; - - zone_entry_sz = sizeof(NvmeZoneDescr); - if (zra == NVME_ZONE_REPORT_EXTENDED) { - zone_entry_sz += ns->params.zd_extension_size; - } - - max_zones = (data_size - sizeof(NvmeZoneReportHeader)) / zone_entry_sz; - buf = g_malloc0(data_size); - - zone = &ns->zone_array[zone_idx]; - for (i = zone_idx; i < ns->num_zones; i++) { - if (partial && nr_zones >= max_zones) { - break; - } - if (nvme_zone_matches_filter(zrasf, zone++)) { - nr_zones++; - } - } - header = (NvmeZoneReportHeader *)buf; - header->nr_zones = cpu_to_le64(nr_zones); - - buf_p = buf + sizeof(NvmeZoneReportHeader); - for (; zone_idx < ns->num_zones && max_zones > 0; zone_idx++) { - zone = &ns->zone_array[zone_idx]; - if (nvme_zone_matches_filter(zrasf, zone)) { - z = (NvmeZoneDescr *)buf_p; - buf_p += sizeof(NvmeZoneDescr); - - z->zt = zone->d.zt; - z->zs = zone->d.zs; - z->zcap = cpu_to_le64(zone->d.zcap); - z->zslba = cpu_to_le64(zone->d.zslba); - z->za = zone->d.za; - - if (nvme_wp_is_valid(zone)) { - z->wp = cpu_to_le64(zone->d.wp); - } else { - z->wp = cpu_to_le64(~0ULL); - } - - if (zra == NVME_ZONE_REPORT_EXTENDED) { - if (zone->d.za & NVME_ZA_ZD_EXT_VALID) { - memcpy(buf_p, nvme_get_zd_extension(ns, zone_idx), - ns->params.zd_extension_size); - } - buf_p += ns->params.zd_extension_size; - } - - max_zones--; - } - } - - status = nvme_c2h(n, (uint8_t *)buf, data_size, req); - - g_free(buf); - - return status; -} - -static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeRequest *req) -{ - NvmeNamespace *ns; - uint32_t nsid = le32_to_cpu(req->cmd.nsid); - - trace_pci_nvme_io_cmd(nvme_cid(req), nsid, nvme_sqid(req), - req->cmd.opcode, nvme_io_opc_str(req->cmd.opcode)); - - if (!nvme_nsid_valid(n, nsid)) { - return NVME_INVALID_NSID | NVME_DNR; - } - - /* - * In the base NVM command set, Flush may apply to all namespaces - * (indicated by NSID being set to FFFFFFFFh). But if that feature is used - * along with TP 4056 (Namespace Types), it may be pretty screwed up. - * - * If NSID is indeed set to FFFFFFFFh, we simply cannot associate the - * opcode with a specific command since we cannot determine a unique I/O - * command set. 
Opcode 0h could have any other meaning than something - * equivalent to flushing and say it DOES have completely different - * semantics in some other command set - does an NSID of FFFFFFFFh then - * mean "for all namespaces, apply whatever command set specific command - * that uses the 0h opcode?" Or does it mean "for all namespaces, apply - * whatever command that uses the 0h opcode if, and only if, it allows NSID - * to be FFFFFFFFh"? - * - * Anyway (and luckily), for now, we do not care about this since the - * device only supports namespace types that includes the NVM Flush command - * (NVM and Zoned), so always do an NVM Flush. - */ - if (req->cmd.opcode == NVME_CMD_FLUSH) { - return nvme_flush(n, req); - } - - ns = nvme_ns(n, nsid); - if (unlikely(!ns)) { - return NVME_INVALID_FIELD | NVME_DNR; - } - - if (!(ns->iocs[req->cmd.opcode] & NVME_CMD_EFF_CSUPP)) { - trace_pci_nvme_err_invalid_opc(req->cmd.opcode); - return NVME_INVALID_OPCODE | NVME_DNR; - } - - if (ns->status) { - return ns->status; - } - - req->ns = ns; - - switch (req->cmd.opcode) { - case NVME_CMD_WRITE_ZEROES: - return nvme_write_zeroes(n, req); - case NVME_CMD_ZONE_APPEND: - return nvme_zone_append(n, req); - case NVME_CMD_WRITE: - return nvme_write(n, req); - case NVME_CMD_READ: - return nvme_read(n, req); - case NVME_CMD_COMPARE: - return nvme_compare(n, req); - case NVME_CMD_DSM: - return nvme_dsm(n, req); - case NVME_CMD_VERIFY: - return nvme_verify(n, req); - case NVME_CMD_COPY: - return nvme_copy(n, req); - case NVME_CMD_ZONE_MGMT_SEND: - return nvme_zone_mgmt_send(n, req); - case NVME_CMD_ZONE_MGMT_RECV: - return nvme_zone_mgmt_recv(n, req); - default: - assert(false); - } - - return NVME_INVALID_OPCODE | NVME_DNR; -} - -static void nvme_free_sq(NvmeSQueue *sq, NvmeCtrl *n) -{ - n->sq[sq->sqid] = NULL; - timer_free(sq->timer); - g_free(sq->io_req); - if (sq->sqid) { - g_free(sq); - } -} - -static uint16_t nvme_del_sq(NvmeCtrl *n, NvmeRequest *req) -{ - NvmeDeleteQ *c = (NvmeDeleteQ *)&req->cmd; - NvmeRequest *r, *next; - NvmeSQueue *sq; - NvmeCQueue *cq; - uint16_t qid = le16_to_cpu(c->qid); - uint32_t nsid; - - if (unlikely(!qid || nvme_check_sqid(n, qid))) { - trace_pci_nvme_err_invalid_del_sq(qid); - return NVME_INVALID_QID | NVME_DNR; - } - - trace_pci_nvme_del_sq(qid); - - sq = n->sq[qid]; - while (!QTAILQ_EMPTY(&sq->out_req_list)) { - r = QTAILQ_FIRST(&sq->out_req_list); - if (r->aiocb) { - blk_aio_cancel(r->aiocb); - } - } - - /* - * Drain all namespaces if there are still outstanding requests that we - * could not cancel explicitly. 
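Editor's note: the per-request blk_aio_cancel() loop above may leave requests that have no cancellable aiocb; the namespace drain below is the fallback that guarantees the assert on out_req_list further down holds before the queue memory is freed.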
- */ - if (!QTAILQ_EMPTY(&sq->out_req_list)) { - for (nsid = 1; nsid <= NVME_MAX_NAMESPACES; nsid++) { - NvmeNamespace *ns = nvme_ns(n, nsid); - if (ns) { - nvme_ns_drain(ns); - } - } - } - - assert(QTAILQ_EMPTY(&sq->out_req_list)); - - if (!nvme_check_cqid(n, sq->cqid)) { - cq = n->cq[sq->cqid]; - QTAILQ_REMOVE(&cq->sq_list, sq, entry); - - nvme_post_cqes(cq); - QTAILQ_FOREACH_SAFE(r, &cq->req_list, entry, next) { - if (r->sq == sq) { - QTAILQ_REMOVE(&cq->req_list, r, entry); - QTAILQ_INSERT_TAIL(&sq->req_list, r, entry); - } - } - } - - nvme_free_sq(sq, n); - return NVME_SUCCESS; -} - -static void nvme_init_sq(NvmeSQueue *sq, NvmeCtrl *n, uint64_t dma_addr, - uint16_t sqid, uint16_t cqid, uint16_t size) -{ - int i; - NvmeCQueue *cq; - - sq->ctrl = n; - sq->dma_addr = dma_addr; - sq->sqid = sqid; - sq->size = size; - sq->cqid = cqid; - sq->head = sq->tail = 0; - sq->io_req = g_new0(NvmeRequest, sq->size); - - QTAILQ_INIT(&sq->req_list); - QTAILQ_INIT(&sq->out_req_list); - for (i = 0; i < sq->size; i++) { - sq->io_req[i].sq = sq; - QTAILQ_INSERT_TAIL(&(sq->req_list), &sq->io_req[i], entry); - } - sq->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, nvme_process_sq, sq); - - assert(n->cq[cqid]); - cq = n->cq[cqid]; - QTAILQ_INSERT_TAIL(&(cq->sq_list), sq, entry); - n->sq[sqid] = sq; -} - -static uint16_t nvme_create_sq(NvmeCtrl *n, NvmeRequest *req) -{ - NvmeSQueue *sq; - NvmeCreateSq *c = (NvmeCreateSq *)&req->cmd; - - uint16_t cqid = le16_to_cpu(c->cqid); - uint16_t sqid = le16_to_cpu(c->sqid); - uint16_t qsize = le16_to_cpu(c->qsize); - uint16_t qflags = le16_to_cpu(c->sq_flags); - uint64_t prp1 = le64_to_cpu(c->prp1); - - trace_pci_nvme_create_sq(prp1, sqid, cqid, qsize, qflags); - - if (unlikely(!cqid || nvme_check_cqid(n, cqid))) { - trace_pci_nvme_err_invalid_create_sq_cqid(cqid); - return NVME_INVALID_CQID | NVME_DNR; - } - if (unlikely(!sqid || sqid > n->params.max_ioqpairs || - n->sq[sqid] != NULL)) { - trace_pci_nvme_err_invalid_create_sq_sqid(sqid); - return NVME_INVALID_QID | NVME_DNR; - } - if (unlikely(!qsize || qsize > NVME_CAP_MQES(n->bar.cap))) { - trace_pci_nvme_err_invalid_create_sq_size(qsize); - return NVME_MAX_QSIZE_EXCEEDED | NVME_DNR; - } - if (unlikely(prp1 & (n->page_size - 1))) { - trace_pci_nvme_err_invalid_create_sq_addr(prp1); - return NVME_INVALID_PRP_OFFSET | NVME_DNR; - } - if (unlikely(!(NVME_SQ_FLAGS_PC(qflags)))) { - trace_pci_nvme_err_invalid_create_sq_qflags(NVME_SQ_FLAGS_PC(qflags)); - return NVME_INVALID_FIELD | NVME_DNR; - } - sq = g_malloc0(sizeof(*sq)); - nvme_init_sq(sq, n, prp1, sqid, cqid, qsize + 1); - return NVME_SUCCESS; -} - -struct nvme_stats { - uint64_t units_read; - uint64_t units_written; - uint64_t read_commands; - uint64_t write_commands; -}; - -static void nvme_set_blk_stats(NvmeNamespace *ns, struct nvme_stats *stats) -{ - BlockAcctStats *s = blk_get_stats(ns->blkconf.blk); - - stats->units_read += s->nr_bytes[BLOCK_ACCT_READ] >> BDRV_SECTOR_BITS; - stats->units_written += s->nr_bytes[BLOCK_ACCT_WRITE] >> BDRV_SECTOR_BITS; - stats->read_commands += s->nr_ops[BLOCK_ACCT_READ]; - stats->write_commands += s->nr_ops[BLOCK_ACCT_WRITE]; -} - -static uint16_t nvme_smart_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len, - uint64_t off, NvmeRequest *req) -{ - uint32_t nsid = le32_to_cpu(req->cmd.nsid); - struct nvme_stats stats = { 0 }; - NvmeSmartLog smart = { 0 }; - uint32_t trans_len; - NvmeNamespace *ns; - time_t current_ms; - - if (off >= sizeof(smart)) { - return NVME_INVALID_FIELD | NVME_DNR; - } - - if (nsid != 0xffffffff) { - ns = 
nvme_ns(n, nsid); - if (!ns) { - return NVME_INVALID_NSID | NVME_DNR; - } - nvme_set_blk_stats(ns, &stats); - } else { - int i; - - for (i = 1; i <= NVME_MAX_NAMESPACES; i++) { - ns = nvme_ns(n, i); - if (!ns) { - continue; - } - nvme_set_blk_stats(ns, &stats); - } - } - - trans_len = MIN(sizeof(smart) - off, buf_len); - smart.critical_warning = n->smart_critical_warning; - - smart.data_units_read[0] = cpu_to_le64(DIV_ROUND_UP(stats.units_read, - 1000)); - smart.data_units_written[0] = cpu_to_le64(DIV_ROUND_UP(stats.units_written, - 1000)); - smart.host_read_commands[0] = cpu_to_le64(stats.read_commands); - smart.host_write_commands[0] = cpu_to_le64(stats.write_commands); - - smart.temperature = cpu_to_le16(n->temperature); - - if ((n->temperature >= n->features.temp_thresh_hi) || - (n->temperature <= n->features.temp_thresh_low)) { - smart.critical_warning |= NVME_SMART_TEMPERATURE; - } - - current_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL); - smart.power_on_hours[0] = - cpu_to_le64((((current_ms - n->starttime_ms) / 1000) / 60) / 60); - - if (!rae) { - nvme_clear_events(n, NVME_AER_TYPE_SMART); - } - - return nvme_c2h(n, (uint8_t *) &smart + off, trans_len, req); -} - -static uint16_t nvme_fw_log_info(NvmeCtrl *n, uint32_t buf_len, uint64_t off, - NvmeRequest *req) -{ - uint32_t trans_len; - NvmeFwSlotInfoLog fw_log = { - .afi = 0x1, - }; - - if (off >= sizeof(fw_log)) { - return NVME_INVALID_FIELD | NVME_DNR; - } - - strpadcpy((char *)&fw_log.frs1, sizeof(fw_log.frs1), "1.0", ' '); - trans_len = MIN(sizeof(fw_log) - off, buf_len); - - return nvme_c2h(n, (uint8_t *) &fw_log + off, trans_len, req); -} - -static uint16_t nvme_error_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len, - uint64_t off, NvmeRequest *req) -{ - uint32_t trans_len; - NvmeErrorLog errlog; - - if (off >= sizeof(errlog)) { - return NVME_INVALID_FIELD | NVME_DNR; - } - - if (!rae) { - nvme_clear_events(n, NVME_AER_TYPE_ERROR); - } - - memset(&errlog, 0x0, sizeof(errlog)); - trans_len = MIN(sizeof(errlog) - off, buf_len); - - return nvme_c2h(n, (uint8_t *)&errlog, trans_len, req); -} - -static uint16_t nvme_changed_nslist(NvmeCtrl *n, uint8_t rae, uint32_t buf_len, - uint64_t off, NvmeRequest *req) -{ - uint32_t nslist[1024]; - uint32_t trans_len; - int i = 0; - uint32_t nsid; - - memset(nslist, 0x0, sizeof(nslist)); - trans_len = MIN(sizeof(nslist) - off, buf_len); - - while ((nsid = find_first_bit(n->changed_nsids, NVME_CHANGED_NSID_SIZE)) != - NVME_CHANGED_NSID_SIZE) { - /* - * If there are more than 1024 namespaces, the first entry in the log - * page should be set to FFFFFFFFh and the others to 0, as the spec - * requires. - */ - if (i == ARRAY_SIZE(nslist)) { - memset(nslist, 0x0, sizeof(nslist)); - nslist[0] = 0xffffffff; - break; - } - - nslist[i++] = nsid; - clear_bit(nsid, n->changed_nsids); - } - - /* - * Remove all the remaining list entries in case we return directly due to - * more than 1024 namespaces. 
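Editor's note: an illustrative sketch of the log page payload in the overflow case handled above (values are little-endian on the wire):

    nslist[0] = 0xffffffff;   /* sentinel: more than 1024 changed NSIDs */
    /* nslist[1] through nslist[1023] are zero */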
- */ - if (nslist[0] == 0xffffffff) { - bitmap_zero(n->changed_nsids, NVME_CHANGED_NSID_SIZE); - } - - if (!rae) { - nvme_clear_events(n, NVME_AER_TYPE_NOTICE); - } - - return nvme_c2h(n, ((uint8_t *)nslist) + off, trans_len, req); -} - -static uint16_t nvme_cmd_effects(NvmeCtrl *n, uint8_t csi, uint32_t buf_len, - uint64_t off, NvmeRequest *req) -{ - NvmeEffectsLog log = {}; - const uint32_t *src_iocs = NULL; - uint32_t trans_len; - - if (off >= sizeof(log)) { - trace_pci_nvme_err_invalid_log_page_offset(off, sizeof(log)); - return NVME_INVALID_FIELD | NVME_DNR; - } - - switch (NVME_CC_CSS(n->bar.cc)) { - case NVME_CC_CSS_NVM: - src_iocs = nvme_cse_iocs_nvm; - /* fall through */ - case NVME_CC_CSS_ADMIN_ONLY: - break; - case NVME_CC_CSS_CSI: - switch (csi) { - case NVME_CSI_NVM: - src_iocs = nvme_cse_iocs_nvm; - break; - case NVME_CSI_ZONED: - src_iocs = nvme_cse_iocs_zoned; - break; - } - } - - memcpy(log.acs, nvme_cse_acs, sizeof(nvme_cse_acs)); - - if (src_iocs) { - memcpy(log.iocs, src_iocs, sizeof(log.iocs)); - } - - trans_len = MIN(sizeof(log) - off, buf_len); - - return nvme_c2h(n, ((uint8_t *)&log) + off, trans_len, req); -} - -static uint16_t nvme_get_log(NvmeCtrl *n, NvmeRequest *req) -{ - NvmeCmd *cmd = &req->cmd; - - uint32_t dw10 = le32_to_cpu(cmd->cdw10); - uint32_t dw11 = le32_to_cpu(cmd->cdw11); - uint32_t dw12 = le32_to_cpu(cmd->cdw12); - uint32_t dw13 = le32_to_cpu(cmd->cdw13); - uint8_t lid = dw10 & 0xff; - uint8_t lsp = (dw10 >> 8) & 0xf; - uint8_t rae = (dw10 >> 15) & 0x1; - uint8_t csi = le32_to_cpu(cmd->cdw14) >> 24; - uint32_t numdl, numdu; - uint64_t off, lpol, lpou; - size_t len; - uint16_t status; - - numdl = (dw10 >> 16); - numdu = (dw11 & 0xffff); - lpol = dw12; - lpou = dw13; - - len = (((numdu << 16) | numdl) + 1) << 2; - off = (lpou << 32ULL) | lpol; - - if (off & 0x3) { - return NVME_INVALID_FIELD | NVME_DNR; - } - - trace_pci_nvme_get_log(nvme_cid(req), lid, lsp, rae, len, off); - - status = nvme_check_mdts(n, len); - if (status) { - return status; - } - - switch (lid) { - case NVME_LOG_ERROR_INFO: - return nvme_error_info(n, rae, len, off, req); - case NVME_LOG_SMART_INFO: - return nvme_smart_info(n, rae, len, off, req); - case NVME_LOG_FW_SLOT_INFO: - return nvme_fw_log_info(n, len, off, req); - case NVME_LOG_CHANGED_NSLIST: - return nvme_changed_nslist(n, rae, len, off, req); - case NVME_LOG_CMD_EFFECTS: - return nvme_cmd_effects(n, csi, len, off, req); - default: - trace_pci_nvme_err_invalid_log_page(nvme_cid(req), lid); - return NVME_INVALID_FIELD | NVME_DNR; - } -} - -static void nvme_free_cq(NvmeCQueue *cq, NvmeCtrl *n) -{ - n->cq[cq->cqid] = NULL; - timer_free(cq->timer); - if (msix_enabled(&n->parent_obj)) { - msix_vector_unuse(&n->parent_obj, cq->vector); - } - if (cq->cqid) { - g_free(cq); - } -} - -static uint16_t nvme_del_cq(NvmeCtrl *n, NvmeRequest *req) -{ - NvmeDeleteQ *c = (NvmeDeleteQ *)&req->cmd; - NvmeCQueue *cq; - uint16_t qid = le16_to_cpu(c->qid); - - if (unlikely(!qid || nvme_check_cqid(n, qid))) { - trace_pci_nvme_err_invalid_del_cq_cqid(qid); - return NVME_INVALID_CQID | NVME_DNR; - } - - cq = n->cq[qid]; - if (unlikely(!QTAILQ_EMPTY(&cq->sq_list))) { - trace_pci_nvme_err_invalid_del_cq_notempty(qid); - return NVME_INVALID_QUEUE_DEL; - } - nvme_irq_deassert(n, cq); - trace_pci_nvme_del_cq(qid); - nvme_free_cq(cq, n); - return NVME_SUCCESS; -} - -static void nvme_init_cq(NvmeCQueue *cq, NvmeCtrl *n, uint64_t dma_addr, - uint16_t cqid, uint16_t vector, uint16_t size, - uint16_t irq_enabled) -{ - int ret; - - if 
(msix_enabled(&n->parent_obj)) { - ret = msix_vector_use(&n->parent_obj, vector); - assert(ret == 0); - } - cq->ctrl = n; - cq->cqid = cqid; - cq->size = size; - cq->dma_addr = dma_addr; - cq->phase = 1; - cq->irq_enabled = irq_enabled; - cq->vector = vector; - cq->head = cq->tail = 0; - QTAILQ_INIT(&cq->req_list); - QTAILQ_INIT(&cq->sq_list); - n->cq[cqid] = cq; - cq->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, nvme_post_cqes, cq); -} - -static uint16_t nvme_create_cq(NvmeCtrl *n, NvmeRequest *req) -{ - NvmeCQueue *cq; - NvmeCreateCq *c = (NvmeCreateCq *)&req->cmd; - uint16_t cqid = le16_to_cpu(c->cqid); - uint16_t vector = le16_to_cpu(c->irq_vector); - uint16_t qsize = le16_to_cpu(c->qsize); - uint16_t qflags = le16_to_cpu(c->cq_flags); - uint64_t prp1 = le64_to_cpu(c->prp1); - - trace_pci_nvme_create_cq(prp1, cqid, vector, qsize, qflags, - NVME_CQ_FLAGS_IEN(qflags) != 0); - - if (unlikely(!cqid || cqid > n->params.max_ioqpairs || - n->cq[cqid] != NULL)) { - trace_pci_nvme_err_invalid_create_cq_cqid(cqid); - return NVME_INVALID_QID | NVME_DNR; - } - if (unlikely(!qsize || qsize > NVME_CAP_MQES(n->bar.cap))) { - trace_pci_nvme_err_invalid_create_cq_size(qsize); - return NVME_MAX_QSIZE_EXCEEDED | NVME_DNR; - } - if (unlikely(prp1 & (n->page_size - 1))) { - trace_pci_nvme_err_invalid_create_cq_addr(prp1); - return NVME_INVALID_PRP_OFFSET | NVME_DNR; - } - if (unlikely(!msix_enabled(&n->parent_obj) && vector)) { - trace_pci_nvme_err_invalid_create_cq_vector(vector); - return NVME_INVALID_IRQ_VECTOR | NVME_DNR; - } - if (unlikely(vector >= n->params.msix_qsize)) { - trace_pci_nvme_err_invalid_create_cq_vector(vector); - return NVME_INVALID_IRQ_VECTOR | NVME_DNR; - } - if (unlikely(!(NVME_CQ_FLAGS_PC(qflags)))) { - trace_pci_nvme_err_invalid_create_cq_qflags(NVME_CQ_FLAGS_PC(qflags)); - return NVME_INVALID_FIELD | NVME_DNR; - } - - cq = g_malloc0(sizeof(*cq)); - nvme_init_cq(cq, n, prp1, cqid, vector, qsize + 1, - NVME_CQ_FLAGS_IEN(qflags)); - - /* - * It is only required to set qs_created when creating a completion queue; - * creating a submission queue without a matching completion queue will - * fail. 
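Editor's note: qs_created in turn makes a later Set Features (Number of Queues) command fail with Command Sequence Error; see the corresponding check in nvme_set_feature() below.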
- */ - n->qs_created = true; - return NVME_SUCCESS; -} - -static uint16_t nvme_rpt_empty_id_struct(NvmeCtrl *n, NvmeRequest *req) -{ - uint8_t id[NVME_IDENTIFY_DATA_SIZE] = {}; - - return nvme_c2h(n, id, sizeof(id), req); -} - -static inline bool nvme_csi_has_nvm_support(NvmeNamespace *ns) -{ - switch (ns->csi) { - case NVME_CSI_NVM: - case NVME_CSI_ZONED: - return true; - } - return false; -} - -static uint16_t nvme_identify_ctrl(NvmeCtrl *n, NvmeRequest *req) -{ - trace_pci_nvme_identify_ctrl(); - - return nvme_c2h(n, (uint8_t *)&n->id_ctrl, sizeof(n->id_ctrl), req); -} - -static uint16_t nvme_identify_ctrl_csi(NvmeCtrl *n, NvmeRequest *req) -{ - NvmeIdentify *c = (NvmeIdentify *)&req->cmd; - uint8_t id[NVME_IDENTIFY_DATA_SIZE] = {}; - NvmeIdCtrlNvm *id_nvm = (NvmeIdCtrlNvm *)&id; - - trace_pci_nvme_identify_ctrl_csi(c->csi); - - switch (c->csi) { - case NVME_CSI_NVM: - id_nvm->vsl = n->params.vsl; - id_nvm->dmrsl = cpu_to_le32(n->dmrsl); - break; - - case NVME_CSI_ZONED: - ((NvmeIdCtrlZoned *)&id)->zasl = n->params.zasl; - break; - - default: - return NVME_INVALID_FIELD | NVME_DNR; - } - - return nvme_c2h(n, id, sizeof(id), req); -} - -static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeRequest *req, bool active) -{ - NvmeNamespace *ns; - NvmeIdentify *c = (NvmeIdentify *)&req->cmd; - uint32_t nsid = le32_to_cpu(c->nsid); - - trace_pci_nvme_identify_ns(nsid); - - if (!nvme_nsid_valid(n, nsid) || nsid == NVME_NSID_BROADCAST) { - return NVME_INVALID_NSID | NVME_DNR; - } - - ns = nvme_ns(n, nsid); - if (unlikely(!ns)) { - if (!active) { - ns = nvme_subsys_ns(n->subsys, nsid); - if (!ns) { - return nvme_rpt_empty_id_struct(n, req); - } - } else { - return nvme_rpt_empty_id_struct(n, req); - } - } - - if (c->csi == NVME_CSI_NVM && nvme_csi_has_nvm_support(ns)) { - return nvme_c2h(n, (uint8_t *)&ns->id_ns, sizeof(NvmeIdNs), req); - } - - return NVME_INVALID_CMD_SET | NVME_DNR; -} - -static uint16_t nvme_identify_ns_attached_list(NvmeCtrl *n, NvmeRequest *req) -{ - NvmeIdentify *c = (NvmeIdentify *)&req->cmd; - uint16_t min_id = le16_to_cpu(c->ctrlid); - uint16_t list[NVME_CONTROLLER_LIST_SIZE] = {}; - uint16_t *ids = &list[1]; - NvmeNamespace *ns; - NvmeCtrl *ctrl; - int cntlid, nr_ids = 0; - - trace_pci_nvme_identify_ns_attached_list(min_id); - - if (c->nsid == NVME_NSID_BROADCAST) { - return NVME_INVALID_FIELD | NVME_DNR; - } - - ns = nvme_subsys_ns(n->subsys, c->nsid); - if (!ns) { - return NVME_INVALID_FIELD | NVME_DNR; - } - - for (cntlid = min_id; cntlid < ARRAY_SIZE(n->subsys->ctrls); cntlid++) { - ctrl = nvme_subsys_ctrl(n->subsys, cntlid); - if (!ctrl) { - continue; - } - - if (!nvme_ns(ctrl, c->nsid)) { - continue; - } - - ids[nr_ids++] = cntlid; - } - - list[0] = nr_ids; - - return nvme_c2h(n, (uint8_t *)list, sizeof(list), req); -} - -static uint16_t nvme_identify_ns_csi(NvmeCtrl *n, NvmeRequest *req, - bool active) -{ - NvmeNamespace *ns; - NvmeIdentify *c = (NvmeIdentify *)&req->cmd; - uint32_t nsid = le32_to_cpu(c->nsid); - - trace_pci_nvme_identify_ns_csi(nsid, c->csi); - - if (!nvme_nsid_valid(n, nsid) || nsid == NVME_NSID_BROADCAST) { - return NVME_INVALID_NSID | NVME_DNR; - } - - ns = nvme_ns(n, nsid); - if (unlikely(!ns)) { - if (!active) { - ns = nvme_subsys_ns(n->subsys, nsid); - if (!ns) { - return nvme_rpt_empty_id_struct(n, req); - } - } else { - return nvme_rpt_empty_id_struct(n, req); - } - } - - if (c->csi == NVME_CSI_NVM && nvme_csi_has_nvm_support(ns)) { - return nvme_rpt_empty_id_struct(n, req); - } else if (c->csi == NVME_CSI_ZONED && ns->csi == NVME_CSI_ZONED) { 
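Editor's note: for CNS 05h the NVM command set defines no additional Identify Namespace data, hence the zeroed structure returned just above; the Zoned command set returns its NvmeIdNsZoned structure here.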
- return nvme_c2h(n, (uint8_t *)ns->id_ns_zoned, sizeof(NvmeIdNsZoned), - req); - } - - return NVME_INVALID_FIELD | NVME_DNR; -} - -static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeRequest *req, - bool active) -{ - NvmeNamespace *ns; - NvmeIdentify *c = (NvmeIdentify *)&req->cmd; - uint32_t min_nsid = le32_to_cpu(c->nsid); - uint8_t list[NVME_IDENTIFY_DATA_SIZE] = {}; - static const int data_len = sizeof(list); - uint32_t *list_ptr = (uint32_t *)list; - int i, j = 0; - - trace_pci_nvme_identify_nslist(min_nsid); - - /* - * Both FFFFFFFFh (NVME_NSID_BROADCAST) and FFFFFFFFEh are invalid values - * since the Active Namespace ID List should return namespaces with ids - * *higher* than the NSID specified in the command. This is also specified - * in the spec (NVM Express v1.3d, Section 5.15.4). - */ - if (min_nsid >= NVME_NSID_BROADCAST - 1) { - return NVME_INVALID_NSID | NVME_DNR; - } - - for (i = 1; i <= NVME_MAX_NAMESPACES; i++) { - ns = nvme_ns(n, i); - if (!ns) { - if (!active) { - ns = nvme_subsys_ns(n->subsys, i); - if (!ns) { - continue; - } - } else { - continue; - } - } - if (ns->params.nsid <= min_nsid) { - continue; - } - list_ptr[j++] = cpu_to_le32(ns->params.nsid); - if (j == data_len / sizeof(uint32_t)) { - break; - } - } - - return nvme_c2h(n, list, data_len, req); -} - -static uint16_t nvme_identify_nslist_csi(NvmeCtrl *n, NvmeRequest *req, - bool active) -{ - NvmeNamespace *ns; - NvmeIdentify *c = (NvmeIdentify *)&req->cmd; - uint32_t min_nsid = le32_to_cpu(c->nsid); - uint8_t list[NVME_IDENTIFY_DATA_SIZE] = {}; - static const int data_len = sizeof(list); - uint32_t *list_ptr = (uint32_t *)list; - int i, j = 0; - - trace_pci_nvme_identify_nslist_csi(min_nsid, c->csi); - - /* - * Same as in nvme_identify_nslist(), FFFFFFFFh/FFFFFFFFEh are invalid. - */ - if (min_nsid >= NVME_NSID_BROADCAST - 1) { - return NVME_INVALID_NSID | NVME_DNR; - } - - if (c->csi != NVME_CSI_NVM && c->csi != NVME_CSI_ZONED) { - return NVME_INVALID_FIELD | NVME_DNR; - } - - for (i = 1; i <= NVME_MAX_NAMESPACES; i++) { - ns = nvme_ns(n, i); - if (!ns) { - if (!active) { - ns = nvme_subsys_ns(n->subsys, i); - if (!ns) { - continue; - } - } else { - continue; - } - } - if (ns->params.nsid <= min_nsid || c->csi != ns->csi) { - continue; - } - list_ptr[j++] = cpu_to_le32(ns->params.nsid); - if (j == data_len / sizeof(uint32_t)) { - break; - } - } - - return nvme_c2h(n, list, data_len, req); -} - -static uint16_t nvme_identify_ns_descr_list(NvmeCtrl *n, NvmeRequest *req) -{ - NvmeNamespace *ns; - NvmeIdentify *c = (NvmeIdentify *)&req->cmd; - uint32_t nsid = le32_to_cpu(c->nsid); - uint8_t list[NVME_IDENTIFY_DATA_SIZE] = {}; - - struct data { - struct { - NvmeIdNsDescr hdr; - uint8_t v[NVME_NIDL_UUID]; - } uuid; - struct { - NvmeIdNsDescr hdr; - uint8_t v; - } csi; - }; - - struct data *ns_descrs = (struct data *)list; - - trace_pci_nvme_identify_ns_descr_list(nsid); - - if (!nvme_nsid_valid(n, nsid) || nsid == NVME_NSID_BROADCAST) { - return NVME_INVALID_NSID | NVME_DNR; - } - - ns = nvme_ns(n, nsid); - if (unlikely(!ns)) { - return NVME_INVALID_FIELD | NVME_DNR; - } - - /* - * Because the NGUID and EUI64 fields are 0 in the Identify Namespace data - * structure, a Namespace UUID (nidt = 3h) must be reported in the - * Namespace Identification Descriptor. Add the namespace UUID here. 
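Editor's note: the returned buffer thus carries two descriptors back to back. An illustrative layout, assuming the 4-byte NvmeIdNsDescr header:

    offset 0x00: NIDT 03h (UUID), NIDL 10h, then 16 bytes of UUID
    offset 0x14: NIDT 04h (CSI),  NIDL 01h, then 1 byte of command set id

The remainder of the 4096-byte Identify buffer stays zeroed.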
- */ - ns_descrs->uuid.hdr.nidt = NVME_NIDT_UUID; - ns_descrs->uuid.hdr.nidl = NVME_NIDL_UUID; - memcpy(&ns_descrs->uuid.v, ns->params.uuid.data, NVME_NIDL_UUID); - - ns_descrs->csi.hdr.nidt = NVME_NIDT_CSI; - ns_descrs->csi.hdr.nidl = NVME_NIDL_CSI; - ns_descrs->csi.v = ns->csi; - - return nvme_c2h(n, list, sizeof(list), req); -} - -static uint16_t nvme_identify_cmd_set(NvmeCtrl *n, NvmeRequest *req) -{ - uint8_t list[NVME_IDENTIFY_DATA_SIZE] = {}; - static const int data_len = sizeof(list); - - trace_pci_nvme_identify_cmd_set(); - - NVME_SET_CSI(*list, NVME_CSI_NVM); - NVME_SET_CSI(*list, NVME_CSI_ZONED); - - return nvme_c2h(n, list, data_len, req); -} - -static uint16_t nvme_identify(NvmeCtrl *n, NvmeRequest *req) -{ - NvmeIdentify *c = (NvmeIdentify *)&req->cmd; - - trace_pci_nvme_identify(nvme_cid(req), c->cns, le16_to_cpu(c->ctrlid), - c->csi); - - switch (c->cns) { - case NVME_ID_CNS_NS: - return nvme_identify_ns(n, req, true); - case NVME_ID_CNS_NS_PRESENT: - return nvme_identify_ns(n, req, false); - case NVME_ID_CNS_NS_ATTACHED_CTRL_LIST: - return nvme_identify_ns_attached_list(n, req); - case NVME_ID_CNS_CS_NS: - return nvme_identify_ns_csi(n, req, true); - case NVME_ID_CNS_CS_NS_PRESENT: - return nvme_identify_ns_csi(n, req, false); - case NVME_ID_CNS_CTRL: - return nvme_identify_ctrl(n, req); - case NVME_ID_CNS_CS_CTRL: - return nvme_identify_ctrl_csi(n, req); - case NVME_ID_CNS_NS_ACTIVE_LIST: - return nvme_identify_nslist(n, req, true); - case NVME_ID_CNS_NS_PRESENT_LIST: - return nvme_identify_nslist(n, req, false); - case NVME_ID_CNS_CS_NS_ACTIVE_LIST: - return nvme_identify_nslist_csi(n, req, true); - case NVME_ID_CNS_CS_NS_PRESENT_LIST: - return nvme_identify_nslist_csi(n, req, false); - case NVME_ID_CNS_NS_DESCR_LIST: - return nvme_identify_ns_descr_list(n, req); - case NVME_ID_CNS_IO_COMMAND_SET: - return nvme_identify_cmd_set(n, req); - default: - trace_pci_nvme_err_invalid_identify_cns(le32_to_cpu(c->cns)); - return NVME_INVALID_FIELD | NVME_DNR; - } -} - -static uint16_t nvme_abort(NvmeCtrl *n, NvmeRequest *req) -{ - uint16_t sqid = le32_to_cpu(req->cmd.cdw10) & 0xffff; - - req->cqe.result = 1; - if (nvme_check_sqid(n, sqid)) { - return NVME_INVALID_FIELD | NVME_DNR; - } - - return NVME_SUCCESS; -} - -static inline void nvme_set_timestamp(NvmeCtrl *n, uint64_t ts) -{ - trace_pci_nvme_setfeat_timestamp(ts); - - n->host_timestamp = le64_to_cpu(ts); - n->timestamp_set_qemu_clock_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL); -} - -static inline uint64_t nvme_get_timestamp(const NvmeCtrl *n) -{ - uint64_t current_time = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL); - uint64_t elapsed_time = current_time - n->timestamp_set_qemu_clock_ms; - - union nvme_timestamp { - struct { - uint64_t timestamp:48; - uint64_t sync:1; - uint64_t origin:3; - uint64_t rsvd1:12; - }; - uint64_t all; - }; - - union nvme_timestamp ts; - ts.all = 0; - ts.timestamp = n->host_timestamp + elapsed_time; - - /* If the host timestamp is non-zero, set the timestamp origin */ - ts.origin = n->host_timestamp ? 
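Editor's note: an origin of 01h means the timestamp was set by the host via Set Features; 00h means it has not been set since reset. The 64-bit value returned below packs bits 47:00 as the timestamp in milliseconds, bit 48 as the synch flag and bits 51:49 as the origin, per the union above.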
0x01 : 0x00; - - trace_pci_nvme_getfeat_timestamp(ts.all); - - return cpu_to_le64(ts.all); -} - -static uint16_t nvme_get_feature_timestamp(NvmeCtrl *n, NvmeRequest *req) -{ - uint64_t timestamp = nvme_get_timestamp(n); - - return nvme_c2h(n, (uint8_t *)&timestamp, sizeof(timestamp), req); -} - -static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeRequest *req) -{ - NvmeCmd *cmd = &req->cmd; - uint32_t dw10 = le32_to_cpu(cmd->cdw10); - uint32_t dw11 = le32_to_cpu(cmd->cdw11); - uint32_t nsid = le32_to_cpu(cmd->nsid); - uint32_t result; - uint8_t fid = NVME_GETSETFEAT_FID(dw10); - NvmeGetFeatureSelect sel = NVME_GETFEAT_SELECT(dw10); - uint16_t iv; - NvmeNamespace *ns; - int i; - - static const uint32_t nvme_feature_default[NVME_FID_MAX] = { - [NVME_ARBITRATION] = NVME_ARB_AB_NOLIMIT, - }; - - trace_pci_nvme_getfeat(nvme_cid(req), nsid, fid, sel, dw11); - - if (!nvme_feature_support[fid]) { - return NVME_INVALID_FIELD | NVME_DNR; - } - - if (nvme_feature_cap[fid] & NVME_FEAT_CAP_NS) { - if (!nvme_nsid_valid(n, nsid) || nsid == NVME_NSID_BROADCAST) { - /* - * The Reservation Notification Mask and Reservation Persistence - * features require a status code of Invalid Field in Command when - * NSID is FFFFFFFFh. Since the device does not support those - * features we can always return Invalid Namespace or Format as we - * should do for all other features. - */ - return NVME_INVALID_NSID | NVME_DNR; - } - - if (!nvme_ns(n, nsid)) { - return NVME_INVALID_FIELD | NVME_DNR; - } - } - - switch (sel) { - case NVME_GETFEAT_SELECT_CURRENT: - break; - case NVME_GETFEAT_SELECT_SAVED: - /* no features are saveable by the controller; fallthrough */ - case NVME_GETFEAT_SELECT_DEFAULT: - goto defaults; - case NVME_GETFEAT_SELECT_CAP: - result = nvme_feature_cap[fid]; - goto out; - } - - switch (fid) { - case NVME_TEMPERATURE_THRESHOLD: - result = 0; - - /* - * The controller only implements the Composite Temperature sensor, so - * return 0 for all other sensors. - */ - if (NVME_TEMP_TMPSEL(dw11) != NVME_TEMP_TMPSEL_COMPOSITE) { - goto out; - } - - switch (NVME_TEMP_THSEL(dw11)) { - case NVME_TEMP_THSEL_OVER: - result = n->features.temp_thresh_hi; - goto out; - case NVME_TEMP_THSEL_UNDER: - result = n->features.temp_thresh_low; - goto out; - } - - return NVME_INVALID_FIELD | NVME_DNR; - case NVME_ERROR_RECOVERY: - if (!nvme_nsid_valid(n, nsid)) { - return NVME_INVALID_NSID | NVME_DNR; - } - - ns = nvme_ns(n, nsid); - if (unlikely(!ns)) { - return NVME_INVALID_FIELD | NVME_DNR; - } - - result = ns->features.err_rec; - goto out; - case NVME_VOLATILE_WRITE_CACHE: - result = 0; - for (i = 1; i <= NVME_MAX_NAMESPACES; i++) { - ns = nvme_ns(n, i); - if (!ns) { - continue; - } - - result = blk_enable_write_cache(ns->blkconf.blk); - if (result) { - break; - } - } - trace_pci_nvme_getfeat_vwcache(result ? 
"enabled" : "disabled"); - goto out; - case NVME_ASYNCHRONOUS_EVENT_CONF: - result = n->features.async_config; - goto out; - case NVME_TIMESTAMP: - return nvme_get_feature_timestamp(n, req); - default: - break; - } - -defaults: - switch (fid) { - case NVME_TEMPERATURE_THRESHOLD: - result = 0; - - if (NVME_TEMP_TMPSEL(dw11) != NVME_TEMP_TMPSEL_COMPOSITE) { - break; - } - - if (NVME_TEMP_THSEL(dw11) == NVME_TEMP_THSEL_OVER) { - result = NVME_TEMPERATURE_WARNING; - } - - break; - case NVME_NUMBER_OF_QUEUES: - result = (n->params.max_ioqpairs - 1) | - ((n->params.max_ioqpairs - 1) << 16); - trace_pci_nvme_getfeat_numq(result); - break; - case NVME_INTERRUPT_VECTOR_CONF: - iv = dw11 & 0xffff; - if (iv >= n->params.max_ioqpairs + 1) { - return NVME_INVALID_FIELD | NVME_DNR; - } - - result = iv; - if (iv == n->admin_cq.vector) { - result |= NVME_INTVC_NOCOALESCING; - } - break; - default: - result = nvme_feature_default[fid]; - break; - } - -out: - req->cqe.result = cpu_to_le32(result); - return NVME_SUCCESS; -} - -static uint16_t nvme_set_feature_timestamp(NvmeCtrl *n, NvmeRequest *req) -{ - uint16_t ret; - uint64_t timestamp; - - ret = nvme_h2c(n, (uint8_t *)×tamp, sizeof(timestamp), req); - if (ret) { - return ret; - } - - nvme_set_timestamp(n, timestamp); - - return NVME_SUCCESS; -} - -static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeRequest *req) -{ - NvmeNamespace *ns = NULL; - - NvmeCmd *cmd = &req->cmd; - uint32_t dw10 = le32_to_cpu(cmd->cdw10); - uint32_t dw11 = le32_to_cpu(cmd->cdw11); - uint32_t nsid = le32_to_cpu(cmd->nsid); - uint8_t fid = NVME_GETSETFEAT_FID(dw10); - uint8_t save = NVME_SETFEAT_SAVE(dw10); - int i; - - trace_pci_nvme_setfeat(nvme_cid(req), nsid, fid, save, dw11); - - if (save && !(nvme_feature_cap[fid] & NVME_FEAT_CAP_SAVE)) { - return NVME_FID_NOT_SAVEABLE | NVME_DNR; - } - - if (!nvme_feature_support[fid]) { - return NVME_INVALID_FIELD | NVME_DNR; - } - - if (nvme_feature_cap[fid] & NVME_FEAT_CAP_NS) { - if (nsid != NVME_NSID_BROADCAST) { - if (!nvme_nsid_valid(n, nsid)) { - return NVME_INVALID_NSID | NVME_DNR; - } - - ns = nvme_ns(n, nsid); - if (unlikely(!ns)) { - return NVME_INVALID_FIELD | NVME_DNR; - } - } - } else if (nsid && nsid != NVME_NSID_BROADCAST) { - if (!nvme_nsid_valid(n, nsid)) { - return NVME_INVALID_NSID | NVME_DNR; - } - - return NVME_FEAT_NOT_NS_SPEC | NVME_DNR; - } - - if (!(nvme_feature_cap[fid] & NVME_FEAT_CAP_CHANGE)) { - return NVME_FEAT_NOT_CHANGEABLE | NVME_DNR; - } - - switch (fid) { - case NVME_TEMPERATURE_THRESHOLD: - if (NVME_TEMP_TMPSEL(dw11) != NVME_TEMP_TMPSEL_COMPOSITE) { - break; - } - - switch (NVME_TEMP_THSEL(dw11)) { - case NVME_TEMP_THSEL_OVER: - n->features.temp_thresh_hi = NVME_TEMP_TMPTH(dw11); - break; - case NVME_TEMP_THSEL_UNDER: - n->features.temp_thresh_low = NVME_TEMP_TMPTH(dw11); - break; - default: - return NVME_INVALID_FIELD | NVME_DNR; - } - - if ((n->temperature >= n->features.temp_thresh_hi) || - (n->temperature <= n->features.temp_thresh_low)) { - nvme_smart_event(n, NVME_AER_INFO_SMART_TEMP_THRESH); - } - - break; - case NVME_ERROR_RECOVERY: - if (nsid == NVME_NSID_BROADCAST) { - for (i = 1; i <= NVME_MAX_NAMESPACES; i++) { - ns = nvme_ns(n, i); - - if (!ns) { - continue; - } - - if (NVME_ID_NS_NSFEAT_DULBE(ns->id_ns.nsfeat)) { - ns->features.err_rec = dw11; - } - } - - break; - } - - assert(ns); - if (NVME_ID_NS_NSFEAT_DULBE(ns->id_ns.nsfeat)) { - ns->features.err_rec = dw11; - } - break; - case NVME_VOLATILE_WRITE_CACHE: - for (i = 1; i <= NVME_MAX_NAMESPACES; i++) { - ns = nvme_ns(n, i); - if (!ns) { - 
continue; - } - - if (!(dw11 & 0x1) && blk_enable_write_cache(ns->blkconf.blk)) { - blk_flush(ns->blkconf.blk); - } - - blk_set_enable_write_cache(ns->blkconf.blk, dw11 & 1); - } - - break; - - case NVME_NUMBER_OF_QUEUES: - if (n->qs_created) { - return NVME_CMD_SEQ_ERROR | NVME_DNR; - } - - /* - * NVMe v1.3, Section 5.21.1.7: FFFFh is not an allowed value for NCQR - * and NSQR. - */ - if ((dw11 & 0xffff) == 0xffff || ((dw11 >> 16) & 0xffff) == 0xffff) { - return NVME_INVALID_FIELD | NVME_DNR; - } - - trace_pci_nvme_setfeat_numq((dw11 & 0xffff) + 1, - ((dw11 >> 16) & 0xffff) + 1, - n->params.max_ioqpairs, - n->params.max_ioqpairs); - req->cqe.result = cpu_to_le32((n->params.max_ioqpairs - 1) | - ((n->params.max_ioqpairs - 1) << 16)); - break; - case NVME_ASYNCHRONOUS_EVENT_CONF: - n->features.async_config = dw11; - break; - case NVME_TIMESTAMP: - return nvme_set_feature_timestamp(n, req); - case NVME_COMMAND_SET_PROFILE: - if (dw11 & 0x1ff) { - trace_pci_nvme_err_invalid_iocsci(dw11 & 0x1ff); - return NVME_CMD_SET_CMB_REJECTED | NVME_DNR; - } - break; - default: - return NVME_FEAT_NOT_CHANGEABLE | NVME_DNR; - } - return NVME_SUCCESS; -} - -static uint16_t nvme_aer(NvmeCtrl *n, NvmeRequest *req) -{ - trace_pci_nvme_aer(nvme_cid(req)); - - if (n->outstanding_aers > n->params.aerl) { - trace_pci_nvme_aer_aerl_exceeded(); - return NVME_AER_LIMIT_EXCEEDED; - } - - n->aer_reqs[n->outstanding_aers] = req; - n->outstanding_aers++; - - if (!QTAILQ_EMPTY(&n->aer_queue)) { - nvme_process_aers(n); - } - - return NVME_NO_COMPLETE; -} - -static void nvme_update_dmrsl(NvmeCtrl *n) -{ - int nsid; - - for (nsid = 1; nsid <= NVME_MAX_NAMESPACES; nsid++) { - NvmeNamespace *ns = nvme_ns(n, nsid); - if (!ns) { - continue; - } - - n->dmrsl = MIN_NON_ZERO(n->dmrsl, - BDRV_REQUEST_MAX_BYTES / nvme_l2b(ns, 1)); - } -} - -static void nvme_select_iocs_ns(NvmeCtrl *n, NvmeNamespace *ns) -{ - ns->iocs = nvme_cse_iocs_none; - switch (ns->csi) { - case NVME_CSI_NVM: - if (NVME_CC_CSS(n->bar.cc) != NVME_CC_CSS_ADMIN_ONLY) { - ns->iocs = nvme_cse_iocs_nvm; - } - break; - case NVME_CSI_ZONED: - if (NVME_CC_CSS(n->bar.cc) == NVME_CC_CSS_CSI) { - ns->iocs = nvme_cse_iocs_zoned; - } else if (NVME_CC_CSS(n->bar.cc) == NVME_CC_CSS_NVM) { - ns->iocs = nvme_cse_iocs_nvm; - } - break; - } -} - -static uint16_t nvme_ns_attachment(NvmeCtrl *n, NvmeRequest *req) -{ - NvmeNamespace *ns; - NvmeCtrl *ctrl; - uint16_t list[NVME_CONTROLLER_LIST_SIZE] = {}; - uint32_t nsid = le32_to_cpu(req->cmd.nsid); - uint32_t dw10 = le32_to_cpu(req->cmd.cdw10); - bool attach = !(dw10 & 0xf); - uint16_t *nr_ids = &list[0]; - uint16_t *ids = &list[1]; - uint16_t ret; - int i; - - trace_pci_nvme_ns_attachment(nvme_cid(req), dw10 & 0xf); - - if (!nvme_nsid_valid(n, nsid)) { - return NVME_INVALID_NSID | NVME_DNR; - } - - ns = nvme_subsys_ns(n->subsys, nsid); - if (!ns) { - return NVME_INVALID_FIELD | NVME_DNR; - } - - ret = nvme_h2c(n, (uint8_t *)list, 4096, req); - if (ret) { - return ret; - } - - if (!*nr_ids) { - return NVME_NS_CTRL_LIST_INVALID | NVME_DNR; - } - - *nr_ids = MIN(*nr_ids, NVME_CONTROLLER_LIST_SIZE - 1); - for (i = 0; i < *nr_ids; i++) { - ctrl = nvme_subsys_ctrl(n->subsys, ids[i]); - if (!ctrl) { - return NVME_NS_CTRL_LIST_INVALID | NVME_DNR; - } - - if (attach) { - if (nvme_ns(ctrl, nsid)) { - return NVME_NS_ALREADY_ATTACHED | NVME_DNR; - } - - if (ns->attached && !ns->params.shared) { - return NVME_NS_PRIVATE | NVME_DNR; - } - - nvme_attach_ns(ctrl, ns); - nvme_select_iocs_ns(ctrl, ns); - } else { - if (!nvme_ns(ctrl, nsid)) { - return 
NVME_NS_NOT_ATTACHED | NVME_DNR; - } - - ctrl->namespaces[nsid] = NULL; - ns->attached--; - - nvme_update_dmrsl(ctrl); - } - - /* - * Add namespace id to the changed namespace id list for event clearing - * via Get Log Page command. - */ - if (!test_and_set_bit(nsid, ctrl->changed_nsids)) { - nvme_enqueue_event(ctrl, NVME_AER_TYPE_NOTICE, - NVME_AER_INFO_NOTICE_NS_ATTR_CHANGED, - NVME_LOG_CHANGED_NSLIST); - } - } - - return NVME_SUCCESS; -} - -static uint16_t nvme_format_ns(NvmeCtrl *n, NvmeNamespace *ns, uint8_t lbaf, - uint8_t mset, uint8_t pi, uint8_t pil, - NvmeRequest *req) -{ - int64_t len, offset; - struct nvme_aio_format_ctx *ctx; - BlockBackend *blk = ns->blkconf.blk; - uint16_t ms; - uintptr_t *num_formats = (uintptr_t *)&req->opaque; - int *count; - - if (ns->params.zoned) { - return NVME_INVALID_FORMAT | NVME_DNR; - } - - trace_pci_nvme_format_ns(nvme_cid(req), nvme_nsid(ns), lbaf, mset, pi, pil); - - if (lbaf > ns->id_ns.nlbaf) { - return NVME_INVALID_FORMAT | NVME_DNR; - } - - ms = ns->id_ns.lbaf[lbaf].ms; - - if (pi && (ms < sizeof(NvmeDifTuple))) { - return NVME_INVALID_FORMAT | NVME_DNR; - } - - if (pi && pi > NVME_ID_NS_DPS_TYPE_3) { - return NVME_INVALID_FIELD | NVME_DNR; - } - - nvme_ns_drain(ns); - nvme_ns_shutdown(ns); - nvme_ns_cleanup(ns); - - ns->id_ns.dps = (pil << 3) | pi; - ns->id_ns.flbas = lbaf | (mset << 4); - - nvme_ns_init_format(ns); - - ns->status = NVME_FORMAT_IN_PROGRESS; - - len = ns->size; - offset = 0; - - count = g_new(int, 1); - *count = 1; - - (*num_formats)++; - - while (len) { - ctx = g_new(struct nvme_aio_format_ctx, 1); - ctx->req = req; - ctx->ns = ns; - ctx->count = count; - - size_t bytes = MIN(BDRV_REQUEST_MAX_BYTES, len); - - (*count)++; - - blk_aio_pwrite_zeroes(blk, offset, bytes, BDRV_REQ_MAY_UNMAP, - nvme_aio_format_cb, ctx); - - offset += bytes; - len -= bytes; - - } - - if (--(*count)) { - return NVME_NO_COMPLETE; - } - - g_free(count); - ns->status = 0x0; - (*num_formats)--; - - return NVME_SUCCESS; -} - -static uint16_t nvme_format(NvmeCtrl *n, NvmeRequest *req) -{ - NvmeNamespace *ns; - uint32_t dw10 = le32_to_cpu(req->cmd.cdw10); - uint32_t nsid = le32_to_cpu(req->cmd.nsid); - uint8_t lbaf = dw10 & 0xf; - uint8_t mset = (dw10 >> 4) & 0x1; - uint8_t pi = (dw10 >> 5) & 0x7; - uint8_t pil = (dw10 >> 8) & 0x1; - uintptr_t *num_formats = (uintptr_t *)&req->opaque; - uint16_t status; - int i; - - trace_pci_nvme_format(nvme_cid(req), nsid, lbaf, mset, pi, pil); - - /* 1-initialize; see the comment in nvme_dsm */ - *num_formats = 1; - - if (nsid != NVME_NSID_BROADCAST) { - if (!nvme_nsid_valid(n, nsid)) { - return NVME_INVALID_NSID | NVME_DNR; - } - - ns = nvme_ns(n, nsid); - if (!ns) { - return NVME_INVALID_FIELD | NVME_DNR; - } - - status = nvme_format_ns(n, ns, lbaf, mset, pi, pil, req); - if (status && status != NVME_NO_COMPLETE) { - req->status = status; - } - } else { - for (i = 1; i <= NVME_MAX_NAMESPACES; i++) { - ns = nvme_ns(n, i); - if (!ns) { - continue; - } - - status = nvme_format_ns(n, ns, lbaf, mset, pi, pil, req); - if (status && status != NVME_NO_COMPLETE) { - req->status = status; - break; - } - } - } - - /* account for the 1-initialization */ - if (--(*num_formats)) { - return NVME_NO_COMPLETE; - } - - return req->status; -} - -static uint16_t nvme_admin_cmd(NvmeCtrl *n, NvmeRequest *req) -{ - trace_pci_nvme_admin_cmd(nvme_cid(req), nvme_sqid(req), req->cmd.opcode, - nvme_adm_opc_str(req->cmd.opcode)); - - if (!(nvme_cse_acs[req->cmd.opcode] & NVME_CMD_EFF_CSUPP)) { - 
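Editor's note: nvme_cse_acs[] serves double duty; it is both the source of the Commands Supported and Effects log page (see nvme_cmd_effects()) and the admission filter applied here.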
trace_pci_nvme_err_invalid_admin_opc(req->cmd.opcode); - return NVME_INVALID_OPCODE | NVME_DNR; - } - - /* SGLs shall not be used for Admin commands in NVMe over PCIe */ - if (NVME_CMD_FLAGS_PSDT(req->cmd.flags) != NVME_PSDT_PRP) { - return NVME_INVALID_FIELD | NVME_DNR; - } - - switch (req->cmd.opcode) { - case NVME_ADM_CMD_DELETE_SQ: - return nvme_del_sq(n, req); - case NVME_ADM_CMD_CREATE_SQ: - return nvme_create_sq(n, req); - case NVME_ADM_CMD_GET_LOG_PAGE: - return nvme_get_log(n, req); - case NVME_ADM_CMD_DELETE_CQ: - return nvme_del_cq(n, req); - case NVME_ADM_CMD_CREATE_CQ: - return nvme_create_cq(n, req); - case NVME_ADM_CMD_IDENTIFY: - return nvme_identify(n, req); - case NVME_ADM_CMD_ABORT: - return nvme_abort(n, req); - case NVME_ADM_CMD_SET_FEATURES: - return nvme_set_feature(n, req); - case NVME_ADM_CMD_GET_FEATURES: - return nvme_get_feature(n, req); - case NVME_ADM_CMD_ASYNC_EV_REQ: - return nvme_aer(n, req); - case NVME_ADM_CMD_NS_ATTACHMENT: - return nvme_ns_attachment(n, req); - case NVME_ADM_CMD_FORMAT_NVM: - return nvme_format(n, req); - default: - assert(false); - } - - return NVME_INVALID_OPCODE | NVME_DNR; -} - -static void nvme_process_sq(void *opaque) -{ - NvmeSQueue *sq = opaque; - NvmeCtrl *n = sq->ctrl; - NvmeCQueue *cq = n->cq[sq->cqid]; - - uint16_t status; - hwaddr addr; - NvmeCmd cmd; - NvmeRequest *req; - - while (!(nvme_sq_empty(sq) || QTAILQ_EMPTY(&sq->req_list))) { - addr = sq->dma_addr + sq->head * n->sqe_size; - if (nvme_addr_read(n, addr, (void *)&cmd, sizeof(cmd))) { - trace_pci_nvme_err_addr_read(addr); - trace_pci_nvme_err_cfs(); - n->bar.csts = NVME_CSTS_FAILED; - break; - } - nvme_inc_sq_head(sq); - - req = QTAILQ_FIRST(&sq->req_list); - QTAILQ_REMOVE(&sq->req_list, req, entry); - QTAILQ_INSERT_TAIL(&sq->out_req_list, req, entry); - nvme_req_clear(req); - req->cqe.cid = cmd.cid; - memcpy(&req->cmd, &cmd, sizeof(NvmeCmd)); - - status = sq->sqid ? 
nvme_io_cmd(n, req) : - nvme_admin_cmd(n, req); - if (status != NVME_NO_COMPLETE) { - req->status = status; - nvme_enqueue_req_completion(cq, req); - } - } -} - -static void nvme_ctrl_reset(NvmeCtrl *n) -{ - NvmeNamespace *ns; - int i; - - for (i = 1; i <= NVME_MAX_NAMESPACES; i++) { - ns = nvme_ns(n, i); - if (!ns) { - continue; - } - - nvme_ns_drain(ns); - } - - for (i = 0; i < n->params.max_ioqpairs + 1; i++) { - if (n->sq[i] != NULL) { - nvme_free_sq(n->sq[i], n); - } - } - for (i = 0; i < n->params.max_ioqpairs + 1; i++) { - if (n->cq[i] != NULL) { - nvme_free_cq(n->cq[i], n); - } - } - - while (!QTAILQ_EMPTY(&n->aer_queue)) { - NvmeAsyncEvent *event = QTAILQ_FIRST(&n->aer_queue); - QTAILQ_REMOVE(&n->aer_queue, event, entry); - g_free(event); - } - - n->aer_queued = 0; - n->outstanding_aers = 0; - n->qs_created = false; - - n->bar.cc = 0; -} - -static void nvme_ctrl_shutdown(NvmeCtrl *n) -{ - NvmeNamespace *ns; - int i; - - if (n->pmr.dev) { - memory_region_msync(&n->pmr.dev->mr, 0, n->pmr.dev->size); - } - - for (i = 1; i <= NVME_MAX_NAMESPACES; i++) { - ns = nvme_ns(n, i); - if (!ns) { - continue; - } - - nvme_ns_shutdown(ns); - } -} - -static void nvme_select_iocs(NvmeCtrl *n) -{ - NvmeNamespace *ns; - int i; - - for (i = 1; i <= NVME_MAX_NAMESPACES; i++) { - ns = nvme_ns(n, i); - if (!ns) { - continue; - } - - nvme_select_iocs_ns(n, ns); - } -} - -static int nvme_start_ctrl(NvmeCtrl *n) -{ - uint32_t page_bits = NVME_CC_MPS(n->bar.cc) + 12; - uint32_t page_size = 1 << page_bits; - - if (unlikely(n->cq[0])) { - trace_pci_nvme_err_startfail_cq(); - return -1; - } - if (unlikely(n->sq[0])) { - trace_pci_nvme_err_startfail_sq(); - return -1; - } - if (unlikely(!n->bar.asq)) { - trace_pci_nvme_err_startfail_nbarasq(); - return -1; - } - if (unlikely(!n->bar.acq)) { - trace_pci_nvme_err_startfail_nbaracq(); - return -1; - } - if (unlikely(n->bar.asq & (page_size - 1))) { - trace_pci_nvme_err_startfail_asq_misaligned(n->bar.asq); - return -1; - } - if (unlikely(n->bar.acq & (page_size - 1))) { - trace_pci_nvme_err_startfail_acq_misaligned(n->bar.acq); - return -1; - } - if (unlikely(!(NVME_CAP_CSS(n->bar.cap) & (1 << NVME_CC_CSS(n->bar.cc))))) { - trace_pci_nvme_err_startfail_css(NVME_CC_CSS(n->bar.cc)); - return -1; - } - if (unlikely(NVME_CC_MPS(n->bar.cc) < - NVME_CAP_MPSMIN(n->bar.cap))) { - trace_pci_nvme_err_startfail_page_too_small( - NVME_CC_MPS(n->bar.cc), - NVME_CAP_MPSMIN(n->bar.cap)); - return -1; - } - if (unlikely(NVME_CC_MPS(n->bar.cc) > - NVME_CAP_MPSMAX(n->bar.cap))) { - trace_pci_nvme_err_startfail_page_too_large( - NVME_CC_MPS(n->bar.cc), - NVME_CAP_MPSMAX(n->bar.cap)); - return -1; - } - if (unlikely(NVME_CC_IOCQES(n->bar.cc) < - NVME_CTRL_CQES_MIN(n->id_ctrl.cqes))) { - trace_pci_nvme_err_startfail_cqent_too_small( - NVME_CC_IOCQES(n->bar.cc), - NVME_CTRL_CQES_MIN(n->bar.cap)); - return -1; - } - if (unlikely(NVME_CC_IOCQES(n->bar.cc) > - NVME_CTRL_CQES_MAX(n->id_ctrl.cqes))) { - trace_pci_nvme_err_startfail_cqent_too_large( - NVME_CC_IOCQES(n->bar.cc), - NVME_CTRL_CQES_MAX(n->bar.cap)); - return -1; - } - if (unlikely(NVME_CC_IOSQES(n->bar.cc) < - NVME_CTRL_SQES_MIN(n->id_ctrl.sqes))) { - trace_pci_nvme_err_startfail_sqent_too_small( - NVME_CC_IOSQES(n->bar.cc), - NVME_CTRL_SQES_MIN(n->bar.cap)); - return -1; - } - if (unlikely(NVME_CC_IOSQES(n->bar.cc) > - NVME_CTRL_SQES_MAX(n->id_ctrl.sqes))) { - trace_pci_nvme_err_startfail_sqent_too_large( - NVME_CC_IOSQES(n->bar.cc), - NVME_CTRL_SQES_MAX(n->bar.cap)); - return -1; - } - if (unlikely(!NVME_AQA_ASQS(n->bar.aqa))) 
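Editor's note: CC.IOSQES and CC.IOCQES are log2-encoded entry sizes, so the checks above compare exponents. A worked example with the usual values: IOSQES = 6 gives 1 << 6 = 64-byte submission entries and IOCQES = 4 gives 1 << 4 = 16-byte completion entries, which is exactly what n->sqe_size and n->cqe_size are set to further down.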
{ - trace_pci_nvme_err_startfail_asqent_sz_zero(); - return -1; - } - if (unlikely(!NVME_AQA_ACQS(n->bar.aqa))) { - trace_pci_nvme_err_startfail_acqent_sz_zero(); - return -1; - } - - n->page_bits = page_bits; - n->page_size = page_size; - n->max_prp_ents = n->page_size / sizeof(uint64_t); - n->cqe_size = 1 << NVME_CC_IOCQES(n->bar.cc); - n->sqe_size = 1 << NVME_CC_IOSQES(n->bar.cc); - nvme_init_cq(&n->admin_cq, n, n->bar.acq, 0, 0, - NVME_AQA_ACQS(n->bar.aqa) + 1, 1); - nvme_init_sq(&n->admin_sq, n, n->bar.asq, 0, 0, - NVME_AQA_ASQS(n->bar.aqa) + 1); - - nvme_set_timestamp(n, 0ULL); - - QTAILQ_INIT(&n->aer_queue); - - nvme_select_iocs(n); - - return 0; -} - -static void nvme_cmb_enable_regs(NvmeCtrl *n) -{ - NVME_CMBLOC_SET_CDPCILS(n->bar.cmbloc, 1); - NVME_CMBLOC_SET_CDPMLS(n->bar.cmbloc, 1); - NVME_CMBLOC_SET_BIR(n->bar.cmbloc, NVME_CMB_BIR); - - NVME_CMBSZ_SET_SQS(n->bar.cmbsz, 1); - NVME_CMBSZ_SET_CQS(n->bar.cmbsz, 0); - NVME_CMBSZ_SET_LISTS(n->bar.cmbsz, 1); - NVME_CMBSZ_SET_RDS(n->bar.cmbsz, 1); - NVME_CMBSZ_SET_WDS(n->bar.cmbsz, 1); - NVME_CMBSZ_SET_SZU(n->bar.cmbsz, 2); /* MBs */ - NVME_CMBSZ_SET_SZ(n->bar.cmbsz, n->params.cmb_size_mb); -} - -static void nvme_write_bar(NvmeCtrl *n, hwaddr offset, uint64_t data, - unsigned size) -{ - if (unlikely(offset & (sizeof(uint32_t) - 1))) { - NVME_GUEST_ERR(pci_nvme_ub_mmiowr_misaligned32, - "MMIO write not 32-bit aligned," - " offset=0x%"PRIx64"", offset); - /* should be ignored, fall through for now */ - } - - if (unlikely(size < sizeof(uint32_t))) { - NVME_GUEST_ERR(pci_nvme_ub_mmiowr_toosmall, - "MMIO write smaller than 32-bits," - " offset=0x%"PRIx64", size=%u", - offset, size); - /* should be ignored, fall through for now */ - } - - switch (offset) { - case 0xc: /* INTMS */ - if (unlikely(msix_enabled(&(n->parent_obj)))) { - NVME_GUEST_ERR(pci_nvme_ub_mmiowr_intmask_with_msix, - "undefined access to interrupt mask set" - " when MSI-X is enabled"); - /* should be ignored, fall through for now */ - } - n->bar.intms |= data & 0xffffffff; - n->bar.intmc = n->bar.intms; - trace_pci_nvme_mmio_intm_set(data & 0xffffffff, n->bar.intmc); - nvme_irq_check(n); - break; - case 0x10: /* INTMC */ - if (unlikely(msix_enabled(&(n->parent_obj)))) { - NVME_GUEST_ERR(pci_nvme_ub_mmiowr_intmask_with_msix, - "undefined access to interrupt mask clr" - " when MSI-X is enabled"); - /* should be ignored, fall through for now */ - } - n->bar.intms &= ~(data & 0xffffffff); - n->bar.intmc = n->bar.intms; - trace_pci_nvme_mmio_intm_clr(data & 0xffffffff, n->bar.intmc); - nvme_irq_check(n); - break; - case 0x14: /* CC */ - trace_pci_nvme_mmio_cfg(data & 0xffffffff); - /* Windows first sends data, then sends enable bit */ - if (!NVME_CC_EN(data) && !NVME_CC_EN(n->bar.cc) && - !NVME_CC_SHN(data) && !NVME_CC_SHN(n->bar.cc)) - { - n->bar.cc = data; - } - - if (NVME_CC_EN(data) && !NVME_CC_EN(n->bar.cc)) { - n->bar.cc = data; - if (unlikely(nvme_start_ctrl(n))) { - trace_pci_nvme_err_startfail(); - n->bar.csts = NVME_CSTS_FAILED; - } else { - trace_pci_nvme_mmio_start_success(); - n->bar.csts = NVME_CSTS_READY; - } - } else if (!NVME_CC_EN(data) && NVME_CC_EN(n->bar.cc)) { - trace_pci_nvme_mmio_stopped(); - nvme_ctrl_reset(n); - n->bar.csts &= ~NVME_CSTS_READY; - } - if (NVME_CC_SHN(data) && !(NVME_CC_SHN(n->bar.cc))) { - trace_pci_nvme_mmio_shutdown_set(); - nvme_ctrl_shutdown(n); - n->bar.cc = data; - n->bar.csts |= NVME_CSTS_SHST_COMPLETE; - } else if (!NVME_CC_SHN(data) && NVME_CC_SHN(n->bar.cc)) { - trace_pci_nvme_mmio_shutdown_cleared(); - n->bar.csts &= 
~NVME_CSTS_SHST_COMPLETE; - n->bar.cc = data; - } - break; - case 0x1c: /* CSTS */ - if (data & (1 << 4)) { - NVME_GUEST_ERR(pci_nvme_ub_mmiowr_ssreset_w1c_unsupported, - "attempted to W1C CSTS.NSSRO" - " but CAP.NSSRS is zero (not supported)"); - } else if (data != 0) { - NVME_GUEST_ERR(pci_nvme_ub_mmiowr_ro_csts, - "attempted to set a read only bit" - " of controller status"); - } - break; - case 0x20: /* NSSR */ - if (data == 0x4e564d65) { - trace_pci_nvme_ub_mmiowr_ssreset_unsupported(); - } else { - /* The spec says that writes of other values have no effect */ - return; - } - break; - case 0x24: /* AQA */ - n->bar.aqa = data & 0xffffffff; - trace_pci_nvme_mmio_aqattr(data & 0xffffffff); - break; - case 0x28: /* ASQ */ - n->bar.asq = size == 8 ? data : - (n->bar.asq & ~0xffffffffULL) | (data & 0xffffffff); - trace_pci_nvme_mmio_asqaddr(data); - break; - case 0x2c: /* ASQ hi */ - n->bar.asq = (n->bar.asq & 0xffffffff) | (data << 32); - trace_pci_nvme_mmio_asqaddr_hi(data, n->bar.asq); - break; - case 0x30: /* ACQ */ - trace_pci_nvme_mmio_acqaddr(data); - n->bar.acq = size == 8 ? data : - (n->bar.acq & ~0xffffffffULL) | (data & 0xffffffff); - break; - case 0x34: /* ACQ hi */ - n->bar.acq = (n->bar.acq & 0xffffffff) | (data << 32); - trace_pci_nvme_mmio_acqaddr_hi(data, n->bar.acq); - break; - case 0x38: /* CMBLOC */ - NVME_GUEST_ERR(pci_nvme_ub_mmiowr_cmbloc_reserved, - "invalid write to reserved CMBLOC" - " when CMBSZ is zero, ignored"); - return; - case 0x3C: /* CMBSZ */ - NVME_GUEST_ERR(pci_nvme_ub_mmiowr_cmbsz_readonly, - "invalid write to read only CMBSZ, ignored"); - return; - case 0x50: /* CMBMSC */ - if (!NVME_CAP_CMBS(n->bar.cap)) { - return; - } - - n->bar.cmbmsc = size == 8 ? data : - (n->bar.cmbmsc & ~0xffffffff) | (data & 0xffffffff); - n->cmb.cmse = false; - - if (NVME_CMBMSC_CRE(data)) { - nvme_cmb_enable_regs(n); - - if (NVME_CMBMSC_CMSE(data)) { - hwaddr cba = NVME_CMBMSC_CBA(data) << CMBMSC_CBA_SHIFT; - if (cba + int128_get64(n->cmb.mem.size) < cba) { - NVME_CMBSTS_SET_CBAI(n->bar.cmbsts, 1); - return; - } - - n->cmb.cba = cba; - n->cmb.cmse = true; - } - } else { - n->bar.cmbsz = 0; - n->bar.cmbloc = 0; - } - - return; - case 0x54: /* CMBMSC hi */ - n->bar.cmbmsc = (n->bar.cmbmsc & 0xffffffff) | (data << 32); - return; - - case 0xe00: /* PMRCAP */ - NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrcap_readonly, - "invalid write to PMRCAP register, ignored"); - return; - case 0xe04: /* PMRCTL */ - n->bar.pmrctl = data; - if (NVME_PMRCTL_EN(data)) { - memory_region_set_enabled(&n->pmr.dev->mr, true); - n->bar.pmrsts = 0; - } else { - memory_region_set_enabled(&n->pmr.dev->mr, false); - NVME_PMRSTS_SET_NRDY(n->bar.pmrsts, 1); - n->pmr.cmse = false; - } - return; - case 0xe08: /* PMRSTS */ - NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrsts_readonly, - "invalid write to PMRSTS register, ignored"); - return; - case 0xe0C: /* PMREBS */ - NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrebs_readonly, - "invalid write to PMREBS register, ignored"); - return; - case 0xe10: /* PMRSWTP */ - NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrswtp_readonly, - "invalid write to PMRSWTP register, ignored"); - return; - case 0xe14: /* PMRMSCL */ - if (!NVME_CAP_PMRS(n->bar.cap)) { - return; - } - - n->bar.pmrmsc = (n->bar.pmrmsc & ~0xffffffff) | (data & 0xffffffff); - n->pmr.cmse = false; - - if (NVME_PMRMSC_CMSE(n->bar.pmrmsc)) { - hwaddr cba = NVME_PMRMSC_CBA(n->bar.pmrmsc) << PMRMSC_CBA_SHIFT; - if (cba + int128_get64(n->pmr.dev->mr.size) < cba) { - NVME_PMRSTS_SET_CBAI(n->bar.pmrsts, 1); - return; - } - - n->pmr.cmse = true; 
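Editor's note: as in the CMB path earlier, the base address is carried as a shifted field (PMRMSC_CBA_SHIFT), and the wrap-around check above sets PMRSTS.CBAI instead of enabling the controller memory space.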
- n->pmr.cba = cba; - } - - return; - case 0xe18: /* PMRMSCU */ - if (!NVME_CAP_PMRS(n->bar.cap)) { - return; - } - - n->bar.pmrmsc = (n->bar.pmrmsc & 0xffffffff) | (data << 32); - return; - default: - NVME_GUEST_ERR(pci_nvme_ub_mmiowr_invalid, - "invalid MMIO write," - " offset=0x%"PRIx64", data=%"PRIx64"", - offset, data); - break; - } -} - -static uint64_t nvme_mmio_read(void *opaque, hwaddr addr, unsigned size) -{ - NvmeCtrl *n = (NvmeCtrl *)opaque; - uint8_t *ptr = (uint8_t *)&n->bar; - uint64_t val = 0; - - trace_pci_nvme_mmio_read(addr, size); - - if (unlikely(addr & (sizeof(uint32_t) - 1))) { - NVME_GUEST_ERR(pci_nvme_ub_mmiord_misaligned32, - "MMIO read not 32-bit aligned," - " offset=0x%"PRIx64"", addr); - /* should RAZ, fall through for now */ - } else if (unlikely(size < sizeof(uint32_t))) { - NVME_GUEST_ERR(pci_nvme_ub_mmiord_toosmall, - "MMIO read smaller than 32-bits," - " offset=0x%"PRIx64"", addr); - /* should RAZ, fall through for now */ - } - - if (addr < sizeof(n->bar)) { - /* - * When PMRWBM bit 1 is set then a read from - * PMRSTS should ensure prior writes - * made it to persistent media - */ - if (addr == 0xe08 && - (NVME_PMRCAP_PMRWBM(n->bar.pmrcap) & 0x02)) { - memory_region_msync(&n->pmr.dev->mr, 0, n->pmr.dev->size); - } - memcpy(&val, ptr + addr, size); - } else { - NVME_GUEST_ERR(pci_nvme_ub_mmiord_invalid_ofs, - "MMIO read beyond last register," - " offset=0x%"PRIx64", returning 0", addr); - } - - return val; -} - -static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val) -{ - uint32_t qid; - - if (unlikely(addr & ((1 << 2) - 1))) { - NVME_GUEST_ERR(pci_nvme_ub_db_wr_misaligned, - "doorbell write not 32-bit aligned," - " offset=0x%"PRIx64", ignoring", addr); - return; - } - - if (((addr - 0x1000) >> 2) & 1) { - /* Completion queue doorbell write */ - - uint16_t new_head = val & 0xffff; - int start_sqs; - NvmeCQueue *cq; - - qid = (addr - (0x1000 + (1 << 2))) >> 3; - if (unlikely(nvme_check_cqid(n, qid))) { - NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_cq, - "completion queue doorbell write" - " for nonexistent queue," - " sqid=%"PRIu32", ignoring", qid); - - /* - * NVM Express v1.3d, Section 4.1 states: "If host software writes - * an invalid value to the Submission Queue Tail Doorbell or - * Completion Queue Head Doorbell register and an Asynchronous Event - * Request command is outstanding, then an asynchronous event is - * posted to the Admin Completion Queue with a status code of - * Invalid Doorbell Write Value." - * - * Also note that the spec includes the "Invalid Doorbell Register" - * status code, but nowhere does it specify when to use it. - * However, it seems reasonable to use it here in a similar - * fashion. - */ - if (n->outstanding_aers) { - nvme_enqueue_event(n, NVME_AER_TYPE_ERROR, - NVME_AER_INFO_ERR_INVALID_DB_REGISTER, - NVME_LOG_ERROR_INFO); - } - - return; - } - - cq = n->cq[qid]; - if (unlikely(new_head >= cq->size)) { - NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_cqhead, - "completion queue doorbell write value" - " beyond queue size, sqid=%"PRIu32"," - " new_head=%"PRIu16", ignoring", - qid, new_head); - - if (n->outstanding_aers) { - nvme_enqueue_event(n, NVME_AER_TYPE_ERROR, - NVME_AER_INFO_ERR_INVALID_DB_VALUE, - NVME_LOG_ERROR_INFO); - } - - return; - } - - trace_pci_nvme_mmio_doorbell_cq(cq->cqid, new_head); - - start_sqs = nvme_cq_full(cq) ? 
1 : 0; - cq->head = new_head; - if (start_sqs) { - NvmeSQueue *sq; - QTAILQ_FOREACH(sq, &cq->sq_list, entry) { - timer_mod(sq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500); - } - timer_mod(cq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500); - } - - if (cq->tail == cq->head) { - nvme_irq_deassert(n, cq); - } - } else { - /* Submission queue doorbell write */ - - uint16_t new_tail = val & 0xffff; - NvmeSQueue *sq; - - qid = (addr - 0x1000) >> 3; - if (unlikely(nvme_check_sqid(n, qid))) { - NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_sq, - "submission queue doorbell write" - " for nonexistent queue," - " sqid=%"PRIu32", ignoring", qid); - - if (n->outstanding_aers) { - nvme_enqueue_event(n, NVME_AER_TYPE_ERROR, - NVME_AER_INFO_ERR_INVALID_DB_REGISTER, - NVME_LOG_ERROR_INFO); - } - - return; - } - - sq = n->sq[qid]; - if (unlikely(new_tail >= sq->size)) { - NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_sqtail, - "submission queue doorbell write value" - " beyond queue size, sqid=%"PRIu32"," - " new_tail=%"PRIu16", ignoring", - qid, new_tail); - - if (n->outstanding_aers) { - nvme_enqueue_event(n, NVME_AER_TYPE_ERROR, - NVME_AER_INFO_ERR_INVALID_DB_VALUE, - NVME_LOG_ERROR_INFO); - } - - return; - } - - trace_pci_nvme_mmio_doorbell_sq(sq->sqid, new_tail); - - sq->tail = new_tail; - timer_mod(sq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500); - } -} - -static void nvme_mmio_write(void *opaque, hwaddr addr, uint64_t data, - unsigned size) -{ - NvmeCtrl *n = (NvmeCtrl *)opaque; - - trace_pci_nvme_mmio_write(addr, data, size); - - if (addr < sizeof(n->bar)) { - nvme_write_bar(n, addr, data, size); - } else { - nvme_process_db(n, addr, data); - } -} - -static const MemoryRegionOps nvme_mmio_ops = { - .read = nvme_mmio_read, - .write = nvme_mmio_write, - .endianness = DEVICE_LITTLE_ENDIAN, - .impl = { - .min_access_size = 2, - .max_access_size = 8, - }, -}; - -static void nvme_cmb_write(void *opaque, hwaddr addr, uint64_t data, - unsigned size) -{ - NvmeCtrl *n = (NvmeCtrl *)opaque; - stn_le_p(&n->cmb.buf[addr], size, data); -} - -static uint64_t nvme_cmb_read(void *opaque, hwaddr addr, unsigned size) -{ - NvmeCtrl *n = (NvmeCtrl *)opaque; - return ldn_le_p(&n->cmb.buf[addr], size); -} - -static const MemoryRegionOps nvme_cmb_ops = { - .read = nvme_cmb_read, - .write = nvme_cmb_write, - .endianness = DEVICE_LITTLE_ENDIAN, - .impl = { - .min_access_size = 1, - .max_access_size = 8, - }, -}; - -static void nvme_check_constraints(NvmeCtrl *n, Error **errp) -{ - NvmeParams *params = &n->params; - - if (params->num_queues) { - warn_report("num_queues is deprecated; please use max_ioqpairs " - "instead"); - - params->max_ioqpairs = params->num_queues - 1; - } - - if (n->namespace.blkconf.blk && n->subsys) { - error_setg(errp, "subsystem support is unavailable with legacy " - "namespace ('drive' property)"); - return; - } - - if (params->max_ioqpairs < 1 || - params->max_ioqpairs > NVME_MAX_IOQPAIRS) { - error_setg(errp, "max_ioqpairs must be between 1 and %d", - NVME_MAX_IOQPAIRS); - return; - } - - if (params->msix_qsize < 1 || - params->msix_qsize > PCI_MSIX_FLAGS_QSIZE + 1) { - error_setg(errp, "msix_qsize must be between 1 and %d", - PCI_MSIX_FLAGS_QSIZE + 1); - return; - } - - if (!params->serial) { - error_setg(errp, "serial property not set"); - return; - } - - if (n->pmr.dev) { - if (host_memory_backend_is_mapped(n->pmr.dev)) { - error_setg(errp, "can't use already busy memdev: %s", - object_get_canonical_path_component(OBJECT(n->pmr.dev))); - return; - } - - if 
(!is_power_of_2(n->pmr.dev->size)) { - error_setg(errp, "pmr backend size needs to be a power of 2"); - return; - } - - host_memory_backend_set_mapped(n->pmr.dev, true); - } - - if (n->params.zasl > n->params.mdts) { - error_setg(errp, "zoned.zasl (Zone Append Size Limit) must be less " - "than or equal to mdts (Maximum Data Transfer Size)"); - return; - } - - if (!n->params.vsl) { - error_setg(errp, "vsl must be non-zero"); - return; - } -} - -static void nvme_init_state(NvmeCtrl *n) -{ - /* add one to max_ioqpairs to account for the admin queue pair */ - n->reg_size = pow2ceil(sizeof(NvmeBar) + - 2 * (n->params.max_ioqpairs + 1) * NVME_DB_SIZE); - n->sq = g_new0(NvmeSQueue *, n->params.max_ioqpairs + 1); - n->cq = g_new0(NvmeCQueue *, n->params.max_ioqpairs + 1); - n->temperature = NVME_TEMPERATURE; - n->features.temp_thresh_hi = NVME_TEMPERATURE_WARNING; - n->starttime_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL); - n->aer_reqs = g_new0(NvmeRequest *, n->params.aerl + 1); -} - -static void nvme_init_cmb(NvmeCtrl *n, PCIDevice *pci_dev) -{ - uint64_t cmb_size = n->params.cmb_size_mb * MiB; - - n->cmb.buf = g_malloc0(cmb_size); - memory_region_init_io(&n->cmb.mem, OBJECT(n), &nvme_cmb_ops, n, - "nvme-cmb", cmb_size); - pci_register_bar(pci_dev, NVME_CMB_BIR, - PCI_BASE_ADDRESS_SPACE_MEMORY | - PCI_BASE_ADDRESS_MEM_TYPE_64 | - PCI_BASE_ADDRESS_MEM_PREFETCH, &n->cmb.mem); - - NVME_CAP_SET_CMBS(n->bar.cap, 1); - - if (n->params.legacy_cmb) { - nvme_cmb_enable_regs(n); - n->cmb.cmse = true; - } -} - -static void nvme_init_pmr(NvmeCtrl *n, PCIDevice *pci_dev) -{ - NVME_PMRCAP_SET_RDS(n->bar.pmrcap, 1); - NVME_PMRCAP_SET_WDS(n->bar.pmrcap, 1); - NVME_PMRCAP_SET_BIR(n->bar.pmrcap, NVME_PMR_BIR); - /* Turn on bit 1 support */ - NVME_PMRCAP_SET_PMRWBM(n->bar.pmrcap, 0x02); - NVME_PMRCAP_SET_CMSS(n->bar.pmrcap, 1); - - pci_register_bar(pci_dev, NVME_PMRCAP_BIR(n->bar.pmrcap), - PCI_BASE_ADDRESS_SPACE_MEMORY | - PCI_BASE_ADDRESS_MEM_TYPE_64 | - PCI_BASE_ADDRESS_MEM_PREFETCH, &n->pmr.dev->mr); - - memory_region_set_enabled(&n->pmr.dev->mr, false); -} - -static int nvme_init_pci(NvmeCtrl *n, PCIDevice *pci_dev, Error **errp) -{ - uint8_t *pci_conf = pci_dev->config; - uint64_t bar_size, msix_table_size, msix_pba_size; - unsigned msix_table_offset, msix_pba_offset; - int ret; - - Error *err = NULL; - - pci_conf[PCI_INTERRUPT_PIN] = 1; - pci_config_set_prog_interface(pci_conf, 0x2); - - if (n->params.use_intel_id) { - pci_config_set_vendor_id(pci_conf, PCI_VENDOR_ID_INTEL); - pci_config_set_device_id(pci_conf, 0x5845); - } else { - pci_config_set_vendor_id(pci_conf, PCI_VENDOR_ID_REDHAT); - pci_config_set_device_id(pci_conf, PCI_DEVICE_ID_REDHAT_NVME); - } - - pci_config_set_class(pci_conf, PCI_CLASS_STORAGE_EXPRESS); - pcie_endpoint_cap_init(pci_dev, 0x80); - - bar_size = QEMU_ALIGN_UP(n->reg_size, 4 * KiB); - msix_table_offset = bar_size; - msix_table_size = PCI_MSIX_ENTRY_SIZE * n->params.msix_qsize; - - bar_size += msix_table_size; - bar_size = QEMU_ALIGN_UP(bar_size, 4 * KiB); - msix_pba_offset = bar_size; - msix_pba_size = QEMU_ALIGN_UP(n->params.msix_qsize, 64) / 8; - - bar_size += msix_pba_size; - bar_size = pow2ceil(bar_size); - - memory_region_init(&n->bar0, OBJECT(n), "nvme-bar0", bar_size); - memory_region_init_io(&n->iomem, OBJECT(n), &nvme_mmio_ops, n, "nvme", - n->reg_size); - memory_region_add_subregion(&n->bar0, 0, &n->iomem); - - pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY | - PCI_BASE_ADDRESS_MEM_TYPE_64, &n->bar0); - ret = msix_init(pci_dev, n->params.msix_qsize, -
&n->bar0, 0, msix_table_offset, - &n->bar0, 0, msix_pba_offset, 0, &err); - if (ret < 0) { - if (ret == -ENOTSUP) { - warn_report_err(err); - } else { - error_propagate(errp, err); - return ret; - } - } - - if (n->params.cmb_size_mb) { - nvme_init_cmb(n, pci_dev); - } - - if (n->pmr.dev) { - nvme_init_pmr(n, pci_dev); - } - - return 0; -} - -static void nvme_init_subnqn(NvmeCtrl *n) -{ - NvmeSubsystem *subsys = n->subsys; - NvmeIdCtrl *id = &n->id_ctrl; - - if (!subsys) { - snprintf((char *)id->subnqn, sizeof(id->subnqn), - "nqn.2019-08.org.qemu:%s", n->params.serial); - } else { - pstrcpy((char *)id->subnqn, sizeof(id->subnqn), (char*)subsys->subnqn); - } -} - -static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev) -{ - NvmeIdCtrl *id = &n->id_ctrl; - uint8_t *pci_conf = pci_dev->config; - - id->vid = cpu_to_le16(pci_get_word(pci_conf + PCI_VENDOR_ID)); - id->ssvid = cpu_to_le16(pci_get_word(pci_conf + PCI_SUBSYSTEM_VENDOR_ID)); - strpadcpy((char *)id->mn, sizeof(id->mn), "QEMU NVMe Ctrl", ' '); - strpadcpy((char *)id->fr, sizeof(id->fr), "1.0", ' '); - strpadcpy((char *)id->sn, sizeof(id->sn), n->params.serial, ' '); - - id->cntlid = cpu_to_le16(n->cntlid); - - id->oaes = cpu_to_le32(NVME_OAES_NS_ATTR); - - id->rab = 6; - - if (n->params.use_intel_id) { - id->ieee[0] = 0xb3; - id->ieee[1] = 0x02; - id->ieee[2] = 0x00; - } else { - id->ieee[0] = 0x00; - id->ieee[1] = 0x54; - id->ieee[2] = 0x52; - } - - id->mdts = n->params.mdts; - id->ver = cpu_to_le32(NVME_SPEC_VER); - id->oacs = cpu_to_le16(NVME_OACS_NS_MGMT | NVME_OACS_FORMAT); - id->cntrltype = 0x1; - - /* - * Because the controller always completes the Abort command immediately, - * there can never be more than one concurrently executing Abort command, - * so this value is never used for anything. Note that there can easily be - * many Abort commands in the queues, but they are not considered - * "executing" until processed by nvme_abort. - * - * The specification recommends a value of 3 for Abort Command Limit (four - * concurrently outstanding Abort commands), so let's use that, though it - * is inconsequential. - */ - id->acl = 3; - id->aerl = n->params.aerl; - id->frmw = (NVME_NUM_FW_SLOTS << 1) | NVME_FRMW_SLOT1_RO; - id->lpa = NVME_LPA_NS_SMART | NVME_LPA_CSE | NVME_LPA_EXTENDED; - - /* recommended default value (~70 C) */ - id->wctemp = cpu_to_le16(NVME_TEMPERATURE_WARNING); - id->cctemp = cpu_to_le16(NVME_TEMPERATURE_CRITICAL); - - id->sqes = (0x6 << 4) | 0x6; - id->cqes = (0x4 << 4) | 0x4; - id->nn = cpu_to_le32(NVME_MAX_NAMESPACES); - id->oncs = cpu_to_le16(NVME_ONCS_WRITE_ZEROES | NVME_ONCS_TIMESTAMP | - NVME_ONCS_FEATURES | NVME_ONCS_DSM | - NVME_ONCS_COMPARE | NVME_ONCS_COPY); - - /* - * NOTE: If this device ever supports a command set that does NOT use 0x0 - * as a Flush-equivalent operation, support for the broadcast NSID in Flush - * should probably be removed. - * - * See comment in nvme_io_cmd.
- */ - id->vwc = NVME_VWC_NSID_BROADCAST_SUPPORT | NVME_VWC_PRESENT; - - id->ocfs = cpu_to_le16(NVME_OCFS_COPY_FORMAT_0); - id->sgls = cpu_to_le32(NVME_CTRL_SGLS_SUPPORT_NO_ALIGN | - NVME_CTRL_SGLS_BITBUCKET); - - nvme_init_subnqn(n); - - id->psd[0].mp = cpu_to_le16(0x9c4); - id->psd[0].enlat = cpu_to_le32(0x10); - id->psd[0].exlat = cpu_to_le32(0x4); - - if (n->subsys) { - id->cmic |= NVME_CMIC_MULTI_CTRL; - } - - NVME_CAP_SET_MQES(n->bar.cap, 0x7ff); - NVME_CAP_SET_CQR(n->bar.cap, 1); - NVME_CAP_SET_TO(n->bar.cap, 0xf); - NVME_CAP_SET_CSS(n->bar.cap, NVME_CAP_CSS_NVM); - NVME_CAP_SET_CSS(n->bar.cap, NVME_CAP_CSS_CSI_SUPP); - NVME_CAP_SET_CSS(n->bar.cap, NVME_CAP_CSS_ADMIN_ONLY); - NVME_CAP_SET_MPSMAX(n->bar.cap, 4); - NVME_CAP_SET_CMBS(n->bar.cap, n->params.cmb_size_mb ? 1 : 0); - NVME_CAP_SET_PMRS(n->bar.cap, n->pmr.dev ? 1 : 0); - - n->bar.vs = NVME_SPEC_VER; - n->bar.intmc = n->bar.intms = 0; -} - -static int nvme_init_subsys(NvmeCtrl *n, Error **errp) -{ - int cntlid; - - if (!n->subsys) { - return 0; - } - - cntlid = nvme_subsys_register_ctrl(n, errp); - if (cntlid < 0) { - return -1; - } - - n->cntlid = cntlid; - - return 0; -} - -void nvme_attach_ns(NvmeCtrl *n, NvmeNamespace *ns) -{ - uint32_t nsid = ns->params.nsid; - assert(nsid && nsid <= NVME_MAX_NAMESPACES); - - n->namespaces[nsid] = ns; - ns->attached++; - - n->dmrsl = MIN_NON_ZERO(n->dmrsl, - BDRV_REQUEST_MAX_BYTES / nvme_l2b(ns, 1)); -} - -static void nvme_realize(PCIDevice *pci_dev, Error **errp) -{ - NvmeCtrl *n = NVME(pci_dev); - NvmeNamespace *ns; - Error *local_err = NULL; - - nvme_check_constraints(n, &local_err); - if (local_err) { - error_propagate(errp, local_err); - return; - } - - qbus_create_inplace(&n->bus, sizeof(NvmeBus), TYPE_NVME_BUS, - &pci_dev->qdev, n->parent_obj.qdev.id); - - nvme_init_state(n); - if (nvme_init_pci(n, pci_dev, errp)) { - return; - } - - /* nvme_init_subsys reports errors directly through errp */ - if (nvme_init_subsys(n, errp)) { - return; - } - nvme_init_ctrl(n, pci_dev); - - /* setup a namespace if the controller drive property was given */ - if (n->namespace.blkconf.blk) { - ns = &n->namespace; - ns->params.nsid = 1; - - if (nvme_ns_setup(n, ns, errp)) { - return; - } - - nvme_attach_ns(n, ns); - } -} - -static void nvme_exit(PCIDevice *pci_dev) -{ - NvmeCtrl *n = NVME(pci_dev); - NvmeNamespace *ns; - int i; - - nvme_ctrl_reset(n); - - for (i = 1; i <= NVME_MAX_NAMESPACES; i++) { - ns = nvme_ns(n, i); - if (!ns) { - continue; - } - - nvme_ns_cleanup(ns); - } - - g_free(n->cq); - g_free(n->sq); - g_free(n->aer_reqs); - - if (n->params.cmb_size_mb) { - g_free(n->cmb.buf); - } - - if (n->pmr.dev) { - host_memory_backend_set_mapped(n->pmr.dev, false); - } - msix_uninit(pci_dev, &n->bar0, &n->bar0); - memory_region_del_subregion(&n->bar0, &n->iomem); -} - -static Property nvme_props[] = { - DEFINE_BLOCK_PROPERTIES(NvmeCtrl, namespace.blkconf), - DEFINE_PROP_LINK("pmrdev", NvmeCtrl, pmr.dev, TYPE_MEMORY_BACKEND, - HostMemoryBackend *), - DEFINE_PROP_LINK("subsys", NvmeCtrl, subsys, TYPE_NVME_SUBSYS, - NvmeSubsystem *), - DEFINE_PROP_STRING("serial", NvmeCtrl, params.serial), - DEFINE_PROP_UINT32("cmb_size_mb", NvmeCtrl, params.cmb_size_mb, 0), - DEFINE_PROP_UINT32("num_queues", NvmeCtrl, params.num_queues, 0), - DEFINE_PROP_UINT32("max_ioqpairs", NvmeCtrl, params.max_ioqpairs, 64), - DEFINE_PROP_UINT16("msix_qsize", NvmeCtrl, params.msix_qsize, 65), - DEFINE_PROP_UINT8("aerl", NvmeCtrl, params.aerl, 3), - DEFINE_PROP_UINT32("aer_max_queued", NvmeCtrl, params.aer_max_queued, 64), - DEFINE_PROP_UINT8("mdts",
NvmeCtrl, params.mdts, 7), - DEFINE_PROP_UINT8("vsl", NvmeCtrl, params.vsl, 7), - DEFINE_PROP_BOOL("use-intel-id", NvmeCtrl, params.use_intel_id, false), - DEFINE_PROP_BOOL("legacy-cmb", NvmeCtrl, params.legacy_cmb, false), - DEFINE_PROP_UINT8("zoned.zasl", NvmeCtrl, params.zasl, 0), - DEFINE_PROP_END_OF_LIST(), -}; - -static void nvme_get_smart_warning(Object *obj, Visitor *v, const char *name, - void *opaque, Error **errp) -{ - NvmeCtrl *n = NVME(obj); - uint8_t value = n->smart_critical_warning; - - visit_type_uint8(v, name, &value, errp); -} - -static void nvme_set_smart_warning(Object *obj, Visitor *v, const char *name, - void *opaque, Error **errp) -{ - NvmeCtrl *n = NVME(obj); - uint8_t value, old_value, cap = 0, index, event; - - if (!visit_type_uint8(v, name, &value, errp)) { - return; - } - - cap = NVME_SMART_SPARE | NVME_SMART_TEMPERATURE | NVME_SMART_RELIABILITY - | NVME_SMART_MEDIA_READ_ONLY | NVME_SMART_FAILED_VOLATILE_MEDIA; - if (NVME_CAP_PMRS(n->bar.cap)) { - cap |= NVME_SMART_PMR_UNRELIABLE; - } - - if ((value & cap) != value) { - error_setg(errp, "unsupported smart critical warning bits: 0x%x", - value & ~cap); - return; - } - - old_value = n->smart_critical_warning; - n->smart_critical_warning = value; - - /* only inject new bits of smart critical warning */ - for (index = 0; index < NVME_SMART_WARN_MAX; index++) { - event = 1 << index; - if (value & ~old_value & event) { - nvme_smart_event(n, event); - } - } -} - -static const VMStateDescription nvme_vmstate = { - .name = "nvme", - .unmigratable = 1, -}; - -static void nvme_class_init(ObjectClass *oc, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(oc); - PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc); - - pc->realize = nvme_realize; - pc->exit = nvme_exit; - pc->class_id = PCI_CLASS_STORAGE_EXPRESS; - pc->revision = 2; - - set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); - dc->desc = "Non-Volatile Memory Express"; - device_class_set_props(dc, nvme_props); - dc->vmsd = &nvme_vmstate; -} - -static void nvme_instance_init(Object *obj) -{ - NvmeCtrl *n = NVME(obj); - - device_add_bootindex_property(obj, &n->namespace.blkconf.bootindex, - "bootindex", "/namespace@1,0", - DEVICE(obj)); - - object_property_add(obj, "smart_critical_warning", "uint8", - nvme_get_smart_warning, - nvme_set_smart_warning, NULL, NULL); -} - -static const TypeInfo nvme_info = { - .name = TYPE_NVME, - .parent = TYPE_PCI_DEVICE, - .instance_size = sizeof(NvmeCtrl), - .instance_init = nvme_instance_init, - .class_init = nvme_class_init, - .interfaces = (InterfaceInfo[]) { - { INTERFACE_PCIE_DEVICE }, - { } - }, -}; - -static const TypeInfo nvme_bus_info = { - .name = TYPE_NVME_BUS, - .parent = TYPE_BUS, - .instance_size = sizeof(NvmeBus), -}; - -static void nvme_register_types(void) -{ - type_register_static(&nvme_info); - type_register_static(&nvme_bus_info); -} - -type_init(nvme_register_types) diff --git a/hw/block/nvme.h b/hw/block/nvme.h deleted file mode 100644 index fb028d81d1..0000000000 --- a/hw/block/nvme.h +++ /dev/null @@ -1,547 +0,0 @@ -/* - * QEMU NVM Express - * - * Copyright (c) 2012 Intel Corporation - * Copyright (c) 2021 Minwoo Im - * Copyright (c) 2021 Samsung Electronics Co., Ltd. - * - * Authors: - * Keith Busch <kbusch@kernel.org> - * Klaus Jensen <k.jensen@samsung.com> - * Gollu Appalanaidu <anaidu.gollu@samsung.com> - * Dmitry Fomichev <dmitry.fomichev@wdc.com> - * Minwoo Im <minwoo.im.dev@gmail.com> - * - * This code is licensed under the GNU GPL v2 or later.
- */ - -#ifndef HW_NVME_H -#define HW_NVME_H - -#include "qemu/uuid.h" -#include "hw/pci/pci.h" -#include "hw/block/block.h" - -#include "block/nvme.h" - -#define NVME_MAX_CONTROLLERS 32 -#define NVME_MAX_NAMESPACES 256 - -typedef struct NvmeCtrl NvmeCtrl; -typedef struct NvmeNamespace NvmeNamespace; - -#define TYPE_NVME_SUBSYS "nvme-subsys" -#define NVME_SUBSYS(obj) \ - OBJECT_CHECK(NvmeSubsystem, (obj), TYPE_NVME_SUBSYS) - -typedef struct NvmeSubsystem { - DeviceState parent_obj; - uint8_t subnqn[256]; - - NvmeCtrl *ctrls[NVME_MAX_CONTROLLERS]; - NvmeNamespace *namespaces[NVME_MAX_NAMESPACES + 1]; - - struct { - char *nqn; - } params; -} NvmeSubsystem; - -int nvme_subsys_register_ctrl(NvmeCtrl *n, Error **errp); - -static inline NvmeCtrl *nvme_subsys_ctrl(NvmeSubsystem *subsys, - uint32_t cntlid) -{ - if (!subsys || cntlid >= NVME_MAX_CONTROLLERS) { - return NULL; - } - - return subsys->ctrls[cntlid]; -} - -static inline NvmeNamespace *nvme_subsys_ns(NvmeSubsystem *subsys, - uint32_t nsid) -{ - if (!subsys || !nsid || nsid > NVME_MAX_NAMESPACES) { - return NULL; - } - - return subsys->namespaces[nsid]; -} - -#define TYPE_NVME_NS "nvme-ns" -#define NVME_NS(obj) \ - OBJECT_CHECK(NvmeNamespace, (obj), TYPE_NVME_NS) - -typedef struct NvmeZone { - NvmeZoneDescr d; - uint64_t w_ptr; - QTAILQ_ENTRY(NvmeZone) entry; -} NvmeZone; - -typedef struct NvmeNamespaceParams { - bool detached; - bool shared; - uint32_t nsid; - QemuUUID uuid; - - uint16_t ms; - uint8_t mset; - uint8_t pi; - uint8_t pil; - - uint16_t mssrl; - uint32_t mcl; - uint8_t msrc; - - bool zoned; - bool cross_zone_read; - uint64_t zone_size_bs; - uint64_t zone_cap_bs; - uint32_t max_active_zones; - uint32_t max_open_zones; - uint32_t zd_extension_size; -} NvmeNamespaceParams; - -typedef struct NvmeNamespace { - DeviceState parent_obj; - BlockConf blkconf; - int32_t bootindex; - int64_t size; - int64_t moff; - NvmeIdNs id_ns; - NvmeLBAF lbaf; - size_t lbasz; - const uint32_t *iocs; - uint8_t csi; - uint16_t status; - int attached; - - QTAILQ_ENTRY(NvmeNamespace) entry; - - NvmeIdNsZoned *id_ns_zoned; - NvmeZone *zone_array; - QTAILQ_HEAD(, NvmeZone) exp_open_zones; - QTAILQ_HEAD(, NvmeZone) imp_open_zones; - QTAILQ_HEAD(, NvmeZone) closed_zones; - QTAILQ_HEAD(, NvmeZone) full_zones; - uint32_t num_zones; - uint64_t zone_size; - uint64_t zone_capacity; - uint32_t zone_size_log2; - uint8_t *zd_extensions; - int32_t nr_open_zones; - int32_t nr_active_zones; - - NvmeNamespaceParams params; - - struct { - uint32_t err_rec; - } features; -} NvmeNamespace; - -static inline uint32_t nvme_nsid(NvmeNamespace *ns) -{ - if (ns) { - return ns->params.nsid; - } - - return 0; -} - -static inline size_t nvme_l2b(NvmeNamespace *ns, uint64_t lba) -{ - return lba << ns->lbaf.ds; -} - -static inline size_t nvme_m2b(NvmeNamespace *ns, uint64_t lba) -{ - return ns->lbaf.ms * lba; -} - -static inline int64_t nvme_moff(NvmeNamespace *ns, uint64_t lba) -{ - return ns->moff + nvme_m2b(ns, lba); -} - -static inline bool nvme_ns_ext(NvmeNamespace *ns) -{ - return !!NVME_ID_NS_FLBAS_EXTENDED(ns->id_ns.flbas); -} - -static inline NvmeZoneState nvme_get_zone_state(NvmeZone *zone) -{ - return zone->d.zs >> 4; -} - -static inline void nvme_set_zone_state(NvmeZone *zone, NvmeZoneState state) -{ - zone->d.zs = state << 4; -} - -static inline uint64_t nvme_zone_rd_boundary(NvmeNamespace *ns, NvmeZone *zone) -{ - return zone->d.zslba + ns->zone_size; -} - -static inline uint64_t nvme_zone_wr_boundary(NvmeZone *zone) -{ - return zone->d.zslba + zone->d.zcap; -} - 
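/*
 * A minimal sketch (hypothetical helper, not part of the original header)
 * of how the two boundary helpers above combine into a bounds check: reads
 * may range over the whole zone size, while writes are limited to the zone
 * capacity (zcap), which may be smaller than the zone size.
 */
static inline bool nvme_zone_covers_write(NvmeZone *zone, uint64_t slba,
                                          uint32_t nlb)
{
    /* the write must begin at or after zslba and end within zslba + zcap */
    return slba >= zone->d.zslba &&
           slba + nlb <= nvme_zone_wr_boundary(zone);
}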
-static inline bool nvme_wp_is_valid(NvmeZone *zone) -{ - uint8_t st = nvme_get_zone_state(zone); - - return st != NVME_ZONE_STATE_FULL && - st != NVME_ZONE_STATE_READ_ONLY && - st != NVME_ZONE_STATE_OFFLINE; -} - -static inline uint8_t *nvme_get_zd_extension(NvmeNamespace *ns, - uint32_t zone_idx) -{ - return &ns->zd_extensions[zone_idx * ns->params.zd_extension_size]; -} - -static inline void nvme_aor_inc_open(NvmeNamespace *ns) -{ - assert(ns->nr_open_zones >= 0); - if (ns->params.max_open_zones) { - ns->nr_open_zones++; - assert(ns->nr_open_zones <= ns->params.max_open_zones); - } -} - -static inline void nvme_aor_dec_open(NvmeNamespace *ns) -{ - if (ns->params.max_open_zones) { - assert(ns->nr_open_zones > 0); - ns->nr_open_zones--; - } - assert(ns->nr_open_zones >= 0); -} - -static inline void nvme_aor_inc_active(NvmeNamespace *ns) -{ - assert(ns->nr_active_zones >= 0); - if (ns->params.max_active_zones) { - ns->nr_active_zones++; - assert(ns->nr_active_zones <= ns->params.max_active_zones); - } -} - -static inline void nvme_aor_dec_active(NvmeNamespace *ns) -{ - if (ns->params.max_active_zones) { - assert(ns->nr_active_zones > 0); - ns->nr_active_zones--; - assert(ns->nr_active_zones >= ns->nr_open_zones); - } - assert(ns->nr_active_zones >= 0); -} - -void nvme_ns_init_format(NvmeNamespace *ns); -int nvme_ns_setup(NvmeCtrl *n, NvmeNamespace *ns, Error **errp); -void nvme_ns_drain(NvmeNamespace *ns); -void nvme_ns_shutdown(NvmeNamespace *ns); -void nvme_ns_cleanup(NvmeNamespace *ns); - -typedef struct NvmeAsyncEvent { - QTAILQ_ENTRY(NvmeAsyncEvent) entry; - NvmeAerResult result; -} NvmeAsyncEvent; - -enum { - NVME_SG_ALLOC = 1 << 0, - NVME_SG_DMA = 1 << 1, -}; - -typedef struct NvmeSg { - int flags; - - union { - QEMUSGList qsg; - QEMUIOVector iov; - }; -} NvmeSg; - -typedef enum NvmeTxDirection { - NVME_TX_DIRECTION_TO_DEVICE = 0, - NVME_TX_DIRECTION_FROM_DEVICE = 1, -} NvmeTxDirection; - -typedef struct NvmeRequest { - struct NvmeSQueue *sq; - struct NvmeNamespace *ns; - BlockAIOCB *aiocb; - uint16_t status; - void *opaque; - NvmeCqe cqe; - NvmeCmd cmd; - BlockAcctCookie acct; - NvmeSg sg; - QTAILQ_ENTRY(NvmeRequest) entry; -} NvmeRequest; - -typedef struct NvmeBounceContext { - NvmeRequest *req; - - struct { - QEMUIOVector iov; - uint8_t *bounce; - } data, mdata; -} NvmeBounceContext; - -static inline const char *nvme_adm_opc_str(uint8_t opc) -{ - switch (opc) { - case NVME_ADM_CMD_DELETE_SQ: return "NVME_ADM_CMD_DELETE_SQ"; - case NVME_ADM_CMD_CREATE_SQ: return "NVME_ADM_CMD_CREATE_SQ"; - case NVME_ADM_CMD_GET_LOG_PAGE: return "NVME_ADM_CMD_GET_LOG_PAGE"; - case NVME_ADM_CMD_DELETE_CQ: return "NVME_ADM_CMD_DELETE_CQ"; - case NVME_ADM_CMD_CREATE_CQ: return "NVME_ADM_CMD_CREATE_CQ"; - case NVME_ADM_CMD_IDENTIFY: return "NVME_ADM_CMD_IDENTIFY"; - case NVME_ADM_CMD_ABORT: return "NVME_ADM_CMD_ABORT"; - case NVME_ADM_CMD_SET_FEATURES: return "NVME_ADM_CMD_SET_FEATURES"; - case NVME_ADM_CMD_GET_FEATURES: return "NVME_ADM_CMD_GET_FEATURES"; - case NVME_ADM_CMD_ASYNC_EV_REQ: return "NVME_ADM_CMD_ASYNC_EV_REQ"; - case NVME_ADM_CMD_NS_ATTACHMENT: return "NVME_ADM_CMD_NS_ATTACHMENT"; - case NVME_ADM_CMD_FORMAT_NVM: return "NVME_ADM_CMD_FORMAT_NVM"; - default: return "NVME_ADM_CMD_UNKNOWN"; - } -} - -static inline const char *nvme_io_opc_str(uint8_t opc) -{ - switch (opc) { - case NVME_CMD_FLUSH: return "NVME_NVM_CMD_FLUSH"; - case NVME_CMD_WRITE: return "NVME_NVM_CMD_WRITE"; - case NVME_CMD_READ: return "NVME_NVM_CMD_READ"; - case NVME_CMD_COMPARE: return "NVME_NVM_CMD_COMPARE"; - case
NVME_CMD_WRITE_ZEROES: return "NVME_NVM_CMD_WRITE_ZEROES"; - case NVME_CMD_DSM: return "NVME_NVM_CMD_DSM"; - case NVME_CMD_VERIFY: return "NVME_NVM_CMD_VERIFY"; - case NVME_CMD_COPY: return "NVME_NVM_CMD_COPY"; - case NVME_CMD_ZONE_MGMT_SEND: return "NVME_ZONED_CMD_MGMT_SEND"; - case NVME_CMD_ZONE_MGMT_RECV: return "NVME_ZONED_CMD_MGMT_RECV"; - case NVME_CMD_ZONE_APPEND: return "NVME_ZONED_CMD_ZONE_APPEND"; - default: return "NVME_NVM_CMD_UNKNOWN"; - } -} - -typedef struct NvmeSQueue { - struct NvmeCtrl *ctrl; - uint16_t sqid; - uint16_t cqid; - uint32_t head; - uint32_t tail; - uint32_t size; - uint64_t dma_addr; - QEMUTimer *timer; - NvmeRequest *io_req; - QTAILQ_HEAD(, NvmeRequest) req_list; - QTAILQ_HEAD(, NvmeRequest) out_req_list; - QTAILQ_ENTRY(NvmeSQueue) entry; -} NvmeSQueue; - -typedef struct NvmeCQueue { - struct NvmeCtrl *ctrl; - uint8_t phase; - uint16_t cqid; - uint16_t irq_enabled; - uint32_t head; - uint32_t tail; - uint32_t vector; - uint32_t size; - uint64_t dma_addr; - QEMUTimer *timer; - QTAILQ_HEAD(, NvmeSQueue) sq_list; - QTAILQ_HEAD(, NvmeRequest) req_list; -} NvmeCQueue; - -#define TYPE_NVME_BUS "nvme-bus" -#define NVME_BUS(obj) OBJECT_CHECK(NvmeBus, (obj), TYPE_NVME_BUS) - -typedef struct NvmeBus { - BusState parent_bus; -} NvmeBus; - -#define TYPE_NVME "nvme" -#define NVME(obj) \ - OBJECT_CHECK(NvmeCtrl, (obj), TYPE_NVME) - -typedef struct NvmeParams { - char *serial; - uint32_t num_queues; /* deprecated since 5.1 */ - uint32_t max_ioqpairs; - uint16_t msix_qsize; - uint32_t cmb_size_mb; - uint8_t aerl; - uint32_t aer_max_queued; - uint8_t mdts; - uint8_t vsl; - bool use_intel_id; - uint8_t zasl; - bool legacy_cmb; -} NvmeParams; - -typedef struct NvmeCtrl { - PCIDevice parent_obj; - MemoryRegion bar0; - MemoryRegion iomem; - NvmeBar bar; - NvmeParams params; - NvmeBus bus; - - uint16_t cntlid; - bool qs_created; - uint32_t page_size; - uint16_t page_bits; - uint16_t max_prp_ents; - uint16_t cqe_size; - uint16_t sqe_size; - uint32_t reg_size; - uint32_t max_q_ents; - uint8_t outstanding_aers; - uint32_t irq_status; - uint64_t host_timestamp; /* Timestamp sent by the host */ - uint64_t timestamp_set_qemu_clock_ms; /* QEMU clock time */ - uint64_t starttime_ms; - uint16_t temperature; - uint8_t smart_critical_warning; - - struct { - MemoryRegion mem; - uint8_t *buf; - bool cmse; - hwaddr cba; - } cmb; - - struct { - HostMemoryBackend *dev; - bool cmse; - hwaddr cba; - } pmr; - - uint8_t aer_mask; - NvmeRequest **aer_reqs; - QTAILQ_HEAD(, NvmeAsyncEvent) aer_queue; - int aer_queued; - - uint32_t dmrsl; - - /* Namespace IDs start at 1, so the bitmap is 1-based */ -#define NVME_CHANGED_NSID_SIZE (NVME_MAX_NAMESPACES + 1) - DECLARE_BITMAP(changed_nsids, NVME_CHANGED_NSID_SIZE); - - NvmeSubsystem *subsys; - - NvmeNamespace namespace; - NvmeNamespace *namespaces[NVME_MAX_NAMESPACES + 1]; - NvmeSQueue **sq; - NvmeCQueue **cq; - NvmeSQueue admin_sq; - NvmeCQueue admin_cq; - NvmeIdCtrl id_ctrl; - - struct { - struct { - uint16_t temp_thresh_hi; - uint16_t temp_thresh_low; - }; - uint32_t async_config; - } features; -} NvmeCtrl; - -static inline NvmeNamespace *nvme_ns(NvmeCtrl *n, uint32_t nsid) -{ - if (!nsid || nsid > NVME_MAX_NAMESPACES) { - return NULL; - } - - return n->namespaces[nsid]; -} - -static inline NvmeCQueue *nvme_cq(NvmeRequest *req) -{ - NvmeSQueue *sq = req->sq; - NvmeCtrl *n = sq->ctrl; - - return n->cq[sq->cqid]; -} - -static inline NvmeCtrl *nvme_ctrl(NvmeRequest *req) -{ - NvmeSQueue *sq = req->sq; - return sq->ctrl; -} - -static inline uint16_t
nvme_cid(NvmeRequest *req) -{ - if (!req) { - return 0xffff; - } - - return le16_to_cpu(req->cqe.cid); -} - -void nvme_attach_ns(NvmeCtrl *n, NvmeNamespace *ns); -uint16_t nvme_bounce_data(NvmeCtrl *n, uint8_t *ptr, uint32_t len, - NvmeTxDirection dir, NvmeRequest *req); -uint16_t nvme_bounce_mdata(NvmeCtrl *n, uint8_t *ptr, uint32_t len, - NvmeTxDirection dir, NvmeRequest *req); -void nvme_rw_complete_cb(void *opaque, int ret); -uint16_t nvme_map_dptr(NvmeCtrl *n, NvmeSg *sg, size_t len, - NvmeCmd *cmd); - -/* from Linux kernel (crypto/crct10dif_common.c) */ -static const uint16_t t10_dif_crc_table[256] = { - 0x0000, 0x8BB7, 0x9CD9, 0x176E, 0xB205, 0x39B2, 0x2EDC, 0xA56B, - 0xEFBD, 0x640A, 0x7364, 0xF8D3, 0x5DB8, 0xD60F, 0xC161, 0x4AD6, - 0x54CD, 0xDF7A, 0xC814, 0x43A3, 0xE6C8, 0x6D7F, 0x7A11, 0xF1A6, - 0xBB70, 0x30C7, 0x27A9, 0xAC1E, 0x0975, 0x82C2, 0x95AC, 0x1E1B, - 0xA99A, 0x222D, 0x3543, 0xBEF4, 0x1B9F, 0x9028, 0x8746, 0x0CF1, - 0x4627, 0xCD90, 0xDAFE, 0x5149, 0xF422, 0x7F95, 0x68FB, 0xE34C, - 0xFD57, 0x76E0, 0x618E, 0xEA39, 0x4F52, 0xC4E5, 0xD38B, 0x583C, - 0x12EA, 0x995D, 0x8E33, 0x0584, 0xA0EF, 0x2B58, 0x3C36, 0xB781, - 0xD883, 0x5334, 0x445A, 0xCFED, 0x6A86, 0xE131, 0xF65F, 0x7DE8, - 0x373E, 0xBC89, 0xABE7, 0x2050, 0x853B, 0x0E8C, 0x19E2, 0x9255, - 0x8C4E, 0x07F9, 0x1097, 0x9B20, 0x3E4B, 0xB5FC, 0xA292, 0x2925, - 0x63F3, 0xE844, 0xFF2A, 0x749D, 0xD1F6, 0x5A41, 0x4D2F, 0xC698, - 0x7119, 0xFAAE, 0xEDC0, 0x6677, 0xC31C, 0x48AB, 0x5FC5, 0xD472, - 0x9EA4, 0x1513, 0x027D, 0x89CA, 0x2CA1, 0xA716, 0xB078, 0x3BCF, - 0x25D4, 0xAE63, 0xB90D, 0x32BA, 0x97D1, 0x1C66, 0x0B08, 0x80BF, - 0xCA69, 0x41DE, 0x56B0, 0xDD07, 0x786C, 0xF3DB, 0xE4B5, 0x6F02, - 0x3AB1, 0xB106, 0xA668, 0x2DDF, 0x88B4, 0x0303, 0x146D, 0x9FDA, - 0xD50C, 0x5EBB, 0x49D5, 0xC262, 0x6709, 0xECBE, 0xFBD0, 0x7067, - 0x6E7C, 0xE5CB, 0xF2A5, 0x7912, 0xDC79, 0x57CE, 0x40A0, 0xCB17, - 0x81C1, 0x0A76, 0x1D18, 0x96AF, 0x33C4, 0xB873, 0xAF1D, 0x24AA, - 0x932B, 0x189C, 0x0FF2, 0x8445, 0x212E, 0xAA99, 0xBDF7, 0x3640, - 0x7C96, 0xF721, 0xE04F, 0x6BF8, 0xCE93, 0x4524, 0x524A, 0xD9FD, - 0xC7E6, 0x4C51, 0x5B3F, 0xD088, 0x75E3, 0xFE54, 0xE93A, 0x628D, - 0x285B, 0xA3EC, 0xB482, 0x3F35, 0x9A5E, 0x11E9, 0x0687, 0x8D30, - 0xE232, 0x6985, 0x7EEB, 0xF55C, 0x5037, 0xDB80, 0xCCEE, 0x4759, - 0x0D8F, 0x8638, 0x9156, 0x1AE1, 0xBF8A, 0x343D, 0x2353, 0xA8E4, - 0xB6FF, 0x3D48, 0x2A26, 0xA191, 0x04FA, 0x8F4D, 0x9823, 0x1394, - 0x5942, 0xD2F5, 0xC59B, 0x4E2C, 0xEB47, 0x60F0, 0x779E, 0xFC29, - 0x4BA8, 0xC01F, 0xD771, 0x5CC6, 0xF9AD, 0x721A, 0x6574, 0xEEC3, - 0xA415, 0x2FA2, 0x38CC, 0xB37B, 0x1610, 0x9DA7, 0x8AC9, 0x017E, - 0x1F65, 0x94D2, 0x83BC, 0x080B, 0xAD60, 0x26D7, 0x31B9, 0xBA0E, - 0xF0D8, 0x7B6F, 0x6C01, 0xE7B6, 0x42DD, 0xC96A, 0xDE04, 0x55B3 -}; - -uint16_t nvme_check_prinfo(NvmeNamespace *ns, uint16_t ctrl, uint64_t slba, - uint32_t reftag); -uint16_t nvme_dif_mangle_mdata(NvmeNamespace *ns, uint8_t *mbuf, size_t mlen, - uint64_t slba); -void nvme_dif_pract_generate_dif(NvmeNamespace *ns, uint8_t *buf, size_t len, - uint8_t *mbuf, size_t mlen, uint16_t apptag, - uint32_t reftag); -uint16_t nvme_dif_check(NvmeNamespace *ns, uint8_t *buf, size_t len, - uint8_t *mbuf, size_t mlen, uint16_t ctrl, - uint64_t slba, uint16_t apptag, - uint16_t appmask, uint32_t reftag); -uint16_t nvme_dif_rw(NvmeCtrl *n, NvmeRequest *req); - - -#endif /* HW_NVME_H */ diff --git a/hw/block/trace-events b/hw/block/trace-events index fa12e3a67a..646917d045 100644 --- a/hw/block/trace-events +++ b/hw/block/trace-events @@ -49,212 +49,6 @@ virtio_blk_submit_multireq(void *vdev, 
void *mrb, int start, int num_reqs, uint6 hd_geometry_lchs_guess(void *blk, int cyls, int heads, int secs) "blk %p LCHS %d %d %d" hd_geometry_guess(void *blk, uint32_t cyls, uint32_t heads, uint32_t secs, int trans) "blk %p CHS %u %u %u trans %d" -# nvme.c -# nvme traces for successful events -pci_nvme_irq_msix(uint32_t vector) "raising MSI-X IRQ vector %u" -pci_nvme_irq_pin(void) "pulsing IRQ pin" -pci_nvme_irq_masked(void) "IRQ is masked" -pci_nvme_dma_read(uint64_t prp1, uint64_t prp2) "DMA read, prp1=0x%"PRIx64" prp2=0x%"PRIx64"" -pci_nvme_map_addr(uint64_t addr, uint64_t len) "addr 0x%"PRIx64" len %"PRIu64"" -pci_nvme_map_addr_cmb(uint64_t addr, uint64_t len) "addr 0x%"PRIx64" len %"PRIu64"" -pci_nvme_map_prp(uint64_t trans_len, uint32_t len, uint64_t prp1, uint64_t prp2, int num_prps) "trans_len %"PRIu64" len %"PRIu32" prp1 0x%"PRIx64" prp2 0x%"PRIx64" num_prps %d" -pci_nvme_map_sgl(uint8_t typ, uint64_t len) "type 0x%"PRIx8" len %"PRIu64"" -pci_nvme_io_cmd(uint16_t cid, uint32_t nsid, uint16_t sqid, uint8_t opcode, const char *opname) "cid %"PRIu16" nsid %"PRIu32" sqid %"PRIu16" opc 0x%"PRIx8" opname '%s'" -pci_nvme_admin_cmd(uint16_t cid, uint16_t sqid, uint8_t opcode, const char *opname) "cid %"PRIu16" sqid %"PRIu16" opc 0x%"PRIx8" opname '%s'" -pci_nvme_flush(uint16_t cid, uint32_t nsid) "cid %"PRIu16" nsid %"PRIu32"" -pci_nvme_format(uint16_t cid, uint32_t nsid, uint8_t lbaf, uint8_t mset, uint8_t pi, uint8_t pil) "cid %"PRIu16" nsid %"PRIu32" lbaf %"PRIu8" mset %"PRIu8" pi %"PRIu8" pil %"PRIu8"" -pci_nvme_format_ns(uint16_t cid, uint32_t nsid, uint8_t lbaf, uint8_t mset, uint8_t pi, uint8_t pil) "cid %"PRIu16" nsid %"PRIu32" lbaf %"PRIu8" mset %"PRIu8" pi %"PRIu8" pil %"PRIu8"" -pci_nvme_format_cb(uint16_t cid, uint32_t nsid) "cid %"PRIu16" nsid %"PRIu32"" -pci_nvme_read(uint16_t cid, uint32_t nsid, uint32_t nlb, uint64_t count, uint64_t lba) "cid %"PRIu16" nsid %"PRIu32" nlb %"PRIu32" count %"PRIu64" lba 0x%"PRIx64"" -pci_nvme_write(uint16_t cid, const char *verb, uint32_t nsid, uint32_t nlb, uint64_t count, uint64_t lba) "cid %"PRIu16" opname '%s' nsid %"PRIu32" nlb %"PRIu32" count %"PRIu64" lba 0x%"PRIx64"" -pci_nvme_rw_cb(uint16_t cid, const char *blkname) "cid %"PRIu16" blk '%s'" -pci_nvme_misc_cb(uint16_t cid, const char *blkname) "cid %"PRIu16" blk '%s'" -pci_nvme_dif_rw(uint8_t pract, uint8_t prinfo) "pract 0x%"PRIx8" prinfo 0x%"PRIx8"" -pci_nvme_dif_rw_cb(uint16_t cid, const char *blkname) "cid %"PRIu16" blk '%s'" -pci_nvme_dif_rw_mdata_in_cb(uint16_t cid, const char *blkname) "cid %"PRIu16" blk '%s'" -pci_nvme_dif_rw_mdata_out_cb(uint16_t cid, const char *blkname) "cid %"PRIu16" blk '%s'" -pci_nvme_dif_rw_check_cb(uint16_t cid, uint8_t prinfo, uint16_t apptag, uint16_t appmask, uint32_t reftag) "cid %"PRIu16" prinfo 0x%"PRIx8" apptag 0x%"PRIx16" appmask 0x%"PRIx16" reftag 0x%"PRIx32"" -pci_nvme_dif_pract_generate_dif(size_t len, size_t lba_size, size_t chksum_len, uint16_t apptag, uint32_t reftag) "len %zu lba_size %zu chksum_len %zu apptag 0x%"PRIx16" reftag 0x%"PRIx32"" -pci_nvme_dif_check(uint8_t prinfo, uint16_t chksum_len) "prinfo 0x%"PRIx8" chksum_len %"PRIu16"" -pci_nvme_dif_prchk_disabled(uint16_t apptag, uint32_t reftag) "apptag 0x%"PRIx16" reftag 0x%"PRIx32"" -pci_nvme_dif_prchk_guard(uint16_t guard, uint16_t crc) "guard 0x%"PRIx16" crc 0x%"PRIx16"" -pci_nvme_dif_prchk_apptag(uint16_t apptag, uint16_t elbat, uint16_t elbatm) "apptag 0x%"PRIx16" elbat 0x%"PRIx16" elbatm 0x%"PRIx16"" -pci_nvme_dif_prchk_reftag(uint32_t reftag, uint32_t elbrt) "reftag 
0x%"PRIx32" elbrt 0x%"PRIx32"" -pci_nvme_copy(uint16_t cid, uint32_t nsid, uint16_t nr, uint8_t format) "cid %"PRIu16" nsid %"PRIu32" nr %"PRIu16" format 0x%"PRIx8"" -pci_nvme_copy_source_range(uint64_t slba, uint32_t nlb) "slba 0x%"PRIx64" nlb %"PRIu32"" -pci_nvme_copy_in_complete(uint16_t cid) "cid %"PRIu16"" -pci_nvme_copy_cb(uint16_t cid) "cid %"PRIu16"" -pci_nvme_verify(uint16_t cid, uint32_t nsid, uint64_t slba, uint32_t nlb) "cid %"PRIu16" nsid %"PRIu32" slba 0x%"PRIx64" nlb %"PRIu32"" -pci_nvme_verify_mdata_in_cb(uint16_t cid, const char *blkname) "cid %"PRIu16" blk '%s'" -pci_nvme_verify_cb(uint16_t cid, uint8_t prinfo, uint16_t apptag, uint16_t appmask, uint32_t reftag) "cid %"PRIu16" prinfo 0x%"PRIx8" apptag 0x%"PRIx16" appmask 0x%"PRIx16" reftag 0x%"PRIx32"" -pci_nvme_rw_complete_cb(uint16_t cid, const char *blkname) "cid %"PRIu16" blk '%s'" -pci_nvme_block_status(int64_t offset, int64_t bytes, int64_t pnum, int ret, bool zeroed) "offset %"PRId64" bytes %"PRId64" pnum %"PRId64" ret 0x%x zeroed %d" -pci_nvme_dsm(uint16_t cid, uint32_t nsid, uint32_t nr, uint32_t attr) "cid %"PRIu16" nsid %"PRIu32" nr %"PRIu32" attr 0x%"PRIx32"" -pci_nvme_dsm_deallocate(uint16_t cid, uint32_t nsid, uint64_t slba, uint32_t nlb) "cid %"PRIu16" nsid %"PRIu32" slba %"PRIu64" nlb %"PRIu32"" -pci_nvme_dsm_single_range_limit_exceeded(uint32_t nlb, uint32_t dmrsl) "nlb %"PRIu32" dmrsl %"PRIu32"" -pci_nvme_compare(uint16_t cid, uint32_t nsid, uint64_t slba, uint32_t nlb) "cid %"PRIu16" nsid %"PRIu32" slba 0x%"PRIx64" nlb %"PRIu32"" -pci_nvme_compare_data_cb(uint16_t cid) "cid %"PRIu16"" -pci_nvme_compare_mdata_cb(uint16_t cid) "cid %"PRIu16"" -pci_nvme_aio_discard_cb(uint16_t cid) "cid %"PRIu16"" -pci_nvme_aio_copy_in_cb(uint16_t cid) "cid %"PRIu16"" -pci_nvme_aio_zone_reset_cb(uint16_t cid, uint64_t zslba) "cid %"PRIu16" zslba 0x%"PRIx64"" -pci_nvme_aio_flush_cb(uint16_t cid, const char *blkname) "cid %"PRIu16" blk '%s'" -pci_nvme_create_sq(uint64_t addr, uint16_t sqid, uint16_t cqid, uint16_t qsize, uint16_t qflags) "create submission queue, addr=0x%"PRIx64", sqid=%"PRIu16", cqid=%"PRIu16", qsize=%"PRIu16", qflags=%"PRIu16"" -pci_nvme_create_cq(uint64_t addr, uint16_t cqid, uint16_t vector, uint16_t size, uint16_t qflags, int ien) "create completion queue, addr=0x%"PRIx64", cqid=%"PRIu16", vector=%"PRIu16", qsize=%"PRIu16", qflags=%"PRIu16", ien=%d" -pci_nvme_del_sq(uint16_t qid) "deleting submission queue sqid=%"PRIu16"" -pci_nvme_del_cq(uint16_t cqid) "deleted completion queue, cqid=%"PRIu16"" -pci_nvme_identify(uint16_t cid, uint8_t cns, uint16_t ctrlid, uint8_t csi) "cid %"PRIu16" cns 0x%"PRIx8" ctrlid %"PRIu16" csi 0x%"PRIx8"" -pci_nvme_identify_ctrl(void) "identify controller" -pci_nvme_identify_ctrl_csi(uint8_t csi) "identify controller, csi=0x%"PRIx8"" -pci_nvme_identify_ns(uint32_t ns) "nsid %"PRIu32"" -pci_nvme_identify_ns_attached_list(uint16_t cntid) "cntid=%"PRIu16"" -pci_nvme_identify_ns_csi(uint32_t ns, uint8_t csi) "nsid=%"PRIu32", csi=0x%"PRIx8"" -pci_nvme_identify_nslist(uint32_t ns) "nsid %"PRIu32"" -pci_nvme_identify_nslist_csi(uint16_t ns, uint8_t csi) "nsid=%"PRIu16", csi=0x%"PRIx8"" -pci_nvme_identify_cmd_set(void) "identify i/o command set" -pci_nvme_identify_ns_descr_list(uint32_t ns) "nsid %"PRIu32"" -pci_nvme_get_log(uint16_t cid, uint8_t lid, uint8_t lsp, uint8_t rae, uint32_t len, uint64_t off) "cid %"PRIu16" lid 0x%"PRIx8" lsp 0x%"PRIx8" rae 0x%"PRIx8" len %"PRIu32" off %"PRIu64"" -pci_nvme_getfeat(uint16_t cid, uint32_t nsid, uint8_t fid, uint8_t sel, uint32_t cdw11) "cid 
%"PRIu16" nsid 0x%"PRIx32" fid 0x%"PRIx8" sel 0x%"PRIx8" cdw11 0x%"PRIx32"" -pci_nvme_setfeat(uint16_t cid, uint32_t nsid, uint8_t fid, uint8_t save, uint32_t cdw11) "cid %"PRIu16" nsid 0x%"PRIx32" fid 0x%"PRIx8" save 0x%"PRIx8" cdw11 0x%"PRIx32"" -pci_nvme_getfeat_vwcache(const char* result) "get feature volatile write cache, result=%s" -pci_nvme_getfeat_numq(int result) "get feature number of queues, result=%d" -pci_nvme_setfeat_numq(int reqcq, int reqsq, int gotcq, int gotsq) "requested cq_count=%d sq_count=%d, responding with cq_count=%d sq_count=%d" -pci_nvme_setfeat_timestamp(uint64_t ts) "set feature timestamp = 0x%"PRIx64"" -pci_nvme_getfeat_timestamp(uint64_t ts) "get feature timestamp = 0x%"PRIx64"" -pci_nvme_process_aers(int queued) "queued %d" -pci_nvme_aer(uint16_t cid) "cid %"PRIu16"" -pci_nvme_aer_aerl_exceeded(void) "aerl exceeded" -pci_nvme_aer_masked(uint8_t type, uint8_t mask) "type 0x%"PRIx8" mask 0x%"PRIx8"" -pci_nvme_aer_post_cqe(uint8_t typ, uint8_t info, uint8_t log_page) "type 0x%"PRIx8" info 0x%"PRIx8" lid 0x%"PRIx8"" -pci_nvme_ns_attachment(uint16_t cid, uint8_t sel) "cid %"PRIu16", sel=0x%"PRIx8"" -pci_nvme_ns_attachment_attach(uint16_t cntlid, uint32_t nsid) "cntlid=0x%"PRIx16", nsid=0x%"PRIx32"" -pci_nvme_enqueue_event(uint8_t typ, uint8_t info, uint8_t log_page) "type 0x%"PRIx8" info 0x%"PRIx8" lid 0x%"PRIx8"" -pci_nvme_enqueue_event_noqueue(int queued) "queued %d" -pci_nvme_enqueue_event_masked(uint8_t typ) "type 0x%"PRIx8"" -pci_nvme_no_outstanding_aers(void) "ignoring event; no outstanding AERs" -pci_nvme_enqueue_req_completion(uint16_t cid, uint16_t cqid, uint16_t status) "cid %"PRIu16" cqid %"PRIu16" status 0x%"PRIx16"" -pci_nvme_mmio_read(uint64_t addr, unsigned size) "addr 0x%"PRIx64" size %d" -pci_nvme_mmio_write(uint64_t addr, uint64_t data, unsigned size) "addr 0x%"PRIx64" data 0x%"PRIx64" size %d" -pci_nvme_mmio_doorbell_cq(uint16_t cqid, uint16_t new_head) "cqid %"PRIu16" new_head %"PRIu16"" -pci_nvme_mmio_doorbell_sq(uint16_t sqid, uint16_t new_tail) "sqid %"PRIu16" new_tail %"PRIu16"" -pci_nvme_mmio_intm_set(uint64_t data, uint64_t new_mask) "wrote MMIO, interrupt mask set, data=0x%"PRIx64", new_mask=0x%"PRIx64"" -pci_nvme_mmio_intm_clr(uint64_t data, uint64_t new_mask) "wrote MMIO, interrupt mask clr, data=0x%"PRIx64", new_mask=0x%"PRIx64"" -pci_nvme_mmio_cfg(uint64_t data) "wrote MMIO, config controller config=0x%"PRIx64"" -pci_nvme_mmio_aqattr(uint64_t data) "wrote MMIO, admin queue attributes=0x%"PRIx64"" -pci_nvme_mmio_asqaddr(uint64_t data) "wrote MMIO, admin submission queue address=0x%"PRIx64"" -pci_nvme_mmio_acqaddr(uint64_t data) "wrote MMIO, admin completion queue address=0x%"PRIx64"" -pci_nvme_mmio_asqaddr_hi(uint64_t data, uint64_t new_addr) "wrote MMIO, admin submission queue high half=0x%"PRIx64", new_address=0x%"PRIx64"" -pci_nvme_mmio_acqaddr_hi(uint64_t data, uint64_t new_addr) "wrote MMIO, admin completion queue high half=0x%"PRIx64", new_address=0x%"PRIx64"" -pci_nvme_mmio_start_success(void) "setting controller enable bit succeeded" -pci_nvme_mmio_stopped(void) "cleared controller enable bit" -pci_nvme_mmio_shutdown_set(void) "shutdown bit set" -pci_nvme_mmio_shutdown_cleared(void) "shutdown bit cleared" -pci_nvme_open_zone(uint64_t slba, uint32_t zone_idx, int all) "open zone, slba=%"PRIu64", idx=%"PRIu32", all=%"PRIi32"" -pci_nvme_close_zone(uint64_t slba, uint32_t zone_idx, int all) "close zone, slba=%"PRIu64", idx=%"PRIu32", all=%"PRIi32"" -pci_nvme_finish_zone(uint64_t slba, uint32_t zone_idx, int all) "finish zone, 
slba=%"PRIu64", idx=%"PRIu32", all=%"PRIi32"" -pci_nvme_reset_zone(uint64_t slba, uint32_t zone_idx, int all) "reset zone, slba=%"PRIu64", idx=%"PRIu32", all=%"PRIi32"" -pci_nvme_offline_zone(uint64_t slba, uint32_t zone_idx, int all) "offline zone, slba=%"PRIu64", idx=%"PRIu32", all=%"PRIi32"" -pci_nvme_set_descriptor_extension(uint64_t slba, uint32_t zone_idx) "set zone descriptor extension, slba=%"PRIu64", idx=%"PRIu32"" -pci_nvme_zd_extension_set(uint32_t zone_idx) "set descriptor extension for zone_idx=%"PRIu32"" -pci_nvme_clear_ns_close(uint32_t state, uint64_t slba) "zone state=%"PRIu32", slba=%"PRIu64" transitioned to Closed state" -pci_nvme_clear_ns_reset(uint32_t state, uint64_t slba) "zone state=%"PRIu32", slba=%"PRIu64" transitioned to Empty state" - -# nvme traces for error conditions -pci_nvme_err_mdts(size_t len) "len %zu" -pci_nvme_err_zasl(size_t len) "len %zu" -pci_nvme_err_req_status(uint16_t cid, uint32_t nsid, uint16_t status, uint8_t opc) "cid %"PRIu16" nsid %"PRIu32" status 0x%"PRIx16" opc 0x%"PRIx8"" -pci_nvme_err_addr_read(uint64_t addr) "addr 0x%"PRIx64"" -pci_nvme_err_addr_write(uint64_t addr) "addr 0x%"PRIx64"" -pci_nvme_err_cfs(void) "controller fatal status" -pci_nvme_err_aio(uint16_t cid, const char *errname, uint16_t status) "cid %"PRIu16" err '%s' status 0x%"PRIx16"" -pci_nvme_err_copy_invalid_format(uint8_t format) "format 0x%"PRIx8"" -pci_nvme_err_invalid_sgld(uint16_t cid, uint8_t typ) "cid %"PRIu16" type 0x%"PRIx8"" -pci_nvme_err_invalid_num_sgld(uint16_t cid, uint8_t typ) "cid %"PRIu16" type 0x%"PRIx8"" -pci_nvme_err_invalid_sgl_excess_length(uint32_t residual) "residual %"PRIu32"" -pci_nvme_err_invalid_dma(void) "PRP/SGL is too small for transfer size" -pci_nvme_err_invalid_prplist_ent(uint64_t prplist) "PRP list entry is not page aligned: 0x%"PRIx64"" -pci_nvme_err_invalid_prp2_align(uint64_t prp2) "PRP2 is not page aligned: 0x%"PRIx64"" -pci_nvme_err_invalid_opc(uint8_t opc) "invalid opcode 0x%"PRIx8"" -pci_nvme_err_invalid_admin_opc(uint8_t opc) "invalid admin opcode 0x%"PRIx8"" -pci_nvme_err_invalid_lba_range(uint64_t start, uint64_t len, uint64_t limit) "Invalid LBA start=%"PRIu64" len=%"PRIu64" limit=%"PRIu64"" -pci_nvme_err_invalid_log_page_offset(uint64_t ofs, uint64_t size) "must be <= %"PRIu64", got %"PRIu64"" -pci_nvme_err_cmb_invalid_cba(uint64_t cmbmsc) "cmbmsc 0x%"PRIx64"" -pci_nvme_err_cmb_not_enabled(uint64_t cmbmsc) "cmbmsc 0x%"PRIx64"" -pci_nvme_err_unaligned_zone_cmd(uint8_t action, uint64_t slba, uint64_t zslba) "unaligned zone op 0x%"PRIx32", got slba=%"PRIu64", zslba=%"PRIu64"" -pci_nvme_err_invalid_zone_state_transition(uint8_t action, uint64_t slba, uint8_t attrs) "action=0x%"PRIx8", slba=%"PRIu64", attrs=0x%"PRIx32"" -pci_nvme_err_write_not_at_wp(uint64_t slba, uint64_t zone, uint64_t wp) "writing at slba=%"PRIu64", zone=%"PRIu64", but wp=%"PRIu64"" -pci_nvme_err_append_not_at_start(uint64_t slba, uint64_t zone) "appending at slba=%"PRIu64", but zone=%"PRIu64"" -pci_nvme_err_zone_is_full(uint64_t zslba) "zslba 0x%"PRIx64"" -pci_nvme_err_zone_is_read_only(uint64_t zslba) "zslba 0x%"PRIx64"" -pci_nvme_err_zone_is_offline(uint64_t zslba) "zslba 0x%"PRIx64"" -pci_nvme_err_zone_boundary(uint64_t slba, uint32_t nlb, uint64_t zcap) "lba 0x%"PRIx64" nlb %"PRIu32" zcap 0x%"PRIx64"" -pci_nvme_err_zone_invalid_write(uint64_t slba, uint64_t wp) "lba 0x%"PRIx64" wp 0x%"PRIx64"" -pci_nvme_err_zone_write_not_ok(uint64_t slba, uint32_t nlb, uint16_t status) "slba=%"PRIu64", nlb=%"PRIu32", status=0x%"PRIx16"" 
-pci_nvme_err_zone_read_not_ok(uint64_t slba, uint32_t nlb, uint16_t status) "slba=%"PRIu64", nlb=%"PRIu32", status=0x%"PRIx16"" -pci_nvme_err_insuff_active_res(uint32_t max_active) "max_active=%"PRIu32" zone limit exceeded" -pci_nvme_err_insuff_open_res(uint32_t max_open) "max_open=%"PRIu32" zone limit exceeded" -pci_nvme_err_zd_extension_map_error(uint32_t zone_idx) "can't map descriptor extension for zone_idx=%"PRIu32"" -pci_nvme_err_invalid_iocsci(uint32_t idx) "unsupported command set combination index %"PRIu32"" -pci_nvme_err_invalid_del_sq(uint16_t qid) "invalid submission queue deletion, sqid=%"PRIu16"" -pci_nvme_err_invalid_create_sq_cqid(uint16_t cqid) "failed creating submission queue, invalid cqid=%"PRIu16"" -pci_nvme_err_invalid_create_sq_sqid(uint16_t sqid) "failed creating submission queue, invalid sqid=%"PRIu16"" -pci_nvme_err_invalid_create_sq_size(uint16_t qsize) "failed creating submission queue, invalid qsize=%"PRIu16"" -pci_nvme_err_invalid_create_sq_addr(uint64_t addr) "failed creating submission queue, addr=0x%"PRIx64"" -pci_nvme_err_invalid_create_sq_qflags(uint16_t qflags) "failed creating submission queue, qflags=%"PRIu16"" -pci_nvme_err_invalid_del_cq_cqid(uint16_t cqid) "failed deleting completion queue, cqid=%"PRIu16"" -pci_nvme_err_invalid_del_cq_notempty(uint16_t cqid) "failed deleting completion queue, it is not empty, cqid=%"PRIu16"" -pci_nvme_err_invalid_create_cq_cqid(uint16_t cqid) "failed creating completion queue, cqid=%"PRIu16"" -pci_nvme_err_invalid_create_cq_size(uint16_t size) "failed creating completion queue, size=%"PRIu16"" -pci_nvme_err_invalid_create_cq_addr(uint64_t addr) "failed creating completion queue, addr=0x%"PRIx64"" -pci_nvme_err_invalid_create_cq_vector(uint16_t vector) "failed creating completion queue, vector=%"PRIu16"" -pci_nvme_err_invalid_create_cq_qflags(uint16_t qflags) "failed creating completion queue, qflags=%"PRIu16"" -pci_nvme_err_invalid_identify_cns(uint16_t cns) "identify, invalid cns=0x%"PRIx16"" -pci_nvme_err_invalid_getfeat(int dw10) "invalid get features, dw10=0x%"PRIx32"" -pci_nvme_err_invalid_setfeat(uint32_t dw10) "invalid set features, dw10=0x%"PRIx32"" -pci_nvme_err_invalid_log_page(uint16_t cid, uint16_t lid) "cid %"PRIu16" lid 0x%"PRIx16"" -pci_nvme_err_startfail_cq(void) "nvme_start_ctrl failed because there are non-admin completion queues" -pci_nvme_err_startfail_sq(void) "nvme_start_ctrl failed because there are non-admin submission queues" -pci_nvme_err_startfail_nbarasq(void) "nvme_start_ctrl failed because the admin submission queue address is null" -pci_nvme_err_startfail_nbaracq(void) "nvme_start_ctrl failed because the admin completion queue address is null" -pci_nvme_err_startfail_asq_misaligned(uint64_t addr) "nvme_start_ctrl failed because the admin submission queue address is misaligned: 0x%"PRIx64"" -pci_nvme_err_startfail_acq_misaligned(uint64_t addr) "nvme_start_ctrl failed because the admin completion queue address is misaligned: 0x%"PRIx64"" -pci_nvme_err_startfail_page_too_small(uint8_t log2ps, uint8_t maxlog2ps) "nvme_start_ctrl failed because the page size is too small: log2size=%u, min=%u" -pci_nvme_err_startfail_page_too_large(uint8_t log2ps, uint8_t maxlog2ps) "nvme_start_ctrl failed because the page size is too large: log2size=%u, max=%u" -pci_nvme_err_startfail_cqent_too_small(uint8_t log2ps, uint8_t maxlog2ps) "nvme_start_ctrl failed because the completion queue entry size is too small: log2size=%u, min=%u" -pci_nvme_err_startfail_cqent_too_large(uint8_t log2ps, uint8_t maxlog2ps)
"nvme_start_ctrl failed because the completion queue entry size is too large: log2size=%u, max=%u" -pci_nvme_err_startfail_sqent_too_small(uint8_t log2ps, uint8_t maxlog2ps) "nvme_start_ctrl failed because the submission queue entry size is too small: log2size=%u, min=%u" -pci_nvme_err_startfail_sqent_too_large(uint8_t log2ps, uint8_t maxlog2ps) "nvme_start_ctrl failed because the submission queue entry size is too large: log2size=%u, max=%u" -pci_nvme_err_startfail_css(uint8_t css) "nvme_start_ctrl failed because invalid command set selected:%u" -pci_nvme_err_startfail_asqent_sz_zero(void) "nvme_start_ctrl failed because the admin submission queue size is zero" -pci_nvme_err_startfail_acqent_sz_zero(void) "nvme_start_ctrl failed because the admin completion queue size is zero" -pci_nvme_err_startfail_zasl_too_small(uint32_t zasl, uint32_t pagesz) "nvme_start_ctrl failed because zone append size limit %"PRIu32" is too small, needs to be >= %"PRIu32"" -pci_nvme_err_startfail(void) "setting controller enable bit failed" -pci_nvme_err_invalid_mgmt_action(uint8_t action) "action=0x%"PRIx8"" - -# Traces for undefined behavior -pci_nvme_ub_mmiowr_misaligned32(uint64_t offset) "MMIO write not 32-bit aligned, offset=0x%"PRIx64"" -pci_nvme_ub_mmiowr_toosmall(uint64_t offset, unsigned size) "MMIO write smaller than 32 bits, offset=0x%"PRIx64", size=%u" -pci_nvme_ub_mmiowr_intmask_with_msix(void) "undefined access to interrupt mask set when MSI-X is enabled" -pci_nvme_ub_mmiowr_ro_csts(void) "attempted to set a read only bit of controller status" -pci_nvme_ub_mmiowr_ssreset_w1c_unsupported(void) "attempted to W1C CSTS.NSSRO but CAP.NSSRS is zero (not supported)" -pci_nvme_ub_mmiowr_ssreset_unsupported(void) "attempted NVM subsystem reset but CAP.NSSRS is zero (not supported)" -pci_nvme_ub_mmiowr_cmbloc_reserved(void) "invalid write to reserved CMBLOC when CMBSZ is zero, ignored" -pci_nvme_ub_mmiowr_cmbsz_readonly(void) "invalid write to read only CMBSZ, ignored" -pci_nvme_ub_mmiowr_pmrcap_readonly(void) "invalid write to read only PMRCAP, ignored" -pci_nvme_ub_mmiowr_pmrsts_readonly(void) "invalid write to read only PMRSTS, ignored" -pci_nvme_ub_mmiowr_pmrebs_readonly(void) "invalid write to read only PMREBS, ignored" -pci_nvme_ub_mmiowr_pmrswtp_readonly(void) "invalid write to read only PMRSWTP, ignored" -pci_nvme_ub_mmiowr_invalid(uint64_t offset, uint64_t data) "invalid MMIO write, offset=0x%"PRIx64", data=0x%"PRIx64"" -pci_nvme_ub_mmiord_misaligned32(uint64_t offset) "MMIO read not 32-bit aligned, offset=0x%"PRIx64"" -pci_nvme_ub_mmiord_toosmall(uint64_t offset) "MMIO read smaller than 32-bits, offset=0x%"PRIx64"" -pci_nvme_ub_mmiord_invalid_ofs(uint64_t offset) "MMIO read beyond last register, offset=0x%"PRIx64", returning 0" -pci_nvme_ub_db_wr_misaligned(uint64_t offset) "doorbell write not 32-bit aligned, offset=0x%"PRIx64", ignoring" -pci_nvme_ub_db_wr_invalid_cq(uint32_t qid) "completion queue doorbell write for nonexistent queue, cqid=%"PRIu32", ignoring" -pci_nvme_ub_db_wr_invalid_cqhead(uint32_t qid, uint16_t new_head) "completion queue doorbell write value beyond queue size, cqid=%"PRIu32", new_head=%"PRIu16", ignoring" -pci_nvme_ub_db_wr_invalid_sq(uint32_t qid) "submission queue doorbell write for nonexistent queue, sqid=%"PRIu32", ignoring" -pci_nvme_ub_db_wr_invalid_sqtail(uint32_t qid, uint16_t new_tail) "submission queue doorbell write value beyond queue size, sqid=%"PRIu32", new_head=%"PRIu16", ignoring" -pci_nvme_ub_unknown_css_value(void) "unknown value in cc.css field" - # 
xen-block.c xen_block_realize(const char *type, uint32_t disk, uint32_t partition) "%s d%up%u" xen_block_connect(const char *type, uint32_t disk, uint32_t partition) "%s d%up%u"
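For reference, the doorbell decode in nvme_process_db() earlier in this diff follows the standard NVMe register layout for a doorbell stride of 4 bytes (this controller leaves CAP.DSTRD at zero): submission queue y's tail doorbell sits at offset 0x1000 + (2y * 4), and completion queue y's head doorbell at 0x1000 + ((2y + 1) * 4). A minimal sketch of the forward mapping, using hypothetical helper names that mirror the decode in the deleted code:

#include <stdint.h>

/* SQ tail doorbells occupy the even 4-byte slots after offset 0x1000 */
static inline uint64_t nvme_sq_tail_db_offset(uint16_t sqid)
{
    return 0x1000 + (2 * (uint64_t)sqid) * 4;
}

/* CQ head doorbells occupy the odd 4-byte slots */
static inline uint64_t nvme_cq_head_db_offset(uint16_t cqid)
{
    return 0x1000 + (2 * (uint64_t)cqid + 1) * 4;
}

Inverting these is exactly what the deleted code does: ((addr - 0x1000) >> 2) & 1 selects between the two doorbell kinds, and shifting by 3 recovers the queue id.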