Diffstat (limited to 'hw/block/nvme.c')
-rw-r--r--  hw/block/nvme.c  101
1 file changed, 76 insertions(+), 25 deletions(-)
diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index 7f1c8dd775..1e13d25b08 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -27,7 +27,9 @@
* zoned=<true|false[optional]>
*
* Note cmb_size_mb denotes size of CMB in MB. CMB is assumed to be at
- * offset 0 in BAR2 and supports only WDS, RDS and SQS for now.
+ * offset 0 in BAR2 and supports only WDS, RDS and SQS for now. By default, the
+ * device will use the "v1.4 CMB scheme" - use the `legacy-cmb` parameter to
+ * always enable the CMBLOC and CMBSZ registers (v1.3 behavior).
*
* Enabling pmr emulation can be achieved by pointing to memory-backend-file.
* For example:
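The "v1.4 CMB scheme" referred to in the comment above means that CMBLOC and CMBSZ read as zero after reset, and that the CMB only becomes addressable once the host programs CMBMSC: setting CRE exposes the registers, and setting CMSE together with a controller base address (CBA) enables the memory space (this is what the new n->cmb.cmse flag tracks in the hunks below). The following is a minimal host-side sketch of that enable sequence, not part of the patch; the register offsets and bit positions (CMBMSC at 0x50 with CRE in bit 0, CMSE in bit 1 and CBA in bits 63:12) follow NVMe 1.4, and the regs pointer to a mapped BAR0 is assumed for illustration.

#include <stdint.h>

#define NVME_REG_CMBLOC 0x38
#define NVME_REG_CMBSZ  0x3c
#define NVME_REG_CMBMSC 0x50

/* Hypothetical host-side helper; 'regs' points at the controller's BAR0. */
static void host_enable_cmb(volatile uint8_t *regs, uint64_t cmb_bus_addr)
{
    uint64_t cmbmsc = 0;

    cmbmsc |= 1ULL << 0;                  /* CRE: expose CMBLOC/CMBSZ      */
    cmbmsc |= 1ULL << 1;                  /* CMSE: enable CMB memory space */
    cmbmsc |= (cmb_bus_addr >> 12) << 12; /* CBA: 4 KiB aligned base       */

    *(volatile uint64_t *)(regs + NVME_REG_CMBMSC) = cmbmsc;

    /* CMBLOC and CMBSZ only carry valid values once CRE has been set. */
    (void)*(volatile uint32_t *)(regs + NVME_REG_CMBLOC);
    (void)*(volatile uint32_t *)(regs + NVME_REG_CMBSZ);
}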
@@ -260,17 +262,22 @@ static int nvme_aor_check(NvmeNamespace *ns, uint32_t act, uint32_t opn)
static bool nvme_addr_is_cmb(NvmeCtrl *n, hwaddr addr)
{
- hwaddr low = n->ctrl_mem.addr;
- hwaddr hi = n->ctrl_mem.addr + int128_get64(n->ctrl_mem.size);
+ hwaddr hi, lo;
- return addr >= low && addr < hi;
+ if (!n->cmb.cmse) {
+ return false;
+ }
+
+ lo = n->params.legacy_cmb ? n->cmb.mem.addr : n->cmb.cba;
+ hi = lo + int128_get64(n->cmb.mem.size);
+
+ return addr >= lo && addr < hi;
}
static inline void *nvme_addr_to_cmb(NvmeCtrl *n, hwaddr addr)
{
- assert(nvme_addr_is_cmb(n, addr));
-
- return &n->cmbuf[addr - n->ctrl_mem.addr];
+ hwaddr base = n->params.legacy_cmb ? n->cmb.mem.addr : n->cmb.cba;
+ return &n->cmb.buf[addr - base];
}
static bool nvme_addr_is_pmr(NvmeCtrl *n, hwaddr addr)
@@ -3768,6 +3775,19 @@ static int nvme_start_ctrl(NvmeCtrl *n)
return 0;
}
+static void nvme_cmb_enable_regs(NvmeCtrl *n)
+{
+ NVME_CMBLOC_SET_BIR(n->bar.cmbloc, NVME_CMB_BIR);
+
+ NVME_CMBSZ_SET_SQS(n->bar.cmbsz, 1);
+ NVME_CMBSZ_SET_CQS(n->bar.cmbsz, 0);
+ NVME_CMBSZ_SET_LISTS(n->bar.cmbsz, 1);
+ NVME_CMBSZ_SET_RDS(n->bar.cmbsz, 1);
+ NVME_CMBSZ_SET_WDS(n->bar.cmbsz, 1);
+ NVME_CMBSZ_SET_SZU(n->bar.cmbsz, 2); /* MBs */
+ NVME_CMBSZ_SET_SZ(n->bar.cmbsz, n->params.cmb_size_mb);
+}
+
static void nvme_write_bar(NvmeCtrl *n, hwaddr offset, uint64_t data,
unsigned size)
{
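nvme_cmb_enable_regs() above encodes the CMB size with SZU=2 (1 MiB granularity) and SZ set to cmb_size_mb, so the advertised size is exactly cmb_size_mb MiB. As an illustration of how a reader of CMBSZ recovers the size in bytes (the real code uses the NVME_CMBSZ_GETSIZE macro for this), here is a sketch assuming the NVMe field layout of SZU in bits 11:8 and SZ in bits 31:12:

#include <stdint.h>

/* Unit is 4 KiB * 16^SZU, so the size in bytes is SZ << (12 + 4*SZU). */
static uint64_t cmbsz_to_bytes(uint32_t cmbsz)
{
    uint32_t szu = (cmbsz >> 8) & 0xf;   /* size units                     */
    uint64_t sz  = cmbsz >> 12;          /* size, in multiples of the unit */

    return sz << (12 + 4 * szu);
}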
@@ -3895,6 +3915,38 @@ static void nvme_write_bar(NvmeCtrl *n, hwaddr offset, uint64_t data,
NVME_GUEST_ERR(pci_nvme_ub_mmiowr_cmbsz_readonly,
"invalid write to read only CMBSZ, ignored");
return;
+ case 0x50: /* CMBMSC */
+ if (!NVME_CAP_CMBS(n->bar.cap)) {
+ return;
+ }
+
+ n->bar.cmbmsc = size == 8 ? data :
+ (n->bar.cmbmsc & ~0xffffffff) | (data & 0xffffffff);
+ n->cmb.cmse = false;
+
+ if (NVME_CMBMSC_CRE(data)) {
+ nvme_cmb_enable_regs(n);
+
+ if (NVME_CMBMSC_CMSE(data)) {
+ hwaddr cba = NVME_CMBMSC_CBA(data) << CMBMSC_CBA_SHIFT;
+ if (cba + int128_get64(n->cmb.mem.size) < cba) {
+ NVME_CMBSTS_SET_CBAI(n->bar.cmbsts, 1);
+ return;
+ }
+
+ n->cmb.cba = cba;
+ n->cmb.cmse = true;
+ }
+ } else {
+ n->bar.cmbsz = 0;
+ n->bar.cmbloc = 0;
+ }
+
+ return;
+ case 0x54: /* CMBMSC hi */
+ n->bar.cmbmsc = (n->bar.cmbmsc & 0xffffffff) | (data << 32);
+ return;
+
case 0xE00: /* PMRCAP */
NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrcap_readonly,
"invalid write to PMRCAP register, ignored");
@@ -4151,13 +4203,13 @@ static void nvme_cmb_write(void *opaque, hwaddr addr, uint64_t data,
unsigned size)
{
NvmeCtrl *n = (NvmeCtrl *)opaque;
- stn_le_p(&n->cmbuf[addr], size, data);
+ stn_le_p(&n->cmb.buf[addr], size, data);
}
static uint64_t nvme_cmb_read(void *opaque, hwaddr addr, unsigned size)
{
NvmeCtrl *n = (NvmeCtrl *)opaque;
- return ldn_le_p(&n->cmbuf[addr], size);
+ return ldn_le_p(&n->cmb.buf[addr], size);
}
static const MemoryRegionOps nvme_cmb_ops = {
@@ -4280,24 +4332,22 @@ int nvme_register_namespace(NvmeCtrl *n, NvmeNamespace *ns, Error **errp)
static void nvme_init_cmb(NvmeCtrl *n, PCIDevice *pci_dev)
{
- NVME_CMBLOC_SET_BIR(n->bar.cmbloc, NVME_CMB_BIR);
- NVME_CMBLOC_SET_OFST(n->bar.cmbloc, 0);
+ uint64_t cmb_size = n->params.cmb_size_mb * MiB;
- NVME_CMBSZ_SET_SQS(n->bar.cmbsz, 1);
- NVME_CMBSZ_SET_CQS(n->bar.cmbsz, 0);
- NVME_CMBSZ_SET_LISTS(n->bar.cmbsz, 1);
- NVME_CMBSZ_SET_RDS(n->bar.cmbsz, 1);
- NVME_CMBSZ_SET_WDS(n->bar.cmbsz, 1);
- NVME_CMBSZ_SET_SZU(n->bar.cmbsz, 2); /* MBs */
- NVME_CMBSZ_SET_SZ(n->bar.cmbsz, n->params.cmb_size_mb);
-
- n->cmbuf = g_malloc0(NVME_CMBSZ_GETSIZE(n->bar.cmbsz));
- memory_region_init_io(&n->ctrl_mem, OBJECT(n), &nvme_cmb_ops, n,
- "nvme-cmb", NVME_CMBSZ_GETSIZE(n->bar.cmbsz));
- pci_register_bar(pci_dev, NVME_CMBLOC_BIR(n->bar.cmbloc),
+ n->cmb.buf = g_malloc0(cmb_size);
+ memory_region_init_io(&n->cmb.mem, OBJECT(n), &nvme_cmb_ops, n,
+ "nvme-cmb", cmb_size);
+ pci_register_bar(pci_dev, NVME_CMB_BIR,
PCI_BASE_ADDRESS_SPACE_MEMORY |
PCI_BASE_ADDRESS_MEM_TYPE_64 |
- PCI_BASE_ADDRESS_MEM_PREFETCH, &n->ctrl_mem);
+ PCI_BASE_ADDRESS_MEM_PREFETCH, &n->cmb.mem);
+
+ NVME_CAP_SET_CMBS(n->bar.cap, 1);
+
+ if (n->params.legacy_cmb) {
+ nvme_cmb_enable_regs(n);
+ n->cmb.cmse = true;
+ }
}
static void nvme_init_pmr(NvmeCtrl *n, PCIDevice *pci_dev)
@@ -4509,7 +4559,7 @@ static void nvme_exit(PCIDevice *pci_dev)
g_free(n->aer_reqs);
if (n->params.cmb_size_mb) {
- g_free(n->cmbuf);
+ g_free(n->cmb.buf);
}
if (n->pmr.dev) {
@@ -4531,6 +4581,7 @@ static Property nvme_props[] = {
DEFINE_PROP_UINT32("aer_max_queued", NvmeCtrl, params.aer_max_queued, 64),
DEFINE_PROP_UINT8("mdts", NvmeCtrl, params.mdts, 7),
DEFINE_PROP_BOOL("use-intel-id", NvmeCtrl, params.use_intel_id, false),
+ DEFINE_PROP_BOOL("legacy-cmb", NvmeCtrl, params.legacy_cmb, false),
DEFINE_PROP_SIZE32("zoned.append_size_limit", NvmeCtrl, params.zasl_bs,
NVME_DEFAULT_MAX_ZA_SIZE),
DEFINE_PROP_END_OF_LIST(),
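With the legacy-cmb property defined above left at its default (false), the device uses the v1.4 scheme and a guest must go through the CMBMSC handshake before it can place queues or data in the CMB; setting it restores the v1.3 behavior where CMBLOC and CMBSZ are valid immediately after reset. A hypothetical invocation (backing drive/namespace options omitted):

-device nvme,serial=deadbeef,cmb_size_mb=64,legacy-cmb=on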