Diffstat (limited to 'hw')
-rw-r--r--  hw/arm/omap1.c                 |    1
-rw-r--r--  hw/arm/sbsa-ref.c              |   43
-rw-r--r--  hw/arm/virt-acpi-build.c       |    3
-rw-r--r--  hw/audio/hda-codec.c           |    4
-rw-r--r--  hw/block/nvme.c                | 1136
-rw-r--r--  hw/block/nvme.h                |   26
-rw-r--r--  hw/block/pflash_cfi01.c        |    1
-rw-r--r--  hw/block/trace-events          |   31
-rw-r--r--  hw/core/sysbus.c               |    3
-rw-r--r--  hw/display/cirrus_vga.c        |    1
-rw-r--r--  hw/display/qxl-logger.c        |    2
-rw-r--r--  hw/display/vga.c               |    1
-rw-r--r--  hw/gpio/max7310.c              |    3
-rw-r--r--  hw/i386/amd_iommu.c            |    2
-rw-r--r--  hw/i386/intel_iommu.c          |    3
-rw-r--r--  hw/i386/kvm/ioapic.c           |    2
-rw-r--r--  hw/i386/pc.c                   |    8
-rw-r--r--  hw/i386/x86-iommu.c            |    2
-rw-r--r--  hw/i386/x86.c                  |   43
-rw-r--r--  hw/ide/ahci.c                  |    5
-rw-r--r--  hw/ide/atapi.c                 |    8
-rw-r--r--  hw/ide/core.c                  |    2
-rw-r--r--  hw/ide/pci.c                   |    2
-rw-r--r--  hw/input/pxa2xx_keypad.c       |   10
-rw-r--r--  hw/intc/armv7m_nvic.c          |    1
-rw-r--r--  hw/intc/exynos4210_combiner.c  |    1
-rw-r--r--  hw/isa/isa-superio.c           |    2
-rw-r--r--  hw/misc/imx_ccm.c              |    2
-rw-r--r--  hw/misc/mac_via.c              |   14
-rw-r--r--  hw/misc/macio/cuda.c           |    6
-rw-r--r--  hw/misc/macio/pmu.c            |    6
-rw-r--r--  hw/misc/meson.build            |    2
-rw-r--r--  hw/misc/mos6522.c              |    8
-rw-r--r--  hw/misc/sbsa_ec.c              |   98
-rw-r--r--  hw/net/can/can_sja1000.c       |    2
-rw-r--r--  hw/net/lan9118.c               |    2
-rw-r--r--  hw/net/virtio-net.c            |    1
-rw-r--r--  hw/net/xilinx_axienet.c        |   23
-rw-r--r--  hw/rdma/vmw/pvrdma_main.c      |    2
-rw-r--r--  hw/rx/rx-gdbsim.c              |    4
-rw-r--r--  hw/s390x/virtio-ccw.c          |    1
-rw-r--r--  hw/scsi/scsi-disk.c            |   44
-rw-r--r--  hw/usb/bus.c                   |    4
-rw-r--r--  hw/usb/ccid-card-emulated.c    |    1
-rw-r--r--  hw/vfio/platform.c             |    2
-rw-r--r--  hw/virtio/vhost-user.c         |    2
-rw-r--r--  hw/virtio/virtio-pci.c         |    1
47 files changed, 1238 insertions(+), 333 deletions(-)
diff --git a/hw/arm/omap1.c b/hw/arm/omap1.c
index 6ba0df6b6d..02c0f66431 100644
--- a/hw/arm/omap1.c
+++ b/hw/arm/omap1.c
@@ -1774,7 +1774,6 @@ static uint64_t omap_clkdsp_read(void *opaque, hwaddr addr,
return s->clkm.dsp_rstct2;
case 0x18: /* DSP_SYSST */
- cpu = CPU(s->cpu);
return (s->clkm.clocking_scheme << 11) | s->clkm.cold_start |
(cpu->halted << 6); /* Quite useless... */
}
diff --git a/hw/arm/sbsa-ref.c b/hw/arm/sbsa-ref.c
index 2a7d9a61fc..47b5286d46 100644
--- a/hw/arm/sbsa-ref.c
+++ b/hw/arm/sbsa-ref.c
@@ -62,6 +62,7 @@ enum {
SBSA_CPUPERIPHS,
SBSA_GIC_DIST,
SBSA_GIC_REDIST,
+ SBSA_SECURE_EC,
SBSA_SMMU,
SBSA_UART,
SBSA_RTC,
@@ -107,6 +108,7 @@ static const MemMapEntry sbsa_ref_memmap[] = {
[SBSA_CPUPERIPHS] = { 0x40000000, 0x00040000 },
[SBSA_GIC_DIST] = { 0x40060000, 0x00010000 },
[SBSA_GIC_REDIST] = { 0x40080000, 0x04000000 },
+ [SBSA_SECURE_EC] = { 0x50000000, 0x00001000 },
[SBSA_UART] = { 0x60000000, 0x00001000 },
[SBSA_RTC] = { 0x60010000, 0x00001000 },
[SBSA_GPIO] = { 0x60020000, 0x00001000 },
@@ -138,6 +140,12 @@ static const int sbsa_ref_irqmap[] = {
[SBSA_EHCI] = 11,
};
+static uint64_t sbsa_ref_cpu_mp_affinity(SBSAMachineState *sms, int idx)
+{
+ uint8_t clustersz = ARM_DEFAULT_CPUS_PER_CLUSTER;
+ return arm_cpu_mp_affinity(idx, clustersz);
+}
+
/*
* Firmware on this machine only uses ACPI table to load OS, these limited
* device tree nodes are just to let firmware know the info which varies from
@@ -183,14 +191,31 @@ static void create_fdt(SBSAMachineState *sms)
g_free(matrix);
}
+ /*
+ * From Documentation/devicetree/bindings/arm/cpus.yaml
+ * On ARM v8 64-bit systems this property is required
+ * and matches the MPIDR_EL1 register affinity bits.
+ *
+ * * If cpus node's #address-cells property is set to 2
+ *
+ * The first reg cell bits [7:0] must be set to
+ * bits [39:32] of MPIDR_EL1.
+ *
+ * The second reg cell bits [23:0] must be set to
+ * bits [23:0] of MPIDR_EL1.
+ */
qemu_fdt_add_subnode(sms->fdt, "/cpus");
+ qemu_fdt_setprop_cell(sms->fdt, "/cpus", "#address-cells", 2);
+ qemu_fdt_setprop_cell(sms->fdt, "/cpus", "#size-cells", 0x0);
for (cpu = sms->smp_cpus - 1; cpu >= 0; cpu--) {
char *nodename = g_strdup_printf("/cpus/cpu@%d", cpu);
ARMCPU *armcpu = ARM_CPU(qemu_get_cpu(cpu));
CPUState *cs = CPU(armcpu);
+ uint64_t mpidr = sbsa_ref_cpu_mp_affinity(sms, cpu);
qemu_fdt_add_subnode(sms->fdt, nodename);
+ qemu_fdt_setprop_u64(sms->fdt, nodename, "reg", mpidr);
if (ms->possible_cpus->cpus[cs->cpu_index].props.has_node_id) {
qemu_fdt_setprop_cell(sms->fdt, nodename, "numa-node-id",
@@ -585,6 +610,16 @@ static void *sbsa_ref_dtb(const struct arm_boot_info *binfo, int *fdt_size)
return board->fdt;
}
+static void create_secure_ec(MemoryRegion *mem)
+{
+ hwaddr base = sbsa_ref_memmap[SBSA_SECURE_EC].base;
+ DeviceState *dev = qdev_new("sbsa-ec");
+ SysBusDevice *s = SYS_BUS_DEVICE(dev);
+
+ memory_region_add_subregion(mem, base,
+ sysbus_mmio_get_region(s, 0));
+}
+
static void sbsa_ref_init(MachineState *machine)
{
unsigned int smp_cpus = machine->smp.cpus;
@@ -708,6 +743,8 @@ static void sbsa_ref_init(MachineState *machine)
create_pcie(sms);
+ create_secure_ec(secure_sysmem);
+
sms->bootinfo.ram_size = machine->ram_size;
sms->bootinfo.nb_cpus = smp_cpus;
sms->bootinfo.board_id = -1;
@@ -717,12 +754,6 @@ static void sbsa_ref_init(MachineState *machine)
arm_load_kernel(ARM_CPU(first_cpu), machine, &sms->bootinfo);
}
-static uint64_t sbsa_ref_cpu_mp_affinity(SBSAMachineState *sms, int idx)
-{
- uint8_t clustersz = ARM_DEFAULT_CPUS_PER_CLUSTER;
- return arm_cpu_mp_affinity(idx, clustersz);
-}
-
static const CPUArchIdList *sbsa_ref_possible_cpu_arch_ids(MachineState *ms)
{
unsigned int max_cpus = ms->smp.max_cpus;
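For reference, the binding quoted in the comment above can be checked against
the value the patch writes with qemu_fdt_setprop_u64(): the u64 is emitted as
two big-endian cells, so the split falls out naturally (a sketch, not part of
the patch; the cell names are illustrative):

    uint64_t mpidr = sbsa_ref_cpu_mp_affinity(sms, cpu);
    uint32_t reg_cell0 = (mpidr >> 32) & 0xff;  /* MPIDR_EL1 bits [39:32] */
    uint32_t reg_cell1 = mpidr & 0xffffff;      /* MPIDR_EL1 bits [23:0]  */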
diff --git a/hw/arm/virt-acpi-build.c b/hw/arm/virt-acpi-build.c
index 0a482ff6f7..9efd7a3881 100644
--- a/hw/arm/virt-acpi-build.c
+++ b/hw/arm/virt-acpi-build.c
@@ -633,12 +633,11 @@ build_madt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
int madt_start = table_data->len;
const MemMapEntry *memmap = vms->memmap;
const int *irqmap = vms->irqmap;
- AcpiMultipleApicTable *madt;
AcpiMadtGenericDistributor *gicd;
AcpiMadtGenericMsiFrame *gic_msi;
int i;
- madt = acpi_data_push(table_data, sizeof *madt);
+ acpi_data_push(table_data, sizeof(AcpiMultipleApicTable));
gicd = acpi_data_push(table_data, sizeof *gicd);
gicd->type = ACPI_APIC_GENERIC_DISTRIBUTOR;
diff --git a/hw/audio/hda-codec.c b/hw/audio/hda-codec.c
index cbd92b72f2..2d16448181 100644
--- a/hw/audio/hda-codec.c
+++ b/hw/audio/hda-codec.c
@@ -898,6 +898,7 @@ static void hda_audio_base_class_init(ObjectClass *klass, void *data)
static const TypeInfo hda_audio_info = {
.name = TYPE_HDA_AUDIO,
.parent = TYPE_HDA_CODEC_DEVICE,
+ .instance_size = sizeof(HDAAudioState),
.class_init = hda_audio_base_class_init,
.abstract = true,
};
@@ -914,7 +915,6 @@ static void hda_audio_output_class_init(ObjectClass *klass, void *data)
static const TypeInfo hda_audio_output_info = {
.name = "hda-output",
.parent = TYPE_HDA_AUDIO,
- .instance_size = sizeof(HDAAudioState),
.class_init = hda_audio_output_class_init,
};
@@ -930,7 +930,6 @@ static void hda_audio_duplex_class_init(ObjectClass *klass, void *data)
static const TypeInfo hda_audio_duplex_info = {
.name = "hda-duplex",
.parent = TYPE_HDA_AUDIO,
- .instance_size = sizeof(HDAAudioState),
.class_init = hda_audio_duplex_class_init,
};
@@ -946,7 +945,6 @@ static void hda_audio_micro_class_init(ObjectClass *klass, void *data)
static const TypeInfo hda_audio_micro_info = {
.name = "hda-micro",
.parent = TYPE_HDA_AUDIO,
- .instance_size = sizeof(HDAAudioState),
.class_init = hda_audio_micro_class_init,
};
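Hoisting .instance_size into the abstract base type works because QOM child
types inherit the parent's instance size when they leave the field unset. A
minimal sketch of the pattern (hypothetical type and struct names):

    static const TypeInfo example_base_info = {
        .name          = "example-base",
        .parent        = TYPE_DEVICE,
        .instance_size = sizeof(ExampleBaseState), /* shared by children */
        .abstract      = true,
    };

    static const TypeInfo example_child_info = {
        .name   = "example-child",
        .parent = "example-base",
        /* no .instance_size: inherited from example-base */
    };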
diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index 3426e17e65..63078f6009 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -11,7 +11,7 @@
/**
* Reference Specs: http://www.nvmexpress.org, 1.2, 1.1, 1.0e
*
- * http://www.nvmexpress.org/resources/
+ * https://nvmexpress.org/developers/nvme-specification/
*/
/**
@@ -20,7 +20,9 @@
* -device nvme,drive=<drive_id>,serial=<serial>,id=<id[optional]>, \
* cmb_size_mb=<cmb_size_mb[optional]>, \
* [pmrdev=<mem_backend_file_id>,] \
- * max_ioqpairs=<N[optional]>
+ * max_ioqpairs=<N[optional]>, \
+ * aerl=<N[optional]>, aer_max_queued=<N[optional]>, \
+ * mdts=<N[optional]>
*
* Note cmb_size_mb denotes size of CMB in MB. CMB is assumed to be at
* offset 0 in BAR2 and supports only WDS, RDS and SQS for now.
@@ -32,6 +34,20 @@
* For example:
* -object memory-backend-file,id=<mem_id>,share=on,mem-path=<file_path>, \
* size=<size> .... -device nvme,...,pmrdev=<mem_id>
+ *
+ *
+ * nvme device parameters
+ * ~~~~~~~~~~~~~~~~~~~~~~
+ * - `aerl`
+ * The Asynchronous Event Request Limit (AERL). Indicates the maximum number
+ * of concurrently outstanding Asynchronous Event Request commands supported
+ * by the controller. This is a 0's based value.
+ *
+ * - `aer_max_queued`
+ * This is the maximum number of events that the device will enqueue for
+ * completion when there are no outstanding AERs. When the maximum number of
+ * enqueued events is reached, subsequent events will be dropped.
+ *
*/
#include "qemu/osdep.h"
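An invocation exercising the new knobs might look like this (values, file
name and ids are illustrative only):

    -drive file=nvme.img,if=none,id=nvm \
    -device nvme,drive=nvm,serial=deadbeef,aerl=3,aer_max_queued=64,mdts=7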
@@ -55,10 +71,14 @@
#include "nvme.h"
#define NVME_MAX_IOQPAIRS 0xffff
-#define NVME_REG_SIZE 0x1000
#define NVME_DB_SIZE 4
+#define NVME_SPEC_VER 0x00010300
#define NVME_CMB_BIR 2
#define NVME_PMR_BIR 2
+#define NVME_TEMPERATURE 0x143
+#define NVME_TEMPERATURE_WARNING 0x157
+#define NVME_TEMPERATURE_CRITICAL 0x175
+#define NVME_NUM_FW_SLOTS 1
#define NVME_GUEST_ERR(trace, fmt, ...) \
do { \
@@ -67,8 +87,44 @@
" in %s: " fmt "\n", __func__, ## __VA_ARGS__); \
} while (0)
+static const bool nvme_feature_support[NVME_FID_MAX] = {
+ [NVME_ARBITRATION] = true,
+ [NVME_POWER_MANAGEMENT] = true,
+ [NVME_TEMPERATURE_THRESHOLD] = true,
+ [NVME_ERROR_RECOVERY] = true,
+ [NVME_VOLATILE_WRITE_CACHE] = true,
+ [NVME_NUMBER_OF_QUEUES] = true,
+ [NVME_INTERRUPT_COALESCING] = true,
+ [NVME_INTERRUPT_VECTOR_CONF] = true,
+ [NVME_WRITE_ATOMICITY] = true,
+ [NVME_ASYNCHRONOUS_EVENT_CONF] = true,
+ [NVME_TIMESTAMP] = true,
+};
+
+static const uint32_t nvme_feature_cap[NVME_FID_MAX] = {
+ [NVME_TEMPERATURE_THRESHOLD] = NVME_FEAT_CAP_CHANGE,
+ [NVME_VOLATILE_WRITE_CACHE] = NVME_FEAT_CAP_CHANGE,
+ [NVME_NUMBER_OF_QUEUES] = NVME_FEAT_CAP_CHANGE,
+ [NVME_ASYNCHRONOUS_EVENT_CONF] = NVME_FEAT_CAP_CHANGE,
+ [NVME_TIMESTAMP] = NVME_FEAT_CAP_CHANGE,
+};
+
static void nvme_process_sq(void *opaque);
+static uint16_t nvme_cid(NvmeRequest *req)
+{
+ if (!req) {
+ return 0xffff;
+ }
+
+ return le16_to_cpu(req->cqe.cid);
+}
+
+static uint16_t nvme_sqid(NvmeRequest *req)
+{
+ return le16_to_cpu(req->sq->sqid);
+}
+
static bool nvme_addr_is_cmb(NvmeCtrl *n, hwaddr addr)
{
hwaddr low = n->ctrl_mem.addr;
@@ -77,10 +133,17 @@ static bool nvme_addr_is_cmb(NvmeCtrl *n, hwaddr addr)
return addr >= low && addr < hi;
}
+static inline void *nvme_addr_to_cmb(NvmeCtrl *n, hwaddr addr)
+{
+ assert(nvme_addr_is_cmb(n, addr));
+
+ return &n->cmbuf[addr - n->ctrl_mem.addr];
+}
+
static void nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size)
{
if (n->bar.cmbsz && nvme_addr_is_cmb(n, addr)) {
- memcpy(buf, (void *)&n->cmbuf[addr - n->ctrl_mem.addr], size);
+ memcpy(buf, nvme_addr_to_cmb(n, addr), size);
return;
}
@@ -163,36 +226,125 @@ static void nvme_irq_deassert(NvmeCtrl *n, NvmeCQueue *cq)
}
}
-static uint16_t nvme_map_prp(QEMUSGList *qsg, QEMUIOVector *iov, uint64_t prp1,
- uint64_t prp2, uint32_t len, NvmeCtrl *n)
+static void nvme_req_clear(NvmeRequest *req)
+{
+ req->ns = NULL;
+ memset(&req->cqe, 0x0, sizeof(req->cqe));
+}
+
+static void nvme_req_exit(NvmeRequest *req)
+{
+ if (req->qsg.sg) {
+ qemu_sglist_destroy(&req->qsg);
+ }
+
+ if (req->iov.iov) {
+ qemu_iovec_destroy(&req->iov);
+ }
+}
+
+static uint16_t nvme_map_addr_cmb(NvmeCtrl *n, QEMUIOVector *iov, hwaddr addr,
+ size_t len)
+{
+ if (!len) {
+ return NVME_SUCCESS;
+ }
+
+ trace_pci_nvme_map_addr_cmb(addr, len);
+
+ if (!nvme_addr_is_cmb(n, addr) || !nvme_addr_is_cmb(n, addr + len - 1)) {
+ return NVME_DATA_TRAS_ERROR;
+ }
+
+ qemu_iovec_add(iov, nvme_addr_to_cmb(n, addr), len);
+
+ return NVME_SUCCESS;
+}
+
+static uint16_t nvme_map_addr(NvmeCtrl *n, QEMUSGList *qsg, QEMUIOVector *iov,
+ hwaddr addr, size_t len)
+{
+ if (!len) {
+ return NVME_SUCCESS;
+ }
+
+ trace_pci_nvme_map_addr(addr, len);
+
+ if (nvme_addr_is_cmb(n, addr)) {
+ if (qsg && qsg->sg) {
+ return NVME_INVALID_USE_OF_CMB | NVME_DNR;
+ }
+
+ assert(iov);
+
+ if (!iov->iov) {
+ qemu_iovec_init(iov, 1);
+ }
+
+ return nvme_map_addr_cmb(n, iov, addr, len);
+ }
+
+ if (iov && iov->iov) {
+ return NVME_INVALID_USE_OF_CMB | NVME_DNR;
+ }
+
+ assert(qsg);
+
+ if (!qsg->sg) {
+ pci_dma_sglist_init(qsg, &n->parent_obj, 1);
+ }
+
+ qemu_sglist_add(qsg, addr, len);
+
+ return NVME_SUCCESS;
+}
+
+static uint16_t nvme_map_prp(NvmeCtrl *n, uint64_t prp1, uint64_t prp2,
+ uint32_t len, NvmeRequest *req)
{
hwaddr trans_len = n->page_size - (prp1 % n->page_size);
trans_len = MIN(len, trans_len);
int num_prps = (len >> n->page_bits) + 1;
+ uint16_t status;
+ bool prp_list_in_cmb = false;
+
+ QEMUSGList *qsg = &req->qsg;
+ QEMUIOVector *iov = &req->iov;
+
+ trace_pci_nvme_map_prp(trans_len, len, prp1, prp2, num_prps);
if (unlikely(!prp1)) {
trace_pci_nvme_err_invalid_prp();
return NVME_INVALID_FIELD | NVME_DNR;
- } else if (n->bar.cmbsz && prp1 >= n->ctrl_mem.addr &&
- prp1 < n->ctrl_mem.addr + int128_get64(n->ctrl_mem.size)) {
- qsg->nsg = 0;
+ }
+
+ if (nvme_addr_is_cmb(n, prp1)) {
qemu_iovec_init(iov, num_prps);
- qemu_iovec_add(iov, (void *)&n->cmbuf[prp1 - n->ctrl_mem.addr], trans_len);
} else {
pci_dma_sglist_init(qsg, &n->parent_obj, num_prps);
- qemu_sglist_add(qsg, prp1, trans_len);
}
+
+ status = nvme_map_addr(n, qsg, iov, prp1, trans_len);
+ if (status) {
+ return status;
+ }
+
len -= trans_len;
if (len) {
if (unlikely(!prp2)) {
trace_pci_nvme_err_invalid_prp2_missing();
- goto unmap;
+ return NVME_INVALID_FIELD | NVME_DNR;
}
+
if (len > n->page_size) {
uint64_t prp_list[n->max_prp_ents];
uint32_t nents, prp_trans;
int i = 0;
+ if (nvme_addr_is_cmb(n, prp2)) {
+ prp_list_in_cmb = true;
+ }
+
nents = (len + n->page_size - 1) >> n->page_bits;
prp_trans = MIN(n->max_prp_ents, nents) * sizeof(uint64_t);
nvme_addr_read(n, prp2, (void *)prp_list, prp_trans);
@@ -202,7 +354,11 @@ static uint16_t nvme_map_prp(QEMUSGList *qsg, QEMUIOVector *iov, uint64_t prp1,
if (i == n->max_prp_ents - 1 && len > n->page_size) {
if (unlikely(!prp_ent || prp_ent & (n->page_size - 1))) {
trace_pci_nvme_err_invalid_prplist_ent(prp_ent);
- goto unmap;
+ return NVME_INVALID_FIELD | NVME_DNR;
+ }
+
+ if (prp_list_in_cmb != nvme_addr_is_cmb(n, prp_ent)) {
+ return NVME_INVALID_USE_OF_CMB | NVME_DNR;
}
i = 0;
@@ -215,89 +371,87 @@ static uint16_t nvme_map_prp(QEMUSGList *qsg, QEMUIOVector *iov, uint64_t prp1,
if (unlikely(!prp_ent || prp_ent & (n->page_size - 1))) {
trace_pci_nvme_err_invalid_prplist_ent(prp_ent);
- goto unmap;
+ return NVME_INVALID_FIELD | NVME_DNR;
}
trans_len = MIN(len, n->page_size);
- if (qsg->nsg){
- qemu_sglist_add(qsg, prp_ent, trans_len);
- } else {
- qemu_iovec_add(iov, (void *)&n->cmbuf[prp_ent - n->ctrl_mem.addr], trans_len);
+ status = nvme_map_addr(n, qsg, iov, prp_ent, trans_len);
+ if (status) {
+ return status;
}
+
len -= trans_len;
i++;
}
} else {
if (unlikely(prp2 & (n->page_size - 1))) {
trace_pci_nvme_err_invalid_prp2_align(prp2);
- goto unmap;
+ return NVME_INVALID_FIELD | NVME_DNR;
}
- if (qsg->nsg) {
- qemu_sglist_add(qsg, prp2, len);
- } else {
- qemu_iovec_add(iov, (void *)&n->cmbuf[prp2 - n->ctrl_mem.addr], trans_len);
+ status = nvme_map_addr(n, qsg, iov, prp2, len);
+ if (status) {
+ return status;
}
}
}
- return NVME_SUCCESS;
- unmap:
- qemu_sglist_destroy(qsg);
- return NVME_INVALID_FIELD | NVME_DNR;
+ return NVME_SUCCESS;
}
-static uint16_t nvme_dma_write_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
- uint64_t prp1, uint64_t prp2)
+static uint16_t nvme_dma_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
+ uint64_t prp1, uint64_t prp2, DMADirection dir,
+ NvmeRequest *req)
{
- QEMUSGList qsg;
- QEMUIOVector iov;
uint16_t status = NVME_SUCCESS;
- if (nvme_map_prp(&qsg, &iov, prp1, prp2, len, n)) {
- return NVME_INVALID_FIELD | NVME_DNR;
- }
- if (qsg.nsg > 0) {
- if (dma_buf_write(ptr, len, &qsg)) {
- status = NVME_INVALID_FIELD | NVME_DNR;
- }
- qemu_sglist_destroy(&qsg);
- } else {
- if (qemu_iovec_to_buf(&iov, 0, ptr, len) != len) {
- status = NVME_INVALID_FIELD | NVME_DNR;
- }
- qemu_iovec_destroy(&iov);
+ status = nvme_map_prp(n, prp1, prp2, len, req);
+ if (status) {
+ return status;
}
- return status;
-}
-static uint16_t nvme_dma_read_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
- uint64_t prp1, uint64_t prp2)
-{
- QEMUSGList qsg;
- QEMUIOVector iov;
- uint16_t status = NVME_SUCCESS;
+ /* assert that only one of qsg and iov carries data */
+ assert((req->qsg.nsg > 0) != (req->iov.niov > 0));
- trace_pci_nvme_dma_read(prp1, prp2);
+ if (req->qsg.nsg > 0) {
+ uint64_t residual;
- if (nvme_map_prp(&qsg, &iov, prp1, prp2, len, n)) {
- return NVME_INVALID_FIELD | NVME_DNR;
- }
- if (qsg.nsg > 0) {
- if (unlikely(dma_buf_read(ptr, len, &qsg))) {
+ if (dir == DMA_DIRECTION_TO_DEVICE) {
+ residual = dma_buf_write(ptr, len, &req->qsg);
+ } else {
+ residual = dma_buf_read(ptr, len, &req->qsg);
+ }
+
+ if (unlikely(residual)) {
trace_pci_nvme_err_invalid_dma();
status = NVME_INVALID_FIELD | NVME_DNR;
}
- qemu_sglist_destroy(&qsg);
} else {
- if (unlikely(qemu_iovec_from_buf(&iov, 0, ptr, len) != len)) {
+ size_t bytes;
+
+ if (dir == DMA_DIRECTION_TO_DEVICE) {
+ bytes = qemu_iovec_to_buf(&req->iov, 0, ptr, len);
+ } else {
+ bytes = qemu_iovec_from_buf(&req->iov, 0, ptr, len);
+ }
+
+ if (unlikely(bytes != len)) {
trace_pci_nvme_err_invalid_dma();
status = NVME_INVALID_FIELD | NVME_DNR;
}
- qemu_iovec_destroy(&iov);
}
+
return status;
}
+static uint16_t nvme_map_dptr(NvmeCtrl *n, size_t len, NvmeRequest *req)
+{
+ NvmeCmd *cmd = &req->cmd;
+ uint64_t prp1 = le64_to_cpu(cmd->dptr.prp1);
+ uint64_t prp2 = le64_to_cpu(cmd->dptr.prp2);
+
+ return nvme_map_prp(n, prp1, prp2, len, req);
+}
+
static void nvme_post_cqes(void *opaque)
{
NvmeCQueue *cq = opaque;
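To make the reworked PRP mapping concrete, consider a 10 KiB transfer with
4 KiB pages and a page-aligned prp1 (a worked example, not part of the patch):

    /*
     * num_prps = (10240 >> 12) + 1 = 3
     * prp1     -> first 4 KiB of data
     * 6 KiB remain (more than one page), so prp2 points to a PRP list:
     *   entry[0] -> next 4 KiB, entry[1] -> final 2 KiB
     * For transfers of at most two pages, prp2 addresses the second
     * page directly instead of a list.
     */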
@@ -321,6 +475,7 @@ static void nvme_post_cqes(void *opaque)
nvme_inc_cq_tail(cq);
pci_dma_write(&n->parent_obj, addr, (void *)&req->cqe,
sizeof(req->cqe));
+ nvme_req_exit(req);
QTAILQ_INSERT_TAIL(&sq->req_list, req, entry);
}
if (cq->tail != cq->head) {
@@ -331,11 +486,115 @@ static void nvme_post_cqes(void *opaque)
static void nvme_enqueue_req_completion(NvmeCQueue *cq, NvmeRequest *req)
{
assert(cq->cqid == req->sq->cqid);
+ trace_pci_nvme_enqueue_req_completion(nvme_cid(req), cq->cqid,
+ req->status);
QTAILQ_REMOVE(&req->sq->out_req_list, req, entry);
QTAILQ_INSERT_TAIL(&cq->req_list, req, entry);
timer_mod(cq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
}
+static void nvme_process_aers(void *opaque)
+{
+ NvmeCtrl *n = opaque;
+ NvmeAsyncEvent *event, *next;
+
+ trace_pci_nvme_process_aers(n->aer_queued);
+
+ QTAILQ_FOREACH_SAFE(event, &n->aer_queue, entry, next) {
+ NvmeRequest *req;
+ NvmeAerResult *result;
+
+ /* can't post cqe if there is nothing to complete */
+ if (!n->outstanding_aers) {
+ trace_pci_nvme_no_outstanding_aers();
+ break;
+ }
+
+ /* ignore if masked (cqe posted, but event not cleared) */
+ if (n->aer_mask & (1 << event->result.event_type)) {
+ trace_pci_nvme_aer_masked(event->result.event_type, n->aer_mask);
+ continue;
+ }
+
+ QTAILQ_REMOVE(&n->aer_queue, event, entry);
+ n->aer_queued--;
+
+ n->aer_mask |= 1 << event->result.event_type;
+ n->outstanding_aers--;
+
+ req = n->aer_reqs[n->outstanding_aers];
+
+ result = (NvmeAerResult *) &req->cqe.result;
+ result->event_type = event->result.event_type;
+ result->event_info = event->result.event_info;
+ result->log_page = event->result.log_page;
+ g_free(event);
+
+ req->status = NVME_SUCCESS;
+
+ trace_pci_nvme_aer_post_cqe(result->event_type, result->event_info,
+ result->log_page);
+
+ nvme_enqueue_req_completion(&n->admin_cq, req);
+ }
+}
+
+static void nvme_enqueue_event(NvmeCtrl *n, uint8_t event_type,
+ uint8_t event_info, uint8_t log_page)
+{
+ NvmeAsyncEvent *event;
+
+ trace_pci_nvme_enqueue_event(event_type, event_info, log_page);
+
+ if (n->aer_queued == n->params.aer_max_queued) {
+ trace_pci_nvme_enqueue_event_noqueue(n->aer_queued);
+ return;
+ }
+
+ event = g_new(NvmeAsyncEvent, 1);
+ event->result = (NvmeAerResult) {
+ .event_type = event_type,
+ .event_info = event_info,
+ .log_page = log_page,
+ };
+
+ QTAILQ_INSERT_TAIL(&n->aer_queue, event, entry);
+ n->aer_queued++;
+
+ nvme_process_aers(n);
+}
+
+static void nvme_clear_events(NvmeCtrl *n, uint8_t event_type)
+{
+ n->aer_mask &= ~(1 << event_type);
+ if (!QTAILQ_EMPTY(&n->aer_queue)) {
+ nvme_process_aers(n);
+ }
+}
+
+static inline uint16_t nvme_check_mdts(NvmeCtrl *n, size_t len)
+{
+ uint8_t mdts = n->params.mdts;
+
+ if (mdts && len > n->page_size << mdts) {
+ return NVME_INVALID_FIELD | NVME_DNR;
+ }
+
+ return NVME_SUCCESS;
+}
+
+static inline uint16_t nvme_check_bounds(NvmeCtrl *n, NvmeNamespace *ns,
+ uint64_t slba, uint32_t nlb)
+{
+ uint64_t nsze = le64_to_cpu(ns->id_ns.nsze);
+
+ if (unlikely(UINT64_MAX - slba < nlb || slba + nlb > nsze)) {
+ return NVME_LBA_RANGE | NVME_DNR;
+ }
+
+ return NVME_SUCCESS;
+}
+
static void nvme_rw_cb(void *opaque, int ret)
{
NvmeRequest *req = opaque;
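The new nvme_check_mdts() caps the per-command transfer size at
page_size << mdts. A worked example with the default mdts=7 added later in
this patch:

    /*
     * 4 KiB page size, mdts = 7:
     *   maximum data transfer = 4096 << 7 = 512 KiB per command;
     * mdts == 0 disables the check entirely.
     */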
@@ -343,6 +602,8 @@ static void nvme_rw_cb(void *opaque, int ret)
NvmeCtrl *n = sq->ctrl;
NvmeCQueue *cq = n->cq[sq->cqid];
+ trace_pci_nvme_rw_cb(nvme_cid(req));
+
if (!ret) {
block_acct_done(blk_get_stats(n->conf.blk), &req->acct);
req->status = NVME_SUCCESS;
@@ -350,16 +611,12 @@ static void nvme_rw_cb(void *opaque, int ret)
block_acct_failed(blk_get_stats(n->conf.blk), &req->acct);
req->status = NVME_INTERNAL_DEV_ERROR;
}
- if (req->has_sg) {
- qemu_sglist_destroy(&req->qsg);
- }
+
nvme_enqueue_req_completion(cq, req);
}
-static uint16_t nvme_flush(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
- NvmeRequest *req)
+static uint16_t nvme_flush(NvmeCtrl *n, NvmeRequest *req)
{
- req->has_sg = false;
block_acct_start(blk_get_stats(n->conf.blk), &req->acct, 0,
BLOCK_ACCT_FLUSH);
req->aiocb = blk_aio_flush(n->conf.blk, nvme_rw_cb, req);
@@ -367,23 +624,26 @@ static uint16_t nvme_flush(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
return NVME_NO_COMPLETE;
}
-static uint16_t nvme_write_zeros(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
- NvmeRequest *req)
+static uint16_t nvme_write_zeroes(NvmeCtrl *n, NvmeRequest *req)
{
- NvmeRwCmd *rw = (NvmeRwCmd *)cmd;
+ NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
+ NvmeNamespace *ns = req->ns;
const uint8_t lba_index = NVME_ID_NS_FLBAS_INDEX(ns->id_ns.flbas);
const uint8_t data_shift = ns->id_ns.lbaf[lba_index].ds;
uint64_t slba = le64_to_cpu(rw->slba);
uint32_t nlb = le16_to_cpu(rw->nlb) + 1;
uint64_t offset = slba << data_shift;
uint32_t count = nlb << data_shift;
+ uint16_t status;
+
+ trace_pci_nvme_write_zeroes(nvme_cid(req), slba, nlb);
- if (unlikely(slba + nlb > ns->id_ns.nsze)) {
+ status = nvme_check_bounds(n, ns, slba, nlb);
+ if (status) {
trace_pci_nvme_err_invalid_lba_range(slba, nlb, ns->id_ns.nsze);
- return NVME_LBA_RANGE | NVME_DNR;
+ return status;
}
- req->has_sg = false;
block_acct_start(blk_get_stats(n->conf.blk), &req->acct, 0,
BLOCK_ACCT_WRITE);
req->aiocb = blk_aio_pwrite_zeroes(n->conf.blk, offset, count,
@@ -391,14 +651,12 @@ static uint16_t nvme_write_zeros(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
return NVME_NO_COMPLETE;
}
-static uint16_t nvme_rw(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
- NvmeRequest *req)
+static uint16_t nvme_rw(NvmeCtrl *n, NvmeRequest *req)
{
- NvmeRwCmd *rw = (NvmeRwCmd *)cmd;
+ NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
+ NvmeNamespace *ns = req->ns;
uint32_t nlb = le32_to_cpu(rw->nlb) + 1;
uint64_t slba = le64_to_cpu(rw->slba);
- uint64_t prp1 = le64_to_cpu(rw->prp1);
- uint64_t prp2 = le64_to_cpu(rw->prp2);
uint8_t lba_index = NVME_ID_NS_FLBAS_INDEX(ns->id_ns.flbas);
uint8_t data_shift = ns->id_ns.lbaf[lba_index].ds;
@@ -406,30 +664,40 @@ static uint16_t nvme_rw(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
uint64_t data_offset = slba << data_shift;
int is_write = rw->opcode == NVME_CMD_WRITE ? 1 : 0;
enum BlockAcctType acct = is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ;
+ uint16_t status;
trace_pci_nvme_rw(is_write ? "write" : "read", nlb, data_size, slba);
- if (unlikely((slba + nlb) > ns->id_ns.nsze)) {
+ status = nvme_check_mdts(n, data_size);
+ if (status) {
+ trace_pci_nvme_err_mdts(nvme_cid(req), data_size);
block_acct_invalid(blk_get_stats(n->conf.blk), acct);
+ return status;
+ }
+
+ status = nvme_check_bounds(n, ns, slba, nlb);
+ if (status) {
trace_pci_nvme_err_invalid_lba_range(slba, nlb, ns->id_ns.nsze);
- return NVME_LBA_RANGE | NVME_DNR;
+ block_acct_invalid(blk_get_stats(n->conf.blk), acct);
+ return status;
}
- if (nvme_map_prp(&req->qsg, &req->iov, prp1, prp2, data_size, n)) {
+ if (nvme_map_dptr(n, data_size, req)) {
block_acct_invalid(blk_get_stats(n->conf.blk), acct);
return NVME_INVALID_FIELD | NVME_DNR;
}
- dma_acct_start(n->conf.blk, &req->acct, &req->qsg, acct);
if (req->qsg.nsg > 0) {
- req->has_sg = true;
+ block_acct_start(blk_get_stats(n->conf.blk), &req->acct, req->qsg.size,
+ acct);
req->aiocb = is_write ?
dma_blk_write(n->conf.blk, &req->qsg, data_offset, BDRV_SECTOR_SIZE,
nvme_rw_cb, req) :
dma_blk_read(n->conf.blk, &req->qsg, data_offset, BDRV_SECTOR_SIZE,
nvme_rw_cb, req);
} else {
- req->has_sg = false;
+ block_acct_start(blk_get_stats(n->conf.blk), &req->acct, req->iov.size,
+ acct);
req->aiocb = is_write ?
blk_aio_pwritev(n->conf.blk, data_offset, &req->iov, 0, nvme_rw_cb,
req) :
@@ -440,27 +708,29 @@ static uint16_t nvme_rw(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
return NVME_NO_COMPLETE;
}
-static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
+static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeRequest *req)
{
- NvmeNamespace *ns;
- uint32_t nsid = le32_to_cpu(cmd->nsid);
+ uint32_t nsid = le32_to_cpu(req->cmd.nsid);
+
+ trace_pci_nvme_io_cmd(nvme_cid(req), nsid, nvme_sqid(req),
+ req->cmd.opcode);
if (unlikely(nsid == 0 || nsid > n->num_namespaces)) {
trace_pci_nvme_err_invalid_ns(nsid, n->num_namespaces);
return NVME_INVALID_NSID | NVME_DNR;
}
- ns = &n->namespaces[nsid - 1];
- switch (cmd->opcode) {
+ req->ns = &n->namespaces[nsid - 1];
+ switch (req->cmd.opcode) {
case NVME_CMD_FLUSH:
- return nvme_flush(n, ns, cmd, req);
- case NVME_CMD_WRITE_ZEROS:
- return nvme_write_zeros(n, ns, cmd, req);
+ return nvme_flush(n, req);
+ case NVME_CMD_WRITE_ZEROES:
+ return nvme_write_zeroes(n, req);
case NVME_CMD_WRITE:
case NVME_CMD_READ:
- return nvme_rw(n, ns, cmd, req);
+ return nvme_rw(n, req);
default:
- trace_pci_nvme_err_invalid_opc(cmd->opcode);
+ trace_pci_nvme_err_invalid_opc(req->cmd.opcode);
return NVME_INVALID_OPCODE | NVME_DNR;
}
}
@@ -476,10 +746,10 @@ static void nvme_free_sq(NvmeSQueue *sq, NvmeCtrl *n)
}
}
-static uint16_t nvme_del_sq(NvmeCtrl *n, NvmeCmd *cmd)
+static uint16_t nvme_del_sq(NvmeCtrl *n, NvmeRequest *req)
{
- NvmeDeleteQ *c = (NvmeDeleteQ *)cmd;
- NvmeRequest *req, *next;
+ NvmeDeleteQ *c = (NvmeDeleteQ *)&req->cmd;
+ NvmeRequest *r, *next;
NvmeSQueue *sq;
NvmeCQueue *cq;
uint16_t qid = le16_to_cpu(c->qid);
@@ -493,19 +763,19 @@ static uint16_t nvme_del_sq(NvmeCtrl *n, NvmeCmd *cmd)
sq = n->sq[qid];
while (!QTAILQ_EMPTY(&sq->out_req_list)) {
- req = QTAILQ_FIRST(&sq->out_req_list);
- assert(req->aiocb);
- blk_aio_cancel(req->aiocb);
+ r = QTAILQ_FIRST(&sq->out_req_list);
+ assert(r->aiocb);
+ blk_aio_cancel(r->aiocb);
}
if (!nvme_check_cqid(n, sq->cqid)) {
cq = n->cq[sq->cqid];
QTAILQ_REMOVE(&cq->sq_list, sq, entry);
nvme_post_cqes(cq);
- QTAILQ_FOREACH_SAFE(req, &cq->req_list, entry, next) {
- if (req->sq == sq) {
- QTAILQ_REMOVE(&cq->req_list, req, entry);
- QTAILQ_INSERT_TAIL(&sq->req_list, req, entry);
+ QTAILQ_FOREACH_SAFE(r, &cq->req_list, entry, next) {
+ if (r->sq == sq) {
+ QTAILQ_REMOVE(&cq->req_list, r, entry);
+ QTAILQ_INSERT_TAIL(&sq->req_list, r, entry);
}
}
}
@@ -526,7 +796,7 @@ static void nvme_init_sq(NvmeSQueue *sq, NvmeCtrl *n, uint64_t dma_addr,
sq->size = size;
sq->cqid = cqid;
sq->head = sq->tail = 0;
- sq->io_req = g_new(NvmeRequest, sq->size);
+ sq->io_req = g_new0(NvmeRequest, sq->size);
QTAILQ_INIT(&sq->req_list);
QTAILQ_INIT(&sq->out_req_list);
@@ -542,10 +812,10 @@ static void nvme_init_sq(NvmeSQueue *sq, NvmeCtrl *n, uint64_t dma_addr,
n->sq[sqid] = sq;
}
-static uint16_t nvme_create_sq(NvmeCtrl *n, NvmeCmd *cmd)
+static uint16_t nvme_create_sq(NvmeCtrl *n, NvmeRequest *req)
{
NvmeSQueue *sq;
- NvmeCreateSq *c = (NvmeCreateSq *)cmd;
+ NvmeCreateSq *c = (NvmeCreateSq *)&req->cmd;
uint16_t cqid = le16_to_cpu(c->cqid);
uint16_t sqid = le16_to_cpu(c->sqid);
@@ -580,6 +850,162 @@ static uint16_t nvme_create_sq(NvmeCtrl *n, NvmeCmd *cmd)
return NVME_SUCCESS;
}
+static uint16_t nvme_smart_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len,
+ uint64_t off, NvmeRequest *req)
+{
+ NvmeCmd *cmd = &req->cmd;
+ uint64_t prp1 = le64_to_cpu(cmd->dptr.prp1);
+ uint64_t prp2 = le64_to_cpu(cmd->dptr.prp2);
+ uint32_t nsid = le32_to_cpu(cmd->nsid);
+
+ uint32_t trans_len;
+ time_t current_ms;
+ uint64_t units_read = 0, units_written = 0;
+ uint64_t read_commands = 0, write_commands = 0;
+ NvmeSmartLog smart;
+ BlockAcctStats *s;
+
+ if (nsid && nsid != 0xffffffff) {
+ return NVME_INVALID_FIELD | NVME_DNR;
+ }
+
+ s = blk_get_stats(n->conf.blk);
+
+ units_read = s->nr_bytes[BLOCK_ACCT_READ] >> BDRV_SECTOR_BITS;
+ units_written = s->nr_bytes[BLOCK_ACCT_WRITE] >> BDRV_SECTOR_BITS;
+ read_commands = s->nr_ops[BLOCK_ACCT_READ];
+ write_commands = s->nr_ops[BLOCK_ACCT_WRITE];
+
+ if (off > sizeof(smart)) {
+ return NVME_INVALID_FIELD | NVME_DNR;
+ }
+
+ trans_len = MIN(sizeof(smart) - off, buf_len);
+
+ memset(&smart, 0x0, sizeof(smart));
+
+ smart.data_units_read[0] = cpu_to_le64(DIV_ROUND_UP(units_read, 1000));
+ smart.data_units_written[0] = cpu_to_le64(DIV_ROUND_UP(units_written,
+ 1000));
+ smart.host_read_commands[0] = cpu_to_le64(read_commands);
+ smart.host_write_commands[0] = cpu_to_le64(write_commands);
+
+ smart.temperature = cpu_to_le16(n->temperature);
+
+ if ((n->temperature >= n->features.temp_thresh_hi) ||
+ (n->temperature <= n->features.temp_thresh_low)) {
+ smart.critical_warning |= NVME_SMART_TEMPERATURE;
+ }
+
+ current_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
+ smart.power_on_hours[0] =
+ cpu_to_le64((((current_ms - n->starttime_ms) / 1000) / 60) / 60);
+
+ if (!rae) {
+ nvme_clear_events(n, NVME_AER_TYPE_SMART);
+ }
+
+ return nvme_dma_prp(n, (uint8_t *) &smart + off, trans_len, prp1, prp2,
+ DMA_DIRECTION_FROM_DEVICE, req);
+}
+
+static uint16_t nvme_fw_log_info(NvmeCtrl *n, uint32_t buf_len, uint64_t off,
+ NvmeRequest *req)
+{
+ uint32_t trans_len;
+ NvmeCmd *cmd = &req->cmd;
+ uint64_t prp1 = le64_to_cpu(cmd->dptr.prp1);
+ uint64_t prp2 = le64_to_cpu(cmd->dptr.prp2);
+ NvmeFwSlotInfoLog fw_log = {
+ .afi = 0x1,
+ };
+
+ strpadcpy((char *)&fw_log.frs1, sizeof(fw_log.frs1), "1.0", ' ');
+
+ if (off > sizeof(fw_log)) {
+ return NVME_INVALID_FIELD | NVME_DNR;
+ }
+
+ trans_len = MIN(sizeof(fw_log) - off, buf_len);
+
+ return nvme_dma_prp(n, (uint8_t *) &fw_log + off, trans_len, prp1, prp2,
+ DMA_DIRECTION_FROM_DEVICE, req);
+}
+
+static uint16_t nvme_error_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len,
+ uint64_t off, NvmeRequest *req)
+{
+ uint32_t trans_len;
+ NvmeCmd *cmd = &req->cmd;
+ uint64_t prp1 = le64_to_cpu(cmd->dptr.prp1);
+ uint64_t prp2 = le64_to_cpu(cmd->dptr.prp2);
+ NvmeErrorLog errlog;
+
+ if (!rae) {
+ nvme_clear_events(n, NVME_AER_TYPE_ERROR);
+ }
+
+ if (off > sizeof(errlog)) {
+ return NVME_INVALID_FIELD | NVME_DNR;
+ }
+
+ memset(&errlog, 0x0, sizeof(errlog));
+
+ trans_len = MIN(sizeof(errlog) - off, buf_len);
+
+ return nvme_dma_prp(n, (uint8_t *)&errlog, trans_len, prp1, prp2,
+ DMA_DIRECTION_FROM_DEVICE, req);
+}
+
+static uint16_t nvme_get_log(NvmeCtrl *n, NvmeRequest *req)
+{
+ NvmeCmd *cmd = &req->cmd;
+
+ uint32_t dw10 = le32_to_cpu(cmd->cdw10);
+ uint32_t dw11 = le32_to_cpu(cmd->cdw11);
+ uint32_t dw12 = le32_to_cpu(cmd->cdw12);
+ uint32_t dw13 = le32_to_cpu(cmd->cdw13);
+ uint8_t lid = dw10 & 0xff;
+ uint8_t lsp = (dw10 >> 8) & 0xf;
+ uint8_t rae = (dw10 >> 15) & 0x1;
+ uint32_t numdl, numdu;
+ uint64_t off, lpol, lpou;
+ size_t len;
+ uint16_t status;
+
+ numdl = (dw10 >> 16);
+ numdu = (dw11 & 0xffff);
+ lpol = dw12;
+ lpou = dw13;
+
+ len = (((numdu << 16) | numdl) + 1) << 2;
+ off = (lpou << 32ULL) | lpol;
+
+ if (off & 0x3) {
+ return NVME_INVALID_FIELD | NVME_DNR;
+ }
+
+ trace_pci_nvme_get_log(nvme_cid(req), lid, lsp, rae, len, off);
+
+ status = nvme_check_mdts(n, len);
+ if (status) {
+ trace_pci_nvme_err_mdts(nvme_cid(req), len);
+ return status;
+ }
+
+ switch (lid) {
+ case NVME_LOG_ERROR_INFO:
+ return nvme_error_info(n, rae, len, off, req);
+ case NVME_LOG_SMART_INFO:
+ return nvme_smart_info(n, rae, len, off, req);
+ case NVME_LOG_FW_SLOT_INFO:
+ return nvme_fw_log_info(n, len, off, req);
+ default:
+ trace_pci_nvme_err_invalid_log_page(nvme_cid(req), lid);
+ return NVME_INVALID_FIELD | NVME_DNR;
+ }
+}
+
static void nvme_free_cq(NvmeCQueue *cq, NvmeCtrl *n)
{
n->cq[cq->cqid] = NULL;
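The length and offset math in nvme_get_log() uses the spec's 0's-based dword
counts; for example (worked numbers, not part of the patch):

    /*
     * numdl = 0x03ff, numdu = 0x0000, lpol = lpou = 0:
     *   len = (((0 << 16) | 0x3ff) + 1) << 2 = 1024 dwords = 4096 bytes
     *   off = 0 (must be dword aligned, hence the off & 0x3 check)
     */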
@@ -591,9 +1017,9 @@ static void nvme_free_cq(NvmeCQueue *cq, NvmeCtrl *n)
}
}
-static uint16_t nvme_del_cq(NvmeCtrl *n, NvmeCmd *cmd)
+static uint16_t nvme_del_cq(NvmeCtrl *n, NvmeRequest *req)
{
- NvmeDeleteQ *c = (NvmeDeleteQ *)cmd;
+ NvmeDeleteQ *c = (NvmeDeleteQ *)&req->cmd;
NvmeCQueue *cq;
uint16_t qid = le16_to_cpu(c->qid);
@@ -634,10 +1060,10 @@ static void nvme_init_cq(NvmeCQueue *cq, NvmeCtrl *n, uint64_t dma_addr,
cq->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, nvme_post_cqes, cq);
}
-static uint16_t nvme_create_cq(NvmeCtrl *n, NvmeCmd *cmd)
+static uint16_t nvme_create_cq(NvmeCtrl *n, NvmeRequest *req)
{
NvmeCQueue *cq;
- NvmeCreateCq *c = (NvmeCreateCq *)cmd;
+ NvmeCreateCq *c = (NvmeCreateCq *)&req->cmd;
uint16_t cqid = le16_to_cpu(c->cqid);
uint16_t vector = le16_to_cpu(c->irq_vector);
uint16_t qsize = le16_to_cpu(c->qsize);
@@ -675,23 +1101,32 @@ static uint16_t nvme_create_cq(NvmeCtrl *n, NvmeCmd *cmd)
cq = g_malloc0(sizeof(*cq));
nvme_init_cq(cq, n, prp1, cqid, vector, qsize + 1,
NVME_CQ_FLAGS_IEN(qflags));
+
+ /*
+ * It is only required to set qs_created when creating a completion queue;
+ * creating a submission queue without a matching completion queue will
+ * fail.
+ */
+ n->qs_created = true;
return NVME_SUCCESS;
}
-static uint16_t nvme_identify_ctrl(NvmeCtrl *n, NvmeIdentify *c)
+static uint16_t nvme_identify_ctrl(NvmeCtrl *n, NvmeRequest *req)
{
+ NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
uint64_t prp1 = le64_to_cpu(c->prp1);
uint64_t prp2 = le64_to_cpu(c->prp2);
trace_pci_nvme_identify_ctrl();
- return nvme_dma_read_prp(n, (uint8_t *)&n->id_ctrl, sizeof(n->id_ctrl),
- prp1, prp2);
+ return nvme_dma_prp(n, (uint8_t *)&n->id_ctrl, sizeof(n->id_ctrl), prp1,
+ prp2, DMA_DIRECTION_FROM_DEVICE, req);
}
-static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeIdentify *c)
+static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeRequest *req)
{
NvmeNamespace *ns;
+ NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
uint32_t nsid = le32_to_cpu(c->nsid);
uint64_t prp1 = le64_to_cpu(c->prp1);
uint64_t prp2 = le64_to_cpu(c->prp2);
@@ -705,12 +1140,13 @@ static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeIdentify *c)
ns = &n->namespaces[nsid - 1];
- return nvme_dma_read_prp(n, (uint8_t *)&ns->id_ns, sizeof(ns->id_ns),
- prp1, prp2);
+ return nvme_dma_prp(n, (uint8_t *)&ns->id_ns, sizeof(ns->id_ns), prp1,
+ prp2, DMA_DIRECTION_FROM_DEVICE, req);
}
-static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeIdentify *c)
+static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeRequest *req)
{
+ NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
static const int data_len = NVME_IDENTIFY_DATA_SIZE;
uint32_t min_nsid = le32_to_cpu(c->nsid);
uint64_t prp1 = le64_to_cpu(c->prp1);
@@ -721,6 +1157,16 @@ static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeIdentify *c)
trace_pci_nvme_identify_nslist(min_nsid);
+ /*
+ * Both 0xffffffff (NVME_NSID_BROADCAST) and 0xfffffffe are invalid values
+ * since the Active Namespace ID List should return namespaces with ids
+ * *higher* than the NSID specified in the command. This is also specified
+ * in the spec (NVM Express v1.3d, Section 5.15.4).
+ */
+ if (min_nsid >= NVME_NSID_BROADCAST - 1) {
+ return NVME_INVALID_NSID | NVME_DNR;
+ }
+
list = g_malloc0(data_len);
for (i = 0; i < n->num_namespaces; i++) {
if (i < min_nsid) {
@@ -731,28 +1177,84 @@ static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeIdentify *c)
break;
}
}
- ret = nvme_dma_read_prp(n, (uint8_t *)list, data_len, prp1, prp2);
+ ret = nvme_dma_prp(n, (uint8_t *)list, data_len, prp1, prp2,
+ DMA_DIRECTION_FROM_DEVICE, req);
g_free(list);
return ret;
}
-static uint16_t nvme_identify(NvmeCtrl *n, NvmeCmd *cmd)
+static uint16_t nvme_identify_ns_descr_list(NvmeCtrl *n, NvmeRequest *req)
{
- NvmeIdentify *c = (NvmeIdentify *)cmd;
+ NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
+ uint32_t nsid = le32_to_cpu(c->nsid);
+ uint64_t prp1 = le64_to_cpu(c->prp1);
+ uint64_t prp2 = le64_to_cpu(c->prp2);
+
+ uint8_t list[NVME_IDENTIFY_DATA_SIZE];
+
+ struct data {
+ struct {
+ NvmeIdNsDescr hdr;
+ uint8_t v[16];
+ } uuid;
+ };
+
+ struct data *ns_descrs = (struct data *)list;
+
+ trace_pci_nvme_identify_ns_descr_list(nsid);
+
+ if (unlikely(nsid == 0 || nsid > n->num_namespaces)) {
+ trace_pci_nvme_err_invalid_ns(nsid, n->num_namespaces);
+ return NVME_INVALID_NSID | NVME_DNR;
+ }
+
+ memset(list, 0x0, sizeof(list));
+
+ /*
+ * Because the NGUID and EUI64 fields are 0 in the Identify Namespace data
+ * structure, a Namespace UUID (nidt = 0x3) must be reported in the
+ * Namespace Identification Descriptor. Add a very basic Namespace UUID
+ * here.
+ */
+ ns_descrs->uuid.hdr.nidt = NVME_NIDT_UUID;
+ ns_descrs->uuid.hdr.nidl = NVME_NIDT_UUID_LEN;
+ stl_be_p(&ns_descrs->uuid.v, nsid);
+
+ return nvme_dma_prp(n, list, NVME_IDENTIFY_DATA_SIZE, prp1, prp2,
+ DMA_DIRECTION_FROM_DEVICE, req);
+}
+
+static uint16_t nvme_identify(NvmeCtrl *n, NvmeRequest *req)
+{
+ NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
switch (le32_to_cpu(c->cns)) {
case NVME_ID_CNS_NS:
- return nvme_identify_ns(n, c);
+ return nvme_identify_ns(n, req);
case NVME_ID_CNS_CTRL:
- return nvme_identify_ctrl(n, c);
+ return nvme_identify_ctrl(n, req);
case NVME_ID_CNS_NS_ACTIVE_LIST:
- return nvme_identify_nslist(n, c);
+ return nvme_identify_nslist(n, req);
+ case NVME_ID_CNS_NS_DESCR_LIST:
+ return nvme_identify_ns_descr_list(n, req);
default:
trace_pci_nvme_err_invalid_identify_cns(le32_to_cpu(c->cns));
return NVME_INVALID_FIELD | NVME_DNR;
}
}
+static uint16_t nvme_abort(NvmeCtrl *n, NvmeRequest *req)
+{
+ uint16_t sqid = le32_to_cpu(req->cmd.cdw10) & 0xffff;
+
+ req->cqe.result = 1;
+ if (nvme_check_sqid(n, sqid)) {
+ return NVME_INVALID_FIELD | NVME_DNR;
+ }
+
+ return NVME_SUCCESS;
+}
+
static inline void nvme_set_timestamp(NvmeCtrl *n, uint64_t ts)
{
trace_pci_nvme_setfeat_timestamp(ts);
@@ -793,52 +1295,150 @@ static inline uint64_t nvme_get_timestamp(const NvmeCtrl *n)
return cpu_to_le64(ts.all);
}
-static uint16_t nvme_get_feature_timestamp(NvmeCtrl *n, NvmeCmd *cmd)
+static uint16_t nvme_get_feature_timestamp(NvmeCtrl *n, NvmeRequest *req)
{
- uint64_t prp1 = le64_to_cpu(cmd->prp1);
- uint64_t prp2 = le64_to_cpu(cmd->prp2);
+ NvmeCmd *cmd = &req->cmd;
+ uint64_t prp1 = le64_to_cpu(cmd->dptr.prp1);
+ uint64_t prp2 = le64_to_cpu(cmd->dptr.prp2);
uint64_t timestamp = nvme_get_timestamp(n);
- return nvme_dma_read_prp(n, (uint8_t *)&timestamp,
- sizeof(timestamp), prp1, prp2);
+ return nvme_dma_prp(n, (uint8_t *)&timestamp, sizeof(timestamp), prp1,
+ prp2, DMA_DIRECTION_FROM_DEVICE, req);
}
-static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
+static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeRequest *req)
{
+ NvmeCmd *cmd = &req->cmd;
uint32_t dw10 = le32_to_cpu(cmd->cdw10);
+ uint32_t dw11 = le32_to_cpu(cmd->cdw11);
+ uint32_t nsid = le32_to_cpu(cmd->nsid);
uint32_t result;
+ uint8_t fid = NVME_GETSETFEAT_FID(dw10);
+ NvmeGetFeatureSelect sel = NVME_GETFEAT_SELECT(dw10);
+ uint16_t iv;
+
+ static const uint32_t nvme_feature_default[NVME_FID_MAX] = {
+ [NVME_ARBITRATION] = NVME_ARB_AB_NOLIMIT,
+ };
+
+ trace_pci_nvme_getfeat(nvme_cid(req), fid, sel, dw11);
+
+ if (!nvme_feature_support[fid]) {
+ return NVME_INVALID_FIELD | NVME_DNR;
+ }
+
+ if (nvme_feature_cap[fid] & NVME_FEAT_CAP_NS) {
+ if (!nsid || nsid > n->num_namespaces) {
+ /*
+ * The Reservation Notification Mask and Reservation Persistence
+ * features require a status code of Invalid Field in Command when
+ * NSID is 0xFFFFFFFF. Since the device does not support those
+ * features we can always return Invalid Namespace or Format as we
+ * should do for all other features.
+ */
+ return NVME_INVALID_NSID | NVME_DNR;
+ }
+ }
+
+ switch (sel) {
+ case NVME_GETFEAT_SELECT_CURRENT:
+ break;
+ case NVME_GETFEAT_SELECT_SAVED:
+ /* no features are saveable by the controller; fallthrough */
+ case NVME_GETFEAT_SELECT_DEFAULT:
+ goto defaults;
+ case NVME_GETFEAT_SELECT_CAP:
+ result = nvme_feature_cap[fid];
+ goto out;
+ }
+
+ switch (fid) {
+ case NVME_TEMPERATURE_THRESHOLD:
+ result = 0;
+
+ /*
+ * The controller only implements the Composite Temperature sensor, so
+ * return 0 for all other sensors.
+ */
+ if (NVME_TEMP_TMPSEL(dw11) != NVME_TEMP_TMPSEL_COMPOSITE) {
+ goto out;
+ }
- switch (dw10) {
+ switch (NVME_TEMP_THSEL(dw11)) {
+ case NVME_TEMP_THSEL_OVER:
+ result = n->features.temp_thresh_hi;
+ goto out;
+ case NVME_TEMP_THSEL_UNDER:
+ result = n->features.temp_thresh_low;
+ goto out;
+ }
+
+ return NVME_INVALID_FIELD | NVME_DNR;
case NVME_VOLATILE_WRITE_CACHE:
result = blk_enable_write_cache(n->conf.blk);
trace_pci_nvme_getfeat_vwcache(result ? "enabled" : "disabled");
+ goto out;
+ case NVME_ASYNCHRONOUS_EVENT_CONF:
+ result = n->features.async_config;
+ goto out;
+ case NVME_TIMESTAMP:
+ return nvme_get_feature_timestamp(n, req);
+ default:
+ break;
+ }
+
+defaults:
+ switch (fid) {
+ case NVME_TEMPERATURE_THRESHOLD:
+ result = 0;
+
+ if (NVME_TEMP_TMPSEL(dw11) != NVME_TEMP_TMPSEL_COMPOSITE) {
+ break;
+ }
+
+ if (NVME_TEMP_THSEL(dw11) == NVME_TEMP_THSEL_OVER) {
+ result = NVME_TEMPERATURE_WARNING;
+ }
+
break;
case NVME_NUMBER_OF_QUEUES:
- result = cpu_to_le32((n->params.max_ioqpairs - 1) |
- ((n->params.max_ioqpairs - 1) << 16));
+ result = (n->params.max_ioqpairs - 1) |
+ ((n->params.max_ioqpairs - 1) << 16);
trace_pci_nvme_getfeat_numq(result);
break;
- case NVME_TIMESTAMP:
- return nvme_get_feature_timestamp(n, cmd);
+ case NVME_INTERRUPT_VECTOR_CONF:
+ iv = dw11 & 0xffff;
+ if (iv >= n->params.max_ioqpairs + 1) {
+ return NVME_INVALID_FIELD | NVME_DNR;
+ }
+
+ result = iv;
+ if (iv == n->admin_cq.vector) {
+ result |= NVME_INTVC_NOCOALESCING;
+ }
+
+ break;
default:
- trace_pci_nvme_err_invalid_getfeat(dw10);
- return NVME_INVALID_FIELD | NVME_DNR;
+ result = nvme_feature_default[fid];
+ break;
}
- req->cqe.result = result;
+out:
+ req->cqe.result = cpu_to_le32(result);
return NVME_SUCCESS;
}
-static uint16_t nvme_set_feature_timestamp(NvmeCtrl *n, NvmeCmd *cmd)
+static uint16_t nvme_set_feature_timestamp(NvmeCtrl *n, NvmeRequest *req)
{
uint16_t ret;
uint64_t timestamp;
- uint64_t prp1 = le64_to_cpu(cmd->prp1);
- uint64_t prp2 = le64_to_cpu(cmd->prp2);
+ NvmeCmd *cmd = &req->cmd;
+ uint64_t prp1 = le64_to_cpu(cmd->dptr.prp1);
+ uint64_t prp2 = le64_to_cpu(cmd->dptr.prp2);
- ret = nvme_dma_write_prp(n, (uint8_t *)&timestamp,
- sizeof(timestamp), prp1, prp2);
+ ret = nvme_dma_prp(n, (uint8_t *)&timestamp, sizeof(timestamp), prp1,
+ prp2, DMA_DIRECTION_TO_DEVICE, req);
if (ret != NVME_SUCCESS) {
return ret;
}
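For orientation, the Get/Set Features handlers above decode CDW10 as laid out
in NVMe 1.3 (a summary, not part of the patch):

    /*
     * Get Features:
     *   fid = dw10[7:0]   e.g. 0x04 = Temperature Threshold
     *   sel = dw10[10:8]  0 = current, 1 = default, 2 = saved,
     *                     3 = supported capabilities
     * Set Features:
     *   save = dw10[31]   rejected here with NVME_FID_NOT_SAVEABLE
     */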
@@ -848,16 +1448,88 @@ static uint16_t nvme_set_feature_timestamp(NvmeCtrl *n, NvmeCmd *cmd)
return NVME_SUCCESS;
}
-static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
+static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeRequest *req)
{
+ NvmeCmd *cmd = &req->cmd;
uint32_t dw10 = le32_to_cpu(cmd->cdw10);
uint32_t dw11 = le32_to_cpu(cmd->cdw11);
+ uint32_t nsid = le32_to_cpu(cmd->nsid);
+ uint8_t fid = NVME_GETSETFEAT_FID(dw10);
+ uint8_t save = NVME_SETFEAT_SAVE(dw10);
+
+ trace_pci_nvme_setfeat(nvme_cid(req), fid, save, dw11);
- switch (dw10) {
+ if (save) {
+ return NVME_FID_NOT_SAVEABLE | NVME_DNR;
+ }
+
+ if (!nvme_feature_support[fid]) {
+ return NVME_INVALID_FIELD | NVME_DNR;
+ }
+
+ if (nvme_feature_cap[fid] & NVME_FEAT_CAP_NS) {
+ if (!nsid || (nsid != NVME_NSID_BROADCAST &&
+ nsid > n->num_namespaces)) {
+ return NVME_INVALID_NSID | NVME_DNR;
+ }
+ } else if (nsid && nsid != NVME_NSID_BROADCAST) {
+ if (nsid > n->num_namespaces) {
+ return NVME_INVALID_NSID | NVME_DNR;
+ }
+
+ return NVME_FEAT_NOT_NS_SPEC | NVME_DNR;
+ }
+
+ if (!(nvme_feature_cap[fid] & NVME_FEAT_CAP_CHANGE)) {
+ return NVME_FEAT_NOT_CHANGEABLE | NVME_DNR;
+ }
+
+ switch (fid) {
+ case NVME_TEMPERATURE_THRESHOLD:
+ if (NVME_TEMP_TMPSEL(dw11) != NVME_TEMP_TMPSEL_COMPOSITE) {
+ break;
+ }
+
+ switch (NVME_TEMP_THSEL(dw11)) {
+ case NVME_TEMP_THSEL_OVER:
+ n->features.temp_thresh_hi = NVME_TEMP_TMPTH(dw11);
+ break;
+ case NVME_TEMP_THSEL_UNDER:
+ n->features.temp_thresh_low = NVME_TEMP_TMPTH(dw11);
+ break;
+ default:
+ return NVME_INVALID_FIELD | NVME_DNR;
+ }
+
+ if (((n->temperature >= n->features.temp_thresh_hi) ||
+ (n->temperature <= n->features.temp_thresh_low)) &&
+ NVME_AEC_SMART(n->features.async_config) & NVME_SMART_TEMPERATURE) {
+ nvme_enqueue_event(n, NVME_AER_TYPE_SMART,
+ NVME_AER_INFO_SMART_TEMP_THRESH,
+ NVME_LOG_SMART_INFO);
+ }
+
+ break;
case NVME_VOLATILE_WRITE_CACHE:
+ if (!(dw11 & 0x1) && blk_enable_write_cache(n->conf.blk)) {
+ blk_flush(n->conf.blk);
+ }
+
blk_set_enable_write_cache(n->conf.blk, dw11 & 1);
break;
case NVME_NUMBER_OF_QUEUES:
+ if (n->qs_created) {
+ return NVME_CMD_SEQ_ERROR | NVME_DNR;
+ }
+
+ /*
+ * NVMe v1.3, Section 5.21.1.7: 0xffff is not an allowed value for NCQR
+ * and NSQR.
+ */
+ if ((dw11 & 0xffff) == 0xffff || ((dw11 >> 16) & 0xffff) == 0xffff) {
+ return NVME_INVALID_FIELD | NVME_DNR;
+ }
+
trace_pci_nvme_setfeat_numq((dw11 & 0xFFFF) + 1,
((dw11 >> 16) & 0xFFFF) + 1,
n->params.max_ioqpairs,
@@ -865,34 +1537,63 @@ static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
req->cqe.result = cpu_to_le32((n->params.max_ioqpairs - 1) |
((n->params.max_ioqpairs - 1) << 16));
break;
+ case NVME_ASYNCHRONOUS_EVENT_CONF:
+ n->features.async_config = dw11;
+ break;
case NVME_TIMESTAMP:
- return nvme_set_feature_timestamp(n, cmd);
+ return nvme_set_feature_timestamp(n, req);
default:
- trace_pci_nvme_err_invalid_setfeat(dw10);
- return NVME_INVALID_FIELD | NVME_DNR;
+ return NVME_FEAT_NOT_CHANGEABLE | NVME_DNR;
}
return NVME_SUCCESS;
}
-static uint16_t nvme_admin_cmd(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
+static uint16_t nvme_aer(NvmeCtrl *n, NvmeRequest *req)
+{
+ trace_pci_nvme_aer(nvme_cid(req));
+
+ if (n->outstanding_aers > n->params.aerl) {
+ trace_pci_nvme_aer_aerl_exceeded();
+ return NVME_AER_LIMIT_EXCEEDED;
+ }
+
+ n->aer_reqs[n->outstanding_aers] = req;
+ n->outstanding_aers++;
+
+ if (!QTAILQ_EMPTY(&n->aer_queue)) {
+ nvme_process_aers(n);
+ }
+
+ return NVME_NO_COMPLETE;
+}
+
+static uint16_t nvme_admin_cmd(NvmeCtrl *n, NvmeRequest *req)
{
- switch (cmd->opcode) {
+ trace_pci_nvme_admin_cmd(nvme_cid(req), nvme_sqid(req), req->cmd.opcode);
+
+ switch (req->cmd.opcode) {
case NVME_ADM_CMD_DELETE_SQ:
- return nvme_del_sq(n, cmd);
+ return nvme_del_sq(n, req);
case NVME_ADM_CMD_CREATE_SQ:
- return nvme_create_sq(n, cmd);
+ return nvme_create_sq(n, req);
+ case NVME_ADM_CMD_GET_LOG_PAGE:
+ return nvme_get_log(n, req);
case NVME_ADM_CMD_DELETE_CQ:
- return nvme_del_cq(n, cmd);
+ return nvme_del_cq(n, req);
case NVME_ADM_CMD_CREATE_CQ:
- return nvme_create_cq(n, cmd);
+ return nvme_create_cq(n, req);
case NVME_ADM_CMD_IDENTIFY:
- return nvme_identify(n, cmd);
+ return nvme_identify(n, req);
+ case NVME_ADM_CMD_ABORT:
+ return nvme_abort(n, req);
case NVME_ADM_CMD_SET_FEATURES:
- return nvme_set_feature(n, cmd, req);
+ return nvme_set_feature(n, req);
case NVME_ADM_CMD_GET_FEATURES:
- return nvme_get_feature(n, cmd, req);
+ return nvme_get_feature(n, req);
+ case NVME_ADM_CMD_ASYNC_EV_REQ:
+ return nvme_aer(n, req);
default:
- trace_pci_nvme_err_invalid_admin_opc(cmd->opcode);
+ trace_pci_nvme_err_invalid_admin_opc(req->cmd.opcode);
return NVME_INVALID_OPCODE | NVME_DNR;
}
}
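Tying the pieces together, the asynchronous event machinery added by this
patch flows roughly as follows (a summary, not part of the patch):

    /*
     * 1. The host issues Asynchronous Event Request -> nvme_aer() parks
     *    the request in n->aer_reqs (up to aerl + 1 outstanding).
     * 2. A device-side event fires -> nvme_enqueue_event() queues it
     *    (up to aer_max_queued) and calls nvme_process_aers().
     * 3. nvme_process_aers() pairs queued events with parked requests,
     *    masks the event type, and posts a completion.
     * 4. The host reads the matching log page with RAE clear ->
     *    nvme_clear_events() unmasks the type so further events of that
     *    kind can be posted.
     */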
@@ -916,11 +1617,12 @@ static void nvme_process_sq(void *opaque)
req = QTAILQ_FIRST(&sq->req_list);
QTAILQ_REMOVE(&sq->req_list, req, entry);
QTAILQ_INSERT_TAIL(&sq->out_req_list, req, entry);
- memset(&req->cqe, 0, sizeof(req->cqe));
+ nvme_req_clear(req);
req->cqe.cid = cmd.cid;
+ memcpy(&req->cmd, &cmd, sizeof(NvmeCmd));
- status = sq->sqid ? nvme_io_cmd(n, &cmd, req) :
- nvme_admin_cmd(n, &cmd, req);
+ status = sq->sqid ? nvme_io_cmd(n, req) :
+ nvme_admin_cmd(n, req);
if (status != NVME_NO_COMPLETE) {
req->status = status;
nvme_enqueue_req_completion(cq, req);
@@ -945,6 +1647,16 @@ static void nvme_clear_ctrl(NvmeCtrl *n)
}
}
+ while (!QTAILQ_EMPTY(&n->aer_queue)) {
+ NvmeAsyncEvent *event = QTAILQ_FIRST(&n->aer_queue);
+ QTAILQ_REMOVE(&n->aer_queue, event, entry);
+ g_free(event);
+ }
+
+ n->aer_queued = 0;
+ n->outstanding_aers = 0;
+ n->qs_created = false;
+
blk_flush(n->conf.blk);
n->bar.cc = 0;
}
@@ -1041,6 +1753,8 @@ static int nvme_start_ctrl(NvmeCtrl *n)
nvme_set_timestamp(n, 0ULL);
+ QTAILQ_INIT(&n->aer_queue);
+
return 0;
}
@@ -1204,6 +1918,8 @@ static uint64_t nvme_mmio_read(void *opaque, hwaddr addr, unsigned size)
uint8_t *ptr = (uint8_t *)&n->bar;
uint64_t val = 0;
+ trace_pci_nvme_mmio_read(addr);
+
if (unlikely(addr & (sizeof(uint32_t) - 1))) {
NVME_GUEST_ERR(pci_nvme_ub_mmiord_misaligned32,
"MMIO read not 32-bit aligned,"
@@ -1260,6 +1976,26 @@ static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val)
"completion queue doorbell write"
" for nonexistent queue,"
" sqid=%"PRIu32", ignoring", qid);
+
+ /*
+ * NVM Express v1.3d, Section 4.1 states: "If host software writes
+ * an invalid value to the Submission Queue Tail Doorbell or
+ * Completion Queue Head Doorbell register and an Asynchronous Event
+ * Request command is outstanding, then an asynchronous event is
+ * posted to the Admin Completion Queue with a status code of
+ * Invalid Doorbell Write Value."
+ *
+ * Also note that the spec includes the "Invalid Doorbell Register"
+ * status code, but nowhere does it specify when to use it.
+ * However, it seems reasonable to use it here in a similar
+ * fashion.
+ */
+ if (n->outstanding_aers) {
+ nvme_enqueue_event(n, NVME_AER_TYPE_ERROR,
+ NVME_AER_INFO_ERR_INVALID_DB_REGISTER,
+ NVME_LOG_ERROR_INFO);
+ }
+
return;
}
@@ -1270,9 +2006,18 @@ static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val)
" beyond queue size, sqid=%"PRIu32","
" new_head=%"PRIu16", ignoring",
qid, new_head);
+
+ if (n->outstanding_aers) {
+ nvme_enqueue_event(n, NVME_AER_TYPE_ERROR,
+ NVME_AER_INFO_ERR_INVALID_DB_VALUE,
+ NVME_LOG_ERROR_INFO);
+ }
+
return;
}
+ trace_pci_nvme_mmio_doorbell_cq(cq->cqid, new_head);
+
start_sqs = nvme_cq_full(cq) ? 1 : 0;
cq->head = new_head;
if (start_sqs) {
@@ -1298,6 +2043,13 @@ static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val)
"submission queue doorbell write"
" for nonexistent queue,"
" sqid=%"PRIu32", ignoring", qid);
+
+ if (n->outstanding_aers) {
+ nvme_enqueue_event(n, NVME_AER_TYPE_ERROR,
+ NVME_AER_INFO_ERR_INVALID_DB_REGISTER,
+ NVME_LOG_ERROR_INFO);
+ }
+
return;
}
@@ -1308,9 +2060,18 @@ static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val)
" beyond queue size, sqid=%"PRIu32","
" new_tail=%"PRIu16", ignoring",
qid, new_tail);
+
+ if (n->outstanding_aers) {
+ nvme_enqueue_event(n, NVME_AER_TYPE_ERROR,
+ NVME_AER_INFO_ERR_INVALID_DB_VALUE,
+ NVME_LOG_ERROR_INFO);
+ }
+
return;
}
+ trace_pci_nvme_mmio_doorbell_sq(sq->sqid, new_tail);
+
sq->tail = new_tail;
timer_mod(sq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
}
@@ -1320,9 +2081,12 @@ static void nvme_mmio_write(void *opaque, hwaddr addr, uint64_t data,
unsigned size)
{
NvmeCtrl *n = (NvmeCtrl *)opaque;
+
+ trace_pci_nvme_mmio_write(addr, data);
+
if (addr < sizeof(n->bar)) {
nvme_write_bar(n, addr, data, size);
- } else if (addr >= 0x1000) {
+ } else {
nvme_process_db(n, addr, data);
}
}
@@ -1415,11 +2179,15 @@ static void nvme_init_state(NvmeCtrl *n)
{
n->num_namespaces = 1;
/* add one to max_ioqpairs to account for the admin queue pair */
- n->reg_size = pow2ceil(NVME_REG_SIZE +
+ n->reg_size = pow2ceil(sizeof(NvmeBar) +
2 * (n->params.max_ioqpairs + 1) * NVME_DB_SIZE);
n->namespaces = g_new0(NvmeNamespace, n->num_namespaces);
n->sq = g_new0(NvmeSQueue *, n->params.max_ioqpairs + 1);
n->cq = g_new0(NvmeCQueue *, n->params.max_ioqpairs + 1);
+ n->temperature = NVME_TEMPERATURE;
+ n->features.temp_thresh_hi = NVME_TEMPERATURE_WARNING;
+ n->starttime_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
+ n->aer_reqs = g_new0(NvmeRequest *, n->params.aerl + 1);
}
static void nvme_init_blk(NvmeCtrl *n, Error **errp)
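The BAR sizing above can be sanity-checked with the defaults (a worked
example; it assumes the fixed register block occupies the full 4 KiB before
the doorbells, which start at offset 0x1000 per the spec):

    /*
     * max_ioqpairs = 64, NVME_DB_SIZE = 4:
     *   doorbell area = 2 * (64 + 1) * 4 = 520 bytes
     *   reg_size      = pow2ceil(0x1000 + 520) = 0x2000 (8 KiB)
     */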
@@ -1459,7 +2227,7 @@ static void nvme_init_cmb(NvmeCtrl *n, PCIDevice *pci_dev)
NVME_CMBSZ_SET_SQS(n->bar.cmbsz, 1);
NVME_CMBSZ_SET_CQS(n->bar.cmbsz, 0);
- NVME_CMBSZ_SET_LISTS(n->bar.cmbsz, 0);
+ NVME_CMBSZ_SET_LISTS(n->bar.cmbsz, 1);
NVME_CMBSZ_SET_RDS(n->bar.cmbsz, 1);
NVME_CMBSZ_SET_WDS(n->bar.cmbsz, 1);
NVME_CMBSZ_SET_SZU(n->bar.cmbsz, 2); /* MBs */
@@ -1551,6 +2319,7 @@ static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev)
{
NvmeIdCtrl *id = &n->id_ctrl;
uint8_t *pci_conf = pci_dev->config;
+ char *subnqn;
id->vid = cpu_to_le16(pci_get_word(pci_conf + PCI_VENDOR_ID));
id->ssvid = cpu_to_le16(pci_get_word(pci_conf + PCI_SUBSYSTEM_VENDOR_ID));
@@ -1561,13 +2330,40 @@ static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev)
id->ieee[0] = 0x00;
id->ieee[1] = 0x02;
id->ieee[2] = 0xb3;
+ id->mdts = n->params.mdts;
+ id->ver = cpu_to_le32(NVME_SPEC_VER);
id->oacs = cpu_to_le16(0);
- id->frmw = 7 << 1;
- id->lpa = 1 << 0;
+
+ /*
+ * Because the controller always completes the Abort command immediately,
+ * there can never be more than one concurrently executing Abort command,
+ * so this value is never used for anything. Note that there can easily be
+ * many Abort commands in the queues, but they are not considered
+ * "executing" until processed by nvme_abort.
+ *
+ * The specification recommends a value of 3 for Abort Command Limit (four
+ * concurrently outstanding Abort commands), so let's use that, though it is
+ * inconsequential.
+ */
+ id->acl = 3;
+ id->aerl = n->params.aerl;
+ id->frmw = (NVME_NUM_FW_SLOTS << 1) | NVME_FRMW_SLOT1_RO;
+ id->lpa = NVME_LPA_EXTENDED;
+
+ /* recommended default value (~70 C) */
+ id->wctemp = cpu_to_le16(NVME_TEMPERATURE_WARNING);
+ id->cctemp = cpu_to_le16(NVME_TEMPERATURE_CRITICAL);
+
id->sqes = (0x6 << 4) | 0x6;
id->cqes = (0x4 << 4) | 0x4;
id->nn = cpu_to_le32(n->num_namespaces);
- id->oncs = cpu_to_le16(NVME_ONCS_WRITE_ZEROS | NVME_ONCS_TIMESTAMP);
+ id->oncs = cpu_to_le16(NVME_ONCS_WRITE_ZEROES | NVME_ONCS_TIMESTAMP |
+ NVME_ONCS_FEATURES);
+
+ subnqn = g_strdup_printf("nqn.2019-08.org.qemu:%s", n->params.serial);
+ strpadcpy((char *)id->subnqn, sizeof(id->subnqn), subnqn, '\0');
+ g_free(subnqn);
+
id->psd[0].mp = cpu_to_le16(0x9c4);
id->psd[0].enlat = cpu_to_le32(0x10);
id->psd[0].exlat = cpu_to_le32(0x4);
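As an example of the NQN construction above, a controller started with
serial=deadbeef reports the subsystem NVMe Qualified Name:

    nqn.2019-08.org.qemu:deadbeef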
@@ -1582,7 +2378,7 @@ static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev)
NVME_CAP_SET_CSS(n->bar.cap, 1);
NVME_CAP_SET_MPSMAX(n->bar.cap, 4);
- n->bar.vs = 0x00010200;
+ n->bar.vs = NVME_SPEC_VER;
n->bar.intmc = n->bar.intms = 0;
}
@@ -1631,6 +2427,7 @@ static void nvme_exit(PCIDevice *pci_dev)
g_free(n->namespaces);
g_free(n->cq);
g_free(n->sq);
+ g_free(n->aer_reqs);
if (n->params.cmb_size_mb) {
g_free(n->cmbuf);
@@ -1651,6 +2448,9 @@ static Property nvme_props[] = {
DEFINE_PROP_UINT32("num_queues", NvmeCtrl, params.num_queues, 0),
DEFINE_PROP_UINT32("max_ioqpairs", NvmeCtrl, params.max_ioqpairs, 64),
DEFINE_PROP_UINT16("msix_qsize", NvmeCtrl, params.msix_qsize, 65),
+ DEFINE_PROP_UINT8("aerl", NvmeCtrl, params.aerl, 3),
+ DEFINE_PROP_UINT32("aer_max_queued", NvmeCtrl, params.aer_max_queued, 64),
+ DEFINE_PROP_UINT8("mdts", NvmeCtrl, params.mdts, 7),
DEFINE_PROP_END_OF_LIST(),
};
diff --git a/hw/block/nvme.h b/hw/block/nvme.h
index 1d30c0bca2..52ba794f2e 100644
--- a/hw/block/nvme.h
+++ b/hw/block/nvme.h
@@ -9,19 +9,23 @@ typedef struct NvmeParams {
uint32_t max_ioqpairs;
uint16_t msix_qsize;
uint32_t cmb_size_mb;
+ uint8_t aerl;
+ uint32_t aer_max_queued;
+ uint8_t mdts;
} NvmeParams;
typedef struct NvmeAsyncEvent {
- QSIMPLEQ_ENTRY(NvmeAsyncEvent) entry;
+ QTAILQ_ENTRY(NvmeAsyncEvent) entry;
NvmeAerResult result;
} NvmeAsyncEvent;
typedef struct NvmeRequest {
struct NvmeSQueue *sq;
+ struct NvmeNamespace *ns;
BlockAIOCB *aiocb;
uint16_t status;
- bool has_sg;
NvmeCqe cqe;
+ NvmeCmd cmd;
BlockAcctCookie acct;
QEMUSGList qsg;
QEMUIOVector iov;
@@ -77,6 +81,14 @@ static inline uint8_t nvme_ns_lbads(NvmeNamespace *ns)
#define NVME(obj) \
OBJECT_CHECK(NvmeCtrl, (obj), TYPE_NVME)
+typedef struct NvmeFeatureVal {
+ struct {
+ uint16_t temp_thresh_hi;
+ uint16_t temp_thresh_low;
+ };
+ uint32_t async_config;
+} NvmeFeatureVal;
+
typedef struct NvmeCtrl {
PCIDevice parent_obj;
MemoryRegion iomem;
@@ -85,6 +97,7 @@ typedef struct NvmeCtrl {
BlockConf conf;
NvmeParams params;
+ bool qs_created;
uint32_t page_size;
uint16_t page_bits;
uint16_t max_prp_ents;
@@ -94,19 +107,28 @@ typedef struct NvmeCtrl {
uint32_t num_namespaces;
uint32_t max_q_ents;
uint64_t ns_size;
+ uint8_t outstanding_aers;
uint8_t *cmbuf;
uint32_t irq_status;
uint64_t host_timestamp; /* Timestamp sent by the host */
uint64_t timestamp_set_qemu_clock_ms; /* QEMU clock time */
+ uint64_t starttime_ms;
+ uint16_t temperature;
HostMemoryBackend *pmrdev;
+ uint8_t aer_mask;
+ NvmeRequest **aer_reqs;
+ QTAILQ_HEAD(, NvmeAsyncEvent) aer_queue;
+ int aer_queued;
+
NvmeNamespace *namespaces;
NvmeSQueue **sq;
NvmeCQueue **cq;
NvmeSQueue admin_sq;
NvmeCQueue admin_cq;
NvmeIdCtrl id_ctrl;
+ NvmeFeatureVal features;
} NvmeCtrl;
 /* calculate the number of LBAs that the namespace can accommodate */
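
The new AER fields above suggest the bookkeeping scheme: the host may keep at most aerl + 1 Asynchronous Event Request commands outstanding (the field is zero's based), completed events wait in aer_queue until an AER command is available, and aer_max_queued bounds how many waiting events are retained. A hedged sketch of the two admission checks those limits imply, using illustrative names:

    #include <stdbool.h>
    #include <stdint.h>

    /* Sketch only: the two limits the new fields imply. */
    typedef struct {
        uint8_t  outstanding;   /* AER commands awaiting an event */
        uint8_t  aerl;          /* zero's based limit (property)  */
        uint32_t queued;        /* events awaiting an AER command */
        uint32_t max_queued;    /* aer_max_queued property        */
    } AerLimitsSketch;

    static bool aer_cmd_admit(AerLimitsSketch *s)
    {
        if (s->outstanding > s->aerl) {
            return false;       /* would exceed aerl + 1 outstanding */
        }
        s->outstanding++;
        return true;
    }

    static bool aer_event_admit(AerLimitsSketch *s)
    {
        if (s->queued >= s->max_queued) {
            return false;       /* queue full: the event is dropped */
        }
        s->queued++;
        return true;
    }
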
diff --git a/hw/block/pflash_cfi01.c b/hw/block/pflash_cfi01.c
index 8ab1d66310..f0fcd63f84 100644
--- a/hw/block/pflash_cfi01.c
+++ b/hw/block/pflash_cfi01.c
@@ -213,7 +213,6 @@ static uint32_t pflash_devid_query(PFlashCFI01 *pfl, hwaddr offset)
default:
trace_pflash_device_info(offset);
return 0;
- break;
}
/* Replicate responses for each device in bank. */
if (pfl->device_width < pfl->bank_width) {
diff --git a/hw/block/trace-events b/hw/block/trace-events
index 958fcc5508..72cf2d15cb 100644
--- a/hw/block/trace-events
+++ b/hw/block/trace-events
@@ -33,19 +33,44 @@ pci_nvme_irq_msix(uint32_t vector) "raising MSI-X IRQ vector %u"
pci_nvme_irq_pin(void) "pulsing IRQ pin"
pci_nvme_irq_masked(void) "IRQ is masked"
pci_nvme_dma_read(uint64_t prp1, uint64_t prp2) "DMA read, prp1=0x%"PRIx64" prp2=0x%"PRIx64""
+pci_nvme_map_addr(uint64_t addr, uint64_t len) "addr 0x%"PRIx64" len %"PRIu64""
+pci_nvme_map_addr_cmb(uint64_t addr, uint64_t len) "addr 0x%"PRIx64" len %"PRIu64""
+pci_nvme_map_prp(uint64_t trans_len, uint32_t len, uint64_t prp1, uint64_t prp2, int num_prps) "trans_len %"PRIu64" len %"PRIu32" prp1 0x%"PRIx64" prp2 0x%"PRIx64" num_prps %d"
+pci_nvme_io_cmd(uint16_t cid, uint32_t nsid, uint16_t sqid, uint8_t opcode) "cid %"PRIu16" nsid %"PRIu32" sqid %"PRIu16" opc 0x%"PRIx8""
+pci_nvme_admin_cmd(uint16_t cid, uint16_t sqid, uint8_t opcode) "cid %"PRIu16" sqid %"PRIu16" opc 0x%"PRIx8""
pci_nvme_rw(const char *verb, uint32_t blk_count, uint64_t byte_count, uint64_t lba) "%s %"PRIu32" blocks (%"PRIu64" bytes) from LBA %"PRIu64""
+pci_nvme_rw_cb(uint16_t cid) "cid %"PRIu16""
+pci_nvme_write_zeroes(uint16_t cid, uint64_t slba, uint32_t nlb) "cid %"PRIu16" slba %"PRIu64" nlb %"PRIu32""
pci_nvme_create_sq(uint64_t addr, uint16_t sqid, uint16_t cqid, uint16_t qsize, uint16_t qflags) "create submission queue, addr=0x%"PRIx64", sqid=%"PRIu16", cqid=%"PRIu16", qsize=%"PRIu16", qflags=%"PRIu16""
pci_nvme_create_cq(uint64_t addr, uint16_t cqid, uint16_t vector, uint16_t size, uint16_t qflags, int ien) "create completion queue, addr=0x%"PRIx64", cqid=%"PRIu16", vector=%"PRIu16", qsize=%"PRIu16", qflags=%"PRIu16", ien=%d"
pci_nvme_del_sq(uint16_t qid) "deleting submission queue sqid=%"PRIu16""
pci_nvme_del_cq(uint16_t cqid) "deleted completion queue, cqid=%"PRIu16""
pci_nvme_identify_ctrl(void) "identify controller"
-pci_nvme_identify_ns(uint16_t ns) "identify namespace, nsid=%"PRIu16""
-pci_nvme_identify_nslist(uint16_t ns) "identify namespace list, nsid=%"PRIu16""
+pci_nvme_identify_ns(uint32_t ns) "nsid %"PRIu32""
+pci_nvme_identify_nslist(uint32_t ns) "nsid %"PRIu32""
+pci_nvme_identify_ns_descr_list(uint32_t ns) "nsid %"PRIu32""
+pci_nvme_get_log(uint16_t cid, uint8_t lid, uint8_t lsp, uint8_t rae, uint32_t len, uint64_t off) "cid %"PRIu16" lid 0x%"PRIx8" lsp 0x%"PRIx8" rae 0x%"PRIx8" len %"PRIu32" off %"PRIu64""
+pci_nvme_getfeat(uint16_t cid, uint8_t fid, uint8_t sel, uint32_t cdw11) "cid %"PRIu16" fid 0x%"PRIx8" sel 0x%"PRIx8" cdw11 0x%"PRIx32""
+pci_nvme_setfeat(uint16_t cid, uint8_t fid, uint8_t save, uint32_t cdw11) "cid %"PRIu16" fid 0x%"PRIx8" save 0x%"PRIx8" cdw11 0x%"PRIx32""
pci_nvme_getfeat_vwcache(const char* result) "get feature volatile write cache, result=%s"
pci_nvme_getfeat_numq(int result) "get feature number of queues, result=%d"
pci_nvme_setfeat_numq(int reqcq, int reqsq, int gotcq, int gotsq) "requested cq_count=%d sq_count=%d, responding with cq_count=%d sq_count=%d"
pci_nvme_setfeat_timestamp(uint64_t ts) "set feature timestamp = 0x%"PRIx64""
pci_nvme_getfeat_timestamp(uint64_t ts) "get feature timestamp = 0x%"PRIx64""
+pci_nvme_process_aers(int queued) "queued %d"
+pci_nvme_aer(uint16_t cid) "cid %"PRIu16""
+pci_nvme_aer_aerl_exceeded(void) "aerl exceeded"
+pci_nvme_aer_masked(uint8_t type, uint8_t mask) "type 0x%"PRIx8" mask 0x%"PRIx8""
+pci_nvme_aer_post_cqe(uint8_t typ, uint8_t info, uint8_t log_page) "type 0x%"PRIx8" info 0x%"PRIx8" lid 0x%"PRIx8""
+pci_nvme_enqueue_event(uint8_t typ, uint8_t info, uint8_t log_page) "type 0x%"PRIx8" info 0x%"PRIx8" lid 0x%"PRIx8""
+pci_nvme_enqueue_event_noqueue(int queued) "queued %d"
+pci_nvme_enqueue_event_masked(uint8_t typ) "type 0x%"PRIx8""
+pci_nvme_no_outstanding_aers(void) "ignoring event; no outstanding AERs"
+pci_nvme_enqueue_req_completion(uint16_t cid, uint16_t cqid, uint16_t status) "cid %"PRIu16" cqid %"PRIu16" status 0x%"PRIx16""
+pci_nvme_mmio_read(uint64_t addr) "addr 0x%"PRIx64""
+pci_nvme_mmio_write(uint64_t addr, uint64_t data) "addr 0x%"PRIx64" data 0x%"PRIx64""
+pci_nvme_mmio_doorbell_cq(uint16_t cqid, uint16_t new_head) "cqid %"PRIu16" new_head %"PRIu16""
+pci_nvme_mmio_doorbell_sq(uint16_t sqid, uint16_t new_tail) "sqid %"PRIu16" new_tail %"PRIu16""
pci_nvme_mmio_intm_set(uint64_t data, uint64_t new_mask) "wrote MMIO, interrupt mask set, data=0x%"PRIx64", new_mask=0x%"PRIx64""
pci_nvme_mmio_intm_clr(uint64_t data, uint64_t new_mask) "wrote MMIO, interrupt mask clr, data=0x%"PRIx64", new_mask=0x%"PRIx64""
pci_nvme_mmio_cfg(uint64_t data) "wrote MMIO, config controller config=0x%"PRIx64""
@@ -60,6 +85,7 @@ pci_nvme_mmio_shutdown_set(void) "shutdown bit set"
pci_nvme_mmio_shutdown_cleared(void) "shutdown bit cleared"
# nvme traces for error conditions
+pci_nvme_err_mdts(uint16_t cid, size_t len) "cid %"PRIu16" len %zu"
pci_nvme_err_invalid_dma(void) "PRP/SGL is too small for transfer size"
pci_nvme_err_invalid_prplist_ent(uint64_t prplist) "PRP list entry is null or not page aligned: 0x%"PRIx64""
pci_nvme_err_invalid_prp2_align(uint64_t prp2) "PRP2 is not page aligned: 0x%"PRIx64""
@@ -85,6 +111,7 @@ pci_nvme_err_invalid_create_cq_qflags(uint16_t qflags) "failed creating completi
pci_nvme_err_invalid_identify_cns(uint16_t cns) "identify, invalid cns=0x%"PRIx16""
pci_nvme_err_invalid_getfeat(int dw10) "invalid get features, dw10=0x%"PRIx32""
pci_nvme_err_invalid_setfeat(uint32_t dw10) "invalid set features, dw10=0x%"PRIx32""
+pci_nvme_err_invalid_log_page(uint16_t cid, uint16_t lid) "cid %"PRIu16" lid 0x%"PRIx16""
pci_nvme_err_startfail_cq(void) "nvme_start_ctrl failed because there are non-admin completion queues"
pci_nvme_err_startfail_sq(void) "nvme_start_ctrl failed because there are non-admin submission queues"
pci_nvme_err_startfail_nbarasq(void) "nvme_start_ctrl failed because the admin submission queue address is null"
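
For readers not steeped in QEMU's trace-events format: each line declares an event name, its C argument list, and a printf-style format string, and scripts/tracetool generates a matching trace_<name>() helper that device code calls directly. For example, a call site for one of the doorbell events above might look like:

    /* illustrative call site; the helper is generated by tracetool */
    trace_pci_nvme_mmio_doorbell_sq(sqid, new_tail);
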
diff --git a/hw/core/sysbus.c b/hw/core/sysbus.c
index 70239b7e7d..294f90b7de 100644
--- a/hw/core/sysbus.c
+++ b/hw/core/sysbus.c
@@ -64,7 +64,7 @@ void foreach_dynamic_sysbus_device(FindSysbusDeviceFunc *func, void *opaque)
.opaque = opaque,
};
- /* Loop through all sysbus devices that were spawened outside the machine */
+ /* Loop through all sysbus devices that were spawned outside the machine */
container = container_get(qdev_get_machine(), "/peripheral");
find_sysbus_device(container, &find);
container = container_get(qdev_get_machine(), "/peripheral-anon");
@@ -199,6 +199,7 @@ void sysbus_init_mmio(SysBusDevice *dev, MemoryRegion *memory)
MemoryRegion *sysbus_mmio_get_region(SysBusDevice *dev, int n)
{
+ assert(n >= 0 && n < QDEV_MAX_MMIO);
return dev->mmio[n].memory;
}
diff --git a/hw/display/cirrus_vga.c b/hw/display/cirrus_vga.c
index 212d6f5e61..02d9ed0bd4 100644
--- a/hw/display/cirrus_vga.c
+++ b/hw/display/cirrus_vga.c
@@ -1637,7 +1637,6 @@ static int cirrus_vga_read_cr(CirrusVGAState * s, unsigned reg_index)
return s->vga.cr[s->vga.cr_index];
case 0x26: // Attribute Controller Index Readback (R)
return s->vga.ar_index & 0x3f;
- break;
default:
qemu_log_mask(LOG_GUEST_ERROR,
"cirrus: inport cr_index 0x%02x\n", reg_index);
diff --git a/hw/display/qxl-logger.c b/hw/display/qxl-logger.c
index 2ec6d8fa3d..c15175bce3 100644
--- a/hw/display/qxl-logger.c
+++ b/hw/display/qxl-logger.c
@@ -161,7 +161,6 @@ static int qxl_log_cmd_draw(PCIQXLDevice *qxl, QXLDrawable *draw, int group_id)
switch (draw->type) {
case QXL_DRAW_COPY:
return qxl_log_cmd_draw_copy(qxl, &draw->u.copy, group_id);
- break;
}
return 0;
}
@@ -180,7 +179,6 @@ static int qxl_log_cmd_draw_compat(PCIQXLDevice *qxl, QXLCompatDrawable *draw,
switch (draw->type) {
case QXL_DRAW_COPY:
return qxl_log_cmd_draw_copy(qxl, &draw->u.copy, group_id);
- break;
}
return 0;
}
diff --git a/hw/display/vga.c b/hw/display/vga.c
index 061fd9ab8f..836ad50c7b 100644
--- a/hw/display/vga.c
+++ b/hw/display/vga.c
@@ -1674,7 +1674,6 @@ static void vga_draw_graphic(VGACommonState *s, int full_update)
if (!(s->cr[VGA_CRTC_MODE] & 2)) {
addr = (addr & ~0x8000) | ((y1 & 2) << 14);
}
- update = full_update;
page0 = addr & s->vbe_size_mask;
page1 = (addr + bwidth - 1) & s->vbe_size_mask;
if (full_update) {
diff --git a/hw/gpio/max7310.c b/hw/gpio/max7310.c
index bebb4030d2..4f78774dc8 100644
--- a/hw/gpio/max7310.c
+++ b/hw/gpio/max7310.c
@@ -51,11 +51,9 @@ static uint8_t max7310_rx(I2CSlave *i2c)
switch (s->command) {
case 0x00: /* Input port */
return s->level ^ s->polarity;
- break;
case 0x01: /* Output port */
return s->level & ~s->direction;
- break;
case 0x02: /* Polarity inversion */
return s->polarity;
@@ -65,7 +63,6 @@ static uint8_t max7310_rx(I2CSlave *i2c)
case 0x04: /* Timeout */
return s->status;
- break;
case 0xff: /* Reserved */
return 0xff;
diff --git a/hw/i386/amd_iommu.c b/hw/i386/amd_iommu.c
index 18411f1dec..74a93a5d93 100644
--- a/hw/i386/amd_iommu.c
+++ b/hw/i386/amd_iommu.c
@@ -1600,7 +1600,7 @@ static void amdvi_instance_init(Object *klass)
static void amdvi_class_init(ObjectClass *klass, void* data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- X86IOMMUClass *dc_class = X86_IOMMU_CLASS(klass);
+ X86IOMMUClass *dc_class = X86_IOMMU_DEVICE_CLASS(klass);
dc->reset = amdvi_reset;
dc->vmsd = &vmstate_amdvi;
diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c
index 5284bb68b6..749eb6ad63 100644
--- a/hw/i386/intel_iommu.c
+++ b/hw/i386/intel_iommu.c
@@ -3168,7 +3168,6 @@ static int vtd_irte_get(IntelIOMMUState *iommu, uint16_t index,
index, entry->irte.sid_vtype);
/* Take this as verification failure. */
return -VTD_FR_IR_SID_ERR;
- break;
}
}
@@ -3854,7 +3853,7 @@ static void vtd_realize(DeviceState *dev, Error **errp)
static void vtd_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- X86IOMMUClass *x86_class = X86_IOMMU_CLASS(klass);
+ X86IOMMUClass *x86_class = X86_IOMMU_DEVICE_CLASS(klass);
dc->reset = vtd_reset;
dc->vmsd = &vtd_vmstate;
diff --git a/hw/i386/kvm/ioapic.c b/hw/i386/kvm/ioapic.c
index 4ba8e47251..c5528df942 100644
--- a/hw/i386/kvm/ioapic.c
+++ b/hw/i386/kvm/ioapic.c
@@ -97,7 +97,7 @@ static void kvm_ioapic_put(IOAPICCommonState *s)
ret = kvm_vm_ioctl(kvm_state, KVM_SET_IRQCHIP, &chip);
if (ret < 0) {
- fprintf(stderr, "KVM_GET_IRQCHIP failed: %s\n", strerror(ret));
+ fprintf(stderr, "KVM_SET_IRQCHIP failed: %s\n", strerror(ret));
abort();
}
}
diff --git a/hw/i386/pc.c b/hw/i386/pc.c
index 5d8d5ef8b3..d11daacc23 100644
--- a/hw/i386/pc.c
+++ b/hw/i386/pc.c
@@ -1501,8 +1501,6 @@ static void pc_cpu_pre_plug(HotplugHandler *hotplug_dev,
init_topo_info(&topo_info, x86ms);
env->nr_dies = x86ms->smp_dies;
- env->nr_nodes = topo_info.nodes_per_pkg;
- env->pkg_offset = x86ms->apicid_pkg_offset(&topo_info);
/*
* If APIC ID is not set,
@@ -1557,14 +1555,14 @@ static void pc_cpu_pre_plug(HotplugHandler *hotplug_dev,
topo_ids.die_id = cpu->die_id;
topo_ids.core_id = cpu->core_id;
topo_ids.smt_id = cpu->thread_id;
- cpu->apic_id = x86ms->apicid_from_topo_ids(&topo_info, &topo_ids);
+ cpu->apic_id = x86_apicid_from_topo_ids(&topo_info, &topo_ids);
}
cpu_slot = pc_find_cpu_slot(MACHINE(pcms), cpu->apic_id, &idx);
if (!cpu_slot) {
MachineState *ms = MACHINE(pcms);
- x86ms->topo_ids_from_apicid(cpu->apic_id, &topo_info, &topo_ids);
+ x86_topo_ids_from_apicid(cpu->apic_id, &topo_info, &topo_ids);
error_setg(errp,
"Invalid CPU [socket: %u, die: %u, core: %u, thread: %u] with"
" APIC ID %" PRIu32 ", valid index range 0:%d",
@@ -1585,7 +1583,7 @@ static void pc_cpu_pre_plug(HotplugHandler *hotplug_dev,
/* TODO: move socket_id/core_id/thread_id checks into x86_cpu_realizefn()
* once -smp refactoring is complete and there will be CPU private
* CPUState::nr_cores and CPUState::nr_threads fields instead of globals */
- x86ms->topo_ids_from_apicid(cpu->apic_id, &topo_info, &topo_ids);
+ x86_topo_ids_from_apicid(cpu->apic_id, &topo_info, &topo_ids);
if (cpu->socket_id != -1 && cpu->socket_id != topo_ids.pkg_id) {
error_setg(errp, "property socket-id: %u doesn't match set apic-id:"
" 0x%x (socket-id: %u)", cpu->socket_id, cpu->apic_id,
diff --git a/hw/i386/x86-iommu.c b/hw/i386/x86-iommu.c
index 4104060e68..5f4301639c 100644
--- a/hw/i386/x86-iommu.c
+++ b/hw/i386/x86-iommu.c
@@ -107,7 +107,7 @@ IommuType x86_iommu_get_type(void)
static void x86_iommu_realize(DeviceState *dev, Error **errp)
{
X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(dev);
- X86IOMMUClass *x86_class = X86_IOMMU_GET_CLASS(dev);
+ X86IOMMUClass *x86_class = X86_IOMMU_DEVICE_GET_CLASS(dev);
MachineState *ms = MACHINE(qdev_get_machine());
MachineClass *mc = MACHINE_GET_CLASS(ms);
PCMachineState *pcms =
diff --git a/hw/i386/x86.c b/hw/i386/x86.c
index cf384b9743..c1954db152 100644
--- a/hw/i386/x86.c
+++ b/hw/i386/x86.c
@@ -62,29 +62,12 @@ inline void init_topo_info(X86CPUTopoInfo *topo_info,
{
MachineState *ms = MACHINE(x86ms);
- topo_info->nodes_per_pkg = ms->numa_state->num_nodes / ms->smp.sockets;
topo_info->dies_per_pkg = x86ms->smp_dies;
topo_info->cores_per_die = ms->smp.cores;
topo_info->threads_per_core = ms->smp.threads;
}
/*
- * Set up with the new EPYC topology handlers
- *
- * AMD uses different apic id encoding for EPYC based cpus. Override
- * the default topo handlers with EPYC encoding handlers.
- */
-static void x86_set_epyc_topo_handlers(MachineState *machine)
-{
- X86MachineState *x86ms = X86_MACHINE(machine);
-
- x86ms->apicid_from_cpu_idx = x86_apicid_from_cpu_idx_epyc;
- x86ms->topo_ids_from_apicid = x86_topo_ids_from_apicid_epyc;
- x86ms->apicid_from_topo_ids = x86_apicid_from_topo_ids_epyc;
- x86ms->apicid_pkg_offset = apicid_pkg_offset_epyc;
-}
-
-/*
* Calculates initial APIC ID for a specific CPU index
*
* Currently we need to be able to calculate the APIC ID from the CPU index
@@ -102,7 +85,7 @@ uint32_t x86_cpu_apic_id_from_index(X86MachineState *x86ms,
init_topo_info(&topo_info, x86ms);
- correct_id = x86ms->apicid_from_cpu_idx(&topo_info, cpu_index);
+ correct_id = x86_apicid_from_cpu_idx(&topo_info, cpu_index);
if (x86mc->compat_apic_id_mode) {
if (cpu_index != correct_id && !warned && !qtest_enabled()) {
error_report("APIC IDs set in compatibility mode, "
@@ -136,11 +119,6 @@ void x86_cpus_init(X86MachineState *x86ms, int default_cpu_version)
MachineState *ms = MACHINE(x86ms);
MachineClass *mc = MACHINE_GET_CLASS(x86ms);
- /* Check for apicid encoding */
- if (cpu_x86_use_epyc_apic_id_encoding(ms->cpu_type)) {
- x86_set_epyc_topo_handlers(ms);
- }
-
x86_cpu_set_default_version(default_cpu_version);
/*
@@ -154,12 +132,6 @@ void x86_cpus_init(X86MachineState *x86ms, int default_cpu_version)
x86ms->apic_id_limit = x86_cpu_apic_id_from_index(x86ms,
ms->smp.max_cpus - 1) + 1;
possible_cpus = mc->possible_cpu_arch_ids(ms);
-
- for (i = 0; i < ms->possible_cpus->len; i++) {
- ms->possible_cpus->cpus[i].arch_id =
- x86_cpu_apic_id_from_index(x86ms, i);
- }
-
for (i = 0; i < ms->smp.cpus; i++) {
x86_cpu_new(x86ms, possible_cpus->cpus[i].arch_id, &error_fatal);
}
@@ -184,7 +156,8 @@ int64_t x86_get_default_cpu_node_id(const MachineState *ms, int idx)
init_topo_info(&topo_info, x86ms);
assert(idx < ms->possible_cpus->len);
- x86_topo_ids_from_idx(&topo_info, idx, &topo_ids);
+ x86_topo_ids_from_apicid(ms->possible_cpus->cpus[idx].arch_id,
+ &topo_info, &topo_ids);
return topo_ids.pkg_id % ms->numa_state->num_nodes;
}
@@ -215,7 +188,10 @@ const CPUArchIdList *x86_possible_cpu_arch_ids(MachineState *ms)
ms->possible_cpus->cpus[i].type = ms->cpu_type;
ms->possible_cpus->cpus[i].vcpus_count = 1;
- x86_topo_ids_from_idx(&topo_info, i, &topo_ids);
+ ms->possible_cpus->cpus[i].arch_id =
+ x86_cpu_apic_id_from_index(x86ms, i);
+ x86_topo_ids_from_apicid(ms->possible_cpus->cpus[i].arch_id,
+ &topo_info, &topo_ids);
ms->possible_cpus->cpus[i].props.has_socket_id = true;
ms->possible_cpus->cpus[i].props.socket_id = topo_ids.pkg_id;
if (x86ms->smp_dies > 1) {
@@ -919,11 +895,6 @@ static void x86_machine_initfn(Object *obj)
x86ms->smm = ON_OFF_AUTO_AUTO;
x86ms->acpi = ON_OFF_AUTO_AUTO;
x86ms->smp_dies = 1;
-
- x86ms->apicid_from_cpu_idx = x86_apicid_from_cpu_idx;
- x86ms->topo_ids_from_apicid = x86_topo_ids_from_apicid;
- x86ms->apicid_from_topo_ids = x86_apicid_from_topo_ids;
- x86ms->apicid_pkg_offset = apicid_pkg_offset;
}
static void x86_machine_class_init(ObjectClass *oc, void *data)
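
For context on the EPYC handler indirection being removed: the default encoding, which x86_apicid_from_topo_ids implements and which the machine now uses unconditionally, packs the topology IDs into contiguous bit fields whose widths are ceil(log2(count)) at each level. A hedged sketch of that packing, with illustrative names:

    #include <stdint.h>

    /* bits needed to represent 'count' distinct IDs */
    static unsigned id_width(unsigned count)
    {
        unsigned w = 0;
        while ((1u << w) < count) {
            w++;
        }
        return w;
    }

    /* Sketch of the default APIC ID layout: SMT ID in the low bits,
     * then core, die, and package. */
    static uint32_t apicid_pack(unsigned pkg, unsigned die, unsigned core,
                                unsigned smt, unsigned dies_per_pkg,
                                unsigned cores_per_die,
                                unsigned threads_per_core)
    {
        unsigned core_off = id_width(threads_per_core);
        unsigned die_off  = core_off + id_width(cores_per_die);
        unsigned pkg_off  = die_off + id_width(dies_per_pkg);

        return (pkg << pkg_off) | (die << die_off) |
               (core << core_off) | smt;
    }
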
diff --git a/hw/ide/ahci.c b/hw/ide/ahci.c
index 009120f88b..b696c6291a 100644
--- a/hw/ide/ahci.c
+++ b/hw/ide/ahci.c
@@ -1151,7 +1151,7 @@ static void process_ncq_command(AHCIState *s, int port, uint8_t *cmd_fis,
if (!ncq_tfs->sector_count) {
ncq_tfs->sector_count = 0x10000;
}
- size = ncq_tfs->sector_count * 512;
+ size = ncq_tfs->sector_count * BDRV_SECTOR_SIZE;
ahci_populate_sglist(ad, &ncq_tfs->sglist, ncq_tfs->cmdh, size, 0);
if (ncq_tfs->sglist.size < size) {
@@ -1703,7 +1703,8 @@ static int ahci_state_post_load(void *opaque, int version_id)
return -1;
}
ahci_populate_sglist(ncq_tfs->drive, &ncq_tfs->sglist,
- ncq_tfs->cmdh, ncq_tfs->sector_count * 512,
+ ncq_tfs->cmdh,
+ ncq_tfs->sector_count * BDRV_SECTOR_SIZE,
0);
if (ncq_tfs->sector_count != ncq_tfs->sglist.size >> 9) {
return -1;
diff --git a/hw/ide/atapi.c b/hw/ide/atapi.c
index 17a9d635d8..14a2b0bb2f 100644
--- a/hw/ide/atapi.c
+++ b/hw/ide/atapi.c
@@ -824,9 +824,9 @@ static void cmd_get_configuration(IDEState *s, uint8_t *buf)
*
* Only a problem if the feature/profiles grow.
*/
- if (max_len > 512) {
+ if (max_len > BDRV_SECTOR_SIZE) {
/* XXX: assume 1 sector */
- max_len = 512;
+ max_len = BDRV_SECTOR_SIZE;
}
memset(buf, 0, max_len);
@@ -1186,8 +1186,8 @@ static void cmd_read_dvd_structure(IDEState *s, uint8_t* buf)
}
}
- memset(buf, 0, max_len > IDE_DMA_BUF_SECTORS * 512 + 4 ?
- IDE_DMA_BUF_SECTORS * 512 + 4 : max_len);
+ memset(buf, 0, max_len > IDE_DMA_BUF_SECTORS * BDRV_SECTOR_SIZE + 4 ?
+ IDE_DMA_BUF_SECTORS * BDRV_SECTOR_SIZE + 4 : max_len);
switch (format) {
case 0x00 ... 0x7f:
diff --git a/hw/ide/core.c b/hw/ide/core.c
index d997a78e47..f76f7e5234 100644
--- a/hw/ide/core.c
+++ b/hw/ide/core.c
@@ -709,7 +709,7 @@ void ide_cancel_dma_sync(IDEState *s)
/*
* We can't cancel Scatter Gather DMA in the middle of the
* operation or a partial (not full) DMA transfer would reach
- * the storage so we wait for completion instead (we beahve
+ * the storage so we wait for completion instead (we behave
* like if the DMA was completed by the time the guest trying
* to cancel dma with bmdma_cmd_writeb with BM_CMD_START not
* set).
diff --git a/hw/ide/pci.c b/hw/ide/pci.c
index 5e85c4ad17..b50091b615 100644
--- a/hw/ide/pci.c
+++ b/hw/ide/pci.c
@@ -138,7 +138,7 @@ static int32_t bmdma_prepare_buf(const IDEDMA *dma, int32_t limit)
int l, len;
pci_dma_sglist_init(&s->sg, pci_dev,
- s->nsector / (BMDMA_PAGE_SIZE / 512) + 1);
+ s->nsector / (BMDMA_PAGE_SIZE / BDRV_SECTOR_SIZE) + 1);
s->io_buffer_size = 0;
for(;;) {
if (bm->cur_prd_len == 0) {
diff --git a/hw/input/pxa2xx_keypad.c b/hw/input/pxa2xx_keypad.c
index 62aa6f6b15..7f2f739fb3 100644
--- a/hw/input/pxa2xx_keypad.c
+++ b/hw/input/pxa2xx_keypad.c
@@ -192,10 +192,8 @@ static uint64_t pxa2xx_keypad_read(void *opaque, hwaddr offset,
s->kpc &= ~(KPC_DI);
qemu_irq_lower(s->irq);
return tmp;
- break;
case KPDK:
return s->kpdk;
- break;
case KPREC:
tmp = s->kprec;
if(tmp & KPREC_OF1)
@@ -207,31 +205,23 @@ static uint64_t pxa2xx_keypad_read(void *opaque, hwaddr offset,
if(tmp & KPREC_UF0)
s->kprec &= ~(KPREC_UF0);
return tmp;
- break;
case KPMK:
tmp = s->kpmk;
if(tmp & KPMK_MKP)
s->kpmk &= ~(KPMK_MKP);
return tmp;
- break;
case KPAS:
return s->kpas;
- break;
case KPASMKP0:
return s->kpasmkp[0];
- break;
case KPASMKP1:
return s->kpasmkp[1];
- break;
case KPASMKP2:
return s->kpasmkp[2];
- break;
case KPASMKP3:
return s->kpasmkp[3];
- break;
case KPKDI:
return s->kpkdi;
- break;
default:
qemu_log_mask(LOG_GUEST_ERROR,
"%s: Bad read offset 0x%"HWADDR_PRIx"\n",
diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
index 277a98b87b..7876c1ba07 100644
--- a/hw/intc/armv7m_nvic.c
+++ b/hw/intc/armv7m_nvic.c
@@ -1290,7 +1290,6 @@ static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs)
case 0xd90: /* MPU_TYPE */
/* Unified MPU; if the MPU is not present this value is zero */
return cpu->pmsav7_dregion << 8;
- break;
case 0xd94: /* MPU_CTRL */
return cpu->env.v7m.mpu_ctrl[attrs.secure];
case 0xd98: /* MPU_RNR */
diff --git a/hw/intc/exynos4210_combiner.c b/hw/intc/exynos4210_combiner.c
index b8561e4180..59dd27fb16 100644
--- a/hw/intc/exynos4210_combiner.c
+++ b/hw/intc/exynos4210_combiner.c
@@ -229,7 +229,6 @@ exynos4210_combiner_read(void *opaque, hwaddr offset, unsigned size)
TARGET_FMT_plx "offset\n", offset);
}
val = s->reg_set[offset >> 2];
- return 0;
}
return val;
}
diff --git a/hw/isa/isa-superio.c b/hw/isa/isa-superio.c
index e2e47d8fd9..179c185695 100644
--- a/hw/isa/isa-superio.c
+++ b/hw/isa/isa-superio.c
@@ -158,8 +158,8 @@ static void isa_superio_realize(DeviceState *dev, Error **errp)
if (k->ide.get_irq) {
qdev_prop_set_uint32(d, "irq", k->ide.get_irq(sio, 0));
}
- isa_realize_and_unref(isa, bus, &error_fatal);
object_property_add_child(OBJECT(sio), "isa-ide", OBJECT(isa));
+ isa_realize_and_unref(isa, bus, &error_fatal);
sio->ide = isa;
trace_superio_create_ide(0,
k->ide.get_iobase ?
diff --git a/hw/misc/imx_ccm.c b/hw/misc/imx_ccm.c
index 2f81b0ad73..52882071d3 100644
--- a/hw/misc/imx_ccm.c
+++ b/hw/misc/imx_ccm.c
@@ -32,7 +32,7 @@
uint32_t imx_ccm_get_clock_frequency(IMXCCMState *dev, IMXClk clock)
{
uint32_t freq = 0;
- IMXCCMClass *klass = IMX_GET_CLASS(dev);
+ IMXCCMClass *klass = IMX_CCM_GET_CLASS(dev);
if (klass->get_clock_frequency) {
freq = klass->get_clock_frequency(dev, clock);
diff --git a/hw/misc/mac_via.c b/hw/misc/mac_via.c
index d76d7b28d3..6db62dab7d 100644
--- a/hw/misc/mac_via.c
+++ b/hw/misc/mac_via.c
@@ -328,7 +328,7 @@ static void via1_VBL(void *opaque)
{
MOS6522Q800VIA1State *v1s = opaque;
MOS6522State *s = MOS6522(v1s);
- MOS6522DeviceClass *mdc = MOS6522_DEVICE_GET_CLASS(s);
+ MOS6522DeviceClass *mdc = MOS6522_GET_CLASS(s);
s->ifr |= VIA1_IRQ_VBLANK;
mdc->update_irq(s);
@@ -340,7 +340,7 @@ static void via1_one_second(void *opaque)
{
MOS6522Q800VIA1State *v1s = opaque;
MOS6522State *s = MOS6522(v1s);
- MOS6522DeviceClass *mdc = MOS6522_DEVICE_GET_CLASS(s);
+ MOS6522DeviceClass *mdc = MOS6522_GET_CLASS(s);
s->ifr |= VIA1_IRQ_ONE_SECOND;
mdc->update_irq(s);
@@ -352,7 +352,7 @@ static void via1_irq_request(void *opaque, int irq, int level)
{
MOS6522Q800VIA1State *v1s = opaque;
MOS6522State *s = MOS6522(v1s);
- MOS6522DeviceClass *mdc = MOS6522_DEVICE_GET_CLASS(s);
+ MOS6522DeviceClass *mdc = MOS6522_GET_CLASS(s);
if (level) {
s->ifr |= 1 << irq;
@@ -367,7 +367,7 @@ static void via2_irq_request(void *opaque, int irq, int level)
{
MOS6522Q800VIA2State *v2s = opaque;
MOS6522State *s = MOS6522(v2s);
- MOS6522DeviceClass *mdc = MOS6522_DEVICE_GET_CLASS(s);
+ MOS6522DeviceClass *mdc = MOS6522_GET_CLASS(s);
if (level) {
s->ifr |= 1 << irq;
@@ -1183,7 +1183,7 @@ static TypeInfo mac_via_info = {
static void mos6522_q800_via1_reset(DeviceState *dev)
{
MOS6522State *ms = MOS6522(dev);
- MOS6522DeviceClass *mdc = MOS6522_DEVICE_GET_CLASS(ms);
+ MOS6522DeviceClass *mdc = MOS6522_GET_CLASS(ms);
mdc->parent_reset(dev);
@@ -1226,7 +1226,7 @@ static void mos6522_q800_via2_portB_write(MOS6522State *s)
static void mos6522_q800_via2_reset(DeviceState *dev)
{
MOS6522State *ms = MOS6522(dev);
- MOS6522DeviceClass *mdc = MOS6522_DEVICE_GET_CLASS(ms);
+ MOS6522DeviceClass *mdc = MOS6522_GET_CLASS(ms);
mdc->parent_reset(dev);
@@ -1246,7 +1246,7 @@ static void mos6522_q800_via2_init(Object *obj)
static void mos6522_q800_via2_class_init(ObjectClass *oc, void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
- MOS6522DeviceClass *mdc = MOS6522_DEVICE_CLASS(oc);
+ MOS6522DeviceClass *mdc = MOS6522_CLASS(oc);
dc->reset = mos6522_q800_via2_reset;
mdc->portB_write = mos6522_q800_via2_portB_write;
diff --git a/hw/misc/macio/cuda.c b/hw/misc/macio/cuda.c
index 286e7a55f4..edbd4186b2 100644
--- a/hw/misc/macio/cuda.c
+++ b/hw/misc/macio/cuda.c
@@ -96,7 +96,7 @@ static void cuda_set_sr_int(void *opaque)
CUDAState *s = opaque;
MOS6522CUDAState *mcs = &s->mos6522_cuda;
MOS6522State *ms = MOS6522(mcs);
- MOS6522DeviceClass *mdc = MOS6522_DEVICE_GET_CLASS(ms);
+ MOS6522DeviceClass *mdc = MOS6522_GET_CLASS(ms);
mdc->set_sr_int(ms);
}
@@ -592,7 +592,7 @@ static void mos6522_cuda_portB_write(MOS6522State *s)
static void mos6522_cuda_reset(DeviceState *dev)
{
MOS6522State *ms = MOS6522(dev);
- MOS6522DeviceClass *mdc = MOS6522_DEVICE_GET_CLASS(ms);
+ MOS6522DeviceClass *mdc = MOS6522_GET_CLASS(ms);
mdc->parent_reset(dev);
@@ -603,7 +603,7 @@ static void mos6522_cuda_reset(DeviceState *dev)
static void mos6522_cuda_class_init(ObjectClass *oc, void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
- MOS6522DeviceClass *mdc = MOS6522_DEVICE_CLASS(oc);
+ MOS6522DeviceClass *mdc = MOS6522_CLASS(oc);
dc->reset = mos6522_cuda_reset;
mdc->portB_write = mos6522_cuda_portB_write;
diff --git a/hw/misc/macio/pmu.c b/hw/misc/macio/pmu.c
index 09022995ad..71924d4768 100644
--- a/hw/misc/macio/pmu.c
+++ b/hw/misc/macio/pmu.c
@@ -75,7 +75,7 @@ static void via_set_sr_int(void *opaque)
PMUState *s = opaque;
MOS6522PMUState *mps = MOS6522_PMU(&s->mos6522_pmu);
MOS6522State *ms = MOS6522(mps);
- MOS6522DeviceClass *mdc = MOS6522_DEVICE_GET_CLASS(ms);
+ MOS6522DeviceClass *mdc = MOS6522_GET_CLASS(ms);
mdc->set_sr_int(ms);
}
@@ -834,7 +834,7 @@ static void mos6522_pmu_reset(DeviceState *dev)
MOS6522State *ms = MOS6522(dev);
MOS6522PMUState *mps = container_of(ms, MOS6522PMUState, parent_obj);
PMUState *s = container_of(mps, PMUState, mos6522_pmu);
- MOS6522DeviceClass *mdc = MOS6522_DEVICE_GET_CLASS(ms);
+ MOS6522DeviceClass *mdc = MOS6522_GET_CLASS(ms);
mdc->parent_reset(dev);
@@ -847,7 +847,7 @@ static void mos6522_pmu_reset(DeviceState *dev)
static void mos6522_pmu_class_init(ObjectClass *oc, void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
- MOS6522DeviceClass *mdc = MOS6522_DEVICE_CLASS(oc);
+ MOS6522DeviceClass *mdc = MOS6522_CLASS(oc);
dc->reset = mos6522_pmu_reset;
mdc->portB_write = mos6522_pmu_portB_write;
diff --git a/hw/misc/meson.build b/hw/misc/meson.build
index 84fed0494d..e1576b81cf 100644
--- a/hw/misc/meson.build
+++ b/hw/misc/meson.build
@@ -97,3 +97,5 @@ specific_ss.add(when: 'CONFIG_MAC_VIA', if_true: files('mac_via.c'))
specific_ss.add(when: 'CONFIG_MIPS_CPS', if_true: files('mips_cmgcr.c', 'mips_cpc.c'))
specific_ss.add(when: 'CONFIG_MIPS_ITU', if_true: files('mips_itu.c'))
+
+specific_ss.add(when: 'CONFIG_SBSA_REF', if_true: files('sbsa_ec.c'))
diff --git a/hw/misc/mos6522.c b/hw/misc/mos6522.c
index 19e154b870..ac4cd1d58e 100644
--- a/hw/misc/mos6522.c
+++ b/hw/misc/mos6522.c
@@ -54,7 +54,7 @@ static void mos6522_update_irq(MOS6522State *s)
static uint64_t get_counter_value(MOS6522State *s, MOS6522Timer *ti)
{
- MOS6522DeviceClass *mdc = MOS6522_DEVICE_GET_CLASS(s);
+ MOS6522DeviceClass *mdc = MOS6522_GET_CLASS(s);
if (ti->index == 0) {
return mdc->get_timer1_counter_value(s, ti);
@@ -65,7 +65,7 @@ static uint64_t get_counter_value(MOS6522State *s, MOS6522Timer *ti)
static uint64_t get_load_time(MOS6522State *s, MOS6522Timer *ti)
{
- MOS6522DeviceClass *mdc = MOS6522_DEVICE_GET_CLASS(s);
+ MOS6522DeviceClass *mdc = MOS6522_GET_CLASS(s);
if (ti->index == 0) {
return mdc->get_timer1_load_time(s, ti);
@@ -313,7 +313,7 @@ uint64_t mos6522_read(void *opaque, hwaddr addr, unsigned size)
void mos6522_write(void *opaque, hwaddr addr, uint64_t val, unsigned size)
{
MOS6522State *s = opaque;
- MOS6522DeviceClass *mdc = MOS6522_DEVICE_GET_CLASS(s);
+ MOS6522DeviceClass *mdc = MOS6522_GET_CLASS(s);
trace_mos6522_write(addr, val);
@@ -498,7 +498,7 @@ static Property mos6522_properties[] = {
static void mos6522_class_init(ObjectClass *oc, void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
- MOS6522DeviceClass *mdc = MOS6522_DEVICE_CLASS(oc);
+ MOS6522DeviceClass *mdc = MOS6522_CLASS(oc);
dc->reset = mos6522_reset;
dc->vmsd = &vmstate_mos6522;
diff --git a/hw/misc/sbsa_ec.c b/hw/misc/sbsa_ec.c
new file mode 100644
index 0000000000..9a7d7f914a
--- /dev/null
+++ b/hw/misc/sbsa_ec.c
@@ -0,0 +1,98 @@
+/*
+ * ARM SBSA Reference Platform Embedded Controller
+ *
+ * A device to allow PSCI code running on the secure side of the sbsa-ref
+ * machine to communicate platform power states to QEMU.
+ *
+ * Copyright (c) 2020 Nuvia Inc
+ * Written by Graeme Gregory <graeme@nuviainc.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "qemu/log.h"
+#include "hw/sysbus.h"
+#include "sysemu/runstate.h"
+
+typedef struct {
+ SysBusDevice parent_obj;
+ MemoryRegion iomem;
+} SECUREECState;
+
+#define TYPE_SBSA_EC "sbsa-ec"
+#define SECURE_EC(obj) OBJECT_CHECK(SECUREECState, (obj), TYPE_SBSA_EC)
+
+enum sbsa_ec_powerstates {
+ SBSA_EC_CMD_POWEROFF = 0x01,
+ SBSA_EC_CMD_REBOOT = 0x02,
+};
+
+static uint64_t sbsa_ec_read(void *opaque, hwaddr offset, unsigned size)
+{
+ /* No use for this currently */
+ qemu_log_mask(LOG_GUEST_ERROR, "sbsa-ec: no readable registers");
+ return 0;
+}
+
+static void sbsa_ec_write(void *opaque, hwaddr offset,
+ uint64_t value, unsigned size)
+{
+ if (offset == 0) { /* PSCI machine power command register */
+ switch (value) {
+ case SBSA_EC_CMD_POWEROFF:
+ qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
+ break;
+ case SBSA_EC_CMD_REBOOT:
+ qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "sbsa-ec: unknown power command");
+ }
+ } else {
+ qemu_log_mask(LOG_GUEST_ERROR, "sbsa-ec: unknown EC register");
+ }
+}
+
+static const MemoryRegionOps sbsa_ec_ops = {
+ .read = sbsa_ec_read,
+ .write = sbsa_ec_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ .valid.min_access_size = 4,
+ .valid.max_access_size = 4,
+};
+
+static void sbsa_ec_init(Object *obj)
+{
+ SECUREECState *s = SECURE_EC(obj);
+ SysBusDevice *dev = SYS_BUS_DEVICE(obj);
+
+ memory_region_init_io(&s->iomem, obj, &sbsa_ec_ops, s, "sbsa-ec",
+ 0x1000);
+ sysbus_init_mmio(dev, &s->iomem);
+}
+
+static void sbsa_ec_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ /* No vmstate or reset required: device has no internal state */
+ dc->user_creatable = false;
+}
+
+static const TypeInfo sbsa_ec_info = {
+ .name = TYPE_SBSA_EC,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(SECUREECState),
+ .instance_init = sbsa_ec_init,
+ .class_init = sbsa_ec_class_init,
+};
+
+static void sbsa_ec_register_type(void)
+{
+ type_register_static(&sbsa_ec_info);
+}
+
+type_init(sbsa_ec_register_type);
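
To make the device's intended use concrete, here is a hedged sketch (not part of the patch) of how the secure firmware's PSCI SYSTEM_OFF and SYSTEM_RESET handlers might drive it. Only offset 0 is decoded, and accesses must be 32 bits wide per the MemoryRegionOps above; the base address corresponds to the machine's SBSA_SECURE_EC region, and the helper names are made up:

    #include <stdint.h>

    #define EC_BASE 0x50000000UL          /* sbsa-ref SBSA_SECURE_EC base */

    static inline void ec_send(uint32_t cmd)
    {
        *(volatile uint32_t *)EC_BASE = cmd;   /* 32-bit write, offset 0 */
    }

    static void psci_system_off(void)   { ec_send(0x01); /* POWEROFF */ }
    static void psci_system_reset(void) { ec_send(0x02); /* REBOOT   */ }
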
diff --git a/hw/net/can/can_sja1000.c b/hw/net/can/can_sja1000.c
index ea915a023a..299932998a 100644
--- a/hw/net/can/can_sja1000.c
+++ b/hw/net/can/can_sja1000.c
@@ -523,6 +523,7 @@ void can_sja_mem_write(CanSJA1000State *s, hwaddr addr, uint64_t val,
break;
case 16: /* RX frame information addr16-28. */
s->status_pel |= (1 << 5); /* Set transmit status. */
+ /* fallthrough */
case 17 ... 28:
if (s->mode & 0x01) { /* Reset mode */
if (addr < 24) {
@@ -620,6 +621,7 @@ void can_sja_mem_write(CanSJA1000State *s, hwaddr addr, uint64_t val,
break;
case 10:
s->status_bas |= (1 << 5); /* Set transmit status. */
+ /* fallthrough */
case 11 ... 19:
if ((s->control & 0x01) == 0) { /* Operation mode */
s->tx_buff[addr - 10] = val; /* Store to TX buffer directly. */
diff --git a/hw/net/lan9118.c b/hw/net/lan9118.c
index 8e2a432179..e35f00fb9f 100644
--- a/hw/net/lan9118.c
+++ b/hw/net/lan9118.c
@@ -931,10 +931,8 @@ static uint32_t do_mac_read(lan9118_state *s, int reg)
| (s->conf.macaddr.a[2] << 16) | (s->conf.macaddr.a[3] << 24);
case MAC_HASHH:
return s->mac_hashh;
- break;
case MAC_HASHL:
return s->mac_hashl;
- break;
case MAC_MII_ACC:
return s->mac_mii_acc;
case MAC_MII_DATA:
diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index a1fe9e9285..cb0d27084c 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -2075,7 +2075,6 @@ static int virtio_net_rsc_tcp_ctrl_check(VirtioNetRscChain *chain,
tcp_flag = htons(tcp->th_offset_flags);
tcp_hdr = (tcp_flag & VIRTIO_NET_TCP_HDR_LENGTH) >> 10;
tcp_flag &= VIRTIO_NET_TCP_FLAG;
- tcp_flag = htons(tcp->th_offset_flags) & 0x3F;
if (tcp_flag & TH_SYN) {
chain->stat.tcp_syn++;
return RSC_BYPASS;
diff --git a/hw/net/xilinx_axienet.c b/hw/net/xilinx_axienet.c
index 1e48eb70c9..2e89f236b4 100644
--- a/hw/net/xilinx_axienet.c
+++ b/hw/net/xilinx_axienet.c
@@ -54,7 +54,6 @@
TYPE_XILINX_AXI_ENET_CONTROL_STREAM)
/* Advertisement control register. */
-#define ADVERTISE_10HALF 0x0020 /* Try for 10mbps half-duplex */
#define ADVERTISE_10FULL 0x0040 /* Try for 10mbps full-duplex */
#define ADVERTISE_100HALF 0x0080 /* Try for 100mbps half-duplex */
#define ADVERTISE_100FULL 0x0100 /* Try for 100mbps full-duplex */
@@ -169,28 +168,6 @@ tdk_init(struct PHY *phy)
}
struct MDIOBus {
- /* bus. */
- int mdc;
- int mdio;
-
- /* decoder. */
- enum {
- PREAMBLE,
- SOF,
- OPC,
- ADDR,
- REQ,
- TURNAROUND,
- DATA
- } state;
- unsigned int drive;
-
- unsigned int cnt;
- unsigned int addr;
- unsigned int opc;
- unsigned int req;
- unsigned int data;
-
struct PHY *devs[32];
};
diff --git a/hw/rdma/vmw/pvrdma_main.c b/hw/rdma/vmw/pvrdma_main.c
index 3254aadb6e..77b1235a3f 100644
--- a/hw/rdma/vmw/pvrdma_main.c
+++ b/hw/rdma/vmw/pvrdma_main.c
@@ -681,7 +681,7 @@ static void pvrdma_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
- RdmaProviderClass *ir = INTERFACE_RDMA_PROVIDER_CLASS(klass);
+ RdmaProviderClass *ir = RDMA_PROVIDER_CLASS(klass);
k->realize = pvrdma_realize;
k->vendor_id = PCI_VENDOR_ID_VMWARE;
diff --git a/hw/rx/rx-gdbsim.c b/hw/rx/rx-gdbsim.c
index 54992ebe57..6914de2e59 100644
--- a/hw/rx/rx-gdbsim.c
+++ b/hw/rx/rx-gdbsim.c
@@ -118,7 +118,7 @@ static void rx_gdbsim_init(MachineState *machine)
* the latter half of the SDRAM space.
*/
kernel_offset = machine->ram_size / 2;
- rx_load_image(RXCPU(first_cpu), kernel_filename,
+ rx_load_image(RX_CPU(first_cpu), kernel_filename,
SDRAM_BASE + kernel_offset, kernel_offset);
if (dtb_filename) {
ram_addr_t dtb_offset;
@@ -141,7 +141,7 @@ static void rx_gdbsim_init(MachineState *machine)
rom_add_blob_fixed("dtb", dtb, dtb_size,
SDRAM_BASE + dtb_offset);
/* Set dtb address to R1 */
- RXCPU(first_cpu)->env.regs[1] = SDRAM_BASE + dtb_offset;
+ RX_CPU(first_cpu)->env.regs[1] = SDRAM_BASE + dtb_offset;
}
}
}
diff --git a/hw/s390x/virtio-ccw.c b/hw/s390x/virtio-ccw.c
index 0e60270297..8feb3451a0 100644
--- a/hw/s390x/virtio-ccw.c
+++ b/hw/s390x/virtio-ccw.c
@@ -1237,6 +1237,7 @@ static const TypeInfo virtio_ccw_bus_info = {
.name = TYPE_VIRTIO_CCW_BUS,
.parent = TYPE_VIRTIO_BUS,
.instance_size = sizeof(VirtioCcwBusState),
+ .class_size = sizeof(VirtioCcwBusClass),
.class_init = virtio_ccw_bus_class_init,
};
diff --git a/hw/scsi/scsi-disk.c b/hw/scsi/scsi-disk.c
index 8ce68a9dd6..7612035a4e 100644
--- a/hw/scsi/scsi-disk.c
+++ b/hw/scsi/scsi-disk.c
@@ -71,7 +71,7 @@ typedef struct SCSIDiskClass {
typedef struct SCSIDiskReq {
SCSIRequest req;
- /* Both sector and sector_count are in terms of qemu 512 byte blocks. */
+ /* Both sector and sector_count are in terms of BDRV_SECTOR_SIZE bytes. */
uint64_t sector;
uint32_t sector_count;
uint32_t buflen;
@@ -141,7 +141,7 @@ static void scsi_init_iovec(SCSIDiskReq *r, size_t size)
r->buflen = size;
r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen);
}
- r->iov.iov_len = MIN(r->sector_count * 512, r->buflen);
+ r->iov.iov_len = MIN(r->sector_count * BDRV_SECTOR_SIZE, r->buflen);
qemu_iovec_init_external(&r->qiov, &r->iov, 1);
}
@@ -311,7 +311,7 @@ static void scsi_read_complete_noio(SCSIDiskReq *r, int ret)
goto done;
}
- n = r->qiov.size / 512;
+ n = r->qiov.size / BDRV_SECTOR_SIZE;
r->sector += n;
r->sector_count -= n;
scsi_req_data(&r->req, r->qiov.size);
@@ -505,7 +505,7 @@ static void scsi_write_complete_noio(SCSIDiskReq *r, int ret)
goto done;
}
- n = r->qiov.size / 512;
+ n = r->qiov.size / BDRV_SECTOR_SIZE;
r->sector += n;
r->sector_count -= n;
if (r->sector_count == 0) {
@@ -1284,7 +1284,7 @@ static int scsi_disk_emulate_mode_sense(SCSIDiskReq *r, uint8_t *outbuf)
} else { /* MODE_SENSE_10 */
outbuf[7] = 8; /* Block descriptor length */
}
- nb_sectors /= (s->qdev.blocksize / 512);
+ nb_sectors /= (s->qdev.blocksize / BDRV_SECTOR_SIZE);
if (nb_sectors > 0xffffff) {
nb_sectors = 0;
}
@@ -1342,7 +1342,7 @@ static int scsi_disk_emulate_read_toc(SCSIRequest *req, uint8_t *outbuf)
start_track = req->cmd.buf[6];
blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
trace_scsi_disk_emulate_read_toc(start_track, format, msf >> 1);
- nb_sectors /= s->qdev.blocksize / 512;
+ nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE;
switch (format) {
case 0:
toclen = cdrom_read_toc(nb_sectors, outbuf, msf, start_track);
@@ -1738,9 +1738,10 @@ static void scsi_write_same_complete(void *opaque, int ret)
block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
- data->nb_sectors -= data->iov.iov_len / 512;
- data->sector += data->iov.iov_len / 512;
- data->iov.iov_len = MIN(data->nb_sectors * 512, data->iov.iov_len);
+ data->nb_sectors -= data->iov.iov_len / BDRV_SECTOR_SIZE;
+ data->sector += data->iov.iov_len / BDRV_SECTOR_SIZE;
+ data->iov.iov_len = MIN(data->nb_sectors * BDRV_SECTOR_SIZE,
+ data->iov.iov_len);
if (data->iov.iov_len) {
block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
data->iov.iov_len, BLOCK_ACCT_WRITE);
@@ -1805,9 +1806,10 @@ static void scsi_disk_emulate_write_same(SCSIDiskReq *r, uint8_t *inbuf)
data = g_new0(WriteSameCBData, 1);
data->r = r;
- data->sector = r->req.cmd.lba * (s->qdev.blocksize / 512);
- data->nb_sectors = nb_sectors * (s->qdev.blocksize / 512);
- data->iov.iov_len = MIN(data->nb_sectors * 512, SCSI_WRITE_SAME_MAX);
+ data->sector = r->req.cmd.lba * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
+ data->nb_sectors = nb_sectors * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
+ data->iov.iov_len = MIN(data->nb_sectors * BDRV_SECTOR_SIZE,
+ SCSI_WRITE_SAME_MAX);
data->iov.iov_base = buf = blk_blockalign(s->qdev.conf.blk,
data->iov.iov_len);
qemu_iovec_init_external(&data->qiov, &data->iov, 1);
@@ -1980,7 +1982,7 @@ static int32_t scsi_disk_emulate_command(SCSIRequest *req, uint8_t *buf)
if ((req->cmd.buf[8] & 1) == 0 && req->cmd.lba) {
goto illegal_request;
}
- nb_sectors /= s->qdev.blocksize / 512;
+ nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE;
/* Returned value is the address of the last sector. */
nb_sectors--;
/* Remember the new size for read/write sanity checking. */
@@ -2049,7 +2051,7 @@ static int32_t scsi_disk_emulate_command(SCSIRequest *req, uint8_t *buf)
if ((req->cmd.buf[14] & 1) == 0 && req->cmd.lba) {
goto illegal_request;
}
- nb_sectors /= s->qdev.blocksize / 512;
+ nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE;
/* Returned value is the address of the last sector. */
nb_sectors--;
/* Remember the new size for read/write sanity checking. */
@@ -2180,8 +2182,8 @@ static int32_t scsi_disk_dma_command(SCSIRequest *req, uint8_t *buf)
if (!check_lba_range(s, r->req.cmd.lba, len)) {
goto illegal_lba;
}
- r->sector = r->req.cmd.lba * (s->qdev.blocksize / 512);
- r->sector_count = len * (s->qdev.blocksize / 512);
+ r->sector = r->req.cmd.lba * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
+ r->sector_count = len * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
break;
case WRITE_6:
case WRITE_10:
@@ -2211,8 +2213,8 @@ static int32_t scsi_disk_dma_command(SCSIRequest *req, uint8_t *buf)
if (!check_lba_range(s, r->req.cmd.lba, len)) {
goto illegal_lba;
}
- r->sector = r->req.cmd.lba * (s->qdev.blocksize / 512);
- r->sector_count = len * (s->qdev.blocksize / 512);
+ r->sector = r->req.cmd.lba * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
+ r->sector_count = len * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
break;
default:
abort();
@@ -2229,9 +2231,9 @@ static int32_t scsi_disk_dma_command(SCSIRequest *req, uint8_t *buf)
}
assert(r->iov.iov_len == 0);
if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
- return -r->sector_count * 512;
+ return -r->sector_count * BDRV_SECTOR_SIZE;
} else {
- return r->sector_count * 512;
+ return r->sector_count * BDRV_SECTOR_SIZE;
}
}
@@ -2243,7 +2245,7 @@ static void scsi_disk_reset(DeviceState *dev)
scsi_device_purge_requests(&s->qdev, SENSE_CODE(RESET));
blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
- nb_sectors /= s->qdev.blocksize / 512;
+ nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE;
if (nb_sectors) {
nb_sectors--;
}
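
All of the scsi-disk hunks above (like the earlier ide/ahci ones) perform the same unit conversion: the QEMU block layer counts fixed 512-byte sectors (BDRV_SECTOR_SIZE), while the emulated device may expose larger logical blocks, so device blocks scale by blocksize / BDRV_SECTOR_SIZE. A small worked sketch, with an illustrative helper name:

    #include <stdint.h>

    #define BDRV_SECTOR_SIZE 512u   /* the block layer's fixed sector size */

    /* For a 4096-byte-block disk the factor is 8; for 512-byte blocks
     * it is 1 and the conversion is a no-op. */
    static inline uint64_t to_bdrv_sectors(uint64_t blocks, uint32_t blocksize)
    {
        return blocks * (blocksize / BDRV_SECTOR_SIZE);
    }
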
diff --git a/hw/usb/bus.c b/hw/usb/bus.c
index b17bda3b29..2b11041451 100644
--- a/hw/usb/bus.c
+++ b/hw/usb/bus.c
@@ -612,8 +612,8 @@ static char *usb_get_fw_dev_path(DeviceState *qdev)
in++;
} else {
/* the device itself */
- pos += snprintf(fw_path + pos, fw_len - pos, "%s@%lx",
- qdev_fw_name(qdev), nr);
+ snprintf(fw_path + pos, fw_len - pos, "%s@%lx",
+ qdev_fw_name(qdev), nr);
break;
}
}
diff --git a/hw/usb/ccid-card-emulated.c b/hw/usb/ccid-card-emulated.c
index 7d6105ef34..0f1afd66be 100644
--- a/hw/usb/ccid-card-emulated.c
+++ b/hw/usb/ccid-card-emulated.c
@@ -350,7 +350,6 @@ static void *event_thread(void *arg)
case VEVENT_LAST: /* quit */
vevent_delete(event);
return NULL;
- break;
default:
break;
}
diff --git a/hw/vfio/platform.c b/hw/vfio/platform.c
index ac2cefc9b1..869ed2c39d 100644
--- a/hw/vfio/platform.c
+++ b/hw/vfio/platform.c
@@ -236,7 +236,7 @@ static void vfio_intp_interrupt(VFIOINTp *intp)
trace_vfio_intp_interrupt_set_pending(intp->pin);
QSIMPLEQ_INSERT_TAIL(&vdev->pending_intp_queue,
intp, pqnext);
- ret = event_notifier_test_and_clear(intp->interrupt);
+ event_notifier_test_and_clear(intp->interrupt);
return;
}
diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
index d7e2423762..9c5b4f7fbc 100644
--- a/hw/virtio/vhost-user.c
+++ b/hw/virtio/vhost-user.c
@@ -603,7 +603,7 @@ static void scrub_shadow_regions(struct vhost_dev *dev,
*/
for (i = 0; i < dev->mem->nregions; i++) {
reg = &dev->mem->regions[i];
- mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd);
+ vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd);
if (fd > 0) {
++fd_num;
}
diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
index fc69570dcc..5bc769f685 100644
--- a/hw/virtio/virtio-pci.c
+++ b/hw/virtio/virtio-pci.c
@@ -2133,6 +2133,7 @@ static const TypeInfo virtio_pci_bus_info = {
.name = TYPE_VIRTIO_PCI_BUS,
.parent = TYPE_VIRTIO_BUS,
.instance_size = sizeof(VirtioPCIBusState),
+ .class_size = sizeof(VirtioPCIBusClass),
.class_init = virtio_pci_bus_class_init,
};
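
The .class_size additions here and in virtio-ccw above are the same fix: when a TypeInfo omits class_size, QOM sizes the class structure from the parent type, so a class_init that casts to the larger subclass and sets its extra members writes past the allocation. A minimal sketch of the hazard, with made-up types:

    /* Sketch only: why .class_size must name the derived class. */
    typedef struct BaseClassSketch {
        int base_member;
    } BaseClassSketch;

    typedef struct DerivedClassSketch {
        BaseClassSketch parent;
        void (*extra_hook)(void);     /* beyond sizeof(BaseClassSketch) */
    } DerivedClassSketch;

    static void class_init_sketch(BaseClassSketch *klass)
    {
        DerivedClassSketch *k = (DerivedClassSketch *)klass;
        /* Out of bounds if the allocation used the parent's size: */
        k->extra_hook = 0;
    }
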