Diffstat (limited to 'block/nvme.c')
 block/nvme.c | 248 ++++++++++++++-----------
 1 file changed, 137 insertions(+), 111 deletions(-)
diff --git a/block/nvme.c b/block/nvme.c
index 05485fdd11..f4f27b6da7 100644
--- a/block/nvme.c
+++ b/block/nvme.c
@@ -83,25 +83,21 @@ typedef struct {
/* Memory mapped registers */
typedef volatile struct {
- uint64_t cap;
- uint32_t vs;
- uint32_t intms;
- uint32_t intmc;
- uint32_t cc;
- uint32_t reserved0;
- uint32_t csts;
- uint32_t nssr;
- uint32_t aqa;
- uint64_t asq;
- uint64_t acq;
- uint32_t cmbloc;
- uint32_t cmbsz;
- uint8_t reserved1[0xec0];
- uint8_t cmd_set_specfic[0x100];
- uint32_t doorbells[];
+ NvmeBar ctrl;
+ struct {
+ uint32_t sq_tail;
+ uint32_t cq_head;
+ } doorbells[];
} NVMeRegs;
-QEMU_BUILD_BUG_ON(offsetof(NVMeRegs, doorbells) != 0x1000);
+#define INDEX_ADMIN 0
+#define INDEX_IO(n) (1 + n)
+
+/* This driver shares a single MSIX IRQ for the admin and I/O queues */
+enum {
+ MSIX_SHARED_IRQ_IDX = 0,
+ MSIX_IRQ_COUNT = 1
+};
struct BDRVNVMeState {
AioContext *aio_context;
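
Note: the hand-rolled register struct is replaced by the NvmeBar layout shared with hw/block/nvme, plus typed doorbell pairs. A minimal sketch of the offsets this relies on (values from the NVMe spec; the build-time assert is illustrative, not part of the patch):

    /* Controller registers fill the first 4KiB of BAR0; doorbells start
     * at offset 0x1000 (NVMe spec 3.1). For queue y with stride DSTRD:
     *   SQyTDBL at 0x1000 + (2y)     * (4 << DSTRD)
     *   CQyHDBL at 0x1000 + (2y + 1) * (4 << DSTRD)
     */
    QEMU_BUILD_BUG_ON(sizeof(NvmeBar) != 0x1000); /* illustrative */
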
@@ -117,7 +113,7 @@ struct BDRVNVMeState {
/* How many uint32_t elements does each doorbell entry take. */
size_t doorbell_scale;
bool write_cache_supported;
- EventNotifier irq_notifier;
+ EventNotifier irq_notifier[MSIX_IRQ_COUNT];
uint64_t nsze; /* Namespace size reported by identify command */
int nsid; /* The namespace id to read/write data. */
@@ -162,21 +158,20 @@ static QemuOptsList runtime_opts = {
},
};
-static void nvme_init_queue(BlockDriverState *bs, NVMeQueue *q,
+static void nvme_init_queue(BDRVNVMeState *s, NVMeQueue *q,
int nentries, int entry_bytes, Error **errp)
{
- BDRVNVMeState *s = bs->opaque;
size_t bytes;
int r;
bytes = ROUND_UP(nentries * entry_bytes, s->page_size);
q->head = q->tail = 0;
- q->queue = qemu_try_blockalign0(bs, bytes);
-
+ q->queue = qemu_try_memalign(s->page_size, bytes);
if (!q->queue) {
error_setg(errp, "Cannot allocate queue");
return;
}
+ memset(q->queue, 0, bytes);
r = qemu_vfio_dma_map(s->vfio, q->queue, bytes, false, &q->iova);
if (r) {
error_setg(errp, "Cannot map queue");
@@ -206,23 +201,31 @@ static void nvme_free_req_queue_cb(void *opaque)
qemu_mutex_unlock(&q->lock);
}
-static NVMeQueuePair *nvme_create_queue_pair(BlockDriverState *bs,
+static NVMeQueuePair *nvme_create_queue_pair(BDRVNVMeState *s,
+ AioContext *aio_context,
int idx, int size,
Error **errp)
{
int i, r;
- BDRVNVMeState *s = bs->opaque;
Error *local_err = NULL;
- NVMeQueuePair *q = g_new0(NVMeQueuePair, 1);
+ NVMeQueuePair *q;
uint64_t prp_list_iova;
+ q = g_try_new0(NVMeQueuePair, 1);
+ if (!q) {
+ return NULL;
+ }
+ q->prp_list_pages = qemu_try_memalign(s->page_size,
+ s->page_size * NVME_NUM_REQS);
+ if (!q->prp_list_pages) {
+ goto fail;
+ }
+ memset(q->prp_list_pages, 0, s->page_size * NVME_NUM_REQS);
qemu_mutex_init(&q->lock);
q->s = s;
q->index = idx;
qemu_co_queue_init(&q->free_req_queue);
- q->prp_list_pages = qemu_blockalign0(bs, s->page_size * NVME_NUM_REQS);
- q->completion_bh = aio_bh_new(bdrv_get_aio_context(bs),
- nvme_process_completion_bh, q);
+ q->completion_bh = aio_bh_new(aio_context, nvme_process_completion_bh, q);
r = qemu_vfio_dma_map(s->vfio, q->prp_list_pages,
s->page_size * NVME_NUM_REQS,
false, &prp_list_iova);
@@ -239,19 +242,19 @@ static NVMeQueuePair *nvme_create_queue_pair(BlockDriverState *bs,
req->prp_list_iova = prp_list_iova + i * s->page_size;
}
- nvme_init_queue(bs, &q->sq, size, NVME_SQ_ENTRY_BYTES, &local_err);
+ nvme_init_queue(s, &q->sq, size, NVME_SQ_ENTRY_BYTES, &local_err);
if (local_err) {
error_propagate(errp, local_err);
goto fail;
}
- q->sq.doorbell = &s->regs->doorbells[idx * 2 * s->doorbell_scale];
+ q->sq.doorbell = &s->regs->doorbells[idx * s->doorbell_scale].sq_tail;
- nvme_init_queue(bs, &q->cq, size, NVME_CQ_ENTRY_BYTES, &local_err);
+ nvme_init_queue(s, &q->cq, size, NVME_CQ_ENTRY_BYTES, &local_err);
if (local_err) {
error_propagate(errp, local_err);
goto fail;
}
- q->cq.doorbell = &s->regs->doorbells[(idx * 2 + 1) * s->doorbell_scale];
+ q->cq.doorbell = &s->regs->doorbells[idx * s->doorbell_scale].cq_head;
return q;
fail:
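
Note: the doorbell rewrite folds the old flat uint32_t indexing into the struct above. A sketch of why the two forms address the same registers (for doorbell_scale == 1, i.e. CAP.DSTRD == 0, the packing the 8-byte pair expresses):

    /* old: sq at doorbells[idx * 2 * scale]       -> byte 8 * idx * scale
     *      cq at doorbells[(idx * 2 + 1) * scale] -> byte 8*idx*scale + 4*scale
     * new: doorbells[idx * scale].sq_tail         -> byte 8 * idx * scale
     *      doorbells[idx * scale].cq_head         -> byte 8 * idx * scale + 4
     * Identical when scale == 1, the common case for NVMe devices.
     */
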
@@ -441,6 +444,9 @@ static void nvme_trace_command(const NvmeCmd *cmd)
{
int i;
+ if (!trace_event_get_state_backends(TRACE_NVME_SUBMIT_COMMAND_RAW)) {
+ return;
+ }
for (i = 0; i < 8; ++i) {
uint8_t *cmdp = (uint8_t *)cmd + i * 8;
trace_nvme_submit_command_raw(cmdp[0], cmdp[1], cmdp[2], cmdp[3],
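
Note: NvmeCmd is 64 bytes and the loop dumps it as eight rows of eight bytes; the new guard makes that free when tracing is off:

    /* trace_event_get_state_backends(TRACE_<EVENT>) is generated per
     * event by tracetool and is false unless some backend (log, dtrace,
     * ...) has the event enabled, so the early return skips the 8x8 dump
     * in the common case.
     */
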
@@ -479,6 +485,7 @@ static void nvme_cmd_sync_cb(void *opaque, int ret)
static int nvme_cmd_sync(BlockDriverState *bs, NVMeQueuePair *q,
NvmeCmd *cmd)
{
+ AioContext *aio_context = bdrv_get_aio_context(bs);
NVMeRequest *req;
int ret = -EINPROGRESS;
req = nvme_get_free_req(q);
@@ -487,17 +494,18 @@ static int nvme_cmd_sync(BlockDriverState *bs, NVMeQueuePair *q,
}
nvme_submit_command(q, req, cmd, nvme_cmd_sync_cb, &ret);
- BDRV_POLL_WHILE(bs, ret == -EINPROGRESS);
+ AIO_WAIT_WHILE(aio_context, ret == -EINPROGRESS);
return ret;
}
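
Note: this is a like-for-like swap; in include/block/block.h, BDRV_POLL_WHILE() is (modulo a local variable) a thin wrapper:

    /* BDRV_POLL_WHILE(bs, cond)
     *   ~= AIO_WAIT_WHILE(bdrv_get_aio_context(bs), cond)
     * Hoisting the context lookup removes the last direct bs use here.
     */
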
static void nvme_identify(BlockDriverState *bs, int namespace, Error **errp)
{
BDRVNVMeState *s = bs->opaque;
- NvmeIdCtrl *idctrl;
- NvmeIdNs *idns;
+ union {
+ NvmeIdCtrl ctrl;
+ NvmeIdNs ns;
+ } *id;
NvmeLBAF *lbaf;
- uint8_t *resp;
uint16_t oncs;
int r;
uint64_t iova;
@@ -506,54 +514,52 @@ static void nvme_identify(BlockDriverState *bs, int namespace, Error **errp)
.cdw10 = cpu_to_le32(0x1),
};
- resp = qemu_try_blockalign0(bs, sizeof(NvmeIdCtrl));
- if (!resp) {
+ id = qemu_try_memalign(s->page_size, sizeof(*id));
+ if (!id) {
error_setg(errp, "Cannot allocate buffer for identify response");
goto out;
}
- idctrl = (NvmeIdCtrl *)resp;
- idns = (NvmeIdNs *)resp;
- r = qemu_vfio_dma_map(s->vfio, resp, sizeof(NvmeIdCtrl), true, &iova);
+ r = qemu_vfio_dma_map(s->vfio, id, sizeof(*id), true, &iova);
if (r) {
error_setg(errp, "Cannot map buffer for DMA");
goto out;
}
- cmd.dptr.prp1 = cpu_to_le64(iova);
- if (nvme_cmd_sync(bs, s->queues[0], &cmd)) {
+ memset(id, 0, sizeof(*id));
+ cmd.dptr.prp1 = cpu_to_le64(iova);
+ if (nvme_cmd_sync(bs, s->queues[INDEX_ADMIN], &cmd)) {
error_setg(errp, "Failed to identify controller");
goto out;
}
- if (le32_to_cpu(idctrl->nn) < namespace) {
+ if (le32_to_cpu(id->ctrl.nn) < namespace) {
error_setg(errp, "Invalid namespace");
goto out;
}
- s->write_cache_supported = le32_to_cpu(idctrl->vwc) & 0x1;
- s->max_transfer = (idctrl->mdts ? 1 << idctrl->mdts : 0) * s->page_size;
+ s->write_cache_supported = le32_to_cpu(id->ctrl.vwc) & 0x1;
+ s->max_transfer = (id->ctrl.mdts ? 1 << id->ctrl.mdts : 0) * s->page_size;
/* For now the page list buffer per command is one page, to hold at most
* s->page_size / sizeof(uint64_t) entries. */
s->max_transfer = MIN_NON_ZERO(s->max_transfer,
s->page_size / sizeof(uint64_t) * s->page_size);
- oncs = le16_to_cpu(idctrl->oncs);
+ oncs = le16_to_cpu(id->ctrl.oncs);
s->supports_write_zeroes = !!(oncs & NVME_ONCS_WRITE_ZEROES);
s->supports_discard = !!(oncs & NVME_ONCS_DSM);
- memset(resp, 0, 4096);
-
+ memset(id, 0, sizeof(*id));
cmd.cdw10 = 0;
cmd.nsid = cpu_to_le32(namespace);
- if (nvme_cmd_sync(bs, s->queues[0], &cmd)) {
+ if (nvme_cmd_sync(bs, s->queues[INDEX_ADMIN], &cmd)) {
error_setg(errp, "Failed to identify namespace");
goto out;
}
- s->nsze = le64_to_cpu(idns->nsze);
- lbaf = &idns->lbaf[NVME_ID_NS_FLBAS_INDEX(idns->flbas)];
+ s->nsze = le64_to_cpu(id->ns.nsze);
+ lbaf = &id->ns.lbaf[NVME_ID_NS_FLBAS_INDEX(id->ns.flbas)];
- if (NVME_ID_NS_DLFEAT_WRITE_ZEROES(idns->dlfeat) &&
- NVME_ID_NS_DLFEAT_READ_BEHAVIOR(idns->dlfeat) ==
+ if (NVME_ID_NS_DLFEAT_WRITE_ZEROES(id->ns.dlfeat) &&
+ NVME_ID_NS_DLFEAT_READ_BEHAVIOR(id->ns.dlfeat) ==
NVME_ID_NS_DLFEAT_READ_BEHAVIOR_ZEROES) {
bs->supported_write_flags |= BDRV_REQ_MAY_UNMAP;
}
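
Note: the union works because both identify payloads are exactly one 4KiB page (NVMe spec), so one page-aligned DMA mapping serves both the controller and the namespace query. Illustrative compile-time checks (not part of the patch):

    QEMU_BUILD_BUG_ON(sizeof(NvmeIdCtrl) != 4096); /* illustrative */
    QEMU_BUILD_BUG_ON(sizeof(NvmeIdNs)   != 4096); /* illustrative */
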
@@ -573,8 +579,34 @@ static void nvme_identify(BlockDriverState *bs, int namespace, Error **errp)
s->blkshift = lbaf->ds;
out:
- qemu_vfio_dma_unmap(s->vfio, resp);
- qemu_vfree(resp);
+ qemu_vfio_dma_unmap(s->vfio, id);
+ qemu_vfree(id);
+}
+
+static bool nvme_poll_queue(NVMeQueuePair *q)
+{
+ bool progress = false;
+
+ const size_t cqe_offset = q->cq.head * NVME_CQ_ENTRY_BYTES;
+ NvmeCqe *cqe = (NvmeCqe *)&q->cq.queue[cqe_offset];
+
+ /*
+ * Do an early check for completions. q->lock isn't needed because
+ * nvme_process_completion() only runs in the event loop thread and
+ * cannot race with itself.
+ */
+ if ((le16_to_cpu(cqe->status) & 0x1) == q->cq_phase) {
+ return false;
+ }
+
+ qemu_mutex_lock(&q->lock);
+ while (nvme_process_completion(q)) {
+ /* Keep polling */
+ progress = true;
+ }
+ qemu_mutex_unlock(&q->lock);
+
+ return progress;
}
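
Note: the lock-free early check rides on the NVMe completion queue phase tag; for reference:

    /* NVMe spec 4.1: the controller inverts bit 0 of CQE.status on each
     * pass through the ring. The entry at cq.head is new iff its phase
     * bit differs from q->cq_phase (which the driver flips whenever
     * cq.head wraps), so "== q->cq_phase" means "nothing new, skip the
     * lock".
     */
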
static bool nvme_poll_queues(BDRVNVMeState *s)
@@ -583,32 +615,17 @@ static bool nvme_poll_queues(BDRVNVMeState *s)
int i;
for (i = 0; i < s->nr_queues; i++) {
- NVMeQueuePair *q = s->queues[i];
- const size_t cqe_offset = q->cq.head * NVME_CQ_ENTRY_BYTES;
- NvmeCqe *cqe = (NvmeCqe *)&q->cq.queue[cqe_offset];
-
- /*
- * Do an early check for completions. q->lock isn't needed because
- * nvme_process_completion() only runs in the event loop thread and
- * cannot race with itself.
- */
- if ((le16_to_cpu(cqe->status) & 0x1) == q->cq_phase) {
- continue;
- }
-
- qemu_mutex_lock(&q->lock);
- while (nvme_process_completion(q)) {
- /* Keep polling */
+ if (nvme_poll_queue(s->queues[i])) {
progress = true;
}
- qemu_mutex_unlock(&q->lock);
}
return progress;
}
static void nvme_handle_event(EventNotifier *n)
{
- BDRVNVMeState *s = container_of(n, BDRVNVMeState, irq_notifier);
+ BDRVNVMeState *s = container_of(n, BDRVNVMeState,
+ irq_notifier[MSIX_SHARED_IRQ_IDX]);
trace_nvme_handle_event(s);
event_notifier_test_and_clear(n);
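
Note: with irq_notifier now an array, container_of() names the concrete element; the pointer handed to this callback is the one registered with aio_set_event_notifier():

    /* n == &s->irq_notifier[MSIX_SHARED_IRQ_IDX]; container_of()
     * subtracts that member's offset to recover s. Indexing the element
     * keeps the offset explicit and stays correct should the shared
     * vector ever move away from index 0.
     */
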
@@ -623,7 +640,8 @@ static bool nvme_add_io_queue(BlockDriverState *bs, Error **errp)
NvmeCmd cmd;
int queue_size = NVME_QUEUE_SIZE;
- q = nvme_create_queue_pair(bs, n, queue_size, errp);
+ q = nvme_create_queue_pair(s, bdrv_get_aio_context(bs),
+ n, queue_size, errp);
if (!q) {
return false;
}
@@ -633,10 +651,9 @@ static bool nvme_add_io_queue(BlockDriverState *bs, Error **errp)
.cdw10 = cpu_to_le32(((queue_size - 1) << 16) | (n & 0xFFFF)),
.cdw11 = cpu_to_le32(0x3),
};
- if (nvme_cmd_sync(bs, s->queues[0], &cmd)) {
- error_setg(errp, "Failed to create io queue [%d]", n);
- nvme_free_queue_pair(q);
- return false;
+ if (nvme_cmd_sync(bs, s->queues[INDEX_ADMIN], &cmd)) {
+ error_setg(errp, "Failed to create CQ io queue [%d]", n);
+ goto out_error;
}
cmd = (NvmeCmd) {
.opcode = NVME_ADM_CMD_CREATE_SQ,
@@ -644,21 +661,24 @@ static bool nvme_add_io_queue(BlockDriverState *bs, Error **errp)
.cdw10 = cpu_to_le32(((queue_size - 1) << 16) | (n & 0xFFFF)),
.cdw11 = cpu_to_le32(0x1 | (n << 16)),
};
- if (nvme_cmd_sync(bs, s->queues[0], &cmd)) {
- error_setg(errp, "Failed to create io queue [%d]", n);
- nvme_free_queue_pair(q);
- return false;
+ if (nvme_cmd_sync(bs, s->queues[INDEX_ADMIN], &cmd)) {
+ error_setg(errp, "Failed to create SQ io queue [%d]", n);
+ goto out_error;
}
s->queues = g_renew(NVMeQueuePair *, s->queues, n + 1);
s->queues[n] = q;
s->nr_queues++;
return true;
+out_error:
+ nvme_free_queue_pair(q);
+ return false;
}
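
Note: besides deduplicating the cleanup, the reworked path makes CQ and SQ creation failures distinguishable in the error message. The command layout (NVMe spec, Create I/O Completion/Submission Queue) also explains the ordering:

    /* CREATE_CQ: cdw10 = ((qsize - 1) << 16) | qid, cdw11 = IEN | PC (0x3)
     * CREATE_SQ: cdw10 = ((qsize - 1) << 16) | qid, cdw11 = (cqid << 16) | PC
     * The SQ names its CQ in cdw11, so the CQ must exist first; either
     * failure funnels through out_error to free the local queue pair.
     */
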
static bool nvme_poll_cb(void *opaque)
{
EventNotifier *e = opaque;
- BDRVNVMeState *s = container_of(e, BDRVNVMeState, irq_notifier);
+ BDRVNVMeState *s = container_of(e, BDRVNVMeState,
+ irq_notifier[MSIX_SHARED_IRQ_IDX]);
trace_nvme_poll_cb(s);
return nvme_poll_queues(s);
@@ -668,6 +688,7 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
Error **errp)
{
BDRVNVMeState *s = bs->opaque;
+ AioContext *aio_context = bdrv_get_aio_context(bs);
int ret;
uint64_t cap;
uint64_t timeout_ms;
@@ -679,7 +700,7 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
s->device = g_strdup(device);
s->nsid = namespace;
s->aio_context = bdrv_get_aio_context(bs);
- ret = event_notifier_init(&s->irq_notifier, 0);
+ ret = event_notifier_init(&s->irq_notifier[MSIX_SHARED_IRQ_IDX], 0);
if (ret) {
error_setg(errp, "Failed to init event notifier");
return ret;
@@ -700,7 +721,7 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
/* Perform initialize sequence as described in NVMe spec "7.6.1
* Initialization". */
- cap = le64_to_cpu(s->regs->cap);
+ cap = le64_to_cpu(s->regs->ctrl.cap);
if (!(cap & (1ULL << 37))) {
error_setg(errp, "Device doesn't support NVMe command set");
ret = -EINVAL;
@@ -713,10 +734,10 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
timeout_ms = MIN(500 * ((cap >> 24) & 0xFF), 30000);
/* Reset device to get a clean state. */
- s->regs->cc = cpu_to_le32(le32_to_cpu(s->regs->cc) & 0xFE);
+ s->regs->ctrl.cc = cpu_to_le32(le32_to_cpu(s->regs->ctrl.cc) & 0xFE);
/* Wait for CSTS.RDY = 0. */
- deadline = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + timeout_ms * 1000000ULL;
- while (le32_to_cpu(s->regs->csts) & 0x1) {
+ deadline = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + timeout_ms * SCALE_MS;
+ while (le32_to_cpu(s->regs->ctrl.csts) & 0x1) {
if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) > deadline) {
error_setg(errp, "Timeout while waiting for device to reset (%"
PRId64 " ms)",
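
Note: SCALE_MS comes from include/qemu/timer.h, where clock values are in nanoseconds:

    /* #define SCALE_MS 1000000  (ns per ms, include/qemu/timer.h)
     * timeout_ms is CAP.TO converted above (500 ms units, capped at
     * 30 s); multiplying by SCALE_MS yields the QEMU_CLOCK_REALTIME
     * deadline in ns without the bare 1000000ULL literal.
     */
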
@@ -728,25 +749,27 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
/* Set up admin queue. */
s->queues = g_new(NVMeQueuePair *, 1);
- s->queues[0] = nvme_create_queue_pair(bs, 0, NVME_QUEUE_SIZE, errp);
- if (!s->queues[0]) {
+ s->queues[INDEX_ADMIN] = nvme_create_queue_pair(s, aio_context, 0,
+ NVME_QUEUE_SIZE,
+ errp);
+ if (!s->queues[INDEX_ADMIN]) {
ret = -EINVAL;
goto out;
}
s->nr_queues = 1;
QEMU_BUILD_BUG_ON(NVME_QUEUE_SIZE & 0xF000);
- s->regs->aqa = cpu_to_le32((NVME_QUEUE_SIZE << 16) | NVME_QUEUE_SIZE);
- s->regs->asq = cpu_to_le64(s->queues[0]->sq.iova);
- s->regs->acq = cpu_to_le64(s->queues[0]->cq.iova);
+ s->regs->ctrl.aqa = cpu_to_le32((NVME_QUEUE_SIZE << 16) | NVME_QUEUE_SIZE);
+ s->regs->ctrl.asq = cpu_to_le64(s->queues[INDEX_ADMIN]->sq.iova);
+ s->regs->ctrl.acq = cpu_to_le64(s->queues[INDEX_ADMIN]->cq.iova);
/* After setting up all control registers we can enable device now. */
- s->regs->cc = cpu_to_le32((ctz32(NVME_CQ_ENTRY_BYTES) << 20) |
+ s->regs->ctrl.cc = cpu_to_le32((ctz32(NVME_CQ_ENTRY_BYTES) << 20) |
(ctz32(NVME_SQ_ENTRY_BYTES) << 16) |
0x1);
/* Wait for CSTS.RDY = 1. */
now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
deadline = now + timeout_ms * 1000000;
- while (!(le32_to_cpu(s->regs->csts) & 0x1)) {
+ while (!(le32_to_cpu(s->regs->ctrl.csts) & 0x1)) {
if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) > deadline) {
error_setg(errp, "Timeout while waiting for device to start (%"
PRId64 " ms)",
@@ -756,12 +779,13 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
}
}
- ret = qemu_vfio_pci_init_irq(s->vfio, &s->irq_notifier,
+ ret = qemu_vfio_pci_init_irq(s->vfio, s->irq_notifier,
VFIO_PCI_MSIX_IRQ_INDEX, errp);
if (ret) {
goto out;
}
- aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
+ aio_set_event_notifier(bdrv_get_aio_context(bs),
+ &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
false, nvme_handle_event, nvme_poll_cb);
nvme_identify(bs, namespace, &local_err);
@@ -828,7 +852,7 @@ static int nvme_enable_disable_write_cache(BlockDriverState *bs, bool enable,
.cdw11 = cpu_to_le32(enable ? 0x01 : 0x00),
};
- ret = nvme_cmd_sync(bs, s->queues[0], &cmd);
+ ret = nvme_cmd_sync(bs, s->queues[INDEX_ADMIN], &cmd);
if (ret) {
error_setg(errp, "Failed to configure NVMe write cache");
}
@@ -844,9 +868,10 @@ static void nvme_close(BlockDriverState *bs)
nvme_free_queue_pair(s->queues[i]);
}
g_free(s->queues);
- aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
+ aio_set_event_notifier(bdrv_get_aio_context(bs),
+ &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
false, NULL, NULL);
- event_notifier_cleanup(&s->irq_notifier);
+ event_notifier_cleanup(&s->irq_notifier[MSIX_SHARED_IRQ_IDX]);
qemu_vfio_pci_unmap_bar(s->vfio, 0, (void *)s->regs, 0, NVME_BAR_SIZE);
qemu_vfio_close(s->vfio);
@@ -1045,7 +1070,7 @@ static coroutine_fn int nvme_co_prw_aligned(BlockDriverState *bs,
{
int r;
BDRVNVMeState *s = bs->opaque;
- NVMeQueuePair *ioq = s->queues[1];
+ NVMeQueuePair *ioq = s->queues[INDEX_IO(0)];
NVMeRequest *req;
uint32_t cdw12 = (((bytes >> s->blkshift) - 1) & 0xFFFF) |
@@ -1124,7 +1149,7 @@ static int nvme_co_prw(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
return nvme_co_prw_aligned(bs, offset, bytes, qiov, is_write, flags);
}
trace_nvme_prw_buffered(s, offset, bytes, qiov->niov, is_write);
- buf = qemu_try_blockalign(bs, bytes);
+ buf = qemu_try_memalign(s->page_size, bytes);
if (!buf) {
return -ENOMEM;
@@ -1160,7 +1185,7 @@ static coroutine_fn int nvme_co_pwritev(BlockDriverState *bs,
static coroutine_fn int nvme_co_flush(BlockDriverState *bs)
{
BDRVNVMeState *s = bs->opaque;
- NVMeQueuePair *ioq = s->queues[1];
+ NVMeQueuePair *ioq = s->queues[INDEX_IO(0)];
NVMeRequest *req;
NvmeCmd cmd = {
.opcode = NVME_CMD_FLUSH,
@@ -1191,7 +1216,7 @@ static coroutine_fn int nvme_co_pwrite_zeroes(BlockDriverState *bs,
BdrvRequestFlags flags)
{
BDRVNVMeState *s = bs->opaque;
- NVMeQueuePair *ioq = s->queues[1];
+ NVMeQueuePair *ioq = s->queues[INDEX_IO(0)];
NVMeRequest *req;
uint32_t cdw12 = ((bytes >> s->blkshift) - 1) & 0xFFFF;
@@ -1244,7 +1269,7 @@ static int coroutine_fn nvme_co_pdiscard(BlockDriverState *bs,
int bytes)
{
BDRVNVMeState *s = bs->opaque;
- NVMeQueuePair *ioq = s->queues[1];
+ NVMeQueuePair *ioq = s->queues[INDEX_IO(0)];
NVMeRequest *req;
NvmeDsmRange *buf;
QEMUIOVector local_qiov;
@@ -1268,11 +1293,11 @@ static int coroutine_fn nvme_co_pdiscard(BlockDriverState *bs,
assert(s->nr_queues > 1);
- buf = qemu_try_blockalign0(bs, s->page_size);
+ buf = qemu_try_memalign(s->page_size, s->page_size);
if (!buf) {
return -ENOMEM;
}
-
+ memset(buf, 0, s->page_size);
buf->nlb = cpu_to_le32(bytes >> s->blkshift);
buf->slba = cpu_to_le64(offset >> s->blkshift);
buf->cattr = 0;
@@ -1353,7 +1378,8 @@ static void nvme_detach_aio_context(BlockDriverState *bs)
q->completion_bh = NULL;
}
- aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
+ aio_set_event_notifier(bdrv_get_aio_context(bs),
+ &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
false, NULL, NULL);
}
@@ -1363,7 +1389,7 @@ static void nvme_attach_aio_context(BlockDriverState *bs,
BDRVNVMeState *s = bs->opaque;
s->aio_context = new_context;
- aio_set_event_notifier(new_context, &s->irq_notifier,
+ aio_set_event_notifier(new_context, &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
false, nvme_handle_event, nvme_poll_cb);
for (int i = 0; i < s->nr_queues; i++) {
@@ -1387,7 +1413,7 @@ static void nvme_aio_unplug(BlockDriverState *bs)
BDRVNVMeState *s = bs->opaque;
assert(s->plugged);
s->plugged = false;
- for (i = 1; i < s->nr_queues; i++) {
+ for (i = INDEX_IO(0); i < s->nr_queues; i++) {
NVMeQueuePair *q = s->queues[i];
qemu_mutex_lock(&q->lock);
nvme_kick(q);