author     Philippe Mathieu-Daudé <philmd@redhat.com>   2020-10-29 10:32:50 +0100
committer  Stefan Hajnoczi <stefanha@redhat.com>        2020-11-03 19:06:21 +0000
commit     1b539bd6dbe1459f160e25610ec2fc3388f700e8
tree       edbef0008fff178a54677faaad90487a633d1adb /block/nvme.c
parent     3214b0f0948d5e01ccca62730e60a69e2ded8774
block/nvme: Use unsigned integer for queue counter/size
We can not have negative queue count/size/index, use unsigned type.
Rename 'nr_queues' as 'queue_count' to match the spec naming.

Reviewed-by: Eric Auger <eric.auger@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Tested-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20201029093306.1063879-10-philmd@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
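The signed-to-unsigned switch also explains the format-specifier churn in the diff below: once the counter is unsigned, "%d" conversions no longer match and the loop indices are widened to unsigned as well. The following standalone C sketch is illustrative only (it is not QEMU code; the struct and function names here are made up) and shows the pattern the patch applies:

    #include <stdio.h>

    struct state {
        unsigned queue_count;   /* was: int nr_queues */
    };

    static void poll_all(const struct state *s)
    {
        /* "%u" matches the unsigned field; "%d" would now trigger
         * -Wformat warnings, hence the "%d" -> "%u" changes below. */
        printf("have %u queue pair(s)\n", s->queue_count);

        /* Unsigned loop index, mirroring the reworked for-loops. */
        for (unsigned i = 0; i < s->queue_count; i++) {
            printf("polling queue %u\n", i);
        }
    }

    int main(void)
    {
        struct state s = { .queue_count = 2 };
        poll_all(&s);
        return 0;
    }

A count can never be negative, so the unsigned type documents the invariant and avoids signed/unsigned comparison surprises in the loops.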
Diffstat (limited to 'block/nvme.c')
-rw-r--r--  block/nvme.c  |  38
1 file changed, 18 insertions(+), 20 deletions(-)
diff --git a/block/nvme.c b/block/nvme.c
index b0629f5de8..c450499111 100644
--- a/block/nvme.c
+++ b/block/nvme.c
@@ -104,7 +104,7 @@ struct BDRVNVMeState {
* [1..]: io queues.
*/
NVMeQueuePair **queues;
- int nr_queues;
+ unsigned queue_count;
size_t page_size;
/* How many uint32_t elements does each doorbell entry take. */
size_t doorbell_scale;
@@ -161,7 +161,7 @@ static QemuOptsList runtime_opts = {
};
static void nvme_init_queue(BDRVNVMeState *s, NVMeQueue *q,
- int nentries, int entry_bytes, Error **errp)
+ unsigned nentries, size_t entry_bytes, Error **errp)
{
size_t bytes;
int r;
@@ -206,7 +206,7 @@ static void nvme_free_req_queue_cb(void *opaque)
static NVMeQueuePair *nvme_create_queue_pair(BDRVNVMeState *s,
AioContext *aio_context,
- int idx, int size,
+ unsigned idx, size_t size,
Error **errp)
{
int i, r;
@@ -623,7 +623,7 @@ static bool nvme_poll_queues(BDRVNVMeState *s)
bool progress = false;
int i;
- for (i = 0; i < s->nr_queues; i++) {
+ for (i = 0; i < s->queue_count; i++) {
if (nvme_poll_queue(s->queues[i])) {
progress = true;
}
@@ -644,10 +644,10 @@ static void nvme_handle_event(EventNotifier *n)
static bool nvme_add_io_queue(BlockDriverState *bs, Error **errp)
{
BDRVNVMeState *s = bs->opaque;
- int n = s->nr_queues;
+ unsigned n = s->queue_count;
NVMeQueuePair *q;
NvmeCmd cmd;
- int queue_size = NVME_QUEUE_SIZE;
+ unsigned queue_size = NVME_QUEUE_SIZE;
q = nvme_create_queue_pair(s, bdrv_get_aio_context(bs),
n, queue_size, errp);
@@ -661,7 +661,7 @@ static bool nvme_add_io_queue(BlockDriverState *bs, Error **errp)
.cdw11 = cpu_to_le32(0x3),
};
if (nvme_cmd_sync(bs, s->queues[INDEX_ADMIN], &cmd)) {
- error_setg(errp, "Failed to create CQ io queue [%d]", n);
+ error_setg(errp, "Failed to create CQ io queue [%u]", n);
goto out_error;
}
cmd = (NvmeCmd) {
@@ -671,12 +671,12 @@ static bool nvme_add_io_queue(BlockDriverState *bs, Error **errp)
.cdw11 = cpu_to_le32(0x1 | (n << 16)),
};
if (nvme_cmd_sync(bs, s->queues[INDEX_ADMIN], &cmd)) {
- error_setg(errp, "Failed to create SQ io queue [%d]", n);
+ error_setg(errp, "Failed to create SQ io queue [%u]", n);
goto out_error;
}
s->queues = g_renew(NVMeQueuePair *, s->queues, n + 1);
s->queues[n] = q;
- s->nr_queues++;
+ s->queue_count++;
return true;
out_error:
nvme_free_queue_pair(q);
@@ -785,7 +785,7 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
ret = -EINVAL;
goto out;
}
- s->nr_queues = 1;
+ s->queue_count = 1;
QEMU_BUILD_BUG_ON(NVME_QUEUE_SIZE & 0xF000);
regs->aqa = cpu_to_le32((NVME_QUEUE_SIZE << AQA_ACQS_SHIFT) |
(NVME_QUEUE_SIZE << AQA_ASQS_SHIFT));
@@ -895,10 +895,9 @@ static int nvme_enable_disable_write_cache(BlockDriverState *bs, bool enable,
static void nvme_close(BlockDriverState *bs)
{
- int i;
BDRVNVMeState *s = bs->opaque;
- for (i = 0; i < s->nr_queues; ++i) {
+ for (unsigned i = 0; i < s->queue_count; ++i) {
nvme_free_queue_pair(s->queues[i]);
}
g_free(s->queues);
@@ -1123,7 +1122,7 @@ static coroutine_fn int nvme_co_prw_aligned(BlockDriverState *bs,
};
trace_nvme_prw_aligned(s, is_write, offset, bytes, flags, qiov->niov);
- assert(s->nr_queues > 1);
+ assert(s->queue_count > 1);
req = nvme_get_free_req(ioq);
assert(req);
@@ -1233,7 +1232,7 @@ static coroutine_fn int nvme_co_flush(BlockDriverState *bs)
.ret = -EINPROGRESS,
};
- assert(s->nr_queues > 1);
+ assert(s->queue_count > 1);
req = nvme_get_free_req(ioq);
assert(req);
nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);
@@ -1285,7 +1284,7 @@ static coroutine_fn int nvme_co_pwrite_zeroes(BlockDriverState *bs,
cmd.cdw12 = cpu_to_le32(cdw12);
trace_nvme_write_zeroes(s, offset, bytes, flags);
- assert(s->nr_queues > 1);
+ assert(s->queue_count > 1);
req = nvme_get_free_req(ioq);
assert(req);
@@ -1328,7 +1327,7 @@ static int coroutine_fn nvme_co_pdiscard(BlockDriverState *bs,
return -ENOTSUP;
}
- assert(s->nr_queues > 1);
+ assert(s->queue_count > 1);
buf = qemu_try_memalign(s->page_size, s->page_size);
if (!buf) {
@@ -1408,7 +1407,7 @@ static void nvme_detach_aio_context(BlockDriverState *bs)
{
BDRVNVMeState *s = bs->opaque;
- for (int i = 0; i < s->nr_queues; i++) {
+ for (unsigned i = 0; i < s->queue_count; i++) {
NVMeQueuePair *q = s->queues[i];
qemu_bh_delete(q->completion_bh);
@@ -1429,7 +1428,7 @@ static void nvme_attach_aio_context(BlockDriverState *bs,
aio_set_event_notifier(new_context, &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
false, nvme_handle_event, nvme_poll_cb);
- for (int i = 0; i < s->nr_queues; i++) {
+ for (unsigned i = 0; i < s->queue_count; i++) {
NVMeQueuePair *q = s->queues[i];
q->completion_bh =
@@ -1446,11 +1445,10 @@ static void nvme_aio_plug(BlockDriverState *bs)
static void nvme_aio_unplug(BlockDriverState *bs)
{
- int i;
BDRVNVMeState *s = bs->opaque;
assert(s->plugged);
s->plugged = false;
- for (i = INDEX_IO(0); i < s->nr_queues; i++) {
+ for (unsigned i = INDEX_IO(0); i < s->queue_count; i++) {
NVMeQueuePair *q = s->queues[i];
qemu_mutex_lock(&q->lock);
nvme_kick(q);