author    Stefan Hajnoczi <stefanha@redhat.com>  2020-10-01 15:46:03 +0100
committer Stefan Hajnoczi <stefanha@redhat.com>  2020-10-23 13:42:16 +0100
commit    d9b495f9c6a943c9bbd50f7469efb645c23009c3 (patch)
tree      d8ca95db872445d606fbe7eaa5577e99fa74d925 /block/export
parent    f51d23c80af73c95e0ce703ad06a300f1b3d63ef (diff)
block/export: add vhost-user-blk multi-queue support
Allow the number of queues to be configured using --export
vhost-user-blk,num-queues=N. This setting should match the QEMU --device
vhost-user-blk-pci,num-queues=N setting, but QEMU vhost-user-blk.c lowers its
own value if the vhost-user-blk backend offers fewer queues than QEMU.

The vhost-user-blk-server.c code is already capable of multi-queue. All
virtqueue processing runs in the same AioContext. No new locking is needed.

Add the num-queues=N option and set the VIRTIO_BLK_F_MQ feature bit. Note that
the feature bit only announces the presence of the num_queues configuration
space field. It does not promise that there is more than 1 virtqueue, so we
can set it unconditionally.

I tested multi-queue by running a random read fio test with numjobs=4 on an
-smp 4 guest. After the benchmark finished, the guest /proc/interrupts file
showed activity on all 4 virtio-blk MSI-X interrupts. The /sys/block/vda/mq/
directory shows that Linux blk-mq has 4 queues configured.

An automated test is included in the next commit.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Acked-by: Markus Armbruster <armbru@redhat.com>
Message-id: 20201001144604.559733-2-stefanha@redhat.com
[Fixed accidental tab characters as suggested by Markus Armbruster
--Stefan]
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
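For context, the pairing described in the first paragraph might look like the following command lines. The image path, socket path, node name, and IDs are placeholder values chosen for illustration, and the exact option spelling can vary between QEMU versions; treat this as a sketch rather than a verified invocation.

  # storage daemon side: vhost-user-blk export with 4 request queues
  qemu-storage-daemon \
      --blockdev file,filename=disk.img,node-name=disk0 \
      --export vhost-user-blk,id=export0,node-name=disk0,num-queues=4,addr.type=unix,addr.path=/tmp/vub.sock

  # guest-facing side: matching num-queues on the QEMU device
  # ("..." stands for the rest of the guest configuration; vhost-user also
  #  needs shared guest memory, e.g. a memory backend with share=on)
  qemu-system-x86_64 ... \
      -chardev socket,id=char0,path=/tmp/vub.sock \
      -device vhost-user-blk-pci,chardev=char0,num-queues=4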
Diffstat (limited to 'block/export')
-rw-r--r--  block/export/vhost-user-blk-server.c | 24
1 file changed, 18 insertions(+), 6 deletions(-)
diff --git a/block/export/vhost-user-blk-server.c b/block/export/vhost-user-blk-server.c
index 286eb5fb9a..41f4933d6e 100644
--- a/block/export/vhost-user-blk-server.c
+++ b/block/export/vhost-user-blk-server.c
@@ -21,7 +21,7 @@
 #include "util/block-helpers.h"
 
 enum {
-    VHOST_USER_BLK_MAX_QUEUES = 1,
+    VHOST_USER_BLK_NUM_QUEUES_DEFAULT = 1,
 };
 struct virtio_blk_inhdr {
     unsigned char status;
@@ -242,6 +242,7 @@ static uint64_t vu_blk_get_features(VuDev *dev)
                1ull << VIRTIO_BLK_F_DISCARD |
                1ull << VIRTIO_BLK_F_WRITE_ZEROES |
                1ull << VIRTIO_BLK_F_CONFIG_WCE |
+               1ull << VIRTIO_BLK_F_MQ |
                1ull << VIRTIO_F_VERSION_1 |
                1ull << VIRTIO_RING_F_INDIRECT_DESC |
                1ull << VIRTIO_RING_F_EVENT_IDX |
@@ -338,7 +339,9 @@ static void blk_aio_detach(void *opaque)
 
 static void
 vu_blk_initialize_config(BlockDriverState *bs,
-                         struct virtio_blk_config *config, uint32_t blk_size)
+                         struct virtio_blk_config *config,
+                         uint32_t blk_size,
+                         uint16_t num_queues)
 {
     config->capacity = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;
     config->blk_size = blk_size;
@@ -346,7 +349,7 @@ vu_blk_initialize_config(BlockDriverState *bs,
     config->seg_max = 128 - 2;
     config->min_io_size = 1;
     config->opt_io_size = 1;
-    config->num_queues = VHOST_USER_BLK_MAX_QUEUES;
+    config->num_queues = num_queues;
     config->max_discard_sectors = 32768;
     config->max_discard_seg = 1;
     config->discard_sector_alignment = config->blk_size >> 9;
@@ -368,6 +371,7 @@ static int vu_blk_exp_create(BlockExport *exp, BlockExportOptions *opts,
     BlockExportOptionsVhostUserBlk *vu_opts = &opts->u.vhost_user_blk;
     Error *local_err = NULL;
     uint64_t logical_block_size;
+    uint16_t num_queues = VHOST_USER_BLK_NUM_QUEUES_DEFAULT;
 
     vexp->writable = opts->writable;
     vexp->blkcfg.wce = 0;
@@ -385,15 +389,23 @@ static int vu_blk_exp_create(BlockExport *exp, BlockExportOptions *opts,
     }
     vexp->blk_size = logical_block_size;
     blk_set_guest_block_size(exp->blk, logical_block_size);
+
+    if (vu_opts->has_num_queues) {
+        num_queues = vu_opts->num_queues;
+    }
+    if (num_queues == 0) {
+        error_setg(errp, "num-queues must be greater than 0");
+        return -EINVAL;
+    }
+
     vu_blk_initialize_config(blk_bs(exp->blk), &vexp->blkcfg,
-                             logical_block_size);
+                             logical_block_size, num_queues);
 
     blk_add_aio_context_notifier(exp->blk, blk_aio_attached, blk_aio_detach,
                                  vexp);
 
     if (!vhost_user_server_start(&vexp->vu_server, vu_opts->addr, exp->ctx,
-                                 VHOST_USER_BLK_MAX_QUEUES, &vu_blk_iface,
-                                 errp)) {
+                                 num_queues, &vu_blk_iface, errp)) {
         blk_remove_aio_context_notifier(exp->blk, blk_aio_attached,
                                         blk_aio_detach, vexp);
         return -EADDRNOTAVAIL;
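The multi-queue check described in the commit message (fio with numjobs=4, then inspecting interrupt and blk-mq state in the guest) could be reproduced roughly as follows. The device name /dev/vda and the fio parameters are illustrative assumptions, not values taken from this commit.

  # inside the guest: 4-job random read benchmark against the exported disk
  fio --name=randread --filename=/dev/vda --direct=1 --rw=randread \
      --bs=4k --numjobs=4 --time_based --runtime=30

  # each virtio-blk request queue should show MSI-X interrupt activity
  grep virtio /proc/interrupts

  # blk-mq should expose one hardware queue per configured virtqueue
  ls /sys/block/vda/mq/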