author     Peter Maydell <peter.maydell@linaro.org>   2021-07-21 14:13:32 +0100
committer  Peter Maydell <peter.maydell@linaro.org>   2021-07-21 14:13:32 +0100
commit     29c7daa00722e84a54f16cd0df46d289146dcda1 (patch)
tree       a83678a510ddec992140935cbc3e5d66f23cc34f
parent     033bd16b8afbaafdcef37356705016d9c3c475fa (diff)
parent     d7ddd0a1618a75b31dc308bb37365ce1da972154 (diff)
Merge remote-tracking branch 'remotes/stefanha-gitlab/tags/block-pull-request' into staging
Pull request

Stefano's performance regression fix for commit 2558cb8dd4 ("linux-aio:
increasing MAX_EVENTS to a larger hardcoded value").

# gpg: Signature made Wed 21 Jul 2021 14:12:47 BST
# gpg:                using RSA key 8695A8BFD3F97CDAAC35775A9CA4ABB381AB73C8
# gpg: Good signature from "Stefan Hajnoczi <stefanha@redhat.com>" [full]
# gpg:                 aka "Stefan Hajnoczi <stefanha@gmail.com>" [full]
# Primary key fingerprint: 8695 A8BF D3F9 7CDA AC35 775A 9CA4 ABB3 81AB 73C8

* remotes/stefanha-gitlab/tags/block-pull-request:
  linux-aio: limit the batch size using `aio-max-batch` parameter
  iothread: add aio-max-batch parameter
  iothread: generalize iothread_set_param/iothread_get_param

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
-rw-r--r--  block/linux-aio.c          |  9
-rw-r--r--  include/block/aio.h        | 12
-rw-r--r--  include/sysemu/iothread.h  |  3
-rw-r--r--  iothread.c                 | 82
-rw-r--r--  monitor/hmp-cmds.c         |  2
-rw-r--r--  qapi/misc.json             |  6
-rw-r--r--  qapi/qom.json              |  7
-rw-r--r--  qemu-options.hx            |  8
-rw-r--r--  util/aio-posix.c           | 12
-rw-r--r--  util/aio-win32.c           |  5
-rw-r--r--  util/async.c               |  2
11 files changed, 134 insertions, 14 deletions
diff --git a/block/linux-aio.c b/block/linux-aio.c
index 3c0527c2bf..0dab507b71 100644
--- a/block/linux-aio.c
+++ b/block/linux-aio.c
@@ -28,6 +28,9 @@
*/
#define MAX_EVENTS 1024
+/* Maximum number of requests in a batch. (default value) */
+#define DEFAULT_MAX_BATCH 32
+
struct qemu_laiocb {
Coroutine *co;
LinuxAioState *ctx;
@@ -351,6 +354,10 @@ static int laio_do_submit(int fd, struct qemu_laiocb *laiocb, off_t offset,
LinuxAioState *s = laiocb->ctx;
struct iocb *iocbs = &laiocb->iocb;
QEMUIOVector *qiov = laiocb->qiov;
+ int64_t max_batch = s->aio_context->aio_max_batch ?: DEFAULT_MAX_BATCH;
+
+ /* limit the batch with the number of available events */
+ max_batch = MIN_NON_ZERO(MAX_EVENTS - s->io_q.in_flight, max_batch);
switch (type) {
case QEMU_AIO_WRITE:
@@ -371,7 +378,7 @@ static int laio_do_submit(int fd, struct qemu_laiocb *laiocb, off_t offset,
s->io_q.in_queue++;
if (!s->io_q.blocked &&
(!s->io_q.plugged ||
- s->io_q.in_flight + s->io_q.in_queue >= MAX_EVENTS)) {
+ s->io_q.in_queue >= max_batch)) {
ioq_submit(s);
}
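
As a reference for the hunk above, a self-contained sketch of how the effective batch limit is derived (the batch is flushed once io_q.in_queue reaches it). MIN_NON_ZERO below mirrors the macro QEMU defines in include/qemu/osdep.h; the constants are the ones from this file:

    #include <stdint.h>

    #define MAX_EVENTS 1024          /* io_setup() queue depth used above */
    #define DEFAULT_MAX_BATCH 32     /* fallback when aio-max-batch is 0 */

    /* Treat 0 as "unset" on either side, like QEMU's MIN_NON_ZERO(). */
    #define MIN_NON_ZERO(a, b) \
        ((a) == 0 ? (b) : ((b) == 0 ? (a) : ((a) < (b) ? (a) : (b))))

    /* Effective batch size for one submission, given the user-configured
     * aio-max-batch (0 = engine default) and the requests already in flight. */
    static int64_t effective_max_batch(int64_t aio_max_batch, unsigned in_flight)
    {
        int64_t max_batch = aio_max_batch ? aio_max_batch : DEFAULT_MAX_BATCH;

        /* Never batch more requests than there are free completion slots. */
        return MIN_NON_ZERO(MAX_EVENTS - in_flight, max_batch);
    }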
diff --git a/include/block/aio.h b/include/block/aio.h
index 807edce9b5..47fbe9d81f 100644
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -232,6 +232,9 @@ struct AioContext {
int64_t poll_grow; /* polling time growth factor */
int64_t poll_shrink; /* polling time shrink factor */
+ /* AIO engine parameters */
+ int64_t aio_max_batch; /* maximum number of requests in a batch */
+
/*
* List of handlers participating in userspace polling. Protected by
* ctx->list_lock. Iterated and modified mostly by the event loop thread
@@ -755,4 +758,13 @@ void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
int64_t grow, int64_t shrink,
Error **errp);
+/**
+ * aio_context_set_aio_params:
+ * @ctx: the aio context
+ * @max_batch: maximum number of requests in a batch, 0 means that the
+ * engine will use its default
+ */
+void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch,
+ Error **errp);
+
#endif
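
A minimal caller-side sketch of the new function, assuming an existing AioContext *ctx and QEMU's usual headers (block/aio.h, plus qapi/error.h for error_report_err()); the only caller added by this series is iothread.c below:

    /* Illustrative only: apply a batch limit of 16 to an existing AioContext.
     * Passing 0 would restore the engine default (DEFAULT_MAX_BATCH for linux-aio). */
    static void apply_batch_limit(AioContext *ctx)
    {
        Error *local_err = NULL;

        aio_context_set_aio_params(ctx, 16, &local_err);
        if (local_err) {
            error_report_err(local_err);
        }
    }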
diff --git a/include/sysemu/iothread.h b/include/sysemu/iothread.h
index f177142f16..7f714bd136 100644
--- a/include/sysemu/iothread.h
+++ b/include/sysemu/iothread.h
@@ -37,6 +37,9 @@ struct IOThread {
int64_t poll_max_ns;
int64_t poll_grow;
int64_t poll_shrink;
+
+ /* AioContext AIO engine parameters */
+ int64_t aio_max_batch;
};
typedef struct IOThread IOThread;
diff --git a/iothread.c b/iothread.c
index 2c5ccd7367..ddbbde61f7 100644
--- a/iothread.c
+++ b/iothread.c
@@ -152,6 +152,24 @@ static void iothread_init_gcontext(IOThread *iothread)
iothread->main_loop = g_main_loop_new(iothread->worker_context, TRUE);
}
+static void iothread_set_aio_context_params(IOThread *iothread, Error **errp)
+{
+ ERRP_GUARD();
+
+ aio_context_set_poll_params(iothread->ctx,
+ iothread->poll_max_ns,
+ iothread->poll_grow,
+ iothread->poll_shrink,
+ errp);
+ if (*errp) {
+ return;
+ }
+
+ aio_context_set_aio_params(iothread->ctx,
+ iothread->aio_max_batch,
+ errp);
+}
+
static void iothread_complete(UserCreatable *obj, Error **errp)
{
Error *local_error = NULL;
@@ -171,11 +189,7 @@ static void iothread_complete(UserCreatable *obj, Error **errp)
*/
iothread_init_gcontext(iothread);
- aio_context_set_poll_params(iothread->ctx,
- iothread->poll_max_ns,
- iothread->poll_grow,
- iothread->poll_shrink,
- &local_error);
+ iothread_set_aio_context_params(iothread, &local_error);
if (local_error) {
error_propagate(errp, local_error);
aio_context_unref(iothread->ctx);
@@ -212,8 +226,11 @@ static PollParamInfo poll_grow_info = {
static PollParamInfo poll_shrink_info = {
"poll-shrink", offsetof(IOThread, poll_shrink),
};
+static PollParamInfo aio_max_batch_info = {
+ "aio-max-batch", offsetof(IOThread, aio_max_batch),
+};
-static void iothread_get_poll_param(Object *obj, Visitor *v,
+static void iothread_get_param(Object *obj, Visitor *v,
const char *name, void *opaque, Error **errp)
{
IOThread *iothread = IOTHREAD(obj);
@@ -223,7 +240,7 @@ static void iothread_get_poll_param(Object *obj, Visitor *v,
visit_type_int64(v, name, field, errp);
}
-static void iothread_set_poll_param(Object *obj, Visitor *v,
+static bool iothread_set_param(Object *obj, Visitor *v,
const char *name, void *opaque, Error **errp)
{
IOThread *iothread = IOTHREAD(obj);
@@ -232,17 +249,36 @@ static void iothread_set_poll_param(Object *obj, Visitor *v,
int64_t value;
if (!visit_type_int64(v, name, &value, errp)) {
- return;
+ return false;
}
if (value < 0) {
error_setg(errp, "%s value must be in range [0, %" PRId64 "]",
info->name, INT64_MAX);
- return;
+ return false;
}
*field = value;
+ return true;
+}
+
+static void iothread_get_poll_param(Object *obj, Visitor *v,
+ const char *name, void *opaque, Error **errp)
+{
+
+ iothread_get_param(obj, v, name, opaque, errp);
+}
+
+static void iothread_set_poll_param(Object *obj, Visitor *v,
+ const char *name, void *opaque, Error **errp)
+{
+ IOThread *iothread = IOTHREAD(obj);
+
+ if (!iothread_set_param(obj, v, name, opaque, errp)) {
+ return;
+ }
+
if (iothread->ctx) {
aio_context_set_poll_params(iothread->ctx,
iothread->poll_max_ns,
@@ -252,6 +288,29 @@ static void iothread_set_poll_param(Object *obj, Visitor *v,
}
}
+static void iothread_get_aio_param(Object *obj, Visitor *v,
+ const char *name, void *opaque, Error **errp)
+{
+
+ iothread_get_param(obj, v, name, opaque, errp);
+}
+
+static void iothread_set_aio_param(Object *obj, Visitor *v,
+ const char *name, void *opaque, Error **errp)
+{
+ IOThread *iothread = IOTHREAD(obj);
+
+ if (!iothread_set_param(obj, v, name, opaque, errp)) {
+ return;
+ }
+
+ if (iothread->ctx) {
+ aio_context_set_aio_params(iothread->ctx,
+ iothread->aio_max_batch,
+ errp);
+ }
+}
+
static void iothread_class_init(ObjectClass *klass, void *class_data)
{
UserCreatableClass *ucc = USER_CREATABLE_CLASS(klass);
@@ -269,6 +328,10 @@ static void iothread_class_init(ObjectClass *klass, void *class_data)
iothread_get_poll_param,
iothread_set_poll_param,
NULL, &poll_shrink_info);
+ object_class_property_add(klass, "aio-max-batch", "int",
+ iothread_get_aio_param,
+ iothread_set_aio_param,
+ NULL, &aio_max_batch_info);
}
static const TypeInfo iothread_info = {
@@ -318,6 +381,7 @@ static int query_one_iothread(Object *object, void *opaque)
info->poll_max_ns = iothread->poll_max_ns;
info->poll_grow = iothread->poll_grow;
info->poll_shrink = iothread->poll_shrink;
+ info->aio_max_batch = iothread->aio_max_batch;
QAPI_LIST_APPEND(*tail, info);
return 0;
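
Because "aio-max-batch" is registered as a writable QOM property, it can be changed on a live IOThread over QMP; an illustrative exchange, assuming an IOThread created with id=iothread1:

    -> { "execute": "qom-set",
         "arguments": { "path": "/objects/iothread1",
                        "property": "aio-max-batch", "value": 32 } }
    <- { "return": {} }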
diff --git a/monitor/hmp-cmds.c b/monitor/hmp-cmds.c
index 0942027208..e00255f7ee 100644
--- a/monitor/hmp-cmds.c
+++ b/monitor/hmp-cmds.c
@@ -1893,6 +1893,8 @@ void hmp_info_iothreads(Monitor *mon, const QDict *qdict)
monitor_printf(mon, " poll-max-ns=%" PRId64 "\n", value->poll_max_ns);
monitor_printf(mon, " poll-grow=%" PRId64 "\n", value->poll_grow);
monitor_printf(mon, " poll-shrink=%" PRId64 "\n", value->poll_shrink);
+ monitor_printf(mon, " aio-max-batch=%" PRId64 "\n",
+ value->aio_max_batch);
}
qapi_free_IOThreadInfoList(info_list);
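
Illustrative HMP output with the new line; the id and thread_id lines come from the existing handler, and the values shown here are made up:

    (qemu) info iothreads
    iothread1:
      thread_id=12345
      poll-max-ns=32768
      poll-grow=0
      poll-shrink=0
      aio-max-batch=0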
diff --git a/qapi/misc.json b/qapi/misc.json
index 156f98203e..5c2ca3b556 100644
--- a/qapi/misc.json
+++ b/qapi/misc.json
@@ -86,6 +86,9 @@
# @poll-shrink: how many ns will be removed from polling time, 0 means that
# it's not configured (since 2.9)
#
+# @aio-max-batch: maximum number of requests in a batch for the AIO engine,
+# 0 means that the engine will use its default (since 6.1)
+#
# Since: 2.0
##
{ 'struct': 'IOThreadInfo',
@@ -93,7 +96,8 @@
'thread-id': 'int',
'poll-max-ns': 'int',
'poll-grow': 'int',
- 'poll-shrink': 'int' } }
+ 'poll-shrink': 'int',
+ 'aio-max-batch': 'int' } }
##
# @query-iothreads:
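
The new member appears in query-iothreads replies; a sketch of such a reply, with illustrative values:

    -> { "execute": "query-iothreads" }
    <- { "return": [ { "id": "iothread1", "thread-id": 12345,
                       "poll-max-ns": 32768, "poll-grow": 0,
                       "poll-shrink": 0, "aio-max-batch": 0 } ] }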
diff --git a/qapi/qom.json b/qapi/qom.json
index 652be317b8..6d5f4a88e6 100644
--- a/qapi/qom.json
+++ b/qapi/qom.json
@@ -516,12 +516,17 @@
# algorithm detects it is spending too long polling without
# encountering events. 0 selects a default behaviour (default: 0)
#
+# @aio-max-batch: maximum number of requests in a batch for the AIO engine,
+# 0 means that the engine will use its default
+# (default:0, since 6.1)
+#
# Since: 2.0
##
{ 'struct': 'IothreadProperties',
'data': { '*poll-max-ns': 'int',
'*poll-grow': 'int',
- '*poll-shrink': 'int' } }
+ '*poll-shrink': 'int',
+ '*aio-max-batch': 'int' } }
##
# @MemoryBackendProperties:
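
The property can also be supplied at creation time; an illustrative QMP object-add command, assuming the flat argument form accepted by QEMU 6.x:

    -> { "execute": "object-add",
         "arguments": { "qom-type": "iothread", "id": "iothread1",
                        "aio-max-batch": 32 } }
    <- { "return": {} }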
diff --git a/qemu-options.hx b/qemu-options.hx
index 0c9ddc0274..99ed5ec5f1 100644
--- a/qemu-options.hx
+++ b/qemu-options.hx
@@ -5301,7 +5301,7 @@ SRST
CN=laptop.example.com,O=Example Home,L=London,ST=London,C=GB
- ``-object iothread,id=id,poll-max-ns=poll-max-ns,poll-grow=poll-grow,poll-shrink=poll-shrink``
+ ``-object iothread,id=id,poll-max-ns=poll-max-ns,poll-grow=poll-grow,poll-shrink=poll-shrink,aio-max-batch=aio-max-batch``
Creates a dedicated event loop thread that devices can be
assigned to. This is known as an IOThread. By default device
emulation happens in vCPU threads or the main event loop thread.
@@ -5337,7 +5337,11 @@ SRST
the polling time when the algorithm detects it is spending too
long polling without encountering events.
- The polling parameters can be modified at run-time using the
+ The ``aio-max-batch`` parameter is the maximum number of requests
+ in a batch for the AIO engine, 0 means that the engine will use
+ its default.
+
+ The IOThread parameters can be modified at run-time using the
``qom-set`` command (where ``iothread1`` is the IOThread's
``id``):
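
Putting the documented pieces together, an illustrative command line and run-time adjustment (the IOThread id and the values are examples):

    qemu-system-x86_64 ... \
        -object iothread,id=iothread1,aio-max-batch=32

    (qemu) qom-set /objects/iothread1 aio-max-batch 16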
diff --git a/util/aio-posix.c b/util/aio-posix.c
index 30f5354b1e..2b86777e91 100644
--- a/util/aio-posix.c
+++ b/util/aio-posix.c
@@ -716,3 +716,15 @@ void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
aio_notify(ctx);
}
+
+void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch,
+ Error **errp)
+{
+ /*
+ * No thread synchronization here, it doesn't matter if an incorrect value
+ * is used once.
+ */
+ ctx->aio_max_batch = max_batch;
+
+ aio_notify(ctx);
+}
diff --git a/util/aio-win32.c b/util/aio-win32.c
index 168717b51b..d5b09a1193 100644
--- a/util/aio-win32.c
+++ b/util/aio-win32.c
@@ -440,3 +440,8 @@ void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
error_setg(errp, "AioContext polling is not implemented on Windows");
}
}
+
+void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch,
+ Error **errp)
+{
+}
diff --git a/util/async.c b/util/async.c
index 9a41591319..6f6717a34b 100644
--- a/util/async.c
+++ b/util/async.c
@@ -554,6 +554,8 @@ AioContext *aio_context_new(Error **errp)
ctx->poll_grow = 0;
ctx->poll_shrink = 0;
+ ctx->aio_max_batch = 0;
+
return ctx;
fail:
g_source_destroy(&ctx->source);