-rw-r--r--  .gitlab-ci.yml | 1
-rw-r--r--  MAINTAINERS | 9
-rw-r--r--  accel/dummy-cpus.c (renamed from accel/qtest/qtest-cpus.c) | 27
-rw-r--r--  accel/meson.build | 8
-rw-r--r--  accel/qtest/meson.build | 1
-rw-r--r--  accel/qtest/qtest-cpus.h | 17
-rw-r--r--  accel/qtest/qtest.c | 5
-rw-r--r--  accel/xen/xen-all.c | 8
-rw-r--r--  block/coroutines.h | 2
-rw-r--r--  block/export/export.c | 37
-rw-r--r--  block/export/meson.build | 3
-rw-r--r--  block/export/vhost-user-blk-server.c | 431
-rw-r--r--  block/export/vhost-user-blk-server.h | 19
-rw-r--r--  block/io.c | 132
-rw-r--r--  block/nvme.c | 27
-rw-r--r--  block/qcow2.c | 16
-rw-r--r--  contrib/libvhost-user/libvhost-user-glib.c | 2
-rw-r--r--  contrib/libvhost-user/libvhost-user.c | 15
-rw-r--r--  contrib/libvhost-user/libvhost-user.h | 21
-rw-r--r--  contrib/libvhost-user/meson.build | 1
-rw-r--r--  docs/devel/fuzzing.txt | 39
-rw-r--r--  hw/core/qdev-properties-system.c | 31
-rw-r--r--  hw/misc/sifive_u_otp.c | 95
-rw-r--r--  hw/riscv/boot.c | 56
-rw-r--r--  hw/riscv/opentitan.c | 3
-rw-r--r--  hw/riscv/sifive_e.c | 3
-rw-r--r--  hw/riscv/sifive_u.c | 28
-rw-r--r--  hw/riscv/spike.c | 11
-rw-r--r--  hw/riscv/virt.c | 11
-rw-r--r--  include/exec/memory.h | 21
-rw-r--r--  include/exec/memory_ldst_cached.h.inc | 3
-rw-r--r--  include/hw/intc/sifive_plic.h (renamed from hw/intc/sifive_plic.h) | 0
-rw-r--r--  include/hw/misc/sifive_u_otp.h | 5
-rw-r--r--  include/hw/riscv/boot.h | 13
-rw-r--r--  include/hw/riscv/sifive_u.h | 1
-rw-r--r--  include/qemu/vhost-user-server.h | 65
-rw-r--r--  include/sysemu/cpus.h | 3
-rw-r--r--  memory_ldst.c.inc | 4
-rw-r--r--  meson.build | 22
-rw-r--r--  nbd/meson.build | 2
-rw-r--r--  nbd/server.c | 2
-rw-r--r--  qapi/block-core.json | 24
-rw-r--r--  qapi/block-export.json | 36
-rw-r--r--  qemu-nbd.c | 21
-rwxr-xr-x  scripts/oss-fuzz/build.sh | 14
-rwxr-xr-x  scripts/oss-fuzz/minimize_qtest_trace.py | 157
-rwxr-xr-x  scripts/oss-fuzz/reorder_fuzzer_qtest_trace.py | 103
-rw-r--r--  softmmu/memory.c | 27
-rw-r--r--  softmmu/physmem.c | 2
-rw-r--r--  softmmu/vl.c | 4
-rw-r--r--  storage-daemon/meson.build | 3
-rw-r--r--  stubs/blk-exp-close-all.c | 7
-rw-r--r--  stubs/meson.build | 1
-rw-r--r--  target/riscv/cpu.h | 10
-rw-r--r--  target/riscv/cpu_helper.c | 50
-rw-r--r--  target/riscv/op_helper.c | 7
-rw-r--r--  tests/acceptance/machine_m68k_nextcube.py | 7
-rw-r--r--  tests/acceptance/ppc_prep_40p.py | 4
-rw-r--r--  tests/docker/dockerfiles/centos8.docker | 1
-rw-r--r--  tests/docker/dockerfiles/debian-amd64.docker | 3
-rw-r--r--  tests/docker/dockerfiles/fedora.docker | 1
-rw-r--r--  tests/docker/dockerfiles/ubuntu2004.docker | 1
-rwxr-xr-x  tests/qemu-iotests/274 | 20
-rw-r--r--  tests/qemu-iotests/274.out | 68
-rw-r--r--  tests/qtest/fuzz/fuzz.c | 13
-rw-r--r--  tests/qtest/fuzz/fuzz.h | 28
-rw-r--r--  tests/qtest/fuzz/generic_fuzz.c | 954
-rw-r--r--  tests/qtest/fuzz/generic_fuzz_configs.h | 121
-rw-r--r--  tests/qtest/fuzz/meson.build | 1
-rw-r--r--  tests/qtest/libqtest.c | 22
-rw-r--r--  tests/qtest/meson.build | 3
-rw-r--r--  tests/qtest/migration-helpers.c | 16
-rw-r--r--  tests/vhost-user-bridge.c | 2
-rw-r--r--  tools/virtiofsd/fuse_virtio.c | 4
-rw-r--r--  util/block-helpers.c | 46
-rw-r--r--  util/block-helpers.h | 19
-rw-r--r--  util/meson.build | 4
-rw-r--r--  util/vhost-user-server.c | 446
78 files changed, 3208 insertions, 242 deletions
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 66ad7aa5c2..5d6773efd2 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -66,6 +66,7 @@ include:
- if [ -d ${CI_PROJECT_DIR}/avocado-cache ]; then
du -chs ${CI_PROJECT_DIR}/avocado-cache ;
fi
+ - export AVOCADO_ALLOW_UNTRUSTED_CODE=1
after_script:
- cd build
- python3 -c 'import json; r = json.load(open("tests/results/latest/results.json")); [print(t["logfile"]) for t in r["tests"] if t["status"] not in ("PASS", "SKIP", "CANCEL")]' | xargs cat
diff --git a/MAINTAINERS b/MAINTAINERS
index 6a197bd358..ef6f5c7399 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3069,6 +3069,15 @@ L: qemu-block@nongnu.org
S: Supported
F: tests/image-fuzzer/
+Vhost-user block device backend server
+M: Coiby Xu <Coiby.Xu@gmail.com>
+S: Maintained
+F: block/export/vhost-user-blk-server.c
+F: block/export/vhost-user-blk-server.h
+F: include/qemu/vhost-user-server.h
+F: tests/qtest/libqos/vhost-user-blk.c
+F: util/vhost-user-server.c
+
Replication
M: Wen Congyang <wencongyang2@huawei.com>
M: Xie Changlong <xiechanglong.d@gmail.com>
diff --git a/accel/qtest/qtest-cpus.c b/accel/dummy-cpus.c
index 7c5399ed9d..10429fdfb2 100644
--- a/accel/qtest/qtest-cpus.c
+++ b/accel/dummy-cpus.c
@@ -1,5 +1,5 @@
/*
- * QTest accelerator code
+ * Dummy cpu thread code
*
* Copyright IBM, Corp. 2011
*
@@ -13,26 +13,13 @@
#include "qemu/osdep.h"
#include "qemu/rcu.h"
-#include "qapi/error.h"
-#include "qemu/module.h"
-#include "qemu/option.h"
-#include "qemu/config-file.h"
-#include "sysemu/accel.h"
-#include "sysemu/qtest.h"
#include "sysemu/cpus.h"
-#include "sysemu/cpu-timers.h"
#include "qemu/guest-random.h"
#include "qemu/main-loop.h"
#include "hw/core/cpu.h"
-#include "qtest-cpus.h"
-
-static void *qtest_cpu_thread_fn(void *arg)
+static void *dummy_cpu_thread_fn(void *arg)
{
-#ifdef _WIN32
- error_report("qtest is not supported under Windows");
- exit(1);
-#else
CPUState *cpu = arg;
sigset_t waitset;
int r;
@@ -69,10 +56,9 @@ static void *qtest_cpu_thread_fn(void *arg)
qemu_mutex_unlock_iothread();
rcu_unregister_thread();
return NULL;
-#endif
}
-static void qtest_start_vcpu_thread(CPUState *cpu)
+void dummy_start_vcpu_thread(CPUState *cpu)
{
char thread_name[VCPU_THREAD_NAME_SIZE];
@@ -81,11 +67,6 @@ static void qtest_start_vcpu_thread(CPUState *cpu)
qemu_cond_init(cpu->halt_cond);
snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/DUMMY",
cpu->cpu_index);
- qemu_thread_create(cpu->thread, thread_name, qtest_cpu_thread_fn, cpu,
+ qemu_thread_create(cpu->thread, thread_name, dummy_cpu_thread_fn, cpu,
QEMU_THREAD_JOINABLE);
}
-
-const CpusAccel qtest_cpus = {
- .create_vcpu_thread = qtest_start_vcpu_thread,
- .get_virtual_clock = qtest_get_virtual_clock,
-};
diff --git a/accel/meson.build b/accel/meson.build
index bb00d0fd13..b26cca227a 100644
--- a/accel/meson.build
+++ b/accel/meson.build
@@ -5,3 +5,11 @@ subdir('kvm')
subdir('tcg')
subdir('xen')
subdir('stubs')
+
+dummy_ss = ss.source_set()
+dummy_ss.add(files(
+ 'dummy-cpus.c',
+))
+
+specific_ss.add_all(when: ['CONFIG_SOFTMMU', 'CONFIG_POSIX'], if_true: dummy_ss)
+specific_ss.add_all(when: ['CONFIG_XEN'], if_true: dummy_ss)
diff --git a/accel/qtest/meson.build b/accel/qtest/meson.build
index e477cb2ae2..a2f3276459 100644
--- a/accel/qtest/meson.build
+++ b/accel/qtest/meson.build
@@ -1,7 +1,6 @@
qtest_ss = ss.source_set()
qtest_ss.add(files(
'qtest.c',
- 'qtest-cpus.c',
))
specific_ss.add_all(when: ['CONFIG_SOFTMMU', 'CONFIG_POSIX'], if_true: qtest_ss)
diff --git a/accel/qtest/qtest-cpus.h b/accel/qtest/qtest-cpus.h
deleted file mode 100644
index 739519a472..0000000000
--- a/accel/qtest/qtest-cpus.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
- * Accelerator CPUS Interface
- *
- * Copyright 2020 SUSE LLC
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- */
-
-#ifndef QTEST_CPUS_H
-#define QTEST_CPUS_H
-
-#include "sysemu/cpus.h"
-
-extern const CpusAccel qtest_cpus;
-
-#endif /* QTEST_CPUS_H */
diff --git a/accel/qtest/qtest.c b/accel/qtest/qtest.c
index 537e8b449c..b282cea5cf 100644
--- a/accel/qtest/qtest.c
+++ b/accel/qtest/qtest.c
@@ -25,7 +25,10 @@
#include "qemu/main-loop.h"
#include "hw/core/cpu.h"
-#include "qtest-cpus.h"
+const CpusAccel qtest_cpus = {
+ .create_vcpu_thread = dummy_start_vcpu_thread,
+ .get_virtual_clock = qtest_get_virtual_clock,
+};
static int qtest_init_accel(MachineState *ms)
{
diff --git a/accel/xen/xen-all.c b/accel/xen/xen-all.c
index 60b971d0a8..878a4089d9 100644
--- a/accel/xen/xen-all.c
+++ b/accel/xen/xen-all.c
@@ -16,6 +16,7 @@
#include "hw/xen/xen_pt.h"
#include "chardev/char.h"
#include "sysemu/accel.h"
+#include "sysemu/cpus.h"
#include "sysemu/xen.h"
#include "sysemu/runstate.h"
#include "migration/misc.h"
@@ -153,6 +154,10 @@ static void xen_setup_post(MachineState *ms, AccelState *accel)
}
}
+const CpusAccel xen_cpus = {
+ .create_vcpu_thread = dummy_start_vcpu_thread,
+};
+
static int xen_init(MachineState *ms)
{
MachineClass *mc = MACHINE_GET_CLASS(ms);
@@ -180,6 +185,9 @@ static int xen_init(MachineState *ms)
* opt out of system RAM being allocated by generic code
*/
mc->default_ram_id = NULL;
+
+ cpus_register_accel(&xen_cpus);
+
return 0;
}
diff --git a/block/coroutines.h b/block/coroutines.h
index f69179f5ef..1cb3128b94 100644
--- a/block/coroutines.h
+++ b/block/coroutines.h
@@ -41,6 +41,7 @@ bdrv_pwritev(BdrvChild *child, int64_t offset, unsigned int bytes,
int coroutine_fn
bdrv_co_common_block_status_above(BlockDriverState *bs,
BlockDriverState *base,
+ bool include_base,
bool want_zero,
int64_t offset,
int64_t bytes,
@@ -50,6 +51,7 @@ bdrv_co_common_block_status_above(BlockDriverState *bs,
int generated_co_wrapper
bdrv_common_block_status_above(BlockDriverState *bs,
BlockDriverState *base,
+ bool include_base,
bool want_zero,
int64_t offset,
int64_t bytes,
diff --git a/block/export/export.c b/block/export/export.c
index f2c00d13bf..c3478c6c97 100644
--- a/block/export/export.c
+++ b/block/export/export.c
@@ -15,15 +15,22 @@
#include "block/block.h"
#include "sysemu/block-backend.h"
+#include "sysemu/iothread.h"
#include "block/export.h"
#include "block/nbd.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-block-export.h"
#include "qapi/qapi-events-block-export.h"
#include "qemu/id.h"
+#if defined(CONFIG_LINUX) && defined(CONFIG_VHOST_USER)
+#include "vhost-user-blk-server.h"
+#endif
static const BlockExportDriver *blk_exp_drivers[] = {
&blk_exp_nbd,
+#if defined(CONFIG_LINUX) && defined(CONFIG_VHOST_USER)
+ &blk_exp_vhost_user_blk,
+#endif
};
/* Only accessed from the main thread */
@@ -57,10 +64,11 @@ static const BlockExportDriver *blk_exp_find_driver(BlockExportType type)
BlockExport *blk_exp_add(BlockExportOptions *export, Error **errp)
{
+ bool fixed_iothread = export->has_fixed_iothread && export->fixed_iothread;
const BlockExportDriver *drv;
BlockExport *exp = NULL;
BlockDriverState *bs;
- BlockBackend *blk;
+ BlockBackend *blk = NULL;
AioContext *ctx;
uint64_t perm;
int ret;
@@ -96,6 +104,28 @@ BlockExport *blk_exp_add(BlockExportOptions *export, Error **errp)
ctx = bdrv_get_aio_context(bs);
aio_context_acquire(ctx);
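+ /*
+  * If an iothread is requested, try to move the block node to that
+  * iothread's AioContext. Failing to move it is fatal only when
+  * fixed-iothread is also set; otherwise the export keeps the node's
+  * current context.
+  */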
+ if (export->has_iothread) {
+ IOThread *iothread;
+ AioContext *new_ctx;
+
+ iothread = iothread_by_id(export->iothread);
+ if (!iothread) {
+ error_setg(errp, "iothread \"%s\" not found", export->iothread);
+ goto fail;
+ }
+
+ new_ctx = iothread_get_aio_context(iothread);
+
+ ret = bdrv_try_set_aio_context(bs, new_ctx, errp);
+ if (ret == 0) {
+ aio_context_release(ctx);
+ aio_context_acquire(new_ctx);
+ ctx = new_ctx;
+ } else if (fixed_iothread) {
+ goto fail;
+ }
+ }
+
/*
* Block exports are used for non-shared storage migration. Make sure
* that BDRV_O_INACTIVE is cleared and the image is ready for write
@@ -110,6 +140,11 @@ BlockExport *blk_exp_add(BlockExportOptions *export, Error **errp)
}
blk = blk_new(ctx, perm, BLK_PERM_ALL);
+
+ if (!fixed_iothread) {
+ blk_set_allow_aio_context_change(blk, true);
+ }
+
ret = blk_insert_bs(blk, bs, errp);
if (ret < 0) {
goto fail;
diff --git a/block/export/meson.build b/block/export/meson.build
index 558ef35d38..9fb4fbf81d 100644
--- a/block/export/meson.build
+++ b/block/export/meson.build
@@ -1 +1,2 @@
-block_ss.add(files('export.c'))
+blockdev_ss.add(files('export.c'))
+blockdev_ss.add(when: ['CONFIG_LINUX', 'CONFIG_VHOST_USER'], if_true: files('vhost-user-blk-server.c'))
diff --git a/block/export/vhost-user-blk-server.c b/block/export/vhost-user-blk-server.c
new file mode 100644
index 0000000000..41f4933d6e
--- /dev/null
+++ b/block/export/vhost-user-blk-server.c
@@ -0,0 +1,431 @@
+/*
+ * Sharing QEMU block devices via vhost-user protocol
+ *
+ * Parts of the code based on nbd/server.c.
+ *
+ * Copyright (c) Coiby Xu <coiby.xu@gmail.com>.
+ * Copyright (c) 2020 Red Hat, Inc.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * later. See the COPYING file in the top-level directory.
+ */
+#include "qemu/osdep.h"
+#include "block/block.h"
+#include "contrib/libvhost-user/libvhost-user.h"
+#include "standard-headers/linux/virtio_blk.h"
+#include "qemu/vhost-user-server.h"
+#include "vhost-user-blk-server.h"
+#include "qapi/error.h"
+#include "qom/object_interfaces.h"
+#include "sysemu/block-backend.h"
+#include "util/block-helpers.h"
+
+enum {
+ VHOST_USER_BLK_NUM_QUEUES_DEFAULT = 1,
+};
+struct virtio_blk_inhdr {
+ unsigned char status;
+};
+
+typedef struct VuBlkReq {
+ VuVirtqElement elem;
+ int64_t sector_num;
+ size_t size;
+ struct virtio_blk_inhdr *in;
+ struct virtio_blk_outhdr out;
+ VuServer *server;
+ struct VuVirtq *vq;
+} VuBlkReq;
+
+/* vhost user block device */
+typedef struct {
+ BlockExport export;
+ VuServer vu_server;
+ uint32_t blk_size;
+ QIOChannelSocket *sioc;
+ struct virtio_blk_config blkcfg;
+ bool writable;
+} VuBlkExport;
+
+static void vu_blk_req_complete(VuBlkReq *req)
+{
+ VuDev *vu_dev = &req->server->vu_dev;
+
+ /* IO size with 1 extra status byte */
+ vu_queue_push(vu_dev, req->vq, &req->elem, req->size + 1);
+ vu_queue_notify(vu_dev, req->vq);
+
+ free(req);
+}
+
+static int coroutine_fn
+vu_blk_discard_write_zeroes(BlockBackend *blk, struct iovec *iov,
+ uint32_t iovcnt, uint32_t type)
+{
+ struct virtio_blk_discard_write_zeroes desc;
+ ssize_t size = iov_to_buf(iov, iovcnt, 0, &desc, sizeof(desc));
+ if (unlikely(size != sizeof(desc))) {
+ error_report("Invalid size %zd, expect %zu", size, sizeof(desc));
+ return -EINVAL;
+ }
+
+ uint64_t range[2] = { le64_to_cpu(desc.sector) << 9,
+ le32_to_cpu(desc.num_sectors) << 9 };
+ if (type == VIRTIO_BLK_T_DISCARD) {
+ if (blk_co_pdiscard(blk, range[0], range[1]) == 0) {
+ return 0;
+ }
+ } else if (type == VIRTIO_BLK_T_WRITE_ZEROES) {
+ if (blk_co_pwrite_zeroes(blk, range[0], range[1], 0) == 0) {
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static void coroutine_fn vu_blk_virtio_process_req(void *opaque)
+{
+ VuBlkReq *req = opaque;
+ VuServer *server = req->server;
+ VuVirtqElement *elem = &req->elem;
+ uint32_t type;
+
+ VuBlkExport *vexp = container_of(server, VuBlkExport, vu_server);
+ BlockBackend *blk = vexp->export.blk;
+
+ struct iovec *in_iov = elem->in_sg;
+ struct iovec *out_iov = elem->out_sg;
+ unsigned in_num = elem->in_num;
+ unsigned out_num = elem->out_num;
+
+ /* refer to hw/block/virtio_blk.c */
+ if (elem->out_num < 1 || elem->in_num < 1) {
+ error_report("virtio-blk request missing headers");
+ goto err;
+ }
+
+ if (unlikely(iov_to_buf(out_iov, out_num, 0, &req->out,
+ sizeof(req->out)) != sizeof(req->out))) {
+ error_report("virtio-blk request outhdr too short");
+ goto err;
+ }
+
+ iov_discard_front(&out_iov, &out_num, sizeof(req->out));
+
+ if (in_iov[in_num - 1].iov_len < sizeof(struct virtio_blk_inhdr)) {
+ error_report("virtio-blk request inhdr too short");
+ goto err;
+ }
+
+ /* We always touch the last byte, so just see how big in_iov is. */
+ req->in = (void *)in_iov[in_num - 1].iov_base
+ + in_iov[in_num - 1].iov_len
+ - sizeof(struct virtio_blk_inhdr);
+ iov_discard_back(in_iov, &in_num, sizeof(struct virtio_blk_inhdr));
+
+ type = le32_to_cpu(req->out.type);
+ switch (type & ~VIRTIO_BLK_T_BARRIER) {
+ case VIRTIO_BLK_T_IN:
+ case VIRTIO_BLK_T_OUT: {
+ ssize_t ret = 0;
+ bool is_write = type & VIRTIO_BLK_T_OUT;
+ req->sector_num = le64_to_cpu(req->out.sector);
+
+ if (is_write && !vexp->writable) {
+ req->in->status = VIRTIO_BLK_S_IOERR;
+ break;
+ }
+
+ int64_t offset = req->sector_num * vexp->blk_size;
+ QEMUIOVector qiov;
+ if (is_write) {
+ qemu_iovec_init_external(&qiov, out_iov, out_num);
+ ret = blk_co_pwritev(blk, offset, qiov.size, &qiov, 0);
+ } else {
+ qemu_iovec_init_external(&qiov, in_iov, in_num);
+ ret = blk_co_preadv(blk, offset, qiov.size, &qiov, 0);
+ }
+ if (ret >= 0) {
+ req->in->status = VIRTIO_BLK_S_OK;
+ } else {
+ req->in->status = VIRTIO_BLK_S_IOERR;
+ }
+ break;
+ }
+ case VIRTIO_BLK_T_FLUSH:
+ if (blk_co_flush(blk) == 0) {
+ req->in->status = VIRTIO_BLK_S_OK;
+ } else {
+ req->in->status = VIRTIO_BLK_S_IOERR;
+ }
+ break;
+ case VIRTIO_BLK_T_GET_ID: {
+ size_t size = MIN(iov_size(&elem->in_sg[0], in_num),
+ VIRTIO_BLK_ID_BYTES);
+ snprintf(elem->in_sg[0].iov_base, size, "%s", "vhost_user_blk");
+ req->in->status = VIRTIO_BLK_S_OK;
+ req->size = elem->in_sg[0].iov_len;
+ break;
+ }
+ case VIRTIO_BLK_T_DISCARD:
+ case VIRTIO_BLK_T_WRITE_ZEROES: {
+ int rc;
+
+ if (!vexp->writable) {
+ req->in->status = VIRTIO_BLK_S_IOERR;
+ break;
+ }
+
+ rc = vu_blk_discard_write_zeroes(blk, &elem->out_sg[1], out_num, type);
+ if (rc == 0) {
+ req->in->status = VIRTIO_BLK_S_OK;
+ } else {
+ req->in->status = VIRTIO_BLK_S_IOERR;
+ }
+ break;
+ }
+ default:
+ req->in->status = VIRTIO_BLK_S_UNSUPP;
+ break;
+ }
+
+ vu_blk_req_complete(req);
+ return;
+
+err:
+ free(req);
+}
+
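+/*
+ * Drain the virtqueue: pop every available request and hand each one to its
+ * own coroutine, so blocking block-layer calls don't stall the vhost-user
+ * message handler.
+ */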
+static void vu_blk_process_vq(VuDev *vu_dev, int idx)
+{
+ VuServer *server = container_of(vu_dev, VuServer, vu_dev);
+ VuVirtq *vq = vu_get_queue(vu_dev, idx);
+
+ while (1) {
+ VuBlkReq *req;
+
+ req = vu_queue_pop(vu_dev, vq, sizeof(VuBlkReq));
+ if (!req) {
+ break;
+ }
+
+ req->server = server;
+ req->vq = vq;
+
+ Coroutine *co =
+ qemu_coroutine_create(vu_blk_virtio_process_req, req);
+ qemu_coroutine_enter(co);
+ }
+}
+
+static void vu_blk_queue_set_started(VuDev *vu_dev, int idx, bool started)
+{
+ VuVirtq *vq;
+
+ assert(vu_dev);
+
+ vq = vu_get_queue(vu_dev, idx);
+ vu_set_queue_handler(vu_dev, vq, started ? vu_blk_process_vq : NULL);
+}
+
+static uint64_t vu_blk_get_features(VuDev *dev)
+{
+ uint64_t features;
+ VuServer *server = container_of(dev, VuServer, vu_dev);
+ VuBlkExport *vexp = container_of(server, VuBlkExport, vu_server);
+ features = 1ull << VIRTIO_BLK_F_SIZE_MAX |
+ 1ull << VIRTIO_BLK_F_SEG_MAX |
+ 1ull << VIRTIO_BLK_F_TOPOLOGY |
+ 1ull << VIRTIO_BLK_F_BLK_SIZE |
+ 1ull << VIRTIO_BLK_F_FLUSH |
+ 1ull << VIRTIO_BLK_F_DISCARD |
+ 1ull << VIRTIO_BLK_F_WRITE_ZEROES |
+ 1ull << VIRTIO_BLK_F_CONFIG_WCE |
+ 1ull << VIRTIO_BLK_F_MQ |
+ 1ull << VIRTIO_F_VERSION_1 |
+ 1ull << VIRTIO_RING_F_INDIRECT_DESC |
+ 1ull << VIRTIO_RING_F_EVENT_IDX |
+ 1ull << VHOST_USER_F_PROTOCOL_FEATURES;
+
+ if (!vexp->writable) {
+ features |= 1ull << VIRTIO_BLK_F_RO;
+ }
+
+ return features;
+}
+
+static uint64_t vu_blk_get_protocol_features(VuDev *dev)
+{
+ return 1ull << VHOST_USER_PROTOCOL_F_CONFIG |
+ 1ull << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD;
+}
+
+static int
+vu_blk_get_config(VuDev *vu_dev, uint8_t *config, uint32_t len)
+{
+ /* TODO blkcfg must be little-endian for VIRTIO 1.0 */
+ VuServer *server = container_of(vu_dev, VuServer, vu_dev);
+ VuBlkExport *vexp = container_of(server, VuBlkExport, vu_server);
+ memcpy(config, &vexp->blkcfg, len);
+ return 0;
+}
+
+static int
+vu_blk_set_config(VuDev *vu_dev, const uint8_t *data,
+ uint32_t offset, uint32_t size, uint32_t flags)
+{
+ VuServer *server = container_of(vu_dev, VuServer, vu_dev);
+ VuBlkExport *vexp = container_of(server, VuBlkExport, vu_server);
+ uint8_t wce;
+
+ /* don't support live migration */
+ if (flags != VHOST_SET_CONFIG_TYPE_MASTER) {
+ return -EINVAL;
+ }
+
+ if (offset != offsetof(struct virtio_blk_config, wce) ||
+ size != 1) {
+ return -EINVAL;
+ }
+
+ wce = *data;
+ vexp->blkcfg.wce = wce;
+ blk_set_enable_write_cache(vexp->export.blk, wce);
+ return 0;
+}
+
+/*
+ * When the client disconnects, it sends a VHOST_USER_NONE request
+ * and vu_process_message() will simply call exit(), which causes the VM
+ * to exit abruptly.
+ * To avoid this issue, handle the VHOST_USER_NONE request ahead
+ * of vu_process_message().
+ */
+static int vu_blk_process_msg(VuDev *dev, VhostUserMsg *vmsg, int *do_reply)
+{
+ if (vmsg->request == VHOST_USER_NONE) {
+ dev->panic(dev, "disconnect");
+ return true;
+ }
+ return false;
+}
+
+static const VuDevIface vu_blk_iface = {
+ .get_features = vu_blk_get_features,
+ .queue_set_started = vu_blk_queue_set_started,
+ .get_protocol_features = vu_blk_get_protocol_features,
+ .get_config = vu_blk_get_config,
+ .set_config = vu_blk_set_config,
+ .process_msg = vu_blk_process_msg,
+};
+
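+/*
+ * AioContext change notifiers: called when the block backend's AioContext
+ * changes (e.g. when the export is moved to an IOThread), so the vhost-user
+ * server always runs in the same context as the export.
+ */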
+static void blk_aio_attached(AioContext *ctx, void *opaque)
+{
+ VuBlkExport *vexp = opaque;
+
+ vexp->export.ctx = ctx;
+ vhost_user_server_attach_aio_context(&vexp->vu_server, ctx);
+}
+
+static void blk_aio_detach(void *opaque)
+{
+ VuBlkExport *vexp = opaque;
+
+ vhost_user_server_detach_aio_context(&vexp->vu_server);
+ vexp->export.ctx = NULL;
+}
+
+static void
+vu_blk_initialize_config(BlockDriverState *bs,
+ struct virtio_blk_config *config,
+ uint32_t blk_size,
+ uint16_t num_queues)
+{
+ config->capacity = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;
+ config->blk_size = blk_size;
+ config->size_max = 0;
+ config->seg_max = 128 - 2;
+ config->min_io_size = 1;
+ config->opt_io_size = 1;
+ config->num_queues = num_queues;
+ config->max_discard_sectors = 32768;
+ config->max_discard_seg = 1;
+ config->discard_sector_alignment = config->blk_size >> 9;
+ config->max_write_zeroes_sectors = 32768;
+ config->max_write_zeroes_seg = 1;
+}
+
+static void vu_blk_exp_request_shutdown(BlockExport *exp)
+{
+ VuBlkExport *vexp = container_of(exp, VuBlkExport, export);
+
+ vhost_user_server_stop(&vexp->vu_server);
+}
+
+static int vu_blk_exp_create(BlockExport *exp, BlockExportOptions *opts,
+ Error **errp)
+{
+ VuBlkExport *vexp = container_of(exp, VuBlkExport, export);
+ BlockExportOptionsVhostUserBlk *vu_opts = &opts->u.vhost_user_blk;
+ Error *local_err = NULL;
+ uint64_t logical_block_size;
+ uint16_t num_queues = VHOST_USER_BLK_NUM_QUEUES_DEFAULT;
+
+ vexp->writable = opts->writable;
+ vexp->blkcfg.wce = 0;
+
+ if (vu_opts->has_logical_block_size) {
+ logical_block_size = vu_opts->logical_block_size;
+ } else {
+ logical_block_size = BDRV_SECTOR_SIZE;
+ }
+ check_block_size(exp->id, "logical-block-size", logical_block_size,
+ &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return -EINVAL;
+ }
+ vexp->blk_size = logical_block_size;
+ blk_set_guest_block_size(exp->blk, logical_block_size);
+
+ if (vu_opts->has_num_queues) {
+ num_queues = vu_opts->num_queues;
+ }
+ if (num_queues == 0) {
+ error_setg(errp, "num-queues must be greater than 0");
+ return -EINVAL;
+ }
+
+ vu_blk_initialize_config(blk_bs(exp->blk), &vexp->blkcfg,
+ logical_block_size, num_queues);
+
+ blk_add_aio_context_notifier(exp->blk, blk_aio_attached, blk_aio_detach,
+ vexp);
+
+ if (!vhost_user_server_start(&vexp->vu_server, vu_opts->addr, exp->ctx,
+ num_queues, &vu_blk_iface, errp)) {
+ blk_remove_aio_context_notifier(exp->blk, blk_aio_attached,
+ blk_aio_detach, vexp);
+ return -EADDRNOTAVAIL;
+ }
+
+ return 0;
+}
+
+static void vu_blk_exp_delete(BlockExport *exp)
+{
+ VuBlkExport *vexp = container_of(exp, VuBlkExport, export);
+
+ blk_remove_aio_context_notifier(exp->blk, blk_aio_attached, blk_aio_detach,
+ vexp);
+}
+
+const BlockExportDriver blk_exp_vhost_user_blk = {
+ .type = BLOCK_EXPORT_TYPE_VHOST_USER_BLK,
+ .instance_size = sizeof(VuBlkExport),
+ .create = vu_blk_exp_create,
+ .delete = vu_blk_exp_delete,
+ .request_shutdown = vu_blk_exp_request_shutdown,
+};
diff --git a/block/export/vhost-user-blk-server.h b/block/export/vhost-user-blk-server.h
new file mode 100644
index 0000000000..fcf46fc8a5
--- /dev/null
+++ b/block/export/vhost-user-blk-server.h
@@ -0,0 +1,19 @@
+/*
+ * Sharing QEMU block devices via vhost-user protocol
+ *
+ * Copyright (c) Coiby Xu <coiby.xu@gmail.com>.
+ * Copyright (c) 2020 Red Hat, Inc.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * later. See the COPYING file in the top-level directory.
+ */
+
+#ifndef VHOST_USER_BLK_SERVER_H
+#define VHOST_USER_BLK_SERVER_H
+
+#include "block/export.h"
+
+/* For block/export/export.c */
+extern const BlockExportDriver blk_exp_vhost_user_blk;
+
+#endif /* VHOST_USER_BLK_SERVER_H */
diff --git a/block/io.c b/block/io.c
index 54f0968aee..02528b3823 100644
--- a/block/io.c
+++ b/block/io.c
@@ -2343,6 +2343,7 @@ early_out:
int coroutine_fn
bdrv_co_common_block_status_above(BlockDriverState *bs,
BlockDriverState *base,
+ bool include_base,
bool want_zero,
int64_t offset,
int64_t bytes,
@@ -2350,34 +2351,84 @@ bdrv_co_common_block_status_above(BlockDriverState *bs,
int64_t *map,
BlockDriverState **file)
{
+ int ret;
BlockDriverState *p;
- int ret = 0;
- bool first = true;
+ int64_t eof = 0;
+
+ assert(!include_base || base); /* Can't include NULL base */
+
+ if (!include_base && bs == base) {
+ *pnum = bytes;
+ return 0;
+ }
+
+ ret = bdrv_co_block_status(bs, want_zero, offset, bytes, pnum, map, file);
+ if (ret < 0 || *pnum == 0 || ret & BDRV_BLOCK_ALLOCATED || bs == base) {
+ return ret;
+ }
+
+ if (ret & BDRV_BLOCK_EOF) {
+ eof = offset + *pnum;
+ }
+
+ assert(*pnum <= bytes);
+ bytes = *pnum;
- assert(bs != base);
- for (p = bs; p != base; p = bdrv_filter_or_cow_bs(p)) {
+ for (p = bdrv_filter_or_cow_bs(bs); include_base || p != base;
+ p = bdrv_filter_or_cow_bs(p))
+ {
ret = bdrv_co_block_status(p, want_zero, offset, bytes, pnum, map,
file);
if (ret < 0) {
- break;
+ return ret;
}
- if (ret & BDRV_BLOCK_ZERO && ret & BDRV_BLOCK_EOF && !first) {
+ if (*pnum == 0) {
/*
- * Reading beyond the end of the file continues to read
- * zeroes, but we can only widen the result to the
- * unallocated length we learned from an earlier
- * iteration.
+ * The top layer deferred to this layer, and because this layer is
+ * short, any zeroes that we synthesize beyond EOF behave as if they
+ * were allocated at this layer.
+ *
+ * We don't include BDRV_BLOCK_EOF in ret, as the upper layer may be
+ * larger. We'll add BDRV_BLOCK_EOF if needed at the end of the
+ * function, see below.
*/
+ assert(ret & BDRV_BLOCK_EOF);
*pnum = bytes;
+ if (file) {
+ *file = p;
+ }
+ ret = BDRV_BLOCK_ZERO | BDRV_BLOCK_ALLOCATED;
+ break;
+ }
+ if (ret & BDRV_BLOCK_ALLOCATED) {
+ /*
+ * We've found the node and the status, we must break.
+ *
+ * Drop BDRV_BLOCK_EOF, as it's not for the upper layer, which may be
+ * larger. We'll add BDRV_BLOCK_EOF if needed at the end of the
+ * function, see below.
+ */
+ ret &= ~BDRV_BLOCK_EOF;
+ break;
}
- if (ret & (BDRV_BLOCK_ZERO | BDRV_BLOCK_DATA)) {
+
+ if (p == base) {
+ assert(include_base);
break;
}
- /* [offset, pnum] unallocated on this layer, which could be only
- * the first part of [offset, bytes]. */
- bytes = MIN(bytes, *pnum);
- first = false;
+
+ /*
+ * OK, the [offset, offset + *pnum) region is unallocated on this layer,
+ * so let's keep going down the backing chain.
+ */
+ assert(*pnum <= bytes);
+ bytes = *pnum;
+ }
+
+ if (offset + *pnum == eof) {
+ ret |= BDRV_BLOCK_EOF;
}
+
return ret;
}
@@ -2385,7 +2436,7 @@ int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base,
int64_t offset, int64_t bytes, int64_t *pnum,
int64_t *map, BlockDriverState **file)
{
- return bdrv_common_block_status_above(bs, base, true, offset, bytes,
+ return bdrv_common_block_status_above(bs, base, false, true, offset, bytes,
pnum, map, file);
}
@@ -2402,9 +2453,9 @@ int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t offset,
int ret;
int64_t dummy;
- ret = bdrv_common_block_status_above(bs, bdrv_filter_or_cow_bs(bs), false,
- offset, bytes, pnum ? pnum : &dummy,
- NULL, NULL);
+ ret = bdrv_common_block_status_above(bs, bs, true, false, offset,
+ bytes, pnum ? pnum : &dummy, NULL,
+ NULL);
if (ret < 0) {
return ret;
}
@@ -2426,52 +2477,19 @@ int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t offset,
* at 'offset + *pnum' may return the same allocation status (in other
* words, the result is not necessarily the maximum possible range);
* but 'pnum' will only be 0 when end of file is reached.
- *
*/
int bdrv_is_allocated_above(BlockDriverState *top,
BlockDriverState *base,
bool include_base, int64_t offset,
int64_t bytes, int64_t *pnum)
{
- BlockDriverState *intermediate;
- int ret;
- int64_t n = bytes;
-
- assert(base || !include_base);
-
- intermediate = top;
- while (include_base || intermediate != base) {
- int64_t pnum_inter;
- int64_t size_inter;
-
- assert(intermediate);
- ret = bdrv_is_allocated(intermediate, offset, bytes, &pnum_inter);
- if (ret < 0) {
- return ret;
- }
- if (ret) {
- *pnum = pnum_inter;
- return 1;
- }
-
- size_inter = bdrv_getlength(intermediate);
- if (size_inter < 0) {
- return size_inter;
- }
- if (n > pnum_inter &&
- (intermediate == top || offset + pnum_inter < size_inter)) {
- n = pnum_inter;
- }
-
- if (intermediate == base) {
- break;
- }
-
- intermediate = bdrv_filter_or_cow_bs(intermediate);
+ int ret = bdrv_common_block_status_above(top, base, include_base, false,
+ offset, bytes, pnum, NULL, NULL);
+ if (ret < 0) {
+ return ret;
}
- *pnum = n;
- return 0;
+ return !!(ret & BDRV_BLOCK_ALLOCATED);
}
int coroutine_fn
diff --git a/block/nvme.c b/block/nvme.c
index b48f6f2588..739a0a700c 100644
--- a/block/nvme.c
+++ b/block/nvme.c
@@ -128,6 +128,12 @@ struct BDRVNVMeState {
/* PCI address (required for nvme_refresh_filename()) */
char *device;
+
+ struct {
+ uint64_t completion_errors;
+ uint64_t aligned_accesses;
+ uint64_t unaligned_accesses;
+ } stats;
};
#define NVME_BLOCK_OPT_DEVICE "device"
@@ -384,6 +390,9 @@ static bool nvme_process_completion(NVMeQueuePair *q)
break;
}
ret = nvme_translate_error(c);
+ if (ret) {
+ s->stats.completion_errors++;
+ }
q->cq.head = (q->cq.head + 1) % NVME_QUEUE_SIZE;
if (!q->cq.head) {
q->cq_phase = !q->cq_phase;
@@ -1155,8 +1164,10 @@ static int nvme_co_prw(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
assert(QEMU_IS_ALIGNED(bytes, s->page_size));
assert(bytes <= s->max_transfer);
if (nvme_qiov_aligned(bs, qiov)) {
+ s->stats.aligned_accesses++;
return nvme_co_prw_aligned(bs, offset, bytes, qiov, is_write, flags);
}
+ s->stats.unaligned_accesses++;
trace_nvme_prw_buffered(s, offset, bytes, qiov->niov, is_write);
buf = qemu_try_memalign(s->page_size, bytes);
@@ -1452,6 +1463,21 @@ static void nvme_unregister_buf(BlockDriverState *bs, void *host)
qemu_vfio_dma_unmap(s->vfio, host);
}
+static BlockStatsSpecific *nvme_get_specific_stats(BlockDriverState *bs)
+{
+ BlockStatsSpecific *stats = g_new(BlockStatsSpecific, 1);
+ BDRVNVMeState *s = bs->opaque;
+
+ stats->driver = BLOCKDEV_DRIVER_NVME;
+ stats->u.nvme = (BlockStatsSpecificNvme) {
+ .completion_errors = s->stats.completion_errors,
+ .aligned_accesses = s->stats.aligned_accesses,
+ .unaligned_accesses = s->stats.unaligned_accesses,
+ };
+
+ return stats;
+}
+
static const char *const nvme_strong_runtime_opts[] = {
NVME_BLOCK_OPT_DEVICE,
NVME_BLOCK_OPT_NAMESPACE,
@@ -1485,6 +1511,7 @@ static BlockDriver bdrv_nvme = {
.bdrv_refresh_filename = nvme_refresh_filename,
.bdrv_refresh_limits = nvme_refresh_limits,
.strong_runtime_opts = nvme_strong_runtime_opts,
+ .bdrv_get_specific_stats = nvme_get_specific_stats,
.bdrv_detach_aio_context = nvme_detach_aio_context,
.bdrv_attach_aio_context = nvme_attach_aio_context,
diff --git a/block/qcow2.c b/block/qcow2.c
index b05512718c..b6cb4db8bb 100644
--- a/block/qcow2.c
+++ b/block/qcow2.c
@@ -3860,8 +3860,20 @@ static bool is_zero(BlockDriverState *bs, int64_t offset, int64_t bytes)
if (!bytes) {
return true;
}
- res = bdrv_block_status_above(bs, NULL, offset, bytes, &nr, NULL, NULL);
- return res >= 0 && (res & BDRV_BLOCK_ZERO) && nr == bytes;
+
+ /*
+ * bdrv_block_status_above doesn't merge different types of zeros, for
+ * example, zeros that come from a region that is unallocated in
+ * the whole backing chain, and zeros that exist because of a short
+ * backing file. So, we need a loop.
+ */
+ do {
+ res = bdrv_block_status_above(bs, NULL, offset, bytes, &nr, NULL, NULL);
+ offset += nr;
+ bytes -= nr;
+ } while (res >= 0 && (res & BDRV_BLOCK_ZERO) && nr && bytes);
+
+ return res >= 0 && (res & BDRV_BLOCK_ZERO) && bytes == 0;
}
static coroutine_fn int qcow2_co_pwrite_zeroes(BlockDriverState *bs,
diff --git a/contrib/libvhost-user/libvhost-user-glib.c b/contrib/libvhost-user/libvhost-user-glib.c
index 53f1ca4cdd..0df2ec9271 100644
--- a/contrib/libvhost-user/libvhost-user-glib.c
+++ b/contrib/libvhost-user/libvhost-user-glib.c
@@ -147,7 +147,7 @@ vug_init(VugDev *dev, uint16_t max_queues, int socket,
g_assert(dev);
g_assert(iface);
- if (!vu_init(&dev->parent, max_queues, socket, panic, set_watch,
+ if (!vu_init(&dev->parent, max_queues, socket, panic, NULL, set_watch,
remove_watch, iface)) {
return false;
}
diff --git a/contrib/libvhost-user/libvhost-user.c b/contrib/libvhost-user/libvhost-user.c
index 9f1285b8a1..bfec8a881a 100644
--- a/contrib/libvhost-user/libvhost-user.c
+++ b/contrib/libvhost-user/libvhost-user.c
@@ -68,8 +68,6 @@
/* The version of inflight buffer */
#define INFLIGHT_VERSION 1
-#define VHOST_USER_HDR_SIZE offsetof(VhostUserMsg, payload.u64)
-
/* The version of the protocol we support */
#define VHOST_USER_VERSION 1
#define LIBVHOST_USER_DEBUG 0
@@ -268,7 +266,7 @@ have_userfault(void)
}
static bool
-vu_message_read(VuDev *dev, int conn_fd, VhostUserMsg *vmsg)
+vu_message_read_default(VuDev *dev, int conn_fd, VhostUserMsg *vmsg)
{
char control[CMSG_SPACE(VHOST_MEMORY_BASELINE_NREGIONS * sizeof(int))] = {};
struct iovec iov = {
@@ -416,7 +414,7 @@ vu_process_message_reply(VuDev *dev, const VhostUserMsg *vmsg)
goto out;
}
- if (!vu_message_read(dev, dev->slave_fd, &msg_reply)) {
+ if (!vu_message_read_default(dev, dev->slave_fd, &msg_reply)) {
goto out;
}
@@ -907,7 +905,7 @@ vu_set_mem_table_exec_postcopy(VuDev *dev, VhostUserMsg *vmsg)
/* Wait for QEMU to confirm that it's registered the handler for the
* faults.
*/
- if (!vu_message_read(dev, dev->sock, vmsg) ||
+ if (!dev->read_msg(dev, dev->sock, vmsg) ||
vmsg->size != sizeof(vmsg->payload.u64) ||
vmsg->payload.u64 != 0) {
vu_panic(dev, "failed to receive valid ack for postcopy set-mem-table");
@@ -1869,7 +1867,7 @@ vu_dispatch(VuDev *dev)
int reply_requested;
bool need_reply, success = false;
- if (!vu_message_read(dev, dev->sock, &vmsg)) {
+ if (!dev->read_msg(dev, dev->sock, &vmsg)) {
goto end;
}
@@ -1920,6 +1918,7 @@ vu_deinit(VuDev *dev)
}
if (vq->kick_fd != -1) {
+ dev->remove_watch(dev, vq->kick_fd);
close(vq->kick_fd);
vq->kick_fd = -1;
}
@@ -1967,6 +1966,7 @@ vu_init(VuDev *dev,
uint16_t max_queues,
int socket,
vu_panic_cb panic,
+ vu_read_msg_cb read_msg,
vu_set_watch_cb set_watch,
vu_remove_watch_cb remove_watch,
const VuDevIface *iface)
@@ -1984,6 +1984,7 @@ vu_init(VuDev *dev,
dev->sock = socket;
dev->panic = panic;
+ dev->read_msg = read_msg ? read_msg : vu_message_read_default;
dev->set_watch = set_watch;
dev->remove_watch = remove_watch;
dev->iface = iface;
@@ -2349,7 +2350,7 @@ static void _vu_queue_notify(VuDev *dev, VuVirtq *vq, bool sync)
vu_message_write(dev, dev->slave_fd, &vmsg);
if (ack) {
- vu_message_read(dev, dev->slave_fd, &vmsg);
+ vu_message_read_default(dev, dev->slave_fd, &vmsg);
}
return;
}
diff --git a/contrib/libvhost-user/libvhost-user.h b/contrib/libvhost-user/libvhost-user.h
index 287ac5fec7..3bbeae8587 100644
--- a/contrib/libvhost-user/libvhost-user.h
+++ b/contrib/libvhost-user/libvhost-user.h
@@ -36,6 +36,8 @@
*/
#define VHOST_USER_MAX_RAM_SLOTS 32
+#define VHOST_USER_HDR_SIZE offsetof(VhostUserMsg, payload.u64)
+
typedef enum VhostSetConfigType {
VHOST_SET_CONFIG_TYPE_MASTER = 0,
VHOST_SET_CONFIG_TYPE_MIGRATION = 1,
@@ -221,6 +223,7 @@ typedef uint64_t (*vu_get_features_cb) (VuDev *dev);
typedef void (*vu_set_features_cb) (VuDev *dev, uint64_t features);
typedef int (*vu_process_msg_cb) (VuDev *dev, VhostUserMsg *vmsg,
int *do_reply);
+typedef bool (*vu_read_msg_cb) (VuDev *dev, int sock, VhostUserMsg *vmsg);
typedef void (*vu_queue_set_started_cb) (VuDev *dev, int qidx, bool started);
typedef bool (*vu_queue_is_processed_in_order_cb) (VuDev *dev, int qidx);
typedef int (*vu_get_config_cb) (VuDev *dev, uint8_t *config, uint32_t len);
@@ -389,6 +392,23 @@ struct VuDev {
bool broken;
uint16_t max_queues;
+ /* @read_msg: custom method to read vhost-user message
+ *
+ * Read data from the vhost-user socket fd and fill up
+ * the passed VhostUserMsg *vmsg struct.
+ *
+ * If reading fails, it should close any file descriptors received as
+ * the socket message's ancillary data.
+ *
+ * For details, please refer to vu_message_read_default in
+ * libvhost-user.c, which is used by default if no custom method is
+ * provided when calling vu_init.
+ *
+ * Returns: true if a vhost-user message was successfully received,
+ * false otherwise.
+ *
+ */
+ vu_read_msg_cb read_msg;
/* @set_watch: add or update the given fd to the watch set,
* call cb when condition is met */
vu_set_watch_cb set_watch;
@@ -432,6 +452,7 @@ bool vu_init(VuDev *dev,
uint16_t max_queues,
int socket,
vu_panic_cb panic,
+ vu_read_msg_cb read_msg,
vu_set_watch_cb set_watch,
vu_remove_watch_cb remove_watch,
const VuDevIface *iface);
diff --git a/contrib/libvhost-user/meson.build b/contrib/libvhost-user/meson.build
index e68dd1a581..a261e7665f 100644
--- a/contrib/libvhost-user/meson.build
+++ b/contrib/libvhost-user/meson.build
@@ -1,3 +1,4 @@
libvhost_user = static_library('vhost-user',
files('libvhost-user.c', 'libvhost-user-glib.c'),
build_by_default: false)
+vhost_user = declare_dependency(link_with: libvhost_user)
diff --git a/docs/devel/fuzzing.txt b/docs/devel/fuzzing.txt
index 96d71c94d7..03585c1a9b 100644
--- a/docs/devel/fuzzing.txt
+++ b/docs/devel/fuzzing.txt
@@ -125,6 +125,45 @@ provided by libfuzzer. Libfuzzer passes a byte array and length. Commonly the
fuzzer loops over the byte-array interpreting it as a list of qtest commands,
addresses, or values.
+== The Generic Fuzzer ==
+Writing a fuzz target can be a lot of effort (especially if a device driver has
+not been built out within libqos). Many devices can be fuzzed to some degree,
+without any device-specific code, using the generic-fuzz target.
+
+The generic-fuzz target is capable of fuzzing devices over their PIO, MMIO,
+and DMA input-spaces. To apply generic-fuzz to a device, we need to define
+at least two environment variables:
+
+QEMU_FUZZ_ARGS= is the set of QEMU arguments used to configure a machine, with
+the device attached. For example, if we want to fuzz the virtio-net device
+attached to a pc-i440fx machine, we can specify:
+QEMU_FUZZ_ARGS="-M pc -nodefaults -netdev user,id=user0 \
+ -device virtio-net,netdev=user0"
+
+QEMU_FUZZ_OBJECTS= is a set of space-delimited strings used to identify the
+MemoryRegions that will be fuzzed. These strings are compared against
+MemoryRegion names and MemoryRegion owner names, to decide whether each
+MemoryRegion should be fuzzed. These strings support globbing. For the
+virtio-net example, we could use QEMU_FUZZ_OBJECTS=
+ * 'virtio-net'
+ * 'virtio*'
+ * 'virtio* pcspk' (Fuzz the virtio devices and the PC speaker...)
+ * '*' (Fuzz the whole machine)
+
+The "info mtree" and "info qom-tree" monitor commands can be especially useful
+for identifying the MemoryRegion and Object names used for matching.
+
+As a general rule of thumb, the more MemoryRegions/Devices we match, the greater
+the input-space, and the smaller the probability of finding crashing inputs for
+individual devices. As such, it is usually a good idea to limit the fuzzer to
+only a few MemoryRegions.
+
+To ensure that these env variables have been configured correctly, we can use:
+
+./qemu-fuzz-i386 --fuzz-target=generic-fuzz -runs=0
+
+The output should contain a complete list of matched MemoryRegions.
+
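+Putting it together, a complete invocation for the virtio-net example above
+might look like this (the QEMU_FUZZ_OBJECTS pattern shown is just one of the
+possible choices listed earlier):
+
+QEMU_FUZZ_ARGS="-M pc -nodefaults -netdev user,id=user0 \
+    -device virtio-net,netdev=user0" \
+QEMU_FUZZ_OBJECTS='virtio*' \
+./qemu-fuzz-i386 --fuzz-target=generic-fuzz
+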
= Implementation Details =
== The Fuzzer's Lifecycle ==
diff --git a/hw/core/qdev-properties-system.c b/hw/core/qdev-properties-system.c
index 49bdd12581..b81a4e8d14 100644
--- a/hw/core/qdev-properties-system.c
+++ b/hw/core/qdev-properties-system.c
@@ -30,6 +30,7 @@
#include "sysemu/blockdev.h"
#include "net/net.h"
#include "hw/pci/pci.h"
+#include "util/block-helpers.h"
static bool check_prop_still_unset(DeviceState *dev, const char *name,
const void *old_val, const char *new_val,
@@ -576,16 +577,6 @@ const PropertyInfo qdev_prop_losttickpolicy = {
/* --- blocksize --- */
-/* lower limit is sector size */
-#define MIN_BLOCK_SIZE 512
-#define MIN_BLOCK_SIZE_STR "512 B"
-/*
- * upper limit is arbitrary, 2 MiB looks sufficient for all sensible uses, and
- * matches qcow2 cluster size limit
- */
-#define MAX_BLOCK_SIZE (2 * MiB)
-#define MAX_BLOCK_SIZE_STR "2 MiB"
-
static void set_blocksize(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
@@ -593,6 +584,7 @@ static void set_blocksize(Object *obj, Visitor *v, const char *name,
Property *prop = opaque;
uint32_t *ptr = qdev_get_prop_ptr(dev, prop);
uint64_t value;
+ Error *local_err = NULL;
if (dev->realized) {
qdev_prop_set_after_realize(dev, name, errp);
@@ -602,24 +594,11 @@ static void set_blocksize(Object *obj, Visitor *v, const char *name,
if (!visit_type_size(v, name, &value, errp)) {
return;
}
- /* value of 0 means "unset" */
- if (value && (value < MIN_BLOCK_SIZE || value > MAX_BLOCK_SIZE)) {
- error_setg(errp,
- "Property %s.%s doesn't take value %" PRIu64
- " (minimum: " MIN_BLOCK_SIZE_STR
- ", maximum: " MAX_BLOCK_SIZE_STR ")",
- dev->id ? : "", name, value);
- return;
- }
-
- /* We rely on power-of-2 blocksizes for bitmasks */
- if ((value & (value - 1)) != 0) {
- error_setg(errp,
- "Property %s.%s doesn't take value '%" PRId64 "', "
- "it's not a power of 2", dev->id ?: "", name, (int64_t)value);
+ check_block_size(dev->id ? : "", name, value, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
return;
}
-
*ptr = value;
}
diff --git a/hw/misc/sifive_u_otp.c b/hw/misc/sifive_u_otp.c
index c2f3c8e129..60066375ab 100644
--- a/hw/misc/sifive_u_otp.c
+++ b/hw/misc/sifive_u_otp.c
@@ -19,11 +19,22 @@
*/
#include "qemu/osdep.h"
+#include "qapi/error.h"
#include "hw/qdev-properties.h"
#include "hw/sysbus.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "hw/misc/sifive_u_otp.h"
+#include "sysemu/blockdev.h"
+#include "sysemu/block-backend.h"
+
+#define WRITTEN_BIT_ON 0x1
+
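+/* Set or clear bit @off of word @i in the fuse @map, depending on @bit */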
+#define SET_FUSEARRAY_BIT(map, i, off, bit) \
+ map[i] = bit ? (map[i] | bit << off) : (map[i] & ~(0x1 << off))
+
+#define GET_FUSEARRAY_BIT(map, i, off) \
+ ((map[i] >> off) & 0x1)
static uint64_t sifive_u_otp_read(void *opaque, hwaddr addr, unsigned int size)
{
@@ -46,6 +57,16 @@ static uint64_t sifive_u_otp_read(void *opaque, hwaddr addr, unsigned int size)
if ((s->pce & SIFIVE_U_OTP_PCE_EN) &&
(s->pdstb & SIFIVE_U_OTP_PDSTB_EN) &&
(s->ptrim & SIFIVE_U_OTP_PTRIM_EN)) {
+
+ /* read from backend */
+ if (s->blk) {
+ int32_t buf;
+
+ blk_pread(s->blk, s->pa * SIFIVE_U_OTP_FUSE_WORD, &buf,
+ SIFIVE_U_OTP_FUSE_WORD);
+ return buf;
+ }
+
return s->fuse[s->pa & SIFIVE_U_OTP_PA_MASK];
} else {
return 0xff;
@@ -123,7 +144,30 @@ static void sifive_u_otp_write(void *opaque, hwaddr addr,
s->ptrim = val32;
break;
case SIFIVE_U_OTP_PWE:
- s->pwe = val32;
+ s->pwe = val32 & SIFIVE_U_OTP_PWE_EN;
+
+ /* PWE is enabled. Ignore PAS=1 (no redundancy cell) */
+ if (s->pwe && !s->pas) {
+ if (GET_FUSEARRAY_BIT(s->fuse_wo, s->pa, s->paio)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "write once error: idx<%u>, bit<%u>\n",
+ s->pa, s->paio);
+ break;
+ }
+
+ /* write bit data */
+ SET_FUSEARRAY_BIT(s->fuse, s->pa, s->paio, s->pdin);
+
+ /* write to backend */
+ if (s->blk) {
+ blk_pwrite(s->blk, s->pa * SIFIVE_U_OTP_FUSE_WORD,
+ &s->fuse[s->pa], SIFIVE_U_OTP_FUSE_WORD, 0);
+ }
+
+ /* update written bit */
+ SET_FUSEARRAY_BIT(s->fuse_wo, s->pa, s->paio, WRITTEN_BIT_ON);
+ }
+
break;
default:
qemu_log_mask(LOG_GUEST_ERROR, "%s: bad write: addr=0x%" HWADDR_PRIx
@@ -143,16 +187,48 @@ static const MemoryRegionOps sifive_u_otp_ops = {
static Property sifive_u_otp_properties[] = {
DEFINE_PROP_UINT32("serial", SiFiveUOTPState, serial, 0),
+ DEFINE_PROP_DRIVE("drive", SiFiveUOTPState, blk),
DEFINE_PROP_END_OF_LIST(),
};
static void sifive_u_otp_realize(DeviceState *dev, Error **errp)
{
SiFiveUOTPState *s = SIFIVE_U_OTP(dev);
+ DriveInfo *dinfo;
memory_region_init_io(&s->mmio, OBJECT(dev), &sifive_u_otp_ops, s,
TYPE_SIFIVE_U_OTP, SIFIVE_U_OTP_REG_SIZE);
sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->mmio);
+
+ dinfo = drive_get_next(IF_NONE);
+ if (dinfo) {
+ int ret;
+ uint64_t perm;
+ int filesize;
+ BlockBackend *blk;
+
+ blk = blk_by_legacy_dinfo(dinfo);
+ filesize = SIFIVE_U_OTP_NUM_FUSES * SIFIVE_U_OTP_FUSE_WORD;
+ if (blk_getlength(blk) < filesize) {
+ error_setg(errp, "OTP drive size < 16K");
+ return;
+ }
+
+ qdev_prop_set_drive_err(dev, "drive", blk, errp);
+
+ if (s->blk) {
+ perm = BLK_PERM_CONSISTENT_READ |
+ (blk_is_read_only(s->blk) ? 0 : BLK_PERM_WRITE);
+ ret = blk_set_perm(s->blk, perm, BLK_PERM_ALL, errp);
+ if (ret < 0) {
+ return;
+ }
+
+ if (blk_pread(s->blk, 0, s->fuse, filesize) != filesize) {
+ error_setg(errp, "failed to read the initial flash content");
+ }
+ }
+ }
}
static void sifive_u_otp_reset(DeviceState *dev)
@@ -165,6 +241,23 @@ static void sifive_u_otp_reset(DeviceState *dev)
/* Make a valid content of serial number */
s->fuse[SIFIVE_U_OTP_SERIAL_ADDR] = s->serial;
s->fuse[SIFIVE_U_OTP_SERIAL_ADDR + 1] = ~(s->serial);
+
+ if (s->blk) {
+ /* Put the serial number into the backend as well */
+ uint32_t serial_data;
+ int index = SIFIVE_U_OTP_SERIAL_ADDR;
+
+ serial_data = s->serial;
+ blk_pwrite(s->blk, index * SIFIVE_U_OTP_FUSE_WORD,
+ &serial_data, SIFIVE_U_OTP_FUSE_WORD, 0);
+
+ serial_data = ~(s->serial);
+ blk_pwrite(s->blk, (index + 1) * SIFIVE_U_OTP_FUSE_WORD,
+ &serial_data, SIFIVE_U_OTP_FUSE_WORD, 0);
+ }
+
+ /* Initialize write-once map */
+ memset(s->fuse_wo, 0x00, sizeof(s->fuse_wo));
}
static void sifive_u_otp_class_init(ObjectClass *klass, void *data)
diff --git a/hw/riscv/boot.c b/hw/riscv/boot.c
index 21adaae56e..9b3fe3fb1e 100644
--- a/hw/riscv/boot.c
+++ b/hw/riscv/boot.c
@@ -33,19 +33,36 @@
#include <libfdt.h>
#if defined(TARGET_RISCV32)
-# define KERNEL_BOOT_ADDRESS 0x80400000
#define fw_dynamic_info_data(__val) cpu_to_le32(__val)
#else
-# define KERNEL_BOOT_ADDRESS 0x80200000
#define fw_dynamic_info_data(__val) cpu_to_le64(__val)
#endif
-void riscv_find_and_load_firmware(MachineState *machine,
- const char *default_machine_firmware,
- hwaddr firmware_load_addr,
- symbol_fn_t sym_cb)
+bool riscv_is_32_bit(MachineState *machine)
+{
+ if (!strncmp(machine->cpu_type, "rv32", 4)) {
+ return true;
+ } else {
+ return false;
+ }
+}
+
+target_ulong riscv_calc_kernel_start_addr(MachineState *machine,
+ target_ulong firmware_end_addr) {
+ if (riscv_is_32_bit(machine)) {
+ return QEMU_ALIGN_UP(firmware_end_addr, 4 * MiB);
+ } else {
+ return QEMU_ALIGN_UP(firmware_end_addr, 2 * MiB);
+ }
+}
+
+target_ulong riscv_find_and_load_firmware(MachineState *machine,
+ const char *default_machine_firmware,
+ hwaddr firmware_load_addr,
+ symbol_fn_t sym_cb)
{
char *firmware_filename = NULL;
+ target_ulong firmware_end_addr = firmware_load_addr;
if ((!machine->firmware) || (!strcmp(machine->firmware, "default"))) {
/*
@@ -60,9 +77,12 @@ void riscv_find_and_load_firmware(MachineState *machine,
if (firmware_filename) {
/* If not "none" load the firmware */
- riscv_load_firmware(firmware_filename, firmware_load_addr, sym_cb);
+ firmware_end_addr = riscv_load_firmware(firmware_filename,
+ firmware_load_addr, sym_cb);
g_free(firmware_filename);
}
+
+ return firmware_end_addr;
}
char *riscv_find_firmware(const char *firmware_filename)
@@ -91,24 +111,28 @@ target_ulong riscv_load_firmware(const char *firmware_filename,
hwaddr firmware_load_addr,
symbol_fn_t sym_cb)
{
- uint64_t firmware_entry;
+ uint64_t firmware_entry, firmware_size, firmware_end;
if (load_elf_ram_sym(firmware_filename, NULL, NULL, NULL,
- &firmware_entry, NULL, NULL, NULL,
+ &firmware_entry, NULL, &firmware_end, NULL,
0, EM_RISCV, 1, 0, NULL, true, sym_cb) > 0) {
- return firmware_entry;
+ return firmware_end;
}
- if (load_image_targphys_as(firmware_filename, firmware_load_addr,
- ram_size, NULL) > 0) {
- return firmware_load_addr;
+ firmware_size = load_image_targphys_as(firmware_filename,
+ firmware_load_addr, ram_size, NULL);
+
+ if (firmware_size > 0) {
+ return firmware_load_addr + firmware_size;
}
error_report("could not load firmware '%s'", firmware_filename);
exit(1);
}
-target_ulong riscv_load_kernel(const char *kernel_filename, symbol_fn_t sym_cb)
+target_ulong riscv_load_kernel(const char *kernel_filename,
+ target_ulong kernel_start_addr,
+ symbol_fn_t sym_cb)
{
uint64_t kernel_entry;
@@ -123,9 +147,9 @@ target_ulong riscv_load_kernel(const char *kernel_filename, symbol_fn_t sym_cb)
return kernel_entry;
}
- if (load_image_targphys_as(kernel_filename, KERNEL_BOOT_ADDRESS,
+ if (load_image_targphys_as(kernel_filename, kernel_start_addr,
ram_size, NULL) > 0) {
- return KERNEL_BOOT_ADDRESS;
+ return kernel_start_addr;
}
error_report("could not load kernel '%s'", kernel_filename);
diff --git a/hw/riscv/opentitan.c b/hw/riscv/opentitan.c
index 0531bd879b..cc758b78b8 100644
--- a/hw/riscv/opentitan.c
+++ b/hw/riscv/opentitan.c
@@ -75,7 +75,8 @@ static void opentitan_board_init(MachineState *machine)
}
if (machine->kernel_filename) {
- riscv_load_kernel(machine->kernel_filename, NULL);
+ riscv_load_kernel(machine->kernel_filename,
+ memmap[IBEX_DEV_RAM].base, NULL);
}
}
diff --git a/hw/riscv/sifive_e.c b/hw/riscv/sifive_e.c
index fcfac16816..59bac4cc9a 100644
--- a/hw/riscv/sifive_e.c
+++ b/hw/riscv/sifive_e.c
@@ -114,7 +114,8 @@ static void sifive_e_machine_init(MachineState *machine)
memmap[SIFIVE_E_DEV_MROM].base, &address_space_memory);
if (machine->kernel_filename) {
- riscv_load_kernel(machine->kernel_filename, NULL);
+ riscv_load_kernel(machine->kernel_filename,
+ memmap[SIFIVE_E_DEV_DTIM].base, NULL);
}
}
diff --git a/hw/riscv/sifive_u.c b/hw/riscv/sifive_u.c
index 6ad975d692..b2472c6627 100644
--- a/hw/riscv/sifive_u.c
+++ b/hw/riscv/sifive_u.c
@@ -415,6 +415,7 @@ static void sifive_u_machine_init(MachineState *machine)
MemoryRegion *main_mem = g_new(MemoryRegion, 1);
MemoryRegion *flash0 = g_new(MemoryRegion, 1);
target_ulong start_addr = memmap[SIFIVE_U_DEV_DRAM].base;
+ target_ulong firmware_end_addr, kernel_start_addr;
uint32_t start_addr_hi32 = 0x00000000;
int i;
uint32_t fdt_load_addr;
@@ -424,6 +425,8 @@ static void sifive_u_machine_init(MachineState *machine)
object_initialize_child(OBJECT(machine), "soc", &s->soc, TYPE_RISCV_U_SOC);
object_property_set_uint(OBJECT(&s->soc), "serial", s->serial,
&error_abort);
+ object_property_set_str(OBJECT(&s->soc), "cpu-type", machine->cpu_type,
+ &error_abort);
qdev_realize(DEVICE(&s->soc), NULL, &error_abort);
/* register RAM */
@@ -472,10 +475,15 @@ static void sifive_u_machine_init(MachineState *machine)
break;
}
- riscv_find_and_load_firmware(machine, BIOS_FILENAME, start_addr, NULL);
+ firmware_end_addr = riscv_find_and_load_firmware(machine, BIOS_FILENAME,
+ start_addr, NULL);
if (machine->kernel_filename) {
- kernel_entry = riscv_load_kernel(machine->kernel_filename, NULL);
+ kernel_start_addr = riscv_calc_kernel_start_addr(machine,
+ firmware_end_addr);
+
+ kernel_entry = riscv_load_kernel(machine->kernel_filename,
+ kernel_start_addr, NULL);
if (machine->initrd_filename) {
hwaddr start;
@@ -590,6 +598,11 @@ static void sifive_u_machine_class_init(ObjectClass *oc, void *data)
mc->init = sifive_u_machine_init;
mc->max_cpus = SIFIVE_U_MANAGEMENT_CPU_COUNT + SIFIVE_U_COMPUTE_CPU_COUNT;
mc->min_cpus = SIFIVE_U_MANAGEMENT_CPU_COUNT + 1;
+#if defined(TARGET_RISCV32)
+ mc->default_cpu_type = TYPE_RISCV_CPU_SIFIVE_U34;
+#elif defined(TARGET_RISCV64)
+ mc->default_cpu_type = TYPE_RISCV_CPU_SIFIVE_U54;
+#endif
mc->default_cpus = mc->min_cpus;
object_class_property_add_bool(oc, "start-in-flash",
@@ -618,7 +631,6 @@ type_init(sifive_u_machine_init_register_types)
static void sifive_u_soc_instance_init(Object *obj)
{
- MachineState *ms = MACHINE(qdev_get_machine());
SiFiveUSoCState *s = RISCV_U_SOC(obj);
object_initialize_child(obj, "e-cluster", &s->e_cluster, TYPE_CPU_CLUSTER);
@@ -636,10 +648,6 @@ static void sifive_u_soc_instance_init(Object *obj)
object_initialize_child(OBJECT(&s->u_cluster), "u-cpus", &s->u_cpus,
TYPE_RISCV_HART_ARRAY);
- qdev_prop_set_uint32(DEVICE(&s->u_cpus), "num-harts", ms->smp.cpus - 1);
- qdev_prop_set_uint32(DEVICE(&s->u_cpus), "hartid-base", 1);
- qdev_prop_set_string(DEVICE(&s->u_cpus), "cpu-type", SIFIVE_U_CPU);
- qdev_prop_set_uint64(DEVICE(&s->u_cpus), "resetvec", 0x1004);
object_initialize_child(obj, "prci", &s->prci, TYPE_SIFIVE_U_PRCI);
object_initialize_child(obj, "otp", &s->otp, TYPE_SIFIVE_U_OTP);
@@ -661,6 +669,11 @@ static void sifive_u_soc_realize(DeviceState *dev, Error **errp)
int i;
NICInfo *nd = &nd_table[0];
+ qdev_prop_set_uint32(DEVICE(&s->u_cpus), "num-harts", ms->smp.cpus - 1);
+ qdev_prop_set_uint32(DEVICE(&s->u_cpus), "hartid-base", 1);
+ qdev_prop_set_string(DEVICE(&s->u_cpus), "cpu-type", s->cpu_type);
+ qdev_prop_set_uint64(DEVICE(&s->u_cpus), "resetvec", 0x1004);
+
sysbus_realize(SYS_BUS_DEVICE(&s->e_cpus), &error_abort);
sysbus_realize(SYS_BUS_DEVICE(&s->u_cpus), &error_abort);
/*
@@ -792,6 +805,7 @@ static void sifive_u_soc_realize(DeviceState *dev, Error **errp)
static Property sifive_u_soc_props[] = {
DEFINE_PROP_UINT32("serial", SiFiveUSoCState, serial, OTP_SERIAL),
+ DEFINE_PROP_STRING("cpu-type", SiFiveUSoCState, cpu_type),
DEFINE_PROP_END_OF_LIST()
};
diff --git a/hw/riscv/spike.c b/hw/riscv/spike.c
index 3fd152a035..facac6e7d2 100644
--- a/hw/riscv/spike.c
+++ b/hw/riscv/spike.c
@@ -195,6 +195,7 @@ static void spike_board_init(MachineState *machine)
MemoryRegion *system_memory = get_system_memory();
MemoryRegion *main_mem = g_new(MemoryRegion, 1);
MemoryRegion *mask_rom = g_new(MemoryRegion, 1);
+ target_ulong firmware_end_addr, kernel_start_addr;
uint32_t fdt_load_addr;
uint64_t kernel_entry;
char *soc_name;
@@ -261,12 +262,16 @@ static void spike_board_init(MachineState *machine)
memory_region_add_subregion(system_memory, memmap[SPIKE_MROM].base,
mask_rom);
- riscv_find_and_load_firmware(machine, BIOS_FILENAME,
- memmap[SPIKE_DRAM].base,
- htif_symbol_callback);
+ firmware_end_addr = riscv_find_and_load_firmware(machine, BIOS_FILENAME,
+ memmap[SPIKE_DRAM].base,
+ htif_symbol_callback);
if (machine->kernel_filename) {
+ kernel_start_addr = riscv_calc_kernel_start_addr(machine,
+ firmware_end_addr);
+
kernel_entry = riscv_load_kernel(machine->kernel_filename,
+ kernel_start_addr,
htif_symbol_callback);
if (machine->initrd_filename) {
diff --git a/hw/riscv/virt.c b/hw/riscv/virt.c
index 41bd2f38ba..6bfd10dfc7 100644
--- a/hw/riscv/virt.c
+++ b/hw/riscv/virt.c
@@ -493,6 +493,7 @@ static void virt_machine_init(MachineState *machine)
char *plic_hart_config, *soc_name;
size_t plic_hart_config_len;
target_ulong start_addr = memmap[VIRT_DRAM].base;
+ target_ulong firmware_end_addr, kernel_start_addr;
uint32_t fdt_load_addr;
uint64_t kernel_entry;
DeviceState *mmio_plic, *virtio_plic, *pcie_plic;
@@ -602,11 +603,15 @@ static void virt_machine_init(MachineState *machine)
memory_region_add_subregion(system_memory, memmap[VIRT_MROM].base,
mask_rom);
- riscv_find_and_load_firmware(machine, BIOS_FILENAME,
- memmap[VIRT_DRAM].base, NULL);
+ firmware_end_addr = riscv_find_and_load_firmware(machine, BIOS_FILENAME,
+ start_addr, NULL);
if (machine->kernel_filename) {
- kernel_entry = riscv_load_kernel(machine->kernel_filename, NULL);
+ kernel_start_addr = riscv_calc_kernel_start_addr(machine,
+ firmware_end_addr);
+
+ kernel_entry = riscv_load_kernel(machine->kernel_filename,
+ kernel_start_addr, NULL);
if (machine->initrd_filename) {
hwaddr start;
diff --git a/include/exec/memory.h b/include/exec/memory.h
index 622207bde1..aff6ef7605 100644
--- a/include/exec/memory.h
+++ b/include/exec/memory.h
@@ -42,6 +42,21 @@ typedef struct IOMMUMemoryRegionClass IOMMUMemoryRegionClass;
DECLARE_OBJ_CHECKERS(IOMMUMemoryRegion, IOMMUMemoryRegionClass,
IOMMU_MEMORY_REGION, TYPE_IOMMU_MEMORY_REGION)
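+/*
+ * Hook called when device emulation reads guest memory via DMA. Only the
+ * fuzzer build (CONFIG_FUZZ) provides a real implementation; otherwise it
+ * is a no-op.
+ */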
+#ifdef CONFIG_FUZZ
+void fuzz_dma_read_cb(size_t addr,
+ size_t len,
+ MemoryRegion *mr,
+ bool is_write);
+#else
+static inline void fuzz_dma_read_cb(size_t addr,
+ size_t len,
+ MemoryRegion *mr,
+ bool is_write)
+{
+ /* Do Nothing */
+}
+#endif
+
extern bool global_dirty_log;
typedef struct MemoryRegionOps MemoryRegionOps;
@@ -719,6 +734,11 @@ static inline FlatView *address_space_to_flatview(AddressSpace *as)
return qatomic_rcu_read(&as->current_map);
}
+typedef int (*flatview_cb)(Int128 start,
+ Int128 len,
+ const MemoryRegion*, void*);
+
+void flatview_for_each_range(FlatView *fv, flatview_cb cb , void *opaque);
/**
* struct MemoryRegionSection: describes a fragment of a #MemoryRegion
@@ -2442,6 +2462,7 @@ address_space_read_cached(MemoryRegionCache *cache, hwaddr addr,
void *buf, hwaddr len)
{
assert(addr < cache->len && len <= cache->len - addr);
+ fuzz_dma_read_cb(cache->xlat + addr, len, cache->mrs.mr, false);
if (likely(cache->ptr)) {
memcpy(buf, cache->ptr + addr, len);
return MEMTX_OK;
diff --git a/include/exec/memory_ldst_cached.h.inc b/include/exec/memory_ldst_cached.h.inc
index fd4bbb40e7..aff574039f 100644
--- a/include/exec/memory_ldst_cached.h.inc
+++ b/include/exec/memory_ldst_cached.h.inc
@@ -28,6 +28,7 @@ static inline uint32_t ADDRESS_SPACE_LD_CACHED(l)(MemoryRegionCache *cache,
hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
assert(addr < cache->len && 4 <= cache->len - addr);
+ fuzz_dma_read_cb(cache->xlat + addr, 4, cache->mrs.mr, false);
if (likely(cache->ptr)) {
return LD_P(l)(cache->ptr + addr);
} else {
@@ -39,6 +40,7 @@ static inline uint64_t ADDRESS_SPACE_LD_CACHED(q)(MemoryRegionCache *cache,
hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
assert(addr < cache->len && 8 <= cache->len - addr);
+ fuzz_dma_read_cb(cache->xlat + addr, 8, cache->mrs.mr, false);
if (likely(cache->ptr)) {
return LD_P(q)(cache->ptr + addr);
} else {
@@ -50,6 +52,7 @@ static inline uint32_t ADDRESS_SPACE_LD_CACHED(uw)(MemoryRegionCache *cache,
hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
assert(addr < cache->len && 2 <= cache->len - addr);
+ fuzz_dma_read_cb(cache->xlat + addr, 2, cache->mrs.mr, false);
if (likely(cache->ptr)) {
return LD_P(uw)(cache->ptr + addr);
} else {
diff --git a/hw/intc/sifive_plic.h b/include/hw/intc/sifive_plic.h
index b75b1f145d..b75b1f145d 100644
--- a/hw/intc/sifive_plic.h
+++ b/include/hw/intc/sifive_plic.h
diff --git a/include/hw/misc/sifive_u_otp.h b/include/hw/misc/sifive_u_otp.h
index 82c9176c8f..5d0d7df455 100644
--- a/include/hw/misc/sifive_u_otp.h
+++ b/include/hw/misc/sifive_u_otp.h
@@ -36,6 +36,8 @@
#define SIFIVE_U_OTP_PTRIM 0x34
#define SIFIVE_U_OTP_PWE 0x38
+#define SIFIVE_U_OTP_PWE_EN (1 << 0)
+
#define SIFIVE_U_OTP_PCE_EN (1 << 0)
#define SIFIVE_U_OTP_PDSTB_EN (1 << 0)
@@ -44,6 +46,7 @@
#define SIFIVE_U_OTP_PA_MASK 0xfff
#define SIFIVE_U_OTP_NUM_FUSES 0x1000
+#define SIFIVE_U_OTP_FUSE_WORD 4
#define SIFIVE_U_OTP_SERIAL_ADDR 0xfc
#define SIFIVE_U_OTP_REG_SIZE 0x1000
@@ -75,8 +78,10 @@ struct SiFiveUOTPState {
uint32_t ptrim;
uint32_t pwe;
uint32_t fuse[SIFIVE_U_OTP_NUM_FUSES];
+ uint32_t fuse_wo[SIFIVE_U_OTP_NUM_FUSES];
/* config */
uint32_t serial;
+ BlockBackend *blk;
};
#endif /* HW_SIFIVE_U_OTP_H */
diff --git a/include/hw/riscv/boot.h b/include/hw/riscv/boot.h
index 451338780a..0b01988727 100644
--- a/include/hw/riscv/boot.h
+++ b/include/hw/riscv/boot.h
@@ -23,15 +23,20 @@
#include "exec/cpu-defs.h"
#include "hw/loader.h"
-void riscv_find_and_load_firmware(MachineState *machine,
- const char *default_machine_firmware,
- hwaddr firmware_load_addr,
- symbol_fn_t sym_cb);
+bool riscv_is_32_bit(MachineState *machine);
+
+target_ulong riscv_calc_kernel_start_addr(MachineState *machine,
+ target_ulong firmware_end_addr);
+target_ulong riscv_find_and_load_firmware(MachineState *machine,
+ const char *default_machine_firmware,
+ hwaddr firmware_load_addr,
+ symbol_fn_t sym_cb);
char *riscv_find_firmware(const char *firmware_filename);
target_ulong riscv_load_firmware(const char *firmware_filename,
hwaddr firmware_load_addr,
symbol_fn_t sym_cb);
target_ulong riscv_load_kernel(const char *kernel_filename,
+ target_ulong firmware_end_addr,
symbol_fn_t sym_cb);
hwaddr riscv_load_initrd(const char *filename, uint64_t mem_size,
uint64_t kernel_entry, hwaddr *start);
diff --git a/include/hw/riscv/sifive_u.h b/include/hw/riscv/sifive_u.h
index 22e7e6efa1..a9f7b4a084 100644
--- a/include/hw/riscv/sifive_u.h
+++ b/include/hw/riscv/sifive_u.h
@@ -48,6 +48,7 @@ typedef struct SiFiveUSoCState {
CadenceGEMState gem;
uint32_t serial;
+ char *cpu_type;
} SiFiveUSoCState;
#define TYPE_RISCV_U_MACHINE MACHINE_TYPE_NAME("sifive_u")
diff --git a/include/qemu/vhost-user-server.h b/include/qemu/vhost-user-server.h
new file mode 100644
index 0000000000..0da4c2cc4c
--- /dev/null
+++ b/include/qemu/vhost-user-server.h
@@ -0,0 +1,65 @@
+/*
+ * Sharing QEMU devices via vhost-user protocol
+ *
+ * Copyright (c) Coiby Xu <coiby.xu@gmail.com>.
+ * Copyright (c) 2020 Red Hat, Inc.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * later. See the COPYING file in the top-level directory.
+ */
+
+#ifndef VHOST_USER_SERVER_H
+#define VHOST_USER_SERVER_H
+
+#include "contrib/libvhost-user/libvhost-user.h"
+#include "io/channel-socket.h"
+#include "io/channel-file.h"
+#include "io/net-listener.h"
+#include "qemu/error-report.h"
+#include "qapi/error.h"
+#include "standard-headers/linux/virtio_blk.h"
+
+/* A kick fd that we monitor on behalf of libvhost-user */
+typedef struct VuFdWatch {
+ VuDev *vu_dev;
+ int fd; /*kick fd*/
+ void *pvt;
+ vu_watch_cb cb;
+ QTAILQ_ENTRY(VuFdWatch) next;
+} VuFdWatch;
+
+/**
+ * VuServer:
+ * A vhost-user server instance with user-defined VuDevIface callbacks.
+ * Vhost-user device backends can be implemented using VuServer. VuDevIface
+ * callbacks and virtqueue kicks run in the given AioContext.
+ */
+typedef struct {
+ QIONetListener *listener;
+ QEMUBH *restart_listener_bh;
+ AioContext *ctx;
+ int max_queues;
+ const VuDevIface *vu_iface;
+
+ /* Protected by ctx lock */
+ VuDev vu_dev;
+ QIOChannel *ioc; /* The I/O channel with the client */
+ QIOChannelSocket *sioc; /* The underlying data channel with the client */
+ QTAILQ_HEAD(, VuFdWatch) vu_fd_watches;
+
+ Coroutine *co_trip; /* coroutine for processing VhostUserMsg */
+} VuServer;
+
+bool vhost_user_server_start(VuServer *server,
+ SocketAddress *unix_socket,
+ AioContext *ctx,
+ uint16_t max_queues,
+ const VuDevIface *vu_iface,
+ Error **errp);
+
+void vhost_user_server_stop(VuServer *server);
+
+void vhost_user_server_attach_aio_context(VuServer *server, AioContext *ctx);
+void vhost_user_server_detach_aio_context(VuServer *server);
+
+#endif /* VHOST_USER_SERVER_H */
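For orientation only, here is a hedged sketch (not part of the patch) of how a backend might start a server with this API. The device interface "my_vu_iface", the socket path and the single request queue are placeholder assumptions; a real backend fills in the VuDevIface callbacks and handles cleanup, as the vhost-user-blk export added by this series does.

    /* Hypothetical usage sketch; assumes the callbacks are defined elsewhere. */
    extern const VuDevIface my_vu_iface;   /* placeholder device callbacks */

    static void start_example_server(VuServer *server, Error **errp)
    {
        SocketAddress addr = {
            .type = SOCKET_ADDRESS_TYPE_UNIX,
            .u.q_unix.path = (char *)"/tmp/vub.sock",   /* placeholder path */
        };

        /* Run VuDevIface callbacks and virtqueue kicks in the main context. */
        if (!vhost_user_server_start(server, &addr, qemu_get_aio_context(),
                                     1 /* max_queues */, &my_vu_iface, errp)) {
            return;
        }
        /* ... later, vhost_user_server_stop(server) tears the server down. */
    }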
diff --git a/include/sysemu/cpus.h b/include/sysemu/cpus.h
index 231685955d..e8156728c6 100644
--- a/include/sysemu/cpus.h
+++ b/include/sysemu/cpus.h
@@ -25,6 +25,9 @@ typedef struct CpusAccel {
/* register accel-specific cpus interface implementation */
void cpus_register_accel(const CpusAccel *i);
+/* Create a dummy vcpu for CpusAccel->create_vcpu_thread */
+void dummy_start_vcpu_thread(CPUState *);
+
/* interface available for cpus accelerator threads */
/* For temporary buffers for forming a name */
diff --git a/memory_ldst.c.inc b/memory_ldst.c.inc
index c54aee4a95..8d45d2eeff 100644
--- a/memory_ldst.c.inc
+++ b/memory_ldst.c.inc
@@ -42,6 +42,7 @@ static inline uint32_t glue(address_space_ldl_internal, SUFFIX)(ARG1_DECL,
MO_32 | devend_memop(endian), attrs);
} else {
/* RAM case */
+ fuzz_dma_read_cb(addr, 4, mr, false);
ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
switch (endian) {
case DEVICE_LITTLE_ENDIAN:
@@ -110,6 +111,7 @@ static inline uint64_t glue(address_space_ldq_internal, SUFFIX)(ARG1_DECL,
MO_64 | devend_memop(endian), attrs);
} else {
/* RAM case */
+ fuzz_dma_read_cb(addr, 8, mr, false);
ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
switch (endian) {
case DEVICE_LITTLE_ENDIAN:
@@ -175,6 +177,7 @@ uint32_t glue(address_space_ldub, SUFFIX)(ARG1_DECL,
r = memory_region_dispatch_read(mr, addr1, &val, MO_8, attrs);
} else {
/* RAM case */
+ fuzz_dma_read_cb(addr, 1, mr, false);
ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
val = ldub_p(ptr);
r = MEMTX_OK;
@@ -212,6 +215,7 @@ static inline uint32_t glue(address_space_lduw_internal, SUFFIX)(ARG1_DECL,
MO_16 | devend_memop(endian), attrs);
} else {
/* RAM case */
+ fuzz_dma_read_cb(addr, 2, mr, false);
ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
switch (endian) {
case DEVICE_LITTLE_ENDIAN:
diff --git a/meson.build b/meson.build
index 7627a0ae46..b349c9bda8 100644
--- a/meson.build
+++ b/meson.build
@@ -1398,6 +1398,11 @@ trace_events_subdirs += [
'util',
]
+vhost_user = not_found
+if 'CONFIG_VHOST_USER' in config_host
+ subdir('contrib/libvhost-user')
+endif
+
subdir('qapi')
subdir('qobject')
subdir('stubs')
@@ -1438,7 +1443,6 @@ subdir('dump')
block_ss.add(files(
'block.c',
- 'blockdev-nbd.c',
'blockjob.c',
'job.c',
'qemu-io-cmds.c',
@@ -1451,6 +1455,7 @@ subdir('block')
blockdev_ss.add(files(
'blockdev.c',
+ 'blockdev-nbd.c',
'iothread.c',
'job-qmp.c',
))
@@ -1459,7 +1464,6 @@ blockdev_ss.add(files(
# os-win32.c does not
blockdev_ss.add(when: 'CONFIG_POSIX', if_true: files('os-posix.c'))
softmmu_ss.add(when: 'CONFIG_WIN32', if_true: [files('os-win32.c')])
-softmmu_ss.add_all(blockdev_ss)
common_ss.add(files('cpus-common.c'))
@@ -1591,6 +1595,15 @@ block = declare_dependency(link_whole: [libblock],
link_args: '@block.syms',
dependencies: [crypto, io])
+blockdev_ss = blockdev_ss.apply(config_host, strict: false)
+libblockdev = static_library('blockdev', blockdev_ss.sources() + genh,
+ dependencies: blockdev_ss.dependencies(),
+ name_suffix: 'fa',
+ build_by_default: false)
+
+blockdev = declare_dependency(link_whole: [libblockdev],
+ dependencies: [block])
+
qmp_ss = qmp_ss.apply(config_host, strict: false)
libqmp = static_library('qmp', qmp_ss.sources() + genh,
dependencies: qmp_ss.dependencies(),
@@ -1623,7 +1636,7 @@ foreach m : block_mods + softmmu_mods
install_dir: config_host['qemu_moddir'])
endforeach
-softmmu_ss.add(authz, block, chardev, crypto, io, qmp)
+softmmu_ss.add(authz, blockdev, chardev, crypto, io, qmp)
common_ss.add(qom, qemuutil)
common_ss.add_all(when: 'CONFIG_SOFTMMU', if_true: [softmmu_ss])
@@ -1819,7 +1832,7 @@ if have_tools
qemu_io = executable('qemu-io', files('qemu-io.c'),
dependencies: [block, qemuutil], install: true)
qemu_nbd = executable('qemu-nbd', files('qemu-nbd.c'),
- dependencies: [block, qemuutil], install: true)
+ dependencies: [blockdev, qemuutil], install: true)
subdir('storage-daemon')
subdir('contrib/rdmacm-mux')
@@ -1830,7 +1843,6 @@ if have_tools
install: true)
if 'CONFIG_VHOST_USER' in config_host
- subdir('contrib/libvhost-user')
subdir('contrib/vhost-user-blk')
subdir('contrib/vhost-user-gpu')
subdir('contrib/vhost-user-input')
diff --git a/nbd/meson.build b/nbd/meson.build
index 0c00a776d3..2baaa36948 100644
--- a/nbd/meson.build
+++ b/nbd/meson.build
@@ -1,5 +1,7 @@
block_ss.add(files(
'client.c',
'common.c',
+))
+blockdev_ss.add(files(
'server.c',
))
diff --git a/nbd/server.c b/nbd/server.c
index e75c825879..08b621f70a 100644
--- a/nbd/server.c
+++ b/nbd/server.c
@@ -1517,8 +1517,6 @@ static int nbd_export_create(BlockExport *blk_exp, BlockExportOptions *exp_args,
return ret;
}
- blk_set_allow_aio_context_change(blk, true);
-
QTAILQ_INIT(&exp->clients);
exp->name = g_strdup(arg->name);
exp->description = g_strdup(arg->description);
diff --git a/qapi/block-core.json b/qapi/block-core.json
index ee5ebef7f2..e00fc27b5e 100644
--- a/qapi/block-core.json
+++ b/qapi/block-core.json
@@ -948,6 +948,27 @@
'discard-bytes-ok': 'uint64' } }
##
+# @BlockStatsSpecificNvme:
+#
+# NVMe driver statistics
+#
+# @completion-errors: The number of completion errors.
+#
+# @aligned-accesses: The number of aligned accesses performed by
+# the driver.
+#
+# @unaligned-accesses: The number of unaligned accesses performed by
+# the driver.
+#
+# Since: 5.2
+##
+{ 'struct': 'BlockStatsSpecificNvme',
+ 'data': {
+ 'completion-errors': 'uint64',
+ 'aligned-accesses': 'uint64',
+ 'unaligned-accesses': 'uint64' } }
+
+##
# @BlockStatsSpecific:
#
# Block driver specific statistics
@@ -959,7 +980,8 @@
'discriminator': 'driver',
'data': {
'file': 'BlockStatsSpecificFile',
- 'host_device': 'BlockStatsSpecificFile' } }
+ 'host_device': 'BlockStatsSpecificFile',
+ 'nvme': 'BlockStatsSpecificNvme' } }
##
# @BlockStats:
diff --git a/qapi/block-export.json b/qapi/block-export.json
index 65804834d9..480c497690 100644
--- a/qapi/block-export.json
+++ b/qapi/block-export.json
@@ -85,6 +85,25 @@
'*bitmap': 'str' } }
##
+# @BlockExportOptionsVhostUserBlk:
+#
+# A vhost-user-blk block export.
+#
+# @addr: The vhost-user socket on which to listen. Both 'unix' and 'fd'
+# SocketAddress types are supported. Passed fds must be UNIX domain
+# sockets.
+# @logical-block-size: Logical block size in bytes. Defaults to 512 bytes.
+# @num-queues: Number of request virtqueues. Must be greater than 0. Defaults
+# to 1.
+#
+# Since: 5.2
+##
+{ 'struct': 'BlockExportOptionsVhostUserBlk',
+ 'data': { 'addr': 'SocketAddress',
+ '*logical-block-size': 'size',
+ '*num-queues': 'uint16'} }
+
+##
# @NbdServerAddOptions:
#
# An NBD block export.
@@ -180,11 +199,12 @@
# An enumeration of block export types
#
# @nbd: NBD export
+# @vhost-user-blk: vhost-user-blk export (since 5.2)
#
# Since: 4.2
##
{ 'enum': 'BlockExportType',
- 'data': [ 'nbd' ] }
+ 'data': [ 'nbd', 'vhost-user-blk' ] }
##
# @BlockExportOptions:
@@ -203,17 +223,29 @@
# export before completion is signalled. (since: 5.2;
# default: false)
#
+# @iothread: The name of the iothread object where the export will run. The
+# default is to use the thread currently associated with the
+# block node. (since: 5.2)
+#
+# @fixed-iothread: True prevents the block node from being moved to another
+# thread while the export is active. If true and @iothread is
+# given, export creation fails if the block node cannot be
+# moved to the iothread. The default is false. (since: 5.2)
+#
# Since: 4.2
##
{ 'union': 'BlockExportOptions',
'base': { 'type': 'BlockExportType',
'id': 'str',
+ '*fixed-iothread': 'bool',
+ '*iothread': 'str',
'node-name': 'str',
'*writable': 'bool',
'*writethrough': 'bool' },
'discriminator': 'type',
'data': {
- 'nbd': 'BlockExportOptionsNbd'
+ 'nbd': 'BlockExportOptionsNbd',
+ 'vhost-user-blk': 'BlockExportOptionsVhostUserBlk'
} }
##
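As a usage illustration for the schema above (a hedged sketch, not part of the patch): with a block node already set up, the new export type can be requested through the block-export-add command that consumes BlockExportOptions. The id, node name and socket path below are placeholders.

    {"execute": "block-export-add",
     "arguments": {"type": "vhost-user-blk", "id": "vub0", "node-name": "disk0",
                   "writable": true, "num-queues": 1,
                   "addr": {"type": "unix", "path": "/tmp/vub.sock"}}}

A vhost-user-blk client can then connect to the listed UNIX socket.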
diff --git a/qemu-nbd.c b/qemu-nbd.c
index bc644a0670..a0701cdf36 100644
--- a/qemu-nbd.c
+++ b/qemu-nbd.c
@@ -25,6 +25,7 @@
#include "qapi/error.h"
#include "qemu/cutils.h"
#include "sysemu/block-backend.h"
+#include "sysemu/runstate.h" /* for qemu_system_killed() prototype */
#include "block/block_int.h"
#include "block/nbd.h"
#include "qemu/main-loop.h"
@@ -155,7 +156,11 @@ QEMU_COPYRIGHT "\n"
}
#ifdef CONFIG_POSIX
-static void termsig_handler(int signum)
+/*
+ * The client thread uses SIGTERM to interrupt the server. A signal
+ * handler ensures that "qemu-nbd -v -c" exits with a nice status code.
+ */
+void qemu_system_killed(int signum, pid_t pid)
{
qatomic_cmpxchg(&state, RUNNING, TERMINATE);
qemu_notify_event();
@@ -582,18 +587,8 @@ int main(int argc, char **argv)
BlockExportOptions *export_opts;
#ifdef CONFIG_POSIX
- /*
- * Exit gracefully on various signals, which includes SIGTERM used
- * by 'qemu-nbd -v -c'.
- */
- struct sigaction sa_sigterm;
- memset(&sa_sigterm, 0, sizeof(sa_sigterm));
- sa_sigterm.sa_handler = termsig_handler;
- sigaction(SIGTERM, &sa_sigterm, NULL);
- sigaction(SIGINT, &sa_sigterm, NULL);
- sigaction(SIGHUP, &sa_sigterm, NULL);
-
- signal(SIGPIPE, SIG_IGN);
+ os_setup_early_signal_handling();
+ os_setup_signal_handling();
#endif
socket_init();
diff --git a/scripts/oss-fuzz/build.sh b/scripts/oss-fuzz/build.sh
index 0c3ca9e06f..fcae4a0c26 100755
--- a/scripts/oss-fuzz/build.sh
+++ b/scripts/oss-fuzz/build.sh
@@ -62,6 +62,9 @@ fi
mkdir -p "$DEST_DIR/lib/" # Copy the shared libraries here
+mkdir -p "$DEST_DIR/bin/" # Copy executables that shouldn't
+ # be treated as fuzzers by oss-fuzz here
+
# Build once to get the list of dynamic lib paths, and copy them over
../configure --disable-werror --cc="$CC" --cxx="$CXX" --enable-fuzzing \
--prefix="$DEST_DIR" --bindir="$DEST_DIR" --datadir="$DEST_DIR/data/" \
@@ -88,13 +91,22 @@ make "-j$(nproc)" qemu-fuzz-i386 V=1
# Copy over the datadir
cp -r ../pc-bios/ "$DEST_DIR/pc-bios"
+cp "./qemu-fuzz-i386" "$DEST_DIR/bin/"
+
# Run the fuzzer with no arguments, to print the help-string and get the list
# of available fuzz-targets. Copy over the qemu-fuzz-i386, naming it according
# to each available fuzz target (See 05509c8e6d fuzz: select fuzz target using
# executable name)
for target in $(./qemu-fuzz-i386 | awk '$1 ~ /\*/ {print $2}');
do
- cp qemu-fuzz-i386 "$DEST_DIR/qemu-fuzz-i386-target-$target"
+ # Ignore the generic-fuzz target, as it requires some environment variables
+ # to be configured. We have some generic-fuzz-{pc-q35, floppy, ...} targets
+ # that are thin wrappers around this target that set the required
+ # environment variables according to predefined configs.
+ if [ "$target" != "generic-fuzz" ]; then
+ ln "$DEST_DIR/bin/qemu-fuzz-i386" \
+ "$DEST_DIR/qemu-fuzz-i386-target-$target"
+ fi
done
echo "Done. The fuzzers are located in $DEST_DIR"
diff --git a/scripts/oss-fuzz/minimize_qtest_trace.py b/scripts/oss-fuzz/minimize_qtest_trace.py
new file mode 100755
index 0000000000..5e405a0d5f
--- /dev/null
+++ b/scripts/oss-fuzz/minimize_qtest_trace.py
@@ -0,0 +1,157 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+"""
+This takes a crashing qtest trace and tries to remove superfluous operations
+"""
+
+import sys
+import os
+import subprocess
+import time
+import struct
+
+QEMU_ARGS = None
+QEMU_PATH = None
+TIMEOUT = 5
+CRASH_TOKEN = None
+
+write_suffix_lookup = {"b": (1, "B"),
+ "w": (2, "H"),
+ "l": (4, "L"),
+ "q": (8, "Q")}
+
+def usage():
+ sys.exit("""\
+Usage: QEMU_PATH="/path/to/qemu" QEMU_ARGS="args" {} input_trace output_trace
+By default, the script uses the second-to-last line of the output to identify
+whether the crash occurred. Optionally, set a string that identifies the crash
+manually by setting CRASH_TOKEN=
+""".format((sys.argv[0])))
+
+def check_if_trace_crashes(trace, path):
+ global CRASH_TOKEN
+ with open(path, "w") as tracefile:
+ tracefile.write("".join(trace))
+
+ rc = subprocess.Popen("timeout -s 9 {timeout}s {qemu_path} {qemu_args} 2>&1\
+ < {trace_path}".format(timeout=TIMEOUT,
+ qemu_path=QEMU_PATH,
+ qemu_args=QEMU_ARGS,
+ trace_path=path),
+ shell=True,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE)
+ stdo = rc.communicate()[0]
+ output = stdo.decode('unicode_escape')
+ if rc.returncode == 137: # Timed Out
+ return False
+ if len(output.splitlines()) < 2:
+ return False
+
+ if CRASH_TOKEN is None:
+ CRASH_TOKEN = output.splitlines()[-2]
+
+ return CRASH_TOKEN in output
+
+
+def minimize_trace(inpath, outpath):
+ global TIMEOUT
+ with open(inpath) as f:
+ trace = f.readlines()
+ start = time.time()
+ if not check_if_trace_crashes(trace, outpath):
+ sys.exit("The input qtest trace didn't cause a crash...")
+ end = time.time()
+ print("Crashed in {} seconds".format(end-start))
+ TIMEOUT = (end-start)*5
+ print("Setting the timeout for {} seconds".format(TIMEOUT))
+ print("Identifying Crashes by this string: {}".format(CRASH_TOKEN))
+
+ i = 0
+ newtrace = trace[:]
+ # For each line
+ while i < len(newtrace):
+ # 1.) Try to remove it completely and reproduce the crash. If it works,
+ # we're done.
+ prior = newtrace[i]
+ print("Trying to remove {}".format(newtrace[i]))
+ # Try to remove the line completely
+ newtrace[i] = ""
+ if check_if_trace_crashes(newtrace, outpath):
+ i += 1
+ continue
+ newtrace[i] = prior
+
+ # 2.) Try to replace write{bwlq} commands with a write addr, len
+ # command. Since this can require swapping endianness, try both LE and
+ # BE options. We do this so we can "trim" the writes in (3).
+ if (newtrace[i].startswith("write") and not
+ newtrace[i].startswith("write ")):
+ suffix = newtrace[i].split()[0][-1]
+ assert(suffix in write_suffix_lookup)
+ addr = int(newtrace[i].split()[1], 16)
+ value = int(newtrace[i].split()[2], 16)
+ for endianness in ['<', '>']:
+ data = struct.pack("{end}{size}".format(end=endianness,
+ size=write_suffix_lookup[suffix][1]),
+ value)
+ newtrace[i] = "write {addr} {size} 0x{data}\n".format(
+ addr=hex(addr),
+ size=hex(write_suffix_lookup[suffix][0]),
+ data=data.hex())
+ if(check_if_trace_crashes(newtrace, outpath)):
+ break
+ else:
+ newtrace[i] = prior
+
+ # 3.) If it is a qtest write command: write addr len data, try to split
+ # it into two separate write commands. If splitting the write down the
+ # middle does not work, try to move the pivot "left" and retry, until
+ # there is no space left. The idea is to prune unnecessary bytes from
+ # long writes, while accommodating arbitrary MemoryRegion access sizes
+ # and alignments.
+ if newtrace[i].startswith("write "):
+ addr = int(newtrace[i].split()[1], 16)
+ length = int(newtrace[i].split()[2], 16)
+ data = newtrace[i].split()[3][2:]
+ if length > 1:
+ leftlength = int(length/2)
+ rightlength = length - leftlength
+ newtrace.insert(i+1, "")
+ while leftlength > 0:
+ newtrace[i] = "write {addr} {size} 0x{data}\n".format(
+ addr=hex(addr),
+ size=hex(leftlength),
+ data=data[:leftlength*2])
+ newtrace[i+1] = "write {addr} {size} 0x{data}\n".format(
+ addr=hex(addr+leftlength),
+ size=hex(rightlength),
+ data=data[leftlength*2:])
+ if check_if_trace_crashes(newtrace, outpath):
+ break
+ else:
+ leftlength -= 1
+ rightlength += 1
+ if check_if_trace_crashes(newtrace, outpath):
+ i -= 1
+ else:
+ newtrace[i] = prior
+ del newtrace[i+1]
+ i += 1
+ check_if_trace_crashes(newtrace, outpath)
+
+
+if __name__ == '__main__':
+ if len(sys.argv) < 3:
+ usage()
+
+ QEMU_PATH = os.getenv("QEMU_PATH")
+ QEMU_ARGS = os.getenv("QEMU_ARGS")
+ if QEMU_PATH is None or QEMU_ARGS is None:
+ usage()
+ # if "accel" not in QEMU_ARGS:
+ # QEMU_ARGS += " -accel qtest"
+ CRASH_TOKEN = os.getenv("CRASH_TOKEN")
+ QEMU_ARGS += " -qtest stdio -monitor none -serial none "
+ minimize_trace(sys.argv[1], sys.argv[2])
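As a usage illustration (not part of the patch), the minimizer might be invoked as below; the binary path, machine arguments and trace file names are placeholders. CRASH_TOKEN can also be exported to override the second-to-last-line heuristic.

    QEMU_PATH="./build/qemu-fuzz-i386" \
    QEMU_ARGS="-machine q35,accel=qtest -display none" \
    ./scripts/oss-fuzz/minimize_qtest_trace.py crash.qtest crash-min.qtest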
diff --git a/scripts/oss-fuzz/reorder_fuzzer_qtest_trace.py b/scripts/oss-fuzz/reorder_fuzzer_qtest_trace.py
new file mode 100755
index 0000000000..890e1def85
--- /dev/null
+++ b/scripts/oss-fuzz/reorder_fuzzer_qtest_trace.py
@@ -0,0 +1,103 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+"""
+Use this to convert qtest log info from a generic fuzzer input into a qtest
+trace that you can feed into a standard qemu-system process. Example usage:
+
+QEMU_FUZZ_ARGS="-machine q35,accel=qtest" QEMU_FUZZ_OBJECTS="*" \
+ ./i386-softmmu/qemu-fuzz-i386 --fuzz-target=generic-pci-fuzz
+# .. Finds some crash
+QTEST_LOG=1 FUZZ_SERIALIZE_QTEST=1 \
+QEMU_FUZZ_ARGS="-machine q35,accel=qtest" QEMU_FUZZ_OBJECTS="*" \
+ ./i386-softmmu/qemu-fuzz-i386 --fuzz-target=generic-pci-fuzz
+ /path/to/crash 2> qtest_log_output
+scripts/oss-fuzz/reorder_fuzzer_qtest_trace.py qtest_log_output > qtest_trace
+./i386-softmmu/qemu-fuzz-i386 -machine q35,accel=qtest \
+ -qtest stdio < qtest_trace
+
+### Details ###
+
+Some fuzzers make use of hooks that allow us to populate a memory range just
+before a DMA read from that range. This means that the fuzzer can produce
+activity that looks like:
+ [start] read from mmio addr
+ [end] read from mmio addr
+ [start] write to pio addr
+ [start] fill a DMA buffer just in time
+ [end] fill a DMA buffer just in time
+ [start] fill a DMA buffer just in time
+ [end] fill a DMA buffer just in time
+ [end] write to pio addr
+ [start] read from mmio addr
+ [end] read from mmio addr
+
+We annotate these "nested" DMA writes, so with QTEST_LOG=1 the QTest trace
+might look something like:
+[R +0.028431] readw 0x10000
+[R +0.028434] outl 0xc000 0xbeef # Triggers a DMA read from 0xbeef and 0xbf00
+[DMA][R +0.034639] write 0xbeef 0x2 0xAAAA
+[DMA][R +0.034639] write 0xbf00 0x2 0xBBBB
+[R +0.028431] readw 0xfc000
+
+This script would reorder the above trace so it becomes:
+readw 0x10000
+write 0xbeef 0x2 0xAAAA
+write 0xbf00 0x2 0xBBBB
+outl 0xc000 0xbeef
+readw 0xfc000
+
+I.e. by the time 0xc000 tries to read from DMA, those DMA buffers have already
+been set up, removing the need for the DMA hooks. We can simply provide this
+reordered trace via -qtest stdio to reproduce the input.
+
+Note: this won't work for traces where the device tries to read from the same
+DMA region twice in between MMIO/PIO commands. E.g.:
+ [R +0.028434] outl 0xc000 0xbeef
+ [DMA][R +0.034639] write 0xbeef 0x2 0xAAAA
+ [DMA][R +0.034639] write 0xbeef 0x2 0xBBBB
+
+The fuzzer will annotate suspected double-fetches with [DOUBLE-FETCH]. This
+script looks for these tags and warns the user that the resulting trace might
+not reproduce the bug.
+"""
+
+import sys
+
+__author__ = "Alexander Bulekov <alxndr@bu.edu>"
+__copyright__ = "Copyright (C) 2020, Red Hat, Inc."
+__license__ = "GPL version 2 or (at your option) any later version"
+
+__maintainer__ = "Alexander Bulekov"
+__email__ = "alxndr@bu.edu"
+
+
+def usage():
+ sys.exit("Usage: {} /path/to/qtest_log_output".format((sys.argv[0])))
+
+
+def main(filename):
+ with open(filename, "r") as f:
+ trace = f.readlines()
+
+ # Leave only lines that look like logged qtest commands
+ trace[:] = [x.strip() for x in trace if "[R +" in x
+ or "[S +" in x and "CLOSED" not in x]
+
+ for i in range(len(trace)):
+ if i+1 < len(trace):
+ if "[DMA]" in trace[i+1]:
+ if "[DOUBLE-FETCH]" in trace[i+1]:
+ sys.stderr.write("Warning: Likely double fetch on line"
+ "{}.\n There will likely be problems "
+ "reproducing behavior with the "
+ "resulting qtest trace\n\n".format(i+1))
+ trace[i], trace[i+1] = trace[i+1], trace[i]
+ for line in trace:
+ print(line.split("]")[-1].strip())
+
+
+if __name__ == '__main__':
+ if len(sys.argv) == 1:
+ usage()
+ main(sys.argv[1])
diff --git a/softmmu/memory.c b/softmmu/memory.c
index 403ff3abc9..ee4a6bc168 100644
--- a/softmmu/memory.c
+++ b/softmmu/memory.c
@@ -656,6 +656,19 @@ static void render_memory_region(FlatView *view,
}
}
+void flatview_for_each_range(FlatView *fv, flatview_cb cb, void *opaque)
+{
+ FlatRange *fr;
+
+ assert(fv);
+ assert(cb);
+
+ FOR_EACH_FLAT_RANGE(fr, fv) {
+ if (cb(fr->addr.start, fr->addr.size, fr->mr, opaque)) {
+ break;
+ }
+ }
+}
+
static MemoryRegion *memory_region_get_flatview_root(MemoryRegion *mr)
{
while (mr->enabled) {
@@ -1420,6 +1433,7 @@ MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
unsigned size = memop_size(op);
MemTxResult r;
+ fuzz_dma_read_cb(addr, size, mr, false);
if (!memory_region_access_valid(mr, addr, size, false, attrs)) {
*pval = unassigned_mem_read(mr, addr, size);
return MEMTX_DECODE_ERROR;
@@ -3233,6 +3247,19 @@ void memory_region_init_rom_device(MemoryRegion *mr,
vmstate_register_ram(mr, owner_dev);
}
+/*
+ * Support softmmu builds with CONFIG_FUZZ using a weak symbol and a stub for
+ * the fuzz_dma_read_cb callback
+ */
+#ifdef CONFIG_FUZZ
+void __attribute__((weak)) fuzz_dma_read_cb(size_t addr,
+ size_t len,
+ MemoryRegion *mr,
+ bool is_write)
+{
+}
+#endif
+
static const TypeInfo memory_region_info = {
.parent = TYPE_OBJECT,
.name = TYPE_MEMORY_REGION,
diff --git a/softmmu/physmem.c b/softmmu/physmem.c
index e319fb2a1e..a9adedb9f8 100644
--- a/softmmu/physmem.c
+++ b/softmmu/physmem.c
@@ -2832,6 +2832,7 @@ MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr,
stn_he_p(buf, l, val);
} else {
/* RAM case */
+ fuzz_dma_read_cb(addr, len, mr, false);
ram_ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l, false);
memcpy(buf, ram_ptr, l);
}
@@ -3192,6 +3193,7 @@ void *address_space_map(AddressSpace *as,
memory_region_ref(mr);
*plen = flatview_extend_translation(fv, addr, len, mr, xlat,
l, is_write, attrs);
+ fuzz_dma_read_cb(addr, *plen, mr, is_write);
ptr = qemu_ram_ptr_length(mr->ram_block, xlat, plen, true);
return ptr;
diff --git a/softmmu/vl.c b/softmmu/vl.c
index 14fc527fc6..e86d20334b 100644
--- a/softmmu/vl.c
+++ b/softmmu/vl.c
@@ -2540,6 +2540,10 @@ static bool object_create_initial(const char *type, QemuOpts *opts)
}
#endif
+ /* Reason: vhost-user-blk-server property "node-name" */
+ if (g_str_equal(type, "vhost-user-blk-server")) {
+ return false;
+ }
/*
* Reason: filter-* property "netdev" etc.
*/
diff --git a/storage-daemon/meson.build b/storage-daemon/meson.build
index 0409acc3f5..c5adce81c3 100644
--- a/storage-daemon/meson.build
+++ b/storage-daemon/meson.build
@@ -1,7 +1,6 @@
qsd_ss = ss.source_set()
qsd_ss.add(files('qemu-storage-daemon.c'))
-qsd_ss.add(block, chardev, qmp, qom, qemuutil)
-qsd_ss.add_all(blockdev_ss)
+qsd_ss.add(blockdev, chardev, qmp, qom, qemuutil)
subdir('qapi')
diff --git a/stubs/blk-exp-close-all.c b/stubs/blk-exp-close-all.c
new file mode 100644
index 0000000000..1c71316763
--- /dev/null
+++ b/stubs/blk-exp-close-all.c
@@ -0,0 +1,7 @@
+#include "qemu/osdep.h"
+#include "block/export.h"
+
+/* Only used in programs that support block exports (libblockdev.fa) */
+void blk_exp_close_all(void)
+{
+}
diff --git a/stubs/meson.build b/stubs/meson.build
index 67f2a8c069..7b733fadb7 100644
--- a/stubs/meson.build
+++ b/stubs/meson.build
@@ -1,6 +1,7 @@
stub_ss.add(files('arch_type.c'))
stub_ss.add(files('bdrv-next-monitor-owned.c'))
stub_ss.add(files('blk-commit-all.c'))
+stub_ss.add(files('blk-exp-close-all.c'))
stub_ss.add(files('blockdev-close-all-bdrv-states.c'))
stub_ss.add(files('change-state-handler.c'))
stub_ss.add(files('cmos.c'))
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
index de275782e6..de4705bb57 100644
--- a/target/riscv/cpu.h
+++ b/target/riscv/cpu.h
@@ -82,9 +82,13 @@ enum {
#define VEXT_VERSION_0_07_1 0x00000701
-#define TRANSLATE_PMP_FAIL 2
-#define TRANSLATE_FAIL 1
-#define TRANSLATE_SUCCESS 0
+enum {
+ TRANSLATE_SUCCESS,
+ TRANSLATE_FAIL,
+ TRANSLATE_PMP_FAIL,
+ TRANSLATE_G_STAGE_FAIL
+};
+
#define MMU_USER_IDX 3
#define MAX_RISCV_PMPS (16)
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
index 904899054d..4652082df1 100644
--- a/target/riscv/cpu_helper.c
+++ b/target/riscv/cpu_helper.c
@@ -316,6 +316,9 @@ void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv)
* @physical: This will be set to the calculated physical address
* @prot: The returned protection attributes
* @addr: The virtual address to be translated
+ * @fault_pte_addr: If not NULL, this will be set to fault pte address
+ * when an error occurs on pte address translation.
+ * This will already be shifted to match htval.
* @access_type: The type of MMU access
* @mmu_idx: Indicates current privilege level
* @first_stage: Are we in first stage translation?
@@ -324,6 +327,7 @@ void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv)
*/
static int get_physical_address(CPURISCVState *env, hwaddr *physical,
int *prot, target_ulong addr,
+ target_ulong *fault_pte_addr,
int access_type, int mmu_idx,
bool first_stage, bool two_stage)
{
@@ -447,11 +451,14 @@ restart:
/* Do the second stage translation on the base PTE address. */
int vbase_ret = get_physical_address(env, &vbase, &vbase_prot,
- base, MMU_DATA_LOAD,
+ base, NULL, MMU_DATA_LOAD,
mmu_idx, false, true);
if (vbase_ret != TRANSLATE_SUCCESS) {
- return vbase_ret;
+ if (fault_pte_addr) {
+ *fault_pte_addr = (base + idx * ptesize) >> 2;
+ }
+ return TRANSLATE_G_STAGE_FAIL;
}
pte_addr = vbase + idx * ptesize;
@@ -632,13 +639,13 @@ hwaddr riscv_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
int prot;
int mmu_idx = cpu_mmu_index(&cpu->env, false);
- if (get_physical_address(env, &phys_addr, &prot, addr, 0, mmu_idx,
+ if (get_physical_address(env, &phys_addr, &prot, addr, NULL, 0, mmu_idx,
true, riscv_cpu_virt_enabled(env))) {
return -1;
}
if (riscv_cpu_virt_enabled(env)) {
- if (get_physical_address(env, &phys_addr, &prot, phys_addr,
+ if (get_physical_address(env, &phys_addr, &prot, phys_addr, NULL,
0, mmu_idx, false, true)) {
return -1;
}
@@ -727,19 +734,30 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
if (riscv_cpu_virt_enabled(env) ||
(riscv_cpu_two_stage_lookup(env) && access_type != MMU_INST_FETCH)) {
/* Two stage lookup */
- ret = get_physical_address(env, &pa, &prot, address, access_type,
+ ret = get_physical_address(env, &pa, &prot, address,
+ &env->guest_phys_fault_addr, access_type,
mmu_idx, true, true);
+ /*
+ * A G-stage exception may be triggered during two-stage lookup.
+ * And the env->guest_phys_fault_addr has already been set in
+ * get_physical_address().
+ */
+ if (ret == TRANSLATE_G_STAGE_FAIL) {
+ first_stage_error = false;
+ access_type = MMU_DATA_LOAD;
+ }
+
qemu_log_mask(CPU_LOG_MMU,
"%s 1st-stage address=%" VADDR_PRIx " ret %d physical "
TARGET_FMT_plx " prot %d\n",
__func__, address, ret, pa, prot);
- if (ret != TRANSLATE_FAIL) {
+ if (ret == TRANSLATE_SUCCESS) {
/* Second stage lookup */
im_address = pa;
- ret = get_physical_address(env, &pa, &prot2, im_address,
+ ret = get_physical_address(env, &pa, &prot2, im_address, NULL,
access_type, mmu_idx, false, true);
qemu_log_mask(CPU_LOG_MMU,
@@ -768,8 +786,8 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
}
} else {
/* Single stage lookup */
- ret = get_physical_address(env, &pa, &prot, address, access_type,
- mmu_idx, true, false);
+ ret = get_physical_address(env, &pa, &prot, address, NULL,
+ access_type, mmu_idx, true, false);
qemu_log_mask(CPU_LOG_MMU,
"%s address=%" VADDR_PRIx " ret %d physical "
@@ -852,6 +870,7 @@ void riscv_cpu_do_interrupt(CPUState *cs)
bool async = !!(cs->exception_index & RISCV_EXCP_INT_FLAG);
target_ulong cause = cs->exception_index & RISCV_EXCP_INT_MASK;
target_ulong deleg = async ? env->mideleg : env->medeleg;
+ bool write_tval = false;
target_ulong tval = 0;
target_ulong htval = 0;
target_ulong mtval2 = 0;
@@ -873,6 +892,7 @@ void riscv_cpu_do_interrupt(CPUState *cs)
case RISCV_EXCP_INST_PAGE_FAULT:
case RISCV_EXCP_LOAD_PAGE_FAULT:
case RISCV_EXCP_STORE_PAGE_FAULT:
+ write_tval = true;
tval = env->badaddr;
break;
default:
@@ -895,7 +915,13 @@ void riscv_cpu_do_interrupt(CPUState *cs)
}
trace_riscv_trap(env->mhartid, async, cause, env->pc, tval,
- riscv_cpu_get_trap_name(cause, async));
+ riscv_cpu_get_trap_name(cause, async));
+
+ qemu_log_mask(CPU_LOG_INT,
+ "%s: hart:"TARGET_FMT_ld", async:%d, cause:"TARGET_FMT_lx", "
+ "epc:0x"TARGET_FMT_lx", tval:0x"TARGET_FMT_lx", desc=%s\n",
+ __func__, env->mhartid, async, cause, env->pc, tval,
+ riscv_cpu_get_trap_name(cause, async));
if (env->priv <= PRV_S &&
cause < TARGET_LONG_BITS && ((deleg >> cause) & 1)) {
@@ -904,7 +930,7 @@ void riscv_cpu_do_interrupt(CPUState *cs)
target_ulong hdeleg = async ? env->hideleg : env->hedeleg;
if ((riscv_cpu_virt_enabled(env) ||
- riscv_cpu_two_stage_lookup(env)) && tval) {
+ riscv_cpu_two_stage_lookup(env)) && write_tval) {
/*
* If we are writing a guest virtual address to stval, set
* this to 1. If we are trapping to VS we will set this to 0
@@ -932,7 +958,7 @@ void riscv_cpu_do_interrupt(CPUState *cs)
/* Trap into HS mode, from virt */
riscv_cpu_swap_hypervisor_regs(env);
env->hstatus = set_field(env->hstatus, HSTATUS_SPVP,
- get_field(env->mstatus, SSTATUS_SPP));
+ env->priv);
env->hstatus = set_field(env->hstatus, HSTATUS_SPV,
riscv_cpu_virt_enabled(env));
diff --git a/target/riscv/op_helper.c b/target/riscv/op_helper.c
index 9b9ada45a9..4ce73575a7 100644
--- a/target/riscv/op_helper.c
+++ b/target/riscv/op_helper.c
@@ -29,7 +29,6 @@ void QEMU_NORETURN riscv_raise_exception(CPURISCVState *env,
uint32_t exception, uintptr_t pc)
{
CPUState *cs = env_cpu(env);
- qemu_log_mask(CPU_LOG_INT, "%s: %d\n", __func__, exception);
cs->exception_index = exception;
cpu_loop_exit_restore(cs, pc);
}
@@ -334,12 +333,12 @@ target_ulong helper_hyp_x_load(CPURISCVState *env, target_ulong address,
riscv_cpu_set_two_stage_lookup(env, true);
switch (memop) {
- case MO_TEUL:
- pte = cpu_ldub_data_ra(env, address, GETPC());
- break;
case MO_TEUW:
pte = cpu_lduw_data_ra(env, address, GETPC());
break;
+ case MO_TEUL:
+ pte = cpu_ldl_data_ra(env, address, GETPC());
+ break;
default:
g_assert_not_reached();
}
diff --git a/tests/acceptance/machine_m68k_nextcube.py b/tests/acceptance/machine_m68k_nextcube.py
index 32cf571f94..2baba5fdc2 100644
--- a/tests/acceptance/machine_m68k_nextcube.py
+++ b/tests/acceptance/machine_m68k_nextcube.py
@@ -9,7 +9,6 @@ import os
import re
import time
import logging
-import distutils.spawn
from avocado_qemu import Test
from avocado import skipUnless
@@ -70,7 +69,7 @@ class NextCubeMachine(Test):
@skipUnless(PIL_AVAILABLE, 'Python PIL not installed')
def test_bootrom_framebuffer_size(self):
- screenshot_path = os.path.join(self.workdir, "dump.png")
+ screenshot_path = os.path.join(self.workdir, "dump.ppm")
self.check_bootrom_framebuffer(screenshot_path)
width, height = Image.open(screenshot_path).size
@@ -79,7 +78,7 @@ class NextCubeMachine(Test):
@skipUnless(tesseract_available(3), 'tesseract v3 OCR tool not available')
def test_bootrom_framebuffer_ocr_with_tesseract_v3(self):
- screenshot_path = os.path.join(self.workdir, "dump.png")
+ screenshot_path = os.path.join(self.workdir, "dump.ppm")
self.check_bootrom_framebuffer(screenshot_path)
console_logger = logging.getLogger('console')
@@ -95,7 +94,7 @@ class NextCubeMachine(Test):
# that it is still alpha-level software.
@skipUnless(tesseract_available(4), 'tesseract v4 OCR tool not available')
def test_bootrom_framebuffer_ocr_with_tesseract_v4(self):
- screenshot_path = os.path.join(self.workdir, "dump.png")
+ screenshot_path = os.path.join(self.workdir, "dump.ppm")
self.check_bootrom_framebuffer(screenshot_path)
console_logger = logging.getLogger('console')
diff --git a/tests/acceptance/ppc_prep_40p.py b/tests/acceptance/ppc_prep_40p.py
index 1515561249..96ba13b894 100644
--- a/tests/acceptance/ppc_prep_40p.py
+++ b/tests/acceptance/ppc_prep_40p.py
@@ -22,7 +22,6 @@ class IbmPrep40pMachine(Test):
# All rights reserved.
# U.S. Government Users Restricted Rights - Use, duplication or disclosure
# restricted by GSA ADP Schedule Contract with IBM Corp.
- @skipIf(os.getenv('CONTINUOUS_INTEGRATION'), 'Running on Travis-CI')
@skipUnless(os.getenv('AVOCADO_ALLOW_UNTRUSTED_CODE'), 'untrusted code')
def test_factory_firmware_and_netbsd(self):
"""
@@ -35,7 +34,7 @@ class IbmPrep40pMachine(Test):
'7020-40p/P12H0456.IMG')
bios_hash = '1775face4e6dc27f3a6ed955ef6eb331bf817f03'
bios_path = self.fetch_asset(bios_url, asset_hash=bios_hash)
- drive_url = ('https://cdn.netbsd.org/pub/NetBSD/NetBSD-archive/'
+ drive_url = ('https://archive.netbsd.org/pub/NetBSD-archive/'
'NetBSD-4.0/prep/installation/floppy/generic_com0.fs')
drive_hash = 'dbcfc09912e71bd5f0d82c7c1ee43082fb596ceb'
drive_path = self.fetch_asset(drive_url, asset_hash=drive_hash)
@@ -61,7 +60,6 @@ class IbmPrep40pMachine(Test):
wait_for_console_pattern(self, '>> Memory: 192M')
wait_for_console_pattern(self, '>> CPU type PowerPC,604')
- @skipIf(os.getenv('CONTINUOUS_INTEGRATION'), 'Running on Travis-CI')
def test_openbios_and_netbsd(self):
"""
:avocado: tags=arch:ppc
diff --git a/tests/docker/dockerfiles/centos8.docker b/tests/docker/dockerfiles/centos8.docker
index 585dfad9be..a589142114 100644
--- a/tests/docker/dockerfiles/centos8.docker
+++ b/tests/docker/dockerfiles/centos8.docker
@@ -18,6 +18,7 @@ ENV PACKAGES \
lzo-devel \
make \
mesa-libEGL-devel \
+ nmap-ncat \
nettle-devel \
ninja-build \
perl-Test-Harness \
diff --git a/tests/docker/dockerfiles/debian-amd64.docker b/tests/docker/dockerfiles/debian-amd64.docker
index 314c6bae83..55075d9fce 100644
--- a/tests/docker/dockerfiles/debian-amd64.docker
+++ b/tests/docker/dockerfiles/debian-amd64.docker
@@ -23,6 +23,9 @@ RUN apt update && \
libsnappy-dev \
libvte-dev \
netcat-openbsd \
+ openssh-client \
+ python3-numpy \
+ python3-opencv \
python3-venv
# virgl
diff --git a/tests/docker/dockerfiles/fedora.docker b/tests/docker/dockerfiles/fedora.docker
index ac79d95418..0b5053f2d0 100644
--- a/tests/docker/dockerfiles/fedora.docker
+++ b/tests/docker/dockerfiles/fedora.docker
@@ -73,6 +73,7 @@ ENV PACKAGES \
mingw64-pixman \
mingw64-pkg-config \
mingw64-SDL2 \
+ nmap-ncat \
ncurses-devel \
nettle-devel \
ninja-build \
diff --git a/tests/docker/dockerfiles/ubuntu2004.docker b/tests/docker/dockerfiles/ubuntu2004.docker
index 17b37cda38..355bbb3c63 100644
--- a/tests/docker/dockerfiles/ubuntu2004.docker
+++ b/tests/docker/dockerfiles/ubuntu2004.docker
@@ -47,6 +47,7 @@ ENV PACKAGES flex bison \
libxen-dev \
libzstd-dev \
make \
+ netcat-openbsd \
ninja-build \
python3-numpy \
python3-opencv \
diff --git a/tests/qemu-iotests/274 b/tests/qemu-iotests/274
index d4571c5465..76b1ba6a52 100755
--- a/tests/qemu-iotests/274
+++ b/tests/qemu-iotests/274
@@ -115,6 +115,26 @@ with iotests.FilePath('base') as base, \
iotests.qemu_io_log('-c', 'read -P 1 0 %d' % size_short, mid)
iotests.qemu_io_log('-c', 'read -P 0 %d %d' % (size_short, size_diff), mid)
+ iotests.log('=== Testing qemu-img commit (top -> base) ===')
+
+ create_chain()
+ iotests.qemu_img_log('commit', '-b', base, top)
+ iotests.img_info_log(base)
+ iotests.qemu_io_log('-c', 'read -P 1 0 %d' % size_short, base)
+ iotests.qemu_io_log('-c', 'read -P 0 %d %d' % (size_short, size_diff), base)
+
+ iotests.log('=== Testing QMP active commit (top -> base) ===')
+
+ create_chain()
+ with create_vm() as vm:
+ vm.launch()
+ vm.qmp_log('block-commit', device='top', base_node='base',
+ job_id='job0', auto_dismiss=False)
+ vm.run_job('job0', wait=5)
+
+ iotests.img_info_log(mid)
+ iotests.qemu_io_log('-c', 'read -P 1 0 %d' % size_short, base)
+ iotests.qemu_io_log('-c', 'read -P 0 %d %d' % (size_short, size_diff), base)
iotests.log('== Resize tests ==')
diff --git a/tests/qemu-iotests/274.out b/tests/qemu-iotests/274.out
index bf5abd4c10..cfe17a8659 100644
--- a/tests/qemu-iotests/274.out
+++ b/tests/qemu-iotests/274.out
@@ -135,6 +135,74 @@ read 1048576/1048576 bytes at offset 0
read 1048576/1048576 bytes at offset 1048576
1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+=== Testing qemu-img commit (top -> base) ===
+Formatting 'TEST_DIR/PID-base', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=2097152 lazy_refcounts=off refcount_bits=16
+
+Formatting 'TEST_DIR/PID-mid', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=1048576 backing_file=TEST_DIR/PID-base backing_fmt=qcow2 lazy_refcounts=off refcount_bits=16
+
+Formatting 'TEST_DIR/PID-top', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=2097152 backing_file=TEST_DIR/PID-mid backing_fmt=qcow2 lazy_refcounts=off refcount_bits=16
+
+wrote 2097152/2097152 bytes at offset 0
+2 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+
+Image committed.
+
+image: TEST_IMG
+file format: IMGFMT
+virtual size: 2 MiB (2097152 bytes)
+cluster_size: 65536
+Format specific information:
+ compat: 1.1
+ compression type: zlib
+ lazy refcounts: false
+ refcount bits: 16
+ corrupt: false
+ extended l2: false
+
+read 1048576/1048576 bytes at offset 0
+1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+
+read 1048576/1048576 bytes at offset 1048576
+1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+
+=== Testing QMP active commit (top -> base) ===
+Formatting 'TEST_DIR/PID-base', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=2097152 lazy_refcounts=off refcount_bits=16
+
+Formatting 'TEST_DIR/PID-mid', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=1048576 backing_file=TEST_DIR/PID-base backing_fmt=qcow2 lazy_refcounts=off refcount_bits=16
+
+Formatting 'TEST_DIR/PID-top', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=2097152 backing_file=TEST_DIR/PID-mid backing_fmt=qcow2 lazy_refcounts=off refcount_bits=16
+
+wrote 2097152/2097152 bytes at offset 0
+2 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+
+{"execute": "block-commit", "arguments": {"auto-dismiss": false, "base-node": "base", "device": "top", "job-id": "job0"}}
+{"return": {}}
+{"execute": "job-complete", "arguments": {"id": "job0"}}
+{"return": {}}
+{"data": {"device": "job0", "len": 1048576, "offset": 1048576, "speed": 0, "type": "commit"}, "event": "BLOCK_JOB_READY", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
+{"data": {"device": "job0", "len": 1048576, "offset": 1048576, "speed": 0, "type": "commit"}, "event": "BLOCK_JOB_COMPLETED", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
+{"execute": "job-dismiss", "arguments": {"id": "job0"}}
+{"return": {}}
+image: TEST_IMG
+file format: IMGFMT
+virtual size: 1 MiB (1048576 bytes)
+cluster_size: 65536
+backing file: TEST_DIR/PID-base
+backing file format: IMGFMT
+Format specific information:
+ compat: 1.1
+ compression type: zlib
+ lazy refcounts: false
+ refcount bits: 16
+ corrupt: false
+ extended l2: false
+
+read 1048576/1048576 bytes at offset 0
+1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+
+read 1048576/1048576 bytes at offset 1048576
+1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+
== Resize tests ==
=== preallocation=off ===
Formatting 'TEST_DIR/PID-base', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=6442450944 lazy_refcounts=off refcount_bits=16
diff --git a/tests/qtest/fuzz/fuzz.c b/tests/qtest/fuzz/fuzz.c
index eb0070437f..7be7226bc0 100644
--- a/tests/qtest/fuzz/fuzz.c
+++ b/tests/qtest/fuzz/fuzz.c
@@ -118,6 +118,19 @@ static FuzzTarget *fuzz_get_target(char* name)
}
+/* Sometimes called by libfuzzer to mutate two inputs into one */
+size_t LLVMFuzzerCustomCrossOver(const uint8_t *data1, size_t size1,
+ const uint8_t *data2, size_t size2,
+ uint8_t *out, size_t max_out_size,
+ unsigned int seed)
+{
+ if (fuzz_target->crossover) {
+ return fuzz_target->crossover(data1, size1, data2, size2, out,
+ max_out_size, seed);
+ }
+ return 0;
+}
+
/* Executed for each fuzzing-input */
int LLVMFuzzerTestOneInput(const unsigned char *Data, size_t Size)
{
diff --git a/tests/qtest/fuzz/fuzz.h b/tests/qtest/fuzz/fuzz.h
index 8eb765edc8..08e9560a79 100644
--- a/tests/qtest/fuzz/fuzz.h
+++ b/tests/qtest/fuzz/fuzz.h
@@ -77,6 +77,30 @@ typedef struct FuzzTarget {
*/
void(*fuzz)(QTestState *, const unsigned char *, size_t);
+ /*
+ * The fuzzer can specify a "Custom Crossover" function for combining two
+ * inputs from the corpus. This function is sometimes called by libfuzzer
+ * when mutating inputs.
+ *
+ * data1: location of first input
+ * size1: length of first input
+ * data2: location of second input
+ * size2: length of second input
+ * out: where to place the resulting, mutated input
+ * max_out_size: the maximum length of the input that can be placed in out
+ * seed: the seed that should be used to make mutations deterministic, when
+ * needed
+ *
+ * See libfuzzer's LLVMFuzzerCustomCrossOver API for more info.
+ *
+ * Can be NULL
+ */
+ size_t(*crossover)(const uint8_t *data1, size_t size1,
+ const uint8_t *data2, size_t size2,
+ uint8_t *out, size_t max_out_size,
+ unsigned int seed);
+
+ void *opaque;
} FuzzTarget;
void flush_events(QTestState *);
@@ -91,6 +115,10 @@ void fuzz_qtest_set_serialize(bool option);
*/
void fuzz_add_target(const FuzzTarget *target);
+size_t LLVMFuzzerCustomCrossOver(const uint8_t *data1, size_t size1,
+ const uint8_t *data2, size_t size2,
+ uint8_t *out, size_t max_out_size,
+ unsigned int seed);
int LLVMFuzzerTestOneInput(const unsigned char *Data, size_t Size);
int LLVMFuzzerInitialize(int *argc, char ***argv, char ***envp);
diff --git a/tests/qtest/fuzz/generic_fuzz.c b/tests/qtest/fuzz/generic_fuzz.c
new file mode 100644
index 0000000000..a8f5864883
--- /dev/null
+++ b/tests/qtest/fuzz/generic_fuzz.c
@@ -0,0 +1,954 @@
+/*
+ * Generic Virtual-Device Fuzzing Target
+ *
+ * Copyright Red Hat Inc., 2020
+ *
+ * Authors:
+ * Alexander Bulekov <alxndr@bu.edu>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+
+#include <wordexp.h>
+
+#include "hw/core/cpu.h"
+#include "tests/qtest/libqos/libqtest.h"
+#include "fuzz.h"
+#include "fork_fuzz.h"
+#include "exec/address-spaces.h"
+#include "string.h"
+#include "exec/memory.h"
+#include "exec/ramblock.h"
+#include "exec/address-spaces.h"
+#include "hw/qdev-core.h"
+#include "hw/pci/pci.h"
+#include "hw/boards.h"
+#include "generic_fuzz_configs.h"
+
+/*
+ * SEPARATOR is used to separate "operations" in the fuzz input
+ */
+#define SEPARATOR "FUZZ"
+
+enum cmds {
+ OP_IN,
+ OP_OUT,
+ OP_READ,
+ OP_WRITE,
+ OP_PCI_READ,
+ OP_PCI_WRITE,
+ OP_DISABLE_PCI,
+ OP_ADD_DMA_PATTERN,
+ OP_CLEAR_DMA_PATTERNS,
+ OP_CLOCK_STEP,
+};
+
+#define DEFAULT_TIMEOUT_US 100000
+#define USEC_IN_SEC 1000000000
+
+#define MAX_DMA_FILL_SIZE 0x10000
+
+#define PCI_HOST_BRIDGE_CFG 0xcf8
+#define PCI_HOST_BRIDGE_DATA 0xcfc
+
+typedef struct {
+ ram_addr_t addr;
+ ram_addr_t size; /* The number of bytes until the end of the I/O region */
+} address_range;
+
+static useconds_t timeout = DEFAULT_TIMEOUT_US;
+
+static bool qtest_log_enabled;
+
+/*
+ * A pattern used to populate a DMA region or perform a memwrite. This is
+ * useful for e.g. populating tables of unique addresses.
+ * Example {.index = 1; .stride = 2; .len = 3; .data = "\x00\x01\x02"}
+ * Renders as: 00 01 02 00 03 02 00 05 02 00 07 02 ...
+ */
+typedef struct {
+ uint8_t index; /* Index of a byte to increment by stride */
+ uint8_t stride; /* Increment each index'th byte by this amount */
+ size_t len;
+ const uint8_t *data;
+} pattern;
+
+/* Avoid filling the same DMA region between MMIO/PIO commands ? */
+static bool avoid_double_fetches;
+
+static QTestState *qts_global; /* Need a global for the DMA callback */
+
+/*
+ * List of memory regions that are children of QOM objects specified by the
+ * user for fuzzing.
+ */
+static GHashTable *fuzzable_memoryregions;
+static GPtrArray *fuzzable_pci_devices;
+
+struct get_io_cb_info {
+ int index;
+ int found;
+ address_range result;
+};
+
+static int get_io_address_cb(Int128 start, Int128 size,
+ const MemoryRegion *mr, void *opaque) {
+ struct get_io_cb_info *info = opaque;
+ if (g_hash_table_lookup(fuzzable_memoryregions, mr)) {
+ if (info->index == 0) {
+ info->result.addr = (ram_addr_t)start;
+ info->result.size = (ram_addr_t)size;
+ info->found = 1;
+ return 1;
+ }
+ info->index--;
+ }
+ return 0;
+}
+
+/*
+ * List of dma regions populated since the last fuzzing command. Used to ensure
+ * that we only write to each DMA address once, to avoid race conditions when
+ * building reproducers.
+ */
+static GArray *dma_regions;
+
+static GArray *dma_patterns;
+static int dma_pattern_index;
+static bool pci_disabled;
+
+/*
+ * Allocate a block of memory and populate it with a pattern.
+ */
+static void *pattern_alloc(pattern p, size_t len)
+{
+ int i;
+ uint8_t *buf = g_malloc(len);
+ uint8_t sum = 0;
+
+ for (i = 0; i < len; ++i) {
+ buf[i] = p.data[i % p.len];
+ if ((i % p.len) == p.index) {
+ buf[i] += sum;
+ sum += p.stride;
+ }
+ }
+ return buf;
+}
+
+static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
+{
+ unsigned access_size_max = mr->ops->valid.max_access_size;
+
+ /*
+ * Regions are assumed to support 1-4 byte accesses unless
+ * otherwise specified.
+ */
+ if (access_size_max == 0) {
+ access_size_max = 4;
+ }
+
+ /* Bound the maximum access by the alignment of the address. */
+ if (!mr->ops->impl.unaligned) {
+ unsigned align_size_max = addr & -addr;
+ if (align_size_max != 0 && align_size_max < access_size_max) {
+ access_size_max = align_size_max;
+ }
+ }
+
+ /* Don't attempt accesses larger than the maximum. */
+ if (l > access_size_max) {
+ l = access_size_max;
+ }
+ l = pow2floor(l);
+
+ return l;
+}
+
+/*
+ * Call-back for functions that perform DMA reads from guest memory. Confirm
+ * that the region has not already been populated since the last loop in
+ * generic_fuzz(), avoiding potential race conditions, which we don't have
+ * a good way of reproducing right now.
+ */
+void fuzz_dma_read_cb(size_t addr, size_t len, MemoryRegion *mr, bool is_write)
+{
+ /* Are we in the generic-fuzzer or are we using another fuzz-target? */
+ if (!qts_global) {
+ return;
+ }
+
+ /*
+ * Return immediately if:
+ * - We have no DMA patterns defined
+ * - The length of the DMA read request is zero
+ * - The DMA read is hitting an MR other than the machine's main RAM
+ * - The DMA request is not a read (what happens for an address_space_map
+ * with is_write=True? Can the device use the same pointer to do reads?)
+ * - The DMA request hits past the bounds of our RAM
+ */
+ if (dma_patterns->len == 0
+ || len == 0
+ /* || mr != MACHINE(qdev_get_machine())->ram */
+ || is_write
+ || addr > current_machine->ram_size) {
+ return;
+ }
+
+ /*
+ * If we overlap with any existing dma_regions, split the range and only
+ * populate the non-overlapping parts.
+ */
+ address_range region;
+ bool double_fetch = false;
+ for (int i = 0;
+ i < dma_regions->len && (avoid_double_fetches || qtest_log_enabled);
+ ++i) {
+ region = g_array_index(dma_regions, address_range, i);
+ if (addr < region.addr + region.size && addr + len > region.addr) {
+ double_fetch = true;
+ if (addr < region.addr
+ && avoid_double_fetches) {
+ fuzz_dma_read_cb(addr, region.addr - addr, mr, is_write);
+ }
+ if (addr + len > region.addr + region.size
+ && avoid_double_fetches) {
+ fuzz_dma_read_cb(region.addr + region.size,
+ addr + len - (region.addr + region.size), mr, is_write);
+ }
+ return;
+ }
+ }
+
+ /* Cap the length of the DMA access to something reasonable */
+ len = MIN(len, MAX_DMA_FILL_SIZE);
+
+ address_range ar = {addr, len};
+ g_array_append_val(dma_regions, ar);
+ pattern p = g_array_index(dma_patterns, pattern, dma_pattern_index);
+ void *buf = pattern_alloc(p, ar.size);
+ hwaddr l, addr1;
+ MemoryRegion *mr1;
+ uint8_t *ram_ptr;
+ while (len > 0) {
+ l = len;
+ mr1 = address_space_translate(first_cpu->as,
+ addr, &addr1, &l, true,
+ MEMTXATTRS_UNSPECIFIED);
+
+ if (!(memory_region_is_ram(mr1) ||
+ memory_region_is_romd(mr1))) {
+ l = memory_access_size(mr1, l, addr1);
+ } else {
+ /* ROM/RAM case */
+ ram_ptr = qemu_map_ram_ptr(mr1->ram_block, addr1);
+ memcpy(ram_ptr, buf, l);
+ break;
+ }
+ len -= l;
+ buf += l;
+ addr += l;
+
+ }
+ if (qtest_log_enabled) {
+ /*
+ * With QTEST_LOG, use a normal, slow QTest memwrite. Prefix the log
+ * that will be written by qtest.c with a DMA tag, so we can reorder
+ * the resulting QTest trace so the DMA fills precede the last PIO/MMIO
+ * command.
+ */
+ fprintf(stderr, "[DMA] ");
+ if (double_fetch) {
+ fprintf(stderr, "[DOUBLE-FETCH] ");
+ }
+ fflush(stderr);
+ }
+ qtest_memwrite(qts_global, ar.addr, buf, ar.size);
+ g_free(buf);
+
+ /* Increment the index of the pattern for the next DMA access */
+ dma_pattern_index = (dma_pattern_index + 1) % dma_patterns->len;
+}
+
+/*
+ * Here we want to convert a fuzzer-provided [io-region-index, offset] to
+ * a physical address. To do this, we iterate over all of the matched
+ * MemoryRegions. Check whether each region exists within the particular io
+ * space. Return the absolute address of the offset within the index'th region
+ * that is a subregion of the io_space and the distance until the end of the
+ * memory region.
+ */
+static bool get_io_address(address_range *result, AddressSpace *as,
+ uint8_t index,
+ uint32_t offset) {
+ FlatView *view;
+ view = as->current_map;
+ g_assert(view);
+ struct get_io_cb_info cb_info = {};
+
+ cb_info.index = index;
+
+ /*
+ * Loop around the FlatView until we match "index" number of
+ * fuzzable_memoryregions, or until we know that there are no matching
+ * memory_regions.
+ */
+ do {
+ flatview_for_each_range(view, get_io_address_cb, &cb_info);
+ } while (cb_info.index != index && !cb_info.found);
+
+ *result = cb_info.result;
+ return cb_info.found;
+}
+
+static bool get_pio_address(address_range *result,
+ uint8_t index, uint16_t offset)
+{
+ /*
+ * PIO BARs can be set past the maximum port address (0xFFFF). Thus, result
+ * can contain an addr that extends past the PIO space. When we pass this
+ * address to qtest_in/qtest_out, it is cast to a uint16_t, so we might end
+ * up fuzzing a completely different MemoryRegion/Device. Therefore, check
+ * that the address here is within the PIO space limits.
+ */
+ bool found = get_io_address(result, &address_space_io, index, offset);
+ return result->addr <= 0xFFFF ? found : false;
+}
+
+static bool get_mmio_address(address_range *result,
+ uint8_t index, uint32_t offset)
+{
+ return get_io_address(result, &address_space_memory, index, offset);
+}
+
+static void op_in(QTestState *s, const unsigned char * data, size_t len)
+{
+ enum Sizes {Byte, Word, Long, end_sizes};
+ struct {
+ uint8_t size;
+ uint8_t base;
+ uint16_t offset;
+ } a;
+ address_range abs;
+
+ if (len < sizeof(a)) {
+ return;
+ }
+ memcpy(&a, data, sizeof(a));
+ if (get_pio_address(&abs, a.base, a.offset) == 0) {
+ return;
+ }
+
+ switch (a.size %= end_sizes) {
+ case Byte:
+ qtest_inb(s, abs.addr);
+ break;
+ case Word:
+ if (abs.size >= 2) {
+ qtest_inw(s, abs.addr);
+ }
+ break;
+ case Long:
+ if (abs.size >= 4) {
+ qtest_inl(s, abs.addr);
+ }
+ break;
+ }
+}
+
+static void op_out(QTestState *s, const unsigned char * data, size_t len)
+{
+ enum Sizes {Byte, Word, Long, end_sizes};
+ struct {
+ uint8_t size;
+ uint8_t base;
+ uint16_t offset;
+ uint32_t value;
+ } a;
+ address_range abs;
+
+ if (len < sizeof(a)) {
+ return;
+ }
+ memcpy(&a, data, sizeof(a));
+
+ if (get_pio_address(&abs, a.base, a.offset) == 0) {
+ return;
+ }
+
+ switch (a.size %= end_sizes) {
+ case Byte:
+ qtest_outb(s, abs.addr, a.value & 0xFF);
+ break;
+ case Word:
+ if (abs.size >= 2) {
+ qtest_outw(s, abs.addr, a.value & 0xFFFF);
+ }
+ break;
+ case Long:
+ if (abs.size >= 4) {
+ qtest_outl(s, abs.addr, a.value);
+ }
+ break;
+ }
+}
+
+static void op_read(QTestState *s, const unsigned char * data, size_t len)
+{
+ enum Sizes {Byte, Word, Long, Quad, end_sizes};
+ struct {
+ uint8_t size;
+ uint8_t base;
+ uint32_t offset;
+ } a;
+ address_range abs;
+
+ if (len < sizeof(a)) {
+ return;
+ }
+ memcpy(&a, data, sizeof(a));
+
+ if (get_mmio_address(&abs, a.base, a.offset) == 0) {
+ return;
+ }
+
+ switch (a.size %= end_sizes) {
+ case Byte:
+ qtest_readb(s, abs.addr);
+ break;
+ case Word:
+ if (abs.size >= 2) {
+ qtest_readw(s, abs.addr);
+ }
+ break;
+ case Long:
+ if (abs.size >= 4) {
+ qtest_readl(s, abs.addr);
+ }
+ break;
+ case Quad:
+ if (abs.size >= 8) {
+ qtest_readq(s, abs.addr);
+ }
+ break;
+ }
+}
+
+static void op_write(QTestState *s, const unsigned char * data, size_t len)
+{
+ enum Sizes {Byte, Word, Long, Quad, end_sizes};
+ struct {
+ uint8_t size;
+ uint8_t base;
+ uint32_t offset;
+ uint64_t value;
+ } a;
+ address_range abs;
+
+ if (len < sizeof(a)) {
+ return;
+ }
+ memcpy(&a, data, sizeof(a));
+
+ if (get_mmio_address(&abs, a.base, a.offset) == 0) {
+ return;
+ }
+
+ switch (a.size %= end_sizes) {
+ case Byte:
+ qtest_writeb(s, abs.addr, a.value & 0xFF);
+ break;
+ case Word:
+ if (abs.size >= 2) {
+ qtest_writew(s, abs.addr, a.value & 0xFFFF);
+ }
+ break;
+ case Long:
+ if (abs.size >= 4) {
+ qtest_writel(s, abs.addr, a.value & 0xFFFFFFFF);
+ }
+ break;
+ case Quad:
+ if (abs.size >= 8) {
+ qtest_writeq(s, abs.addr, a.value);
+ }
+ break;
+ }
+}
+
+static void op_pci_read(QTestState *s, const unsigned char * data, size_t len)
+{
+ enum Sizes {Byte, Word, Long, end_sizes};
+ struct {
+ uint8_t size;
+ uint8_t base;
+ uint8_t offset;
+ } a;
+ if (len < sizeof(a) || fuzzable_pci_devices->len == 0 || pci_disabled) {
+ return;
+ }
+ memcpy(&a, data, sizeof(a));
+ PCIDevice *dev = g_ptr_array_index(fuzzable_pci_devices,
+ a.base % fuzzable_pci_devices->len);
+ int devfn = dev->devfn;
+ qtest_outl(s, PCI_HOST_BRIDGE_CFG, (1U << 31) | (devfn << 8) | a.offset);
+ switch (a.size %= end_sizes) {
+ case Byte:
+ qtest_inb(s, PCI_HOST_BRIDGE_DATA);
+ break;
+ case Word:
+ qtest_inw(s, PCI_HOST_BRIDGE_DATA);
+ break;
+ case Long:
+ qtest_inl(s, PCI_HOST_BRIDGE_DATA);
+ break;
+ }
+}
+
+static void op_pci_write(QTestState *s, const unsigned char * data, size_t len)
+{
+ enum Sizes {Byte, Word, Long, end_sizes};
+ struct {
+ uint8_t size;
+ uint8_t base;
+ uint8_t offset;
+ uint32_t value;
+ } a;
+ if (len < sizeof(a) || fuzzable_pci_devices->len == 0 || pci_disabled) {
+ return;
+ }
+ memcpy(&a, data, sizeof(a));
+ PCIDevice *dev = g_ptr_array_index(fuzzable_pci_devices,
+ a.base % fuzzable_pci_devices->len);
+ int devfn = dev->devfn;
+ qtest_outl(s, PCI_HOST_BRIDGE_CFG, (1U << 31) | (devfn << 8) | a.offset);
+ switch (a.size %= end_sizes) {
+ case Byte:
+ qtest_outb(s, PCI_HOST_BRIDGE_DATA, a.value & 0xFF);
+ break;
+ case Word:
+ qtest_outw(s, PCI_HOST_BRIDGE_DATA, a.value & 0xFFFF);
+ break;
+ case Long:
+ qtest_outl(s, PCI_HOST_BRIDGE_DATA, a.value & 0xFFFFFFFF);
+ break;
+ }
+}
+
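+/*
+ * The two PCI config ops above use the standard port-I/O configuration
+ * mechanism: an address word with the enable bit (1U << 31), the
+ * device/function number in bits 15:8 and the register offset in the low
+ * byte is written to PCI_HOST_BRIDGE_CFG, and the data is then transferred
+ * through PCI_HOST_BRIDGE_DATA. As an illustrative sketch (devfn and offset
+ * assumed to be already known):
+ *
+ *   qtest_outl(s, PCI_HOST_BRIDGE_CFG, (1U << 31) | (devfn << 8) | offset);
+ *   val = qtest_inl(s, PCI_HOST_BRIDGE_DATA);
+ */
+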
+static void op_add_dma_pattern(QTestState *s,
+ const unsigned char *data, size_t len)
+{
+ struct {
+ /*
+ * index and stride are used to increment the index-th byte of the
+ * pattern by stride, each time the pattern repeats.
+ */
+ uint8_t index;
+ uint8_t stride;
+ } a;
+
+ if (len < sizeof(a) + 1) {
+ return;
+ }
+ memcpy(&a, data, sizeof(a));
+ pattern p = {a.index, a.stride, len - sizeof(a), data + sizeof(a)};
+ p.index = a.index % p.len;
+ g_array_append_val(dma_patterns, p);
+ return;
+}
+
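+/*
+ * Illustrative example of the pattern semantics described above: assuming a
+ * pattern with data bytes {0xAA, 0xBB}, index = 1 and stride = 2, successive
+ * repetitions of the pattern written into a DMA region would look roughly
+ * like:
+ *   AA BB  AA BD  AA BF  ...
+ * i.e. the index-th byte of the pattern is incremented by stride each time
+ * the pattern repeats.
+ */
+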
+static void op_clear_dma_patterns(QTestState *s,
+ const unsigned char *data, size_t len)
+{
+ g_array_set_size(dma_patterns, 0);
+ dma_pattern_index = 0;
+}
+
+static void op_clock_step(QTestState *s, const unsigned char *data, size_t len)
+{
+ qtest_clock_step_next(s);
+}
+
+static void op_disable_pci(QTestState *s, const unsigned char *data, size_t len)
+{
+ pci_disabled = true;
+}
+
+static void handle_timeout(int sig)
+{
+ if (qtest_log_enabled) {
+ fprintf(stderr, "[Timeout]\n");
+ fflush(stderr);
+ }
+ _Exit(0);
+}
+
+/*
+ * Here, we interpret random bytes from the fuzzer as a sequence of commands.
+ * Some commands can be variable-width, so we use a separator, SEPARATOR, to
+ * mark the boundaries between "operations" in the fuzz input. Why use a
+ * separator, instead of just using the operations' lengths to identify
+ * operation boundaries?
+ * 1. This is a simple way to support variable-length operations
+ * 2. This adds "stability" to the input.
+ *    For example, take the input "AbBcgDefg", where there is no separator and
+ *    opcodes are capitalized. Removing just the first byte shifts every
+ *    subsequent opcode boundary, so the remaining bytes "bBcgDefg..." are
+ *    decoded into a completely different sequence of commands.
+ *    By adding a separator, we avoid this problem:
+ *    Ab SEP Bcg SEP Defg -> b SEP Bcg SEP Defg
+ *    The leftover "b" is decoded as opcode B, but since B needs two
+ *    additional bytes as operands, that first command is simply ignored.
+ *    The fuzzer actively tries to reduce inputs, so such unused bytes are
+ *    likely to be pruned, eventually.
+ *
+ * SEPARATOR is trivial for the fuzzer to discover when using ASan. Optionally,
+ * SEPARATOR can be manually specified as a dictionary value (see libfuzzer's
+ * -dict), though this should not be necessary.
+ *
+ * As a result, the stream of bytes is converted into a sequence of commands.
+ * In a simplified example where SEPARATOR is 0xFF:
+ * 00 01 02 FF 03 04 05 06 FF 01 FF ...
+ * becomes this sequence of commands:
+ * 00 01 02    -> op00 (0102)   -> in (0102, 2)
+ * 03 04 05 06 -> op03 (040506) -> write (040506, 3)
+ * 01          -> op01 (-, 0)   -> out (-, 0)
+ * ...
+ *
+ * Note that it is the job of the individual opcode functions to check that
+ * enough data was provided. E.g. for the last command, out (-, 0), op_out()
+ * must notice that not enough data was provided to select an address/value
+ * for the operation, and simply return.
+ */
+static void generic_fuzz(QTestState *s, const unsigned char *Data, size_t Size)
+{
+ void (*ops[]) (QTestState *s, const unsigned char* , size_t) = {
+ [OP_IN] = op_in,
+ [OP_OUT] = op_out,
+ [OP_READ] = op_read,
+ [OP_WRITE] = op_write,
+ [OP_PCI_READ] = op_pci_read,
+ [OP_PCI_WRITE] = op_pci_write,
+ [OP_DISABLE_PCI] = op_disable_pci,
+ [OP_ADD_DMA_PATTERN] = op_add_dma_pattern,
+ [OP_CLEAR_DMA_PATTERNS] = op_clear_dma_patterns,
+ [OP_CLOCK_STEP] = op_clock_step,
+ };
+ const unsigned char *cmd = Data;
+ const unsigned char *nextcmd;
+ size_t cmd_len;
+ uint8_t op;
+
+ if (fork() == 0) {
+ /*
+ * Sometimes the fuzzer will find inputs that take quite a long time to
+ * process. Often, these inputs do not result in new coverage, and even
+ * when they might be interesting, they can slow down the fuzzer overall.
+ * Set a timeout to avoid hurting performance too much.
+ */
+ if (timeout) {
+ struct sigaction sact;
+ struct itimerval timer;
+
+ sigemptyset(&sact.sa_mask);
+ sact.sa_flags = SA_NODEFER;
+ sact.sa_handler = handle_timeout;
+ sigaction(SIGALRM, &sact, NULL);
+
+ memset(&timer, 0, sizeof(timer));
+ timer.it_value.tv_sec = timeout / USEC_IN_SEC;
+ timer.it_value.tv_usec = timeout % USEC_IN_SEC;
+ setitimer(ITIMER_VIRTUAL, &timer, NULL);
+ }
+
+ op_clear_dma_patterns(s, NULL, 0);
+ pci_disabled = false;
+
+ while (cmd && Size) {
+ /* Get the length until the next command or end of input */
+ nextcmd = memmem(cmd, Size, SEPARATOR, strlen(SEPARATOR));
+ cmd_len = nextcmd ? nextcmd - cmd : Size;
+
+ if (cmd_len > 0) {
+ /* Interpret the first byte of the command as an opcode */
+ op = *cmd % (sizeof(ops) / sizeof((ops)[0]));
+ ops[op](s, cmd + 1, cmd_len - 1);
+
+ /* Run the main loop */
+ flush_events(s);
+ }
+ /* Advance to the next command */
+ cmd = nextcmd ? nextcmd + sizeof(SEPARATOR) - 1 : nextcmd;
+ Size = Size - (cmd_len + sizeof(SEPARATOR) - 1);
+ g_array_set_size(dma_regions, 0);
+ }
+ _Exit(0);
+ } else {
+ flush_events(s);
+ wait(0);
+ }
+}
+
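+/*
+ * Each fuzz input runs in a forked child: the child decodes and executes the
+ * commands and then calls _Exit(0), while the parent only flushes pending
+ * events and wait()s for the child. This keeps state changes made while
+ * processing one input from leaking into the next input.
+ */
+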
+static void usage(void)
+{
+ printf("Please specify the following environment variables:\n");
+ printf("QEMU_FUZZ_ARGS= the command line arguments passed to qemu\n");
+ printf("QEMU_FUZZ_OBJECTS= "
+ "a space separated list of QOM type names for objects to fuzz\n");
+ printf("Optionally: QEMU_AVOID_DOUBLE_FETCH= "
+ "Try to avoid racy DMA double fetch bugs? %d by default\n",
+ avoid_double_fetches);
+ printf("Optionally: QEMU_FUZZ_TIMEOUT= Specify a custom timeout (us). "
+ "0 to disable. %d by default\n", timeout);
+ exit(0);
+}
+
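+/*
+ * A hypothetical invocation, reusing the e1000e arguments from
+ * generic_fuzz_configs.h (binary path and --fuzz-target flag shown for
+ * illustration only):
+ *
+ *   QEMU_FUZZ_ARGS="-M q35 -nodefaults -device e1000e,netdev=net0 \
+ *                   -netdev user,id=net0" \
+ *   QEMU_FUZZ_OBJECTS="e1000e" \
+ *   ./qemu-fuzz-i386 --fuzz-target=generic-fuzz
+ */
+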
+static int locate_fuzz_memory_regions(Object *child, void *opaque)
+{
+ const char *name;
+ MemoryRegion *mr;
+ if (object_dynamic_cast(child, TYPE_MEMORY_REGION)) {
+ mr = MEMORY_REGION(child);
+ if ((memory_region_is_ram(mr) ||
+ memory_region_is_ram_device(mr) ||
+ memory_region_is_rom(mr)) == false) {
+ name = object_get_canonical_path_component(child);
+ /*
+ * We don't want duplicate pointers to the same MemoryRegion. The hash
+ * table is keyed by the MemoryRegion pointer, so inserting an already
+ * known region is effectively a no-op.
+ */
+ g_hash_table_insert(fuzzable_memoryregions, mr, (gpointer)true);
+ }
+ }
+ return 0;
+}
+
+static int locate_fuzz_objects(Object *child, void *opaque)
+{
+ char *pattern = opaque;
+ if (g_pattern_match_simple(pattern, object_get_typename(child))) {
+ /* Find and save ptrs to any child MemoryRegions */
+ object_child_foreach_recursive(child, locate_fuzz_memory_regions, NULL);
+
+ /*
+ * We matched an object. If it's a PCI device, store a pointer to it so
+ * we can map BARs and fuzz its config space.
+ */
+ if (object_dynamic_cast(OBJECT(child), TYPE_PCI_DEVICE)) {
+ /*
+ * We don't want duplicate pointers to the same PCIDevice, so remove any
+ * existing copy of the pointer before adding it.
+ */
+ g_ptr_array_remove_fast(fuzzable_pci_devices, PCI_DEVICE(child));
+ g_ptr_array_add(fuzzable_pci_devices, PCI_DEVICE(child));
+ }
+ } else if (object_dynamic_cast(OBJECT(child), TYPE_MEMORY_REGION)) {
+ if (g_pattern_match_simple(pattern,
+ object_get_canonical_path_component(child))) {
+ MemoryRegion *mr;
+ mr = MEMORY_REGION(child);
+ if ((memory_region_is_ram(mr) ||
+ memory_region_is_ram_device(mr) ||
+ memory_region_is_rom(mr)) == false) {
+ g_hash_table_insert(fuzzable_memoryregions, mr, (gpointer)true);
+ }
+ }
+ }
+ return 0;
+}
+
+static void generic_pre_fuzz(QTestState *s)
+{
+ GHashTableIter iter;
+ MemoryRegion *mr;
+ char **result;
+
+ if (!getenv("QEMU_FUZZ_OBJECTS")) {
+ usage();
+ }
+ if (getenv("QTEST_LOG")) {
+ qtest_log_enabled = 1;
+ }
+ if (getenv("QEMU_AVOID_DOUBLE_FETCH")) {
+ avoid_double_fetches = 1;
+ }
+ if (getenv("QEMU_FUZZ_TIMEOUT")) {
+ timeout = g_ascii_strtoll(getenv("QEMU_FUZZ_TIMEOUT"), NULL, 0);
+ }
+ qts_global = s;
+
+ dma_regions = g_array_new(false, false, sizeof(address_range));
+ dma_patterns = g_array_new(false, false, sizeof(pattern));
+
+ fuzzable_memoryregions = g_hash_table_new(NULL, NULL);
+ fuzzable_pci_devices = g_ptr_array_new();
+
+ result = g_strsplit(getenv("QEMU_FUZZ_OBJECTS"), " ", -1);
+ for (int i = 0; result[i] != NULL; i++) {
+ printf("Matching objects by name %s\n", result[i]);
+ object_child_foreach_recursive(qdev_get_machine(),
+ locate_fuzz_objects,
+ result[i]);
+ }
+ g_strfreev(result);
+ printf("This process will try to fuzz the following MemoryRegions:\n");
+
+ g_hash_table_iter_init(&iter, fuzzable_memoryregions);
+ while (g_hash_table_iter_next(&iter, (gpointer)&mr, NULL)) {
+ printf(" * %s (size %lx)\n",
+ object_get_canonical_path_component(&(mr->parent_obj)),
+ (uint64_t)mr->size);
+ }
+
+ if (!g_hash_table_size(fuzzable_memoryregions)) {
+ printf("No fuzzable memory regions found...\n");
+ exit(1);
+ }
+
+ counter_shm_init();
+}
+
+/*
+ * When libfuzzer gives us two inputs to combine, return a new input with the
+ * following structure:
+ *
+ * Input 1 (data1)
+ * SEPARATOR
+ * Clear out the DMA Patterns
+ * SEPARATOR
+ * Disable the pci_read/write instructions
+ * SEPARATOR
+ * Input 2 (data2)
+ *
+ * The idea is to collate the core behaviors of the two inputs.
+ * For example:
+ * Input 1: maps a device's BARs, sets up three DMA patterns, and triggers
+ * device functionality A
+ * Input 2: maps a device's BARs, sets up one DMA pattern, and triggers device
+ * functionality B
+ *
+ * This function attempts to produce an input that:
+ * Output: maps a device's BARs, sets up three DMA patterns, triggers
+ * device functionality A, replaces the DMA patterns with a single
+ * pattern, and triggers device functionality B.
+ */
+static size_t generic_fuzz_crossover(const uint8_t *data1, size_t size1, const
+ uint8_t *data2, size_t size2, uint8_t *out,
+ size_t max_out_size, unsigned int seed)
+{
+ size_t copy_len = 0, size = 0;
+
+ /* Check that we have enough space for data1 and at least part of data2 */
+ if (max_out_size <= size1 + strlen(SEPARATOR) * 3 + 2) {
+ return 0;
+ }
+
+ /* Copy over the first input */
+ copy_len = size1;
+ memcpy(out + size, data1, copy_len);
+ size += copy_len;
+ max_out_size -= copy_len;
+
+ /* Append a separator */
+ copy_len = strlen(SEPARATOR);
+ memcpy(out + size, SEPARATOR, copy_len);
+ size += copy_len;
+ max_out_size -= copy_len;
+
+ /* Clear out the DMA Patterns */
+ copy_len = 1;
+ if (copy_len) {
+ out[size] = OP_CLEAR_DMA_PATTERNS;
+ }
+ size += copy_len;
+ max_out_size -= copy_len;
+
+ /* Append a separator */
+ copy_len = strlen(SEPARATOR);
+ memcpy(out + size, SEPARATOR, copy_len);
+ size += copy_len;
+ max_out_size -= copy_len;
+
+ /* Disable PCI ops. Assume data1 took care of setting up PCI */
+ copy_len = 1;
+ if (copy_len) {
+ out[size] = OP_DISABLE_PCI;
+ }
+ size += copy_len;
+ max_out_size -= copy_len;
+
+ /* Append a separator */
+ copy_len = strlen(SEPARATOR);
+ memcpy(out + size, SEPARATOR, copy_len);
+ size += copy_len;
+ max_out_size -= copy_len;
+
+ /* Copy over the second input */
+ copy_len = MIN(size2, max_out_size);
+ memcpy(out + size, data2, copy_len);
+ size += copy_len;
+ max_out_size -= copy_len;
+
+ return size;
+}
+
+
+static GString *generic_fuzz_cmdline(FuzzTarget *t)
+{
+ GString *cmd_line = g_string_new(TARGET_NAME);
+ if (!getenv("QEMU_FUZZ_ARGS")) {
+ usage();
+ }
+ g_string_append_printf(cmd_line, " -display none \
+ -machine accel=qtest, \
+ -m 512M %s ", getenv("QEMU_FUZZ_ARGS"));
+ return cmd_line;
+}
+
+static GString *generic_fuzz_predefined_config_cmdline(FuzzTarget *t)
+{
+ const generic_fuzz_config *config;
+ g_assert(t->opaque);
+
+ config = t->opaque;
+ setenv("QEMU_FUZZ_ARGS", config->args, 1);
+ setenv("QEMU_FUZZ_OBJECTS", config->objects, 1);
+ return generic_fuzz_cmdline(t);
+}
+
+static void register_generic_fuzz_targets(void)
+{
+ fuzz_add_target(&(FuzzTarget){
+ .name = "generic-fuzz",
+ .description = "Fuzz based on any qemu command-line args. ",
+ .get_init_cmdline = generic_fuzz_cmdline,
+ .pre_fuzz = generic_pre_fuzz,
+ .fuzz = generic_fuzz,
+ .crossover = generic_fuzz_crossover
+ });
+
+ GString *name;
+ const generic_fuzz_config *config;
+
+ for (int i = 0;
+ i < sizeof(predefined_configs) / sizeof(generic_fuzz_config);
+ i++) {
+ config = predefined_configs + i;
+ name = g_string_new("generic-fuzz");
+ g_string_append_printf(name, "-%s", config->name);
+ fuzz_add_target(&(FuzzTarget){
+ .name = name->str,
+ .description = "Predefined generic-fuzz config.",
+ .get_init_cmdline = generic_fuzz_predefined_config_cmdline,
+ .pre_fuzz = generic_pre_fuzz,
+ .fuzz = generic_fuzz,
+ .crossover = generic_fuzz_crossover,
+ .opaque = (void *)config
+ });
+ }
+}
+
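+/*
+ * With the predefined configs from generic_fuzz_configs.h, the loop above
+ * registers one extra target per config, e.g. "generic-fuzz-virtio-blk" or
+ * "generic-fuzz-e1000e", in addition to the plain "generic-fuzz" target.
+ */
+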
+fuzz_target_init(register_generic_fuzz_targets);
diff --git a/tests/qtest/fuzz/generic_fuzz_configs.h b/tests/qtest/fuzz/generic_fuzz_configs.h
new file mode 100644
index 0000000000..c4d925f9e6
--- /dev/null
+++ b/tests/qtest/fuzz/generic_fuzz_configs.h
@@ -0,0 +1,121 @@
+/*
+ * Generic Virtual-Device Fuzzing Target Configs
+ *
+ * Copyright Red Hat Inc., 2020
+ *
+ * Authors:
+ * Alexander Bulekov <alxndr@bu.edu>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef GENERIC_FUZZ_CONFIGS_H
+#define GENERIC_FUZZ_CONFIGS_H
+
+#include "qemu/osdep.h"
+
+typedef struct generic_fuzz_config {
+ const char *name, *args, *objects;
+} generic_fuzz_config;
+
+const generic_fuzz_config predefined_configs[] = {
+ {
+ .name = "virtio-net-pci-slirp",
+ .args = "-M q35 -nodefaults "
+ "-device virtio-net,netdev=net0 -netdev user,id=net0",
+ .objects = "virtio*",
+ },{
+ .name = "virtio-blk",
+ .args = "-machine q35 -device virtio-blk,drive=disk0 "
+ "-drive file=null-co://,id=disk0,if=none,format=raw",
+ .objects = "virtio*",
+ },{
+ .name = "virtio-scsi",
+ .args = "-machine q35 -device virtio-scsi,num_queues=8 "
+ "-device scsi-hd,drive=disk0 "
+ "-drive file=null-co://,id=disk0,if=none,format=raw",
+ .objects = "scsi* virtio*",
+ },{
+ .name = "virtio-gpu",
+ .args = "-machine q35 -nodefaults -device virtio-gpu",
+ .objects = "virtio*",
+ },{
+ .name = "virtio-vga",
+ .args = "-machine q35 -nodefaults -device virtio-vga",
+ .objects = "virtio*",
+ },{
+ .name = "virtio-rng",
+ .args = "-machine q35 -nodefaults -device virtio-rng",
+ .objects = "virtio*",
+ },{
+ .name = "virtio-balloon",
+ .args = "-machine q35 -nodefaults -device virtio-balloon",
+ .objects = "virtio*",
+ },{
+ .name = "virtio-serial",
+ .args = "-machine q35 -nodefaults -device virtio-serial",
+ .objects = "virtio*",
+ },{
+ .name = "virtio-mouse",
+ .args = "-machine q35 -nodefaults -device virtio-mouse",
+ .objects = "virtio*",
+ },{
+ .name = "e1000",
+ .args = "-M q35 -nodefaults "
+ "-device e1000,netdev=net0 -netdev user,id=net0",
+ .objects = "e1000",
+ },{
+ .name = "e1000e",
+ .args = "-M q35 -nodefaults "
+ "-device e1000e,netdev=net0 -netdev user,id=net0",
+ .objects = "e1000e",
+ },{
+ .name = "cirrus-vga",
+ .args = "-machine q35 -nodefaults -device cirrus-vga",
+ .objects = "cirrus*",
+ },{
+ .name = "bochs-display",
+ .args = "-machine q35 -nodefaults -device bochs-display",
+ .objects = "bochs*",
+ },{
+ .name = "intel-hda",
+ .args = "-machine q35 -nodefaults -device intel-hda,id=hda0 "
+ "-device hda-output,bus=hda0.0 -device hda-micro,bus=hda0.0 "
+ "-device hda-duplex,bus=hda0.0",
+ .objects = "intel-hda",
+ },{
+ .name = "ide-hd",
+ .args = "-machine q35 -nodefaults "
+ "-drive file=null-co://,if=none,format=raw,id=disk0 "
+ "-device ide-hd,drive=disk0",
+ .objects = "ahci*",
+ },{
+ .name = "floppy",
+ .args = "-machine pc -nodefaults -device floppy,id=floppy0 "
+ "-drive id=disk0,file=null-co://,file.read-zeroes=on,if=none "
+ "-device floppy,drive=disk0,drive-type=288",
+ .objects = "fd* floppy*",
+ },{
+ .name = "xhci",
+ .args = "-machine q35 -nodefaults "
+ "-drive file=null-co://,if=none,format=raw,id=disk0 "
+ "-device qemu-xhci,id=xhci -device usb-tablet,bus=xhci.0 "
+ "-device usb-bot -device usb-storage,drive=disk0 "
+ "-chardev null,id=cd0 -chardev null,id=cd1 "
+ "-device usb-braille,chardev=cd0 -device usb-ccid -device usb-ccid "
+ "-device usb-kbd -device usb-mouse -device usb-serial,chardev=cd1 "
+ "-device usb-tablet -device usb-wacom-tablet -device usb-audio",
+ .objects = "*usb* *uhci* *xhci*",
+ },{
+ .name = "pc-i440fx",
+ .args = "-machine pc",
+ .objects = "*",
+ },{
+ .name = "pc-q35",
+ .args = "-machine q35",
+ .objects = "*",
+ }
+};
+
+#endif
diff --git a/tests/qtest/fuzz/meson.build b/tests/qtest/fuzz/meson.build
index b31ace7d5a..5162321f30 100644
--- a/tests/qtest/fuzz/meson.build
+++ b/tests/qtest/fuzz/meson.build
@@ -5,6 +5,7 @@ specific_fuzz_ss.add(files('fuzz.c', 'fork_fuzz.c', 'qos_fuzz.c',
specific_fuzz_ss.add(when: 'CONFIG_I440FX', if_true: files('i440fx_fuzz.c'))
specific_fuzz_ss.add(when: 'CONFIG_VIRTIO_NET', if_true: files('virtio_net_fuzz.c'))
specific_fuzz_ss.add(when: 'CONFIG_VIRTIO_SCSI', if_true: files('virtio_scsi_fuzz.c'))
+specific_fuzz_ss.add(files('generic_fuzz.c'))
fork_fuzz = declare_dependency(
link_args: config_host['FUZZ_EXE_LDFLAGS'].split() +
diff --git a/tests/qtest/libqtest.c b/tests/qtest/libqtest.c
index 08929f5ff6..99deff47ef 100644
--- a/tests/qtest/libqtest.c
+++ b/tests/qtest/libqtest.c
@@ -621,7 +621,7 @@ QDict *qtest_qmp_receive(QTestState *s)
return response;
}
/* Stash the event for a later consumption */
- s->pending_events = g_list_prepend(s->pending_events, response);
+ s->pending_events = g_list_append(s->pending_events, response);
}
}
@@ -795,15 +795,12 @@ void qtest_qmp_send_raw(QTestState *s, const char *fmt, ...)
QDict *qtest_qmp_event_ref(QTestState *s, const char *event)
{
- GList *next = NULL;
- QDict *response;
-
- for (GList *it = s->pending_events; it != NULL; it = next) {
+ while (s->pending_events) {
- next = it->next;
- response = (QDict *)it->data;
+ GList *first = s->pending_events;
+ QDict *response = (QDict *)first->data;
- s->pending_events = g_list_remove_link(s->pending_events, it);
+ s->pending_events = g_list_delete_link(s->pending_events, first);
if (!strcmp(qdict_get_str(response, "event"), event)) {
return response;
@@ -870,9 +867,14 @@ char *qtest_hmp(QTestState *s, const char *fmt, ...)
const char *qtest_get_arch(void)
{
const char *qemu = qtest_qemu_binary();
- const char *end = strrchr(qemu, '/');
+ const char *end = strrchr(qemu, '-');
+
+ if (!end) {
+ fprintf(stderr, "Can't determine architecture from binary name.\n");
+ abort();
+ }
- return end + strlen("/qemu-system-");
+ return end + 1;
}
bool qtest_get_irq(QTestState *s, int num)
diff --git a/tests/qtest/meson.build b/tests/qtest/meson.build
index 28d4068718..7e0ecaa2c5 100644
--- a/tests/qtest/meson.build
+++ b/tests/qtest/meson.build
@@ -133,12 +133,13 @@ qtests_sparc64 = \
(config_all_devices.has_key('CONFIG_ISA_TESTDEV') ? ['endianness-test'] : []) + \
['prom-env-test', 'boot-serial-test']
+qtests_npcm7xx = ['npcm7xx_timer-test']
qtests_arm = \
(config_all_devices.has_key('CONFIG_PFLASH_CFI02') ? ['pflash-cfi02-test'] : []) + \
+ (config_all_devices.has_key('CONFIG_NPCM7XX') ? qtests_npcm7xx : []) + \
['arm-cpu-features',
'microbit-test',
'm25p80-test',
- 'npcm7xx_timer-test',
'test-arm-mptimer',
'boot-serial-test',
'hexloader-test']
diff --git a/tests/qtest/migration-helpers.c b/tests/qtest/migration-helpers.c
index b799dbafb7..4ee26014b7 100644
--- a/tests/qtest/migration-helpers.c
+++ b/tests/qtest/migration-helpers.c
@@ -32,7 +32,7 @@ static void check_stop_event(QTestState *who)
QDict *wait_command_fd(QTestState *who, int fd, const char *command, ...)
{
va_list ap;
- QDict *resp;
+ QDict *resp, *ret;
va_start(ap, command);
qtest_qmp_vsend_fds(who, &fd, 1, command, ap);
@@ -44,7 +44,11 @@ QDict *wait_command_fd(QTestState *who, int fd, const char *command, ...)
g_assert(!qdict_haskey(resp, "error"));
g_assert(qdict_haskey(resp, "return"));
- return qdict_get_qdict(resp, "return");
+ ret = qdict_get_qdict(resp, "return");
+ qobject_ref(ret);
+ qobject_unref(resp);
+
+ return ret;
}
/*
@@ -53,7 +57,7 @@ QDict *wait_command_fd(QTestState *who, int fd, const char *command, ...)
QDict *wait_command(QTestState *who, const char *command, ...)
{
va_list ap;
- QDict *resp;
+ QDict *resp, *ret;
va_start(ap, command);
resp = qtest_vqmp(who, command, ap);
@@ -64,7 +68,11 @@ QDict *wait_command(QTestState *who, const char *command, ...)
g_assert(!qdict_haskey(resp, "error"));
g_assert(qdict_haskey(resp, "return"));
- return qdict_get_qdict(resp, "return");
+ ret = qdict_get_qdict(resp, "return");
+ qobject_ref(ret);
+ qobject_unref(resp);
+
+ return ret;
}
/*
diff --git a/tests/vhost-user-bridge.c b/tests/vhost-user-bridge.c
index 6c3d490611..bd43607a4d 100644
--- a/tests/vhost-user-bridge.c
+++ b/tests/vhost-user-bridge.c
@@ -520,6 +520,7 @@ vubr_accept_cb(int sock, void *ctx)
VHOST_USER_BRIDGE_MAX_QUEUES,
conn_fd,
vubr_panic,
+ NULL,
vubr_set_watch,
vubr_remove_watch,
&vuiface)) {
@@ -573,6 +574,7 @@ vubr_new(const char *path, bool client)
VHOST_USER_BRIDGE_MAX_QUEUES,
dev->sock,
vubr_panic,
+ NULL,
vubr_set_watch,
vubr_remove_watch,
&vuiface)) {
diff --git a/tools/virtiofsd/fuse_virtio.c b/tools/virtiofsd/fuse_virtio.c
index 89f537f79b..324936948d 100644
--- a/tools/virtiofsd/fuse_virtio.c
+++ b/tools/virtiofsd/fuse_virtio.c
@@ -1013,8 +1013,8 @@ int virtio_session_mount(struct fuse_session *se)
se->vu_socketfd = data_sock;
se->virtio_dev->se = se;
pthread_rwlock_init(&se->virtio_dev->vu_dispatch_rwlock, NULL);
- vu_init(&se->virtio_dev->dev, 2, se->vu_socketfd, fv_panic, fv_set_watch,
- fv_remove_watch, &fv_iface);
+ vu_init(&se->virtio_dev->dev, 2, se->vu_socketfd, fv_panic, NULL,
+ fv_set_watch, fv_remove_watch, &fv_iface);
return 0;
}
diff --git a/util/block-helpers.c b/util/block-helpers.c
new file mode 100644
index 0000000000..c4851432f5
--- /dev/null
+++ b/util/block-helpers.c
@@ -0,0 +1,46 @@
+/*
+ * Block utility functions
+ *
+ * Copyright IBM, Corp. 2011
+ * Copyright (c) 2020 Coiby Xu <coiby.xu@gmail.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qapi/qmp/qerror.h"
+#include "block-helpers.h"
+
+/**
+ * check_block_size:
+ * @id: The unique ID of the object
+ * @name: The name of the property being validated
+ * @value: The block size in bytes
+ * @errp: A pointer to an area to store an error
+ *
+ * This function checks that the block size meets the following conditions:
+ * 1. At least MIN_BLOCK_SIZE
+ * 2. No larger than MAX_BLOCK_SIZE
+ * 3. A power of 2
+ */
+void check_block_size(const char *id, const char *name, int64_t value,
+ Error **errp)
+{
+ /* value of 0 means "unset" */
+ if (value && (value < MIN_BLOCK_SIZE || value > MAX_BLOCK_SIZE)) {
+ error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE,
+ id, name, value, MIN_BLOCK_SIZE, MAX_BLOCK_SIZE);
+ return;
+ }
+
+ /* We rely on power-of-2 blocksizes for bitmasks */
+ if ((value & (value - 1)) != 0) {
+ error_setg(errp,
+ "Property %s.%s doesn't take value '%" PRId64
+ "', it's not a power of 2",
+ id, name, value);
+ return;
+ }
+}
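+
+/*
+ * Illustrative usage sketch (the caller, property and value names are
+ * hypothetical):
+ *
+ *   Error *local_err = NULL;
+ *
+ *   check_block_size("virtio-blk-device", "logical_block_size", value,
+ *                    &local_err);
+ *   if (local_err) {
+ *       error_propagate(errp, local_err);
+ *       return;
+ *   }
+ */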
diff --git a/util/block-helpers.h b/util/block-helpers.h
new file mode 100644
index 0000000000..b53295a529
--- /dev/null
+++ b/util/block-helpers.h
@@ -0,0 +1,19 @@
+#ifndef BLOCK_HELPERS_H
+#define BLOCK_HELPERS_H
+
+#include "qemu/units.h"
+
+/* lower limit is sector size */
+#define MIN_BLOCK_SIZE INT64_C(512)
+#define MIN_BLOCK_SIZE_STR "512 B"
+/*
+ * upper limit is arbitrary, 2 MiB looks sufficient for all sensible uses, and
+ * matches qcow2 cluster size limit
+ */
+#define MAX_BLOCK_SIZE (2 * MiB)
+#define MAX_BLOCK_SIZE_STR "2 MiB"
+
+void check_block_size(const char *id, const char *name, int64_t value,
+ Error **errp);
+
+#endif /* BLOCK_HELPERS_H */
diff --git a/util/meson.build b/util/meson.build
index e6b207a99e..c5159ad79d 100644
--- a/util/meson.build
+++ b/util/meson.build
@@ -66,6 +66,10 @@ if have_block
util_ss.add(files('main-loop.c'))
util_ss.add(files('nvdimm-utils.c'))
util_ss.add(files('qemu-coroutine.c', 'qemu-coroutine-lock.c', 'qemu-coroutine-io.c'))
+ util_ss.add(when: ['CONFIG_LINUX', 'CONFIG_VHOST_USER'], if_true: [
+ files('vhost-user-server.c'), vhost_user
+ ])
+ util_ss.add(files('block-helpers.c'))
util_ss.add(files('qemu-coroutine-sleep.c'))
util_ss.add(files('qemu-co-shared-resource.c'))
util_ss.add(files('thread-pool.c', 'qemu-timer.c'))
diff --git a/util/vhost-user-server.c b/util/vhost-user-server.c
new file mode 100644
index 0000000000..783d847a6d
--- /dev/null
+++ b/util/vhost-user-server.c
@@ -0,0 +1,446 @@
+/*
+ * Sharing QEMU devices via vhost-user protocol
+ *
+ * Copyright (c) Coiby Xu <coiby.xu@gmail.com>.
+ * Copyright (c) 2020 Red Hat, Inc.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * later. See the COPYING file in the top-level directory.
+ */
+#include "qemu/osdep.h"
+#include "qemu/main-loop.h"
+#include "qemu/vhost-user-server.h"
+#include "block/aio-wait.h"
+
+/*
+ * Theory of operation:
+ *
+ * VuServer is started and stopped by vhost_user_server_start() and
+ * vhost_user_server_stop() from the main loop thread. Starting the server
+ * opens a vhost-user UNIX domain socket and listens for incoming connections.
+ * Only one connection is allowed at a time.
+ *
+ * The connection is handled by the vu_client_trip() coroutine in the
+ * VuServer->ctx AioContext. The coroutine consists of a vu_dispatch() loop
+ * where libvhost-user calls vu_message_read() to receive the next vhost-user
+ * protocol message over the UNIX domain socket.
+ *
+ * When virtqueues are set up, libvhost-user calls set_watch() to monitor kick
+ * fds. These fds are also handled in the VuServer->ctx AioContext.
+ *
+ * Both vu_client_trip() and kick fd monitoring can be stopped by shutting down
+ * the socket connection. Shutting down the socket connection causes
+ * vu_message_read() to fail since no more data can be received from the socket.
+ * After vu_dispatch() fails, vu_client_trip() calls vu_deinit() to stop
+ * libvhost-user before terminating the coroutine. vu_deinit() calls
+ * remove_watch() to stop monitoring kick fds and this stops virtqueue
+ * processing.
+ *
+ * When vu_client_trip() has finished cleaning up it schedules a BH in the main
+ * loop thread to accept the next client connection.
+ *
+ * When libvhost-user detects an error it calls panic_cb() and sets the
+ * dev->broken flag. Both vu_client_trip() and kick fd processing stop when
+ * the dev->broken flag is set.
+ *
+ * It is possible to switch AioContexts using
+ * vhost_user_server_detach_aio_context() and
+ * vhost_user_server_attach_aio_context(). They stop monitoring fds in the old
+ * AioContext and resume monitoring in the new AioContext. The vu_client_trip()
+ * coroutine remains in a yielded state during the switch. This is made
+ * possible by QIOChannel's support for spurious coroutine re-entry in
+ * qio_channel_yield(). The coroutine will restart I/O when re-entered from the
+ * new AioContext.
+ */
+
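+/*
+ * Typical caller lifecycle, as an illustrative sketch (error handling and
+ * unrelated setup omitted):
+ *
+ *   VuServer server;
+ *
+ *   vhost_user_server_start(&server, addr, ctx, num_queues, &vu_iface, errp);
+ *   ...
+ *   vhost_user_server_detach_aio_context(&server);
+ *   vhost_user_server_attach_aio_context(&server, new_ctx);
+ *   ...
+ *   vhost_user_server_stop(&server);
+ */
+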
+static void vmsg_close_fds(VhostUserMsg *vmsg)
+{
+ int i;
+ for (i = 0; i < vmsg->fd_num; i++) {
+ close(vmsg->fds[i]);
+ }
+}
+
+static void vmsg_unblock_fds(VhostUserMsg *vmsg)
+{
+ int i;
+ for (i = 0; i < vmsg->fd_num; i++) {
+ qemu_set_nonblock(vmsg->fds[i]);
+ }
+}
+
+static void panic_cb(VuDev *vu_dev, const char *buf)
+{
+ error_report("vu_panic: %s", buf);
+}
+
+static bool coroutine_fn
+vu_message_read(VuDev *vu_dev, int conn_fd, VhostUserMsg *vmsg)
+{
+ struct iovec iov = {
+ .iov_base = (char *)vmsg,
+ .iov_len = VHOST_USER_HDR_SIZE,
+ };
+ int rc, read_bytes = 0;
+ Error *local_err = NULL;
+ const size_t max_fds = G_N_ELEMENTS(vmsg->fds);
+ VuServer *server = container_of(vu_dev, VuServer, vu_dev);
+ QIOChannel *ioc = server->ioc;
+
+ vmsg->fd_num = 0;
+ if (!ioc) {
+ error_report_err(local_err);
+ goto fail;
+ }
+
+ assert(qemu_in_coroutine());
+ do {
+ size_t nfds = 0;
+ int *fds = NULL;
+
+ /*
+ * qio_channel_readv_full() may return a short read, so keep calling it
+ * until we have received VHOST_USER_HDR_SIZE bytes in total (or 0 bytes,
+ * meaning the socket was closed)
+ */
+ rc = qio_channel_readv_full(ioc, &iov, 1, &fds, &nfds, &local_err);
+ if (rc < 0) {
+ if (rc == QIO_CHANNEL_ERR_BLOCK) {
+ assert(local_err == NULL);
+ qio_channel_yield(ioc, G_IO_IN);
+ continue;
+ } else {
+ error_report_err(local_err);
+ goto fail;
+ }
+ }
+
+ if (nfds > 0) {
+ if (vmsg->fd_num + nfds > max_fds) {
+ error_report("A maximum of %zu fds are allowed, "
+ "however got %zu fds now",
+ max_fds, vmsg->fd_num + nfds);
+ g_free(fds);
+ goto fail;
+ }
+ memcpy(vmsg->fds + vmsg->fd_num, fds, nfds * sizeof(vmsg->fds[0]));
+ vmsg->fd_num += nfds;
+ g_free(fds);
+ }
+
+ if (rc == 0) { /* socket closed */
+ goto fail;
+ }
+
+ iov.iov_base += rc;
+ iov.iov_len -= rc;
+ read_bytes += rc;
+ } while (read_bytes != VHOST_USER_HDR_SIZE);
+
+ /* qio_channel_readv_full() makes received fds blocking; switch them back to non-blocking */
+ vmsg_unblock_fds(vmsg);
+ if (vmsg->size > sizeof(vmsg->payload)) {
+ error_report("Error: too big message request: %d, "
+ "size: vmsg->size: %u, "
+ "while sizeof(vmsg->payload) = %zu",
+ vmsg->request, vmsg->size, sizeof(vmsg->payload));
+ goto fail;
+ }
+
+ struct iovec iov_payload = {
+ .iov_base = (char *)&vmsg->payload,
+ .iov_len = vmsg->size,
+ };
+ if (vmsg->size) {
+ rc = qio_channel_readv_all_eof(ioc, &iov_payload, 1, &local_err);
+ if (rc != 1) {
+ if (local_err) {
+ error_report_err(local_err);
+ }
+ goto fail;
+ }
+ }
+
+ return true;
+
+fail:
+ vmsg_close_fds(vmsg);
+
+ return false;
+}
+
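+/*
+ * vu_message_read() above is installed as libvhost-user's message-read
+ * callback via vu_init() in vu_accept() below. Running it from the
+ * vu_client_trip() coroutine lets it yield in qio_channel_yield() instead of
+ * blocking the AioContext while waiting for socket data.
+ */
+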
+static coroutine_fn void vu_client_trip(void *opaque)
+{
+ VuServer *server = opaque;
+ VuDev *vu_dev = &server->vu_dev;
+
+ while (!vu_dev->broken && vu_dispatch(vu_dev)) {
+ /* Keep running */
+ }
+
+ vu_deinit(vu_dev);
+
+ /* vu_deinit() should have called remove_watch() */
+ assert(QTAILQ_EMPTY(&server->vu_fd_watches));
+
+ object_unref(OBJECT(server->sioc));
+ server->sioc = NULL;
+
+ object_unref(OBJECT(server->ioc));
+ server->ioc = NULL;
+
+ server->co_trip = NULL;
+ if (server->restart_listener_bh) {
+ qemu_bh_schedule(server->restart_listener_bh);
+ }
+ aio_wait_kick();
+}
+
+/*
+ * A wrapper for vu_kick_cb.
+ *
+ * Since aio_dispatch() can only pass one opaque pointer to the callback
+ * function, the VuDev and pvt pointers are packed into a VuFdWatch
+ * struct; unpack it here and pass them on to vu_kick_cb.
+ */
+static void kick_handler(void *opaque)
+{
+ VuFdWatch *vu_fd_watch = opaque;
+ VuDev *vu_dev = vu_fd_watch->vu_dev;
+
+ vu_fd_watch->cb(vu_dev, 0, vu_fd_watch->pvt);
+
+ /* Stop vu_client_trip() if an error occurred in vu_fd_watch->cb() */
+ if (vu_dev->broken) {
+ VuServer *server = container_of(vu_dev, VuServer, vu_dev);
+
+ qio_channel_shutdown(server->ioc, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
+ }
+}
+
+static VuFdWatch *find_vu_fd_watch(VuServer *server, int fd)
+{
+
+ VuFdWatch *vu_fd_watch, *next;
+ QTAILQ_FOREACH_SAFE(vu_fd_watch, &server->vu_fd_watches, next, next) {
+ if (vu_fd_watch->fd == fd) {
+ return vu_fd_watch;
+ }
+ }
+ return NULL;
+}
+
+static void
+set_watch(VuDev *vu_dev, int fd, int vu_evt,
+ vu_watch_cb cb, void *pvt)
+{
+
+ VuServer *server = container_of(vu_dev, VuServer, vu_dev);
+ g_assert(vu_dev);
+ g_assert(fd >= 0);
+ g_assert(cb);
+
+ VuFdWatch *vu_fd_watch = find_vu_fd_watch(server, fd);
+
+ if (!vu_fd_watch) {
+ VuFdWatch *vu_fd_watch = g_new0(VuFdWatch, 1);
+
+ QTAILQ_INSERT_TAIL(&server->vu_fd_watches, vu_fd_watch, next);
+
+ vu_fd_watch->fd = fd;
+ vu_fd_watch->cb = cb;
+ qemu_set_nonblock(fd);
+ aio_set_fd_handler(server->ioc->ctx, fd, true, kick_handler,
+ NULL, NULL, vu_fd_watch);
+ vu_fd_watch->vu_dev = vu_dev;
+ vu_fd_watch->pvt = pvt;
+ }
+}
+
+
+static void remove_watch(VuDev *vu_dev, int fd)
+{
+ VuServer *server;
+ g_assert(vu_dev);
+ g_assert(fd >= 0);
+
+ server = container_of(vu_dev, VuServer, vu_dev);
+
+ VuFdWatch *vu_fd_watch = find_vu_fd_watch(server, fd);
+
+ if (!vu_fd_watch) {
+ return;
+ }
+ aio_set_fd_handler(server->ioc->ctx, fd, true, NULL, NULL, NULL, NULL);
+
+ QTAILQ_REMOVE(&server->vu_fd_watches, vu_fd_watch, next);
+ g_free(vu_fd_watch);
+}
+
+
+static void vu_accept(QIONetListener *listener, QIOChannelSocket *sioc,
+ gpointer opaque)
+{
+ VuServer *server = opaque;
+
+ if (server->sioc) {
+ warn_report("Only one vhost-user client is allowed to "
+ "connect the server one time");
+ return;
+ }
+
+ if (!vu_init(&server->vu_dev, server->max_queues, sioc->fd, panic_cb,
+ vu_message_read, set_watch, remove_watch, server->vu_iface)) {
+ error_report("Failed to initialize libvhost-user");
+ return;
+ }
+
+ /*
+ * Unset the network listener's callback so that any further vhost-user
+ * clients keep waiting until this client disconnects
+ */
+ qio_net_listener_set_client_func(server->listener,
+ NULL,
+ NULL,
+ NULL);
+ server->sioc = sioc;
+ /*
+ * Take an extra reference so that sioc is not freed by
+ * qio_net_listener_channel_func(), which calls object_unref(OBJECT(sioc))
+ */
+ object_ref(OBJECT(server->sioc));
+ qio_channel_set_name(QIO_CHANNEL(sioc), "vhost-user client");
+ server->ioc = QIO_CHANNEL(sioc);
+ object_ref(OBJECT(server->ioc));
+
+ /* TODO vu_message_write() spins if non-blocking! */
+ qio_channel_set_blocking(server->ioc, false, NULL);
+
+ server->co_trip = qemu_coroutine_create(vu_client_trip, server);
+
+ aio_context_acquire(server->ctx);
+ vhost_user_server_attach_aio_context(server, server->ctx);
+ aio_context_release(server->ctx);
+}
+
+void vhost_user_server_stop(VuServer *server)
+{
+ aio_context_acquire(server->ctx);
+
+ qemu_bh_delete(server->restart_listener_bh);
+ server->restart_listener_bh = NULL;
+
+ if (server->sioc) {
+ VuFdWatch *vu_fd_watch;
+
+ QTAILQ_FOREACH(vu_fd_watch, &server->vu_fd_watches, next) {
+ aio_set_fd_handler(server->ctx, vu_fd_watch->fd, true,
+ NULL, NULL, NULL, vu_fd_watch);
+ }
+
+ qio_channel_shutdown(server->ioc, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
+
+ AIO_WAIT_WHILE(server->ctx, server->co_trip);
+ }
+
+ aio_context_release(server->ctx);
+
+ if (server->listener) {
+ qio_net_listener_disconnect(server->listener);
+ object_unref(OBJECT(server->listener));
+ }
+}
+
+/*
+ * Allow the next client to connect to the server. Called from a BH in the main
+ * loop.
+ */
+static void restart_listener_bh(void *opaque)
+{
+ VuServer *server = opaque;
+
+ qio_net_listener_set_client_func(server->listener, vu_accept, server,
+ NULL);
+}
+
+/* Called with ctx acquired */
+void vhost_user_server_attach_aio_context(VuServer *server, AioContext *ctx)
+{
+ VuFdWatch *vu_fd_watch;
+
+ server->ctx = ctx;
+
+ if (!server->sioc) {
+ return;
+ }
+
+ qio_channel_attach_aio_context(server->ioc, ctx);
+
+ QTAILQ_FOREACH(vu_fd_watch, &server->vu_fd_watches, next) {
+ aio_set_fd_handler(ctx, vu_fd_watch->fd, true, kick_handler, NULL,
+ NULL, vu_fd_watch);
+ }
+
+ aio_co_schedule(ctx, server->co_trip);
+}
+
+/* Called with server->ctx acquired */
+void vhost_user_server_detach_aio_context(VuServer *server)
+{
+ if (server->sioc) {
+ VuFdWatch *vu_fd_watch;
+
+ QTAILQ_FOREACH(vu_fd_watch, &server->vu_fd_watches, next) {
+ aio_set_fd_handler(server->ctx, vu_fd_watch->fd, true,
+ NULL, NULL, NULL, vu_fd_watch);
+ }
+
+ qio_channel_detach_aio_context(server->ioc);
+ }
+
+ server->ctx = NULL;
+}
+
+bool vhost_user_server_start(VuServer *server,
+ SocketAddress *socket_addr,
+ AioContext *ctx,
+ uint16_t max_queues,
+ const VuDevIface *vu_iface,
+ Error **errp)
+{
+ QEMUBH *bh;
+ QIONetListener *listener;
+
+ if (socket_addr->type != SOCKET_ADDRESS_TYPE_UNIX &&
+ socket_addr->type != SOCKET_ADDRESS_TYPE_FD) {
+ error_setg(errp, "Only socket address types 'unix' and 'fd' are supported");
+ return false;
+ }
+
+ listener = qio_net_listener_new();
+ if (qio_net_listener_open_sync(listener, socket_addr, 1,
+ errp) < 0) {
+ object_unref(OBJECT(listener));
+ return false;
+ }
+
+ bh = qemu_bh_new(restart_listener_bh, server);
+
+ /* zero out unspecified fields */
+ *server = (VuServer) {
+ .listener = listener,
+ .restart_listener_bh = bh,
+ .vu_iface = vu_iface,
+ .max_queues = max_queues,
+ .ctx = ctx,
+ };
+
+ qio_net_listener_set_name(server->listener, "vhost-user-backend-listener");
+
+ qio_net_listener_set_client_func(server->listener,
+ vu_accept,
+ server,
+ NULL);
+
+ QTAILQ_INIT(&server->vu_fd_watches);
+ return true;
+}