623 files changed, 13253 insertions(+), 5711 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS index d7e9ba2da7..0b67c4826a 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -172,7 +172,8 @@ F: hw/unicore32/ X86 M: Paolo Bonzini <pbonzini@redhat.com> M: Richard Henderson <rth@twiddle.net> -S: Odd Fixes +M: Eduardo Habkost <ehabkost@redhat.com> +S: Maintained F: target-i386/ F: hw/i386/ @@ -703,10 +704,13 @@ F: tests/virtio-9p-test.c T: git git://github.com/kvaneesh/QEMU.git virtio-blk -M: Kevin Wolf <kwolf@redhat.com> M: Stefan Hajnoczi <stefanha@redhat.com> +L: qemu-block@nongnu.org S: Supported F: hw/block/virtio-blk.c +F: hw/block/dataplane/* +F: hw/virtio/dataplane/* +T: git git://github.com/stefanha/qemu.git block virtio-ccw M: Cornelia Huck <cornelia.huck@de.ibm.com> @@ -731,12 +735,14 @@ F: backends/rng*.c nvme M: Keith Busch <keith.busch@intel.com> +L: qemu-block@nongnu.org S: Supported F: hw/block/nvme* F: tests/nvme-test.c megasas M: Hannes Reinecke <hare@suse.de> +L: qemu-block@nongnu.org S: Supported F: hw/scsi/megasas.c F: hw/scsi/mfi.h @@ -766,21 +772,26 @@ F: tests/ac97-test.c F: tests/es1370-test.c F: tests/intel-hda-test.c -Block +Block layer core M: Kevin Wolf <kwolf@redhat.com> -M: Stefan Hajnoczi <stefanha@redhat.com> +L: qemu-block@nongnu.org S: Supported -F: async.c -F: aio-*.c F: block* F: block/ F: hw/block/ -F: migration/block* F: qemu-img* F: qemu-io* -F: tests/image-fuzzer/ F: tests/qemu-iotests/ T: git git://repo.or.cz/qemu/kevin.git block + +Block I/O path +M: Stefan Hajnoczi <stefanha@redhat.com> +L: qemu-block@nongnu.org +S: Supported +F: async.c +F: aio-*.c +F: block/io.c +F: migration/block* T: git git://github.com/stefanha/qemu.git block Block Jobs @@ -905,6 +916,15 @@ F: nbd.* F: qemu-nbd.c T: git git://github.com/bonzini/qemu.git nbd-next +NUMA +M: Eduardo Habkost <ehabkost@redhat.com> +S: Maintained +F: numa.c +F: include/sysemu/numa.h +K: numa|NUMA +K: srat|SRAT +T: git git://github.com/ehabkost/qemu.git numa + QAPI M: Luiz Capitulino <lcapitulino@redhat.com> M: Michael Roth <mdroth@linux.vnet.ibm.com> @@ -1094,6 +1114,7 @@ Block drivers ------------- VMDK M: Fam Zheng <famz@redhat.com> +L: qemu-block@nongnu.org S: Supported F: block/vmdk.c @@ -1124,6 +1145,7 @@ T: git git://github.com/codyprime/qemu-kvm-jtc.git block VDI M: Stefan Weil <sw@weilnetz.de> +L: qemu-block@nongnu.org S: Maintained F: block/vdi.c @@ -1131,6 +1153,7 @@ iSCSI M: Ronnie Sahlberg <ronniesahlberg@gmail.com> M: Paolo Bonzini <pbonzini@redhat.com> M: Peter Lieven <pl@kamp.de> +L: qemu-block@nongnu.org S: Supported F: block/iscsi.c @@ -1172,7 +1195,102 @@ S: Supported F: block/gluster.c T: git git://github.com/codyprime/qemu-kvm-jtc.git block +Null Block Driver +M: Fam Zheng <famz@redhat.com> +L: qemu-block@nongnu.org +S: Supported +F: block/null.c + Bootdevice M: Gonglei <arei.gonglei@huawei.com> S: Maintained F: bootdevice.c + +Quorum +M: Alberto Garcia <berto@igalia.com> +S: Supported +F: block/quorum.c +L: qemu-block@nongnu.org + +blkverify +M: Stefan Hajnoczi <stefanha@redhat.com> +L: qemu-block@nongnu.org +S: Supported +F: block/blkverify.c + +bochs +M: Stefan Hajnoczi <stefanha@redhat.com> +L: qemu-block@nongnu.org +S: Supported +F: block/bochs.c + +cloop +M: Stefan Hajnoczi <stefanha@redhat.com> +L: qemu-block@nongnu.org +S: Supported +F: block/cloop.c + +dmg +M: Stefan Hajnoczi <stefanha@redhat.com> +L: qemu-block@nongnu.org +S: Supported +F: block/dmg.c + +parallels +M: Stefan Hajnoczi <stefanha@redhat.com> +L: qemu-block@nongnu.org +S: Supported +F: block/parallels.c + +qed +M: Stefan Hajnoczi <stefanha@redhat.com> +L: 
qemu-block@nongnu.org +S: Supported +F: block/qed.c + +raw +M: Kevin Wolf <kwolf@redhat.com> +L: qemu-block@nongnu.org +S: Supported +F: block/linux-aio.c +F: block/raw-aio.h +F: block/raw-posix.c +F: block/raw-win32.c +F: block/raw_bsd.c +F: block/win32-aio.c + +qcow2 +M: Kevin Wolf <kwolf@redhat.com> +L: qemu-block@nongnu.org +S: Supported +F: block/qcow2* + +qcow +M: Kevin Wolf <kwolf@redhat.com> +L: qemu-block@nongnu.org +S: Supported +F: block/qcow.c + +blkdebug +M: Kevin Wolf <kwolf@redhat.com> +L: qemu-block@nongnu.org +S: Supported +F: block/blkdebug.c + +vpc +M: Kevin Wolf <kwolf@redhat.com> +L: qemu-block@nongnu.org +S: Supported +F: block/vpc.c + +vvfat +M: Kevin Wolf <kwolf@redhat.com> +L: qemu-block@nongnu.org +S: Supported +F: block/vvfat.c + +Image format fuzzer +M: Stefan Hajnoczi <stefanha@redhat.com> +L: qemu-block@nongnu.org +S: Supported +F: tests/image-fuzzer/ @@ -296,6 +296,7 @@ clean: rm -f fsdev/*.pod rm -rf .libs */.libs rm -f qemu-img-cmds.h + rm -f ui/shader/*-vert.h ui/shader/*-frag.h @# May not be present in GENERATED_HEADERS rm -f trace/generated-tracers-dtrace.dtrace* rm -f trace/generated-tracers-dtrace.h* @@ -441,6 +442,22 @@ cscope: find "$(SRC_PATH)" -name "*.[chsS]" -print | sed 's,^\./,,' > ./cscope.files cscope -b +# opengl shader programs +ui/shader/%-vert.h: $(SRC_PATH)/ui/shader/%.vert $(SRC_PATH)/scripts/shaderinclude.pl + @mkdir -p $(dir $@) + $(call quiet-command,\ + perl $(SRC_PATH)/scripts/shaderinclude.pl $< > $@,\ + " VERT $@") + +ui/shader/%-frag.h: $(SRC_PATH)/ui/shader/%.frag $(SRC_PATH)/scripts/shaderinclude.pl + @mkdir -p $(dir $@) + $(call quiet-command,\ + perl $(SRC_PATH)/scripts/shaderinclude.pl $< > $@,\ + " FRAG $@") + +ui/console-gl.o: $(SRC_PATH)/ui/console-gl.c \ + ui/shader/texture-blit-vert.h ui/shader/texture-blit-frag.h + # documentation MAKEINFO=makeinfo MAKEINFOFLAGS=--no-headers --no-split --number-sections diff --git a/Makefile.target b/Makefile.target index 2262d89354..1083377403 100644 --- a/Makefile.target +++ b/Makefile.target @@ -134,7 +134,7 @@ obj-$(CONFIG_KVM) += kvm-all.o obj-y += memory.o savevm.o cputlb.o obj-y += memory_mapping.o obj-y += dump.o -LIBS+=$(libs_softmmu) +LIBS := $(libs_softmmu) $(LIBS) # xen support obj-$(CONFIG_XEN) += xen-common.o diff --git a/aio-posix.c b/aio-posix.c index cbd4c3438c..4abec38866 100644 --- a/aio-posix.c +++ b/aio-posix.c @@ -24,7 +24,6 @@ struct AioHandler IOHandler *io_read; IOHandler *io_write; int deleted; - int pollfds_idx; void *opaque; QLIST_ENTRY(AioHandler) node; }; @@ -83,7 +82,6 @@ void aio_set_fd_handler(AioContext *ctx, node->io_read = io_read; node->io_write = io_write; node->opaque = opaque; - node->pollfds_idx = -1; node->pfd.events = (io_read ? G_IO_IN | G_IO_HUP | G_IO_ERR : 0); node->pfd.events |= (io_write ? G_IO_OUT | G_IO_ERR : 0); @@ -186,13 +184,61 @@ bool aio_dispatch(AioContext *ctx) return progress; } +/* These thread-local variables are used only in a small part of aio_poll + * around the call to the poll() system call. In particular they are not + * used while aio_poll is performing callbacks, which makes it much easier + * to think about reentrancy! + * + * Stack-allocated arrays would be perfect but they have size limitations; + * heap allocation is expensive enough that we want to reuse arrays across + * calls to aio_poll(). And because poll() has to be called without holding + * any lock, the arrays cannot be stored in AioContext. Thread-local data + * has none of the disadvantages of these three options. 
+ */ +static __thread GPollFD *pollfds; +static __thread AioHandler **nodes; +static __thread unsigned npfd, nalloc; +static __thread Notifier pollfds_cleanup_notifier; + +static void pollfds_cleanup(Notifier *n, void *unused) +{ + g_assert(npfd == 0); + g_free(pollfds); + g_free(nodes); + nalloc = 0; +} + +static void add_pollfd(AioHandler *node) +{ + if (npfd == nalloc) { + if (nalloc == 0) { + pollfds_cleanup_notifier.notify = pollfds_cleanup; + qemu_thread_atexit_add(&pollfds_cleanup_notifier); + nalloc = 8; + } else { + g_assert(nalloc <= INT_MAX); + nalloc *= 2; + } + pollfds = g_renew(GPollFD, pollfds, nalloc); + nodes = g_renew(AioHandler *, nodes, nalloc); + } + nodes[npfd] = node; + pollfds[npfd] = (GPollFD) { + .fd = node->pfd.fd, + .events = node->pfd.events, + }; + npfd++; +} + bool aio_poll(AioContext *ctx, bool blocking) { AioHandler *node; bool was_dispatching; - int ret; + int i, ret; bool progress; + int64_t timeout; + aio_context_acquire(ctx); was_dispatching = ctx->dispatching; progress = false; @@ -210,39 +256,36 @@ bool aio_poll(AioContext *ctx, bool blocking) ctx->walking_handlers++; - g_array_set_size(ctx->pollfds, 0); + assert(npfd == 0); /* fill pollfds */ QLIST_FOREACH(node, &ctx->aio_handlers, node) { - node->pollfds_idx = -1; if (!node->deleted && node->pfd.events) { - GPollFD pfd = { - .fd = node->pfd.fd, - .events = node->pfd.events, - }; - node->pollfds_idx = ctx->pollfds->len; - g_array_append_val(ctx->pollfds, pfd); + add_pollfd(node); } } - ctx->walking_handlers--; + timeout = blocking ? aio_compute_timeout(ctx) : 0; /* wait until next event */ - ret = qemu_poll_ns((GPollFD *)ctx->pollfds->data, - ctx->pollfds->len, - blocking ? aio_compute_timeout(ctx) : 0); + if (timeout) { + aio_context_release(ctx); + } + ret = qemu_poll_ns((GPollFD *)pollfds, npfd, timeout); + if (timeout) { + aio_context_acquire(ctx); + } /* if we have any readable fds, dispatch event */ if (ret > 0) { - QLIST_FOREACH(node, &ctx->aio_handlers, node) { - if (node->pollfds_idx != -1) { - GPollFD *pfd = &g_array_index(ctx->pollfds, GPollFD, - node->pollfds_idx); - node->pfd.revents = pfd->revents; - } + for (i = 0; i < npfd; i++) { + nodes[i]->pfd.revents = pollfds[i].revents; } } + npfd = 0; + ctx->walking_handlers--; + /* Run dispatch even if there were no readable fds to run timers */ aio_set_dispatching(ctx, true); if (aio_dispatch(ctx)) { @@ -250,5 +293,7 @@ bool aio_poll(AioContext *ctx, bool blocking) } aio_set_dispatching(ctx, was_dispatching); + aio_context_release(ctx); + return progress; } diff --git a/aio-win32.c b/aio-win32.c index e6f4cedf48..233d8f5d79 100644 --- a/aio-win32.c +++ b/aio-win32.c @@ -283,6 +283,7 @@ bool aio_poll(AioContext *ctx, bool blocking) int count; int timeout; + aio_context_acquire(ctx); have_select_revents = aio_prepare(ctx); if (have_select_revents) { blocking = false; @@ -323,7 +324,13 @@ bool aio_poll(AioContext *ctx, bool blocking) timeout = blocking ? 
qemu_timeout_ns_to_ms(aio_compute_timeout(ctx)) : 0; + if (timeout) { + aio_context_release(ctx); + } ret = WaitForMultipleObjects(count, events, FALSE, timeout); + if (timeout) { + aio_context_acquire(ctx); + } aio_set_dispatching(ctx, true); if (first && aio_bh_poll(ctx)) { @@ -349,5 +356,6 @@ bool aio_poll(AioContext *ctx, bool blocking) progress |= timerlistgroup_run_timers(&ctx->tlg); aio_set_dispatching(ctx, was_dispatching); + aio_context_release(ctx); return progress; } diff --git a/arch_init.c b/arch_init.c index 4c8fceed95..23d3feba44 100644 --- a/arch_init.c +++ b/arch_init.c @@ -24,6 +24,7 @@ #include <stdint.h> #include <stdarg.h> #include <stdlib.h> +#include <zlib.h> #ifndef _WIN32 #include <sys/types.h> #include <sys/mman.h> @@ -127,6 +128,7 @@ static uint64_t bitmap_sync_count; #define RAM_SAVE_FLAG_CONTINUE 0x20 #define RAM_SAVE_FLAG_XBZRLE 0x40 /* 0x80 is reserved in migration.h start with 0x100 next */ +#define RAM_SAVE_FLAG_COMPRESS_PAGE 0x100 static struct defconfig_file { const char *filename; @@ -316,6 +318,147 @@ static uint64_t migration_dirty_pages; static uint32_t last_version; static bool ram_bulk_stage; +struct CompressParam { + bool start; + bool done; + QEMUFile *file; + QemuMutex mutex; + QemuCond cond; + RAMBlock *block; + ram_addr_t offset; +}; +typedef struct CompressParam CompressParam; + +struct DecompressParam { + bool start; + QemuMutex mutex; + QemuCond cond; + void *des; + uint8 *compbuf; + int len; +}; +typedef struct DecompressParam DecompressParam; + +static CompressParam *comp_param; +static QemuThread *compress_threads; +/* comp_done_cond is used to wake up the migration thread when + * one of the compression threads has finished the compression. + * comp_done_lock is used to co-work with comp_done_cond. + */ +static QemuMutex *comp_done_lock; +static QemuCond *comp_done_cond; +/* The empty QEMUFileOps will be used by file in CompressParam */ +static const QEMUFileOps empty_ops = { }; + +static bool compression_switch; +static bool quit_comp_thread; +static bool quit_decomp_thread; +static DecompressParam *decomp_param; +static QemuThread *decompress_threads; +static uint8_t *compressed_data_buf; + +static int do_compress_ram_page(CompressParam *param); + +static void *do_data_compress(void *opaque) +{ + CompressParam *param = opaque; + + while (!quit_comp_thread) { + qemu_mutex_lock(¶m->mutex); + /* Re-check the quit_comp_thread in case of + * terminate_compression_threads is called just before + * qemu_mutex_lock(¶m->mutex) and after + * while(!quit_comp_thread), re-check it here can make + * sure the compression thread terminate as expected. 
+ */ + while (!param->start && !quit_comp_thread) { + qemu_cond_wait(¶m->cond, ¶m->mutex); + } + if (!quit_comp_thread) { + do_compress_ram_page(param); + } + param->start = false; + qemu_mutex_unlock(¶m->mutex); + + qemu_mutex_lock(comp_done_lock); + param->done = true; + qemu_cond_signal(comp_done_cond); + qemu_mutex_unlock(comp_done_lock); + } + + return NULL; +} + +static inline void terminate_compression_threads(void) +{ + int idx, thread_count; + + thread_count = migrate_compress_threads(); + quit_comp_thread = true; + for (idx = 0; idx < thread_count; idx++) { + qemu_mutex_lock(&comp_param[idx].mutex); + qemu_cond_signal(&comp_param[idx].cond); + qemu_mutex_unlock(&comp_param[idx].mutex); + } +} + +void migrate_compress_threads_join(void) +{ + int i, thread_count; + + if (!migrate_use_compression()) { + return; + } + terminate_compression_threads(); + thread_count = migrate_compress_threads(); + for (i = 0; i < thread_count; i++) { + qemu_thread_join(compress_threads + i); + qemu_fclose(comp_param[i].file); + qemu_mutex_destroy(&comp_param[i].mutex); + qemu_cond_destroy(&comp_param[i].cond); + } + qemu_mutex_destroy(comp_done_lock); + qemu_cond_destroy(comp_done_cond); + g_free(compress_threads); + g_free(comp_param); + g_free(comp_done_cond); + g_free(comp_done_lock); + compress_threads = NULL; + comp_param = NULL; + comp_done_cond = NULL; + comp_done_lock = NULL; +} + +void migrate_compress_threads_create(void) +{ + int i, thread_count; + + if (!migrate_use_compression()) { + return; + } + quit_comp_thread = false; + compression_switch = true; + thread_count = migrate_compress_threads(); + compress_threads = g_new0(QemuThread, thread_count); + comp_param = g_new0(CompressParam, thread_count); + comp_done_cond = g_new0(QemuCond, 1); + comp_done_lock = g_new0(QemuMutex, 1); + qemu_cond_init(comp_done_cond); + qemu_mutex_init(comp_done_lock); + for (i = 0; i < thread_count; i++) { + /* com_param[i].file is just used as a dummy buffer to save data, set + * it's ops to empty. 
+ */ + comp_param[i].file = qemu_fopen_ops(NULL, &empty_ops); + comp_param[i].done = true; + qemu_mutex_init(&comp_param[i].mutex); + qemu_cond_init(&comp_param[i].cond); + qemu_thread_create(compress_threads + i, "compress", + do_data_compress, comp_param + i, + QEMU_THREAD_JOINABLE); + } +} + /** * save_page_header: Write page header to wire * @@ -520,12 +663,16 @@ static void migration_bitmap_sync_range(ram_addr_t start, ram_addr_t length) static int64_t start_time; static int64_t bytes_xfer_prev; static int64_t num_dirty_pages_period; +static uint64_t xbzrle_cache_miss_prev; +static uint64_t iterations_prev; static void migration_bitmap_sync_init(void) { start_time = 0; bytes_xfer_prev = 0; num_dirty_pages_period = 0; + xbzrle_cache_miss_prev = 0; + iterations_prev = 0; } /* Called with iothread lock held, to protect ram_list.dirty_memory[] */ @@ -536,8 +683,6 @@ static void migration_bitmap_sync(void) MigrationState *s = migrate_get_current(); int64_t end_time; int64_t bytes_xfer_now; - static uint64_t xbzrle_cache_miss_prev; - static uint64_t iterations_prev; bitmap_sync_count++; @@ -585,7 +730,7 @@ static void migration_bitmap_sync(void) mig_throttle_on = false; } if (migrate_use_xbzrle()) { - if (iterations_prev != 0) { + if (iterations_prev != acct_info.iterations) { acct_info.xbzrle_cache_miss_rate = (double)(acct_info.xbzrle_cache_miss - xbzrle_cache_miss_prev) / @@ -599,8 +744,36 @@ static void migration_bitmap_sync(void) s->dirty_bytes_rate = s->dirty_pages_rate * TARGET_PAGE_SIZE; start_time = end_time; num_dirty_pages_period = 0; - s->dirty_sync_count = bitmap_sync_count; } + s->dirty_sync_count = bitmap_sync_count; +} + +/** + * save_zero_page: Send the zero page to the stream + * + * Returns: Number of pages written. + * + * @f: QEMUFile where to send the data + * @block: block that contains the page we want to send + * @offset: offset inside the block for the page + * @p: pointer to the page + * @bytes_transferred: increase it with the number of transferred bytes + */ +static int save_zero_page(QEMUFile *f, RAMBlock *block, ram_addr_t offset, + uint8_t *p, uint64_t *bytes_transferred) +{ + int pages = -1; + + if (is_zero_range(p, TARGET_PAGE_SIZE)) { + acct_info.dup_pages++; + *bytes_transferred += save_page_header(f, block, + offset | RAM_SAVE_FLAG_COMPRESS); + qemu_put_byte(f, 0); + *bytes_transferred += 1; + pages = 1; + } + + return pages; } /** @@ -651,25 +824,22 @@ static int ram_save_page(QEMUFile *f, RAMBlock* block, ram_addr_t offset, acct_info.dup_pages++; } } - } else if (is_zero_range(p, TARGET_PAGE_SIZE)) { - acct_info.dup_pages++; - *bytes_transferred += save_page_header(f, block, - offset | RAM_SAVE_FLAG_COMPRESS); - qemu_put_byte(f, 0); - *bytes_transferred += 1; - pages = 1; - /* Must let xbzrle know, otherwise a previous (now 0'd) cached - * page would be stale - */ - xbzrle_cache_zero_page(current_addr); - } else if (!ram_bulk_stage && migrate_use_xbzrle()) { - pages = save_xbzrle_page(f, &p, current_addr, block, - offset, last_stage, bytes_transferred); - if (!last_stage) { - /* Can't send this cached data async, since the cache page - * might get updated before it gets to the wire + } else { + pages = save_zero_page(f, block, offset, p, bytes_transferred); + if (pages > 0) { + /* Must let xbzrle know, otherwise a previous (now 0'd) cached + * page would be stale */ - send_async = false; + xbzrle_cache_zero_page(current_addr); + } else if (!ram_bulk_stage && migrate_use_xbzrle()) { + pages = save_xbzrle_page(f, &p, current_addr, block, + offset, 
last_stage, bytes_transferred); + if (!last_stage) { + /* Can't send this cached data async, since the cache page + * might get updated before it gets to the wire + */ + send_async = false; + } } } @@ -692,6 +862,178 @@ static int ram_save_page(QEMUFile *f, RAMBlock* block, ram_addr_t offset, return pages; } +static int do_compress_ram_page(CompressParam *param) +{ + int bytes_sent, blen; + uint8_t *p; + RAMBlock *block = param->block; + ram_addr_t offset = param->offset; + + p = memory_region_get_ram_ptr(block->mr) + (offset & TARGET_PAGE_MASK); + + bytes_sent = save_page_header(param->file, block, offset | + RAM_SAVE_FLAG_COMPRESS_PAGE); + blen = qemu_put_compression_data(param->file, p, TARGET_PAGE_SIZE, + migrate_compress_level()); + bytes_sent += blen; + + return bytes_sent; +} + +static inline void start_compression(CompressParam *param) +{ + param->done = false; + qemu_mutex_lock(¶m->mutex); + param->start = true; + qemu_cond_signal(¶m->cond); + qemu_mutex_unlock(¶m->mutex); +} + +static inline void start_decompression(DecompressParam *param) +{ + qemu_mutex_lock(¶m->mutex); + param->start = true; + qemu_cond_signal(¶m->cond); + qemu_mutex_unlock(¶m->mutex); +} + +static uint64_t bytes_transferred; + +static void flush_compressed_data(QEMUFile *f) +{ + int idx, len, thread_count; + + if (!migrate_use_compression()) { + return; + } + thread_count = migrate_compress_threads(); + for (idx = 0; idx < thread_count; idx++) { + if (!comp_param[idx].done) { + qemu_mutex_lock(comp_done_lock); + while (!comp_param[idx].done && !quit_comp_thread) { + qemu_cond_wait(comp_done_cond, comp_done_lock); + } + qemu_mutex_unlock(comp_done_lock); + } + if (!quit_comp_thread) { + len = qemu_put_qemu_file(f, comp_param[idx].file); + bytes_transferred += len; + } + } +} + +static inline void set_compress_params(CompressParam *param, RAMBlock *block, + ram_addr_t offset) +{ + param->block = block; + param->offset = offset; +} + +static int compress_page_with_multi_thread(QEMUFile *f, RAMBlock *block, + ram_addr_t offset, + uint64_t *bytes_transferred) +{ + int idx, thread_count, bytes_xmit = -1, pages = -1; + + thread_count = migrate_compress_threads(); + qemu_mutex_lock(comp_done_lock); + while (true) { + for (idx = 0; idx < thread_count; idx++) { + if (comp_param[idx].done) { + bytes_xmit = qemu_put_qemu_file(f, comp_param[idx].file); + set_compress_params(&comp_param[idx], block, offset); + start_compression(&comp_param[idx]); + pages = 1; + acct_info.norm_pages++; + *bytes_transferred += bytes_xmit; + break; + } + } + if (pages > 0) { + break; + } else { + qemu_cond_wait(comp_done_cond, comp_done_lock); + } + } + qemu_mutex_unlock(comp_done_lock); + + return pages; +} + +/** + * ram_save_compressed_page: compress the given page and send it to the stream + * + * Returns: Number of pages written. 
+ * + * @f: QEMUFile where to send the data + * @block: block that contains the page we want to send + * @offset: offset inside the block for the page + * @last_stage: if we are at the completion stage + * @bytes_transferred: increase it with the number of transferred bytes + */ +static int ram_save_compressed_page(QEMUFile *f, RAMBlock *block, + ram_addr_t offset, bool last_stage, + uint64_t *bytes_transferred) +{ + int pages = -1; + uint64_t bytes_xmit; + MemoryRegion *mr = block->mr; + uint8_t *p; + int ret; + + p = memory_region_get_ram_ptr(mr) + offset; + + bytes_xmit = 0; + ret = ram_control_save_page(f, block->offset, + offset, TARGET_PAGE_SIZE, &bytes_xmit); + if (bytes_xmit) { + *bytes_transferred += bytes_xmit; + pages = 1; + } + if (block == last_sent_block) { + offset |= RAM_SAVE_FLAG_CONTINUE; + } + if (ret != RAM_SAVE_CONTROL_NOT_SUPP) { + if (ret != RAM_SAVE_CONTROL_DELAYED) { + if (bytes_xmit > 0) { + acct_info.norm_pages++; + } else if (bytes_xmit == 0) { + acct_info.dup_pages++; + } + } + } else { + /* When starting the process of a new block, the first page of + * the block should be sent out before other pages in the same + * block, and all the pages in last block should have been sent + * out, keeping this order is important, because the 'cont' flag + * is used to avoid resending the block name. + */ + if (block != last_sent_block) { + flush_compressed_data(f); + pages = save_zero_page(f, block, offset, p, bytes_transferred); + if (pages == -1) { + set_compress_params(&comp_param[0], block, offset); + /* Use the qemu thread to compress the data to make sure the + * first page is sent out before other pages + */ + bytes_xmit = do_compress_ram_page(&comp_param[0]); + acct_info.norm_pages++; + qemu_put_qemu_file(f, comp_param[0].file); + *bytes_transferred += bytes_xmit; + pages = 1; + } + } else { + pages = save_zero_page(f, block, offset, p, bytes_transferred); + if (pages == -1) { + pages = compress_page_with_multi_thread(f, block, offset, + bytes_transferred); + } + } + } + + return pages; +} + /** * ram_find_and_save_block: Finds a dirty page and sends it to f * @@ -731,10 +1073,22 @@ static int ram_find_and_save_block(QEMUFile *f, bool last_stage, block = QLIST_FIRST_RCU(&ram_list.blocks); complete_round = true; ram_bulk_stage = false; + if (migrate_use_xbzrle()) { + /* If xbzrle is on, stop using the data compression at this + * point. In theory, xbzrle can do better than compression. 
+ */ + flush_compressed_data(f); + compression_switch = false; + } } } else { - pages = ram_save_page(f, block, offset, last_stage, - bytes_transferred); + if (compression_switch && migrate_use_compression()) { + pages = ram_save_compressed_page(f, block, offset, last_stage, + bytes_transferred); + } else { + pages = ram_save_page(f, block, offset, last_stage, + bytes_transferred); + } /* if page is unmodified, continue to the next */ if (pages > 0) { @@ -750,8 +1104,6 @@ static int ram_find_and_save_block(QEMUFile *f, bool last_stage, return pages; } -static uint64_t bytes_transferred; - void acct_update_position(QEMUFile *f, size_t size, bool zero) { uint64_t pages = size / TARGET_PAGE_SIZE; @@ -965,6 +1317,7 @@ static int ram_save_iterate(QEMUFile *f, void *opaque) } i++; } + flush_compressed_data(f); rcu_read_unlock(); /* @@ -1006,6 +1359,7 @@ static int ram_save_complete(QEMUFile *f, void *opaque) } } + flush_compressed_data(f); ram_control_after_iterate(f, RAM_CONTROL_FINISH); migration_end(); @@ -1113,10 +1467,104 @@ void ram_handle_compressed(void *host, uint8_t ch, uint64_t size) } } +static void *do_data_decompress(void *opaque) +{ + DecompressParam *param = opaque; + unsigned long pagesize; + + while (!quit_decomp_thread) { + qemu_mutex_lock(¶m->mutex); + while (!param->start && !quit_decomp_thread) { + qemu_cond_wait(¶m->cond, ¶m->mutex); + pagesize = TARGET_PAGE_SIZE; + if (!quit_decomp_thread) { + /* uncompress() will return failed in some case, especially + * when the page is dirted when doing the compression, it's + * not a problem because the dirty page will be retransferred + * and uncompress() won't break the data in other pages. + */ + uncompress((Bytef *)param->des, &pagesize, + (const Bytef *)param->compbuf, param->len); + } + param->start = false; + } + qemu_mutex_unlock(¶m->mutex); + } + + return NULL; +} + +void migrate_decompress_threads_create(void) +{ + int i, thread_count; + + thread_count = migrate_decompress_threads(); + decompress_threads = g_new0(QemuThread, thread_count); + decomp_param = g_new0(DecompressParam, thread_count); + compressed_data_buf = g_malloc0(compressBound(TARGET_PAGE_SIZE)); + quit_decomp_thread = false; + for (i = 0; i < thread_count; i++) { + qemu_mutex_init(&decomp_param[i].mutex); + qemu_cond_init(&decomp_param[i].cond); + decomp_param[i].compbuf = g_malloc0(compressBound(TARGET_PAGE_SIZE)); + qemu_thread_create(decompress_threads + i, "decompress", + do_data_decompress, decomp_param + i, + QEMU_THREAD_JOINABLE); + } +} + +void migrate_decompress_threads_join(void) +{ + int i, thread_count; + + quit_decomp_thread = true; + thread_count = migrate_decompress_threads(); + for (i = 0; i < thread_count; i++) { + qemu_mutex_lock(&decomp_param[i].mutex); + qemu_cond_signal(&decomp_param[i].cond); + qemu_mutex_unlock(&decomp_param[i].mutex); + } + for (i = 0; i < thread_count; i++) { + qemu_thread_join(decompress_threads + i); + qemu_mutex_destroy(&decomp_param[i].mutex); + qemu_cond_destroy(&decomp_param[i].cond); + g_free(decomp_param[i].compbuf); + } + g_free(decompress_threads); + g_free(decomp_param); + g_free(compressed_data_buf); + decompress_threads = NULL; + decomp_param = NULL; + compressed_data_buf = NULL; +} + +static void decompress_data_with_multi_threads(uint8_t *compbuf, + void *host, int len) +{ + int idx, thread_count; + + thread_count = migrate_decompress_threads(); + while (true) { + for (idx = 0; idx < thread_count; idx++) { + if (!decomp_param[idx].start) { + memcpy(decomp_param[idx].compbuf, compbuf, len); + 
decomp_param[idx].des = host; + decomp_param[idx].len = len; + start_decompression(&decomp_param[idx]); + break; + } + } + if (idx < thread_count) { + break; + } + } +} + static int ram_load(QEMUFile *f, void *opaque, int version_id) { int flags = 0, ret = 0; static uint64_t seq_iter; + int len = 0; seq_iter++; @@ -1196,6 +1644,23 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id) } qemu_get_buffer(f, host, TARGET_PAGE_SIZE); break; + case RAM_SAVE_FLAG_COMPRESS_PAGE: + host = host_from_stream_offset(f, addr, flags); + if (!host) { + error_report("Invalid RAM offset " RAM_ADDR_FMT, addr); + ret = -EINVAL; + break; + } + + len = qemu_get_be32(f); + if (len < 0 || len > compressBound(TARGET_PAGE_SIZE)) { + error_report("Invalid compressed data length: %d", len); + ret = -EINVAL; + break; + } + qemu_get_buffer(f, compressed_data_buf, len); + decompress_data_with_multi_threads(compressed_data_buf, host, len); + break; case RAM_SAVE_FLAG_XBZRLE: host = host_from_stream_offset(f, addr, flags); if (!host) { @@ -230,7 +230,6 @@ aio_ctx_finalize(GSource *source) event_notifier_cleanup(&ctx->notifier); rfifolock_destroy(&ctx->lock); qemu_mutex_destroy(&ctx->bh_lock); - g_array_free(ctx->pollfds, TRUE); timerlistgroup_deinit(&ctx->tlg); } @@ -281,12 +280,6 @@ static void aio_timerlist_notify(void *opaque) aio_notify(opaque); } -static void aio_rfifolock_cb(void *opaque) -{ - /* Kick owner thread in case they are blocked in aio_poll() */ - aio_notify(opaque); -} - AioContext *aio_context_new(Error **errp) { int ret; @@ -302,10 +295,9 @@ AioContext *aio_context_new(Error **errp) aio_set_event_notifier(ctx, &ctx->notifier, (EventNotifierHandler *) event_notifier_test_and_clear); - ctx->pollfds = g_array_new(FALSE, FALSE, sizeof(GPollFD)); ctx->thread_pool = NULL; qemu_mutex_init(&ctx->bh_lock); - rfifolock_init(&ctx->lock, aio_rfifolock_cb, ctx); + rfifolock_init(&ctx->lock, NULL, NULL); timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx); return ctx; diff --git a/backends/hostmem-file.c b/backends/hostmem-file.c index 51799943f1..4b55361010 100644 --- a/backends/hostmem-file.c +++ b/backends/hostmem-file.c @@ -43,7 +43,7 @@ file_backend_memory_alloc(HostMemoryBackend *backend, Error **errp) return; } if (!fb->mem_path) { - error_setg(errp, "mem_path property not set"); + error_setg(errp, "mem-path property not set"); return; } #ifndef CONFIG_LINUX diff --git a/backends/tpm.c b/backends/tpm.c index 4efe36736e..36c5d46f0a 100644 --- a/backends/tpm.c +++ b/backends/tpm.c @@ -165,17 +165,6 @@ void tpm_backend_thread_end(TPMBackendThread *tbt) } } -void tpm_backend_thread_tpm_reset(TPMBackendThread *tbt, - GFunc func, gpointer user_data) -{ - if (!tbt->pool) { - tpm_backend_thread_create(tbt, func, user_data); - } else { - g_thread_pool_push(tbt->pool, (gpointer)TPM_BACKEND_CMD_TPM_RESET, - NULL); - } -} - static const TypeInfo tpm_backend_info = { .name = TYPE_TPM_BACKEND, .parent = TYPE_OBJECT, @@ -51,43 +51,25 @@ #include <windows.h> #endif +/** + * A BdrvDirtyBitmap can be in three possible states: + * (1) successor is NULL and disabled is false: full r/w mode + * (2) successor is NULL and disabled is true: read only mode ("disabled") + * (3) successor is set: frozen mode. + * A frozen bitmap cannot be renamed, deleted, anonymized, cleared, set, + * or enabled. A frozen bitmap can only abdicate() or reclaim(). 
+ */ struct BdrvDirtyBitmap { - HBitmap *bitmap; + HBitmap *bitmap; /* Dirty sector bitmap implementation */ + BdrvDirtyBitmap *successor; /* Anonymous child; implies frozen status */ + char *name; /* Optional non-empty unique ID */ + int64_t size; /* Size of the bitmap (Number of sectors) */ + bool disabled; /* Bitmap is read-only */ QLIST_ENTRY(BdrvDirtyBitmap) list; }; #define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */ -static BlockAIOCB *bdrv_aio_readv_em(BlockDriverState *bs, - int64_t sector_num, QEMUIOVector *qiov, int nb_sectors, - BlockCompletionFunc *cb, void *opaque); -static BlockAIOCB *bdrv_aio_writev_em(BlockDriverState *bs, - int64_t sector_num, QEMUIOVector *qiov, int nb_sectors, - BlockCompletionFunc *cb, void *opaque); -static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs, - int64_t sector_num, int nb_sectors, - QEMUIOVector *iov); -static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs, - int64_t sector_num, int nb_sectors, - QEMUIOVector *iov); -static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs, - int64_t offset, unsigned int bytes, QEMUIOVector *qiov, - BdrvRequestFlags flags); -static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs, - int64_t offset, unsigned int bytes, QEMUIOVector *qiov, - BdrvRequestFlags flags); -static BlockAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs, - int64_t sector_num, - QEMUIOVector *qiov, - int nb_sectors, - BdrvRequestFlags flags, - BlockCompletionFunc *cb, - void *opaque, - bool is_write); -static void coroutine_fn bdrv_co_do_rw(void *opaque); -static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs, - int64_t sector_num, int nb_sectors, BdrvRequestFlags flags); - static QTAILQ_HEAD(, BlockDriverState) bdrv_states = QTAILQ_HEAD_INITIALIZER(bdrv_states); @@ -97,10 +79,7 @@ static QTAILQ_HEAD(, BlockDriverState) graph_bdrv_states = static QLIST_HEAD(, BlockDriver) bdrv_drivers = QLIST_HEAD_INITIALIZER(bdrv_drivers); -static void bdrv_set_dirty(BlockDriverState *bs, int64_t cur_sector, - int nr_sectors); -static void bdrv_reset_dirty(BlockDriverState *bs, int64_t cur_sector, - int nr_sectors); +static void bdrv_dirty_bitmap_truncate(BlockDriverState *bs); /* If non-zero, use only whitelisted block drivers */ static int use_bdrv_whitelist; @@ -124,104 +103,6 @@ int is_windows_drive(const char *filename) } #endif -/* throttling disk I/O limits */ -void bdrv_set_io_limits(BlockDriverState *bs, - ThrottleConfig *cfg) -{ - int i; - - throttle_config(&bs->throttle_state, cfg); - - for (i = 0; i < 2; i++) { - qemu_co_enter_next(&bs->throttled_reqs[i]); - } -} - -/* this function drain all the throttled IOs */ -static bool bdrv_start_throttled_reqs(BlockDriverState *bs) -{ - bool drained = false; - bool enabled = bs->io_limits_enabled; - int i; - - bs->io_limits_enabled = false; - - for (i = 0; i < 2; i++) { - while (qemu_co_enter_next(&bs->throttled_reqs[i])) { - drained = true; - } - } - - bs->io_limits_enabled = enabled; - - return drained; -} - -void bdrv_io_limits_disable(BlockDriverState *bs) -{ - bs->io_limits_enabled = false; - - bdrv_start_throttled_reqs(bs); - - throttle_destroy(&bs->throttle_state); -} - -static void bdrv_throttle_read_timer_cb(void *opaque) -{ - BlockDriverState *bs = opaque; - qemu_co_enter_next(&bs->throttled_reqs[0]); -} - -static void bdrv_throttle_write_timer_cb(void *opaque) -{ - BlockDriverState *bs = opaque; - qemu_co_enter_next(&bs->throttled_reqs[1]); -} - -/* should be called before bdrv_set_io_limits if a limit is set */ 
-void bdrv_io_limits_enable(BlockDriverState *bs) -{ - assert(!bs->io_limits_enabled); - throttle_init(&bs->throttle_state, - bdrv_get_aio_context(bs), - QEMU_CLOCK_VIRTUAL, - bdrv_throttle_read_timer_cb, - bdrv_throttle_write_timer_cb, - bs); - bs->io_limits_enabled = true; -} - -/* This function makes an IO wait if needed - * - * @nb_sectors: the number of sectors of the IO - * @is_write: is the IO a write - */ -static void bdrv_io_limits_intercept(BlockDriverState *bs, - unsigned int bytes, - bool is_write) -{ - /* does this io must wait */ - bool must_wait = throttle_schedule_timer(&bs->throttle_state, is_write); - - /* if must wait or any request of this type throttled queue the IO */ - if (must_wait || - !qemu_co_queue_empty(&bs->throttled_reqs[is_write])) { - qemu_co_queue_wait(&bs->throttled_reqs[is_write]); - } - - /* the IO will be executed, do the accounting */ - throttle_account(&bs->throttle_state, is_write, bytes); - - - /* if the next request must wait -> do nothing */ - if (throttle_schedule_timer(&bs->throttle_state, is_write)) { - return; - } - - /* else queue next request for execution */ - qemu_co_queue_next(&bs->throttled_reqs[is_write]); -} - size_t bdrv_opt_mem_align(BlockDriverState *bs) { if (!bs || !bs->drv) { @@ -335,20 +216,7 @@ void bdrv_get_full_backing_filename(BlockDriverState *bs, char *dest, size_t sz, void bdrv_register(BlockDriver *bdrv) { - /* Block drivers without coroutine functions need emulation */ - if (!bdrv->bdrv_co_readv) { - bdrv->bdrv_co_readv = bdrv_co_readv_em; - bdrv->bdrv_co_writev = bdrv_co_writev_em; - - /* bdrv_co_readv_em()/brdv_co_writev_em() work in terms of aio, so if - * the block driver lacks aio we need to emulate that too. - */ - if (!bdrv->bdrv_aio_readv) { - /* add AIO emulation layer */ - bdrv->bdrv_aio_readv = bdrv_aio_readv_em; - bdrv->bdrv_aio_writev = bdrv_aio_writev_em; - } - } + bdrv_setup_io_funcs(bdrv); QLIST_INSERT_HEAD(&bdrv_drivers, bdrv, list); } @@ -520,54 +388,6 @@ int bdrv_create_file(const char *filename, QemuOpts *opts, Error **errp) return ret; } -void bdrv_refresh_limits(BlockDriverState *bs, Error **errp) -{ - BlockDriver *drv = bs->drv; - Error *local_err = NULL; - - memset(&bs->bl, 0, sizeof(bs->bl)); - - if (!drv) { - return; - } - - /* Take some limits from the children as a default */ - if (bs->file) { - bdrv_refresh_limits(bs->file, &local_err); - if (local_err) { - error_propagate(errp, local_err); - return; - } - bs->bl.opt_transfer_length = bs->file->bl.opt_transfer_length; - bs->bl.max_transfer_length = bs->file->bl.max_transfer_length; - bs->bl.opt_mem_alignment = bs->file->bl.opt_mem_alignment; - } else { - bs->bl.opt_mem_alignment = 512; - } - - if (bs->backing_hd) { - bdrv_refresh_limits(bs->backing_hd, &local_err); - if (local_err) { - error_propagate(errp, local_err); - return; - } - bs->bl.opt_transfer_length = - MAX(bs->bl.opt_transfer_length, - bs->backing_hd->bl.opt_transfer_length); - bs->bl.max_transfer_length = - MIN_NON_ZERO(bs->bl.max_transfer_length, - bs->backing_hd->bl.max_transfer_length); - bs->bl.opt_mem_alignment = - MAX(bs->bl.opt_mem_alignment, - bs->backing_hd->bl.opt_mem_alignment); - } - - /* Then let the driver override it */ - if (drv->bdrv_refresh_limits) { - drv->bdrv_refresh_limits(bs, errp); - } -} - /** * Try to get @bs's logical and physical block size. * On success, store them in @bsz struct and return 0. 
@@ -841,22 +661,6 @@ int bdrv_parse_cache_flags(const char *mode, int *flags) return 0; } -/** - * The copy-on-read flag is actually a reference count so multiple users may - * use the feature without worrying about clobbering its previous state. - * Copy-on-read stays enabled until all users have called to disable it. - */ -void bdrv_enable_copy_on_read(BlockDriverState *bs) -{ - bs->copy_on_read++; -} - -void bdrv_disable_copy_on_read(BlockDriverState *bs) -{ - assert(bs->copy_on_read > 0); - bs->copy_on_read--; -} - /* * Returns the flags that a temporary snapshot should get, based on the * originally requested flags (the originally requested image will have flags @@ -1224,8 +1028,8 @@ void bdrv_set_backing_hd(BlockDriverState *bs, BlockDriverState *backing_hd) bdrv_op_unblock_all(bs->backing_hd, bs->backing_blocker); } else if (backing_hd) { error_setg(&bs->backing_blocker, - "device is used as backing hd of '%s'", - bdrv_get_device_name(bs)); + "node is used as backing hd of '%s'", + bdrv_get_device_or_node_name(bs)); } bs->backing_hd = backing_hd; @@ -1812,8 +1616,8 @@ int bdrv_reopen_prepare(BDRVReopenState *reopen_state, BlockReopenQueue *queue, * to r/w */ if (!(reopen_state->bs->open_flags & BDRV_O_ALLOW_RDWR) && reopen_state->flags & BDRV_O_RDWR) { - error_set(errp, QERR_DEVICE_IS_READ_ONLY, - bdrv_get_device_name(reopen_state->bs)); + error_setg(errp, "Node '%s' is read only", + bdrv_get_device_or_node_name(reopen_state->bs)); goto error; } @@ -1839,9 +1643,9 @@ int bdrv_reopen_prepare(BDRVReopenState *reopen_state, BlockReopenQueue *queue, } else { /* It is currently mandatory to have a bdrv_reopen_prepare() * handler for each supported drv. */ - error_set(errp, QERR_BLOCK_FORMAT_FEATURE_NOT_SUPPORTED, - drv->format_name, bdrv_get_device_name(reopen_state->bs), - "reopening of file"); + error_setg(errp, "Block format '%s' used by node '%s' " + "does not support reopening files", drv->format_name, + bdrv_get_device_or_node_name(reopen_state->bs)); ret = -1; goto error; } @@ -1966,86 +1770,6 @@ void bdrv_close_all(void) } } -/* Check if any requests are in-flight (including throttled requests) */ -static bool bdrv_requests_pending(BlockDriverState *bs) -{ - if (!QLIST_EMPTY(&bs->tracked_requests)) { - return true; - } - if (!qemu_co_queue_empty(&bs->throttled_reqs[0])) { - return true; - } - if (!qemu_co_queue_empty(&bs->throttled_reqs[1])) { - return true; - } - if (bs->file && bdrv_requests_pending(bs->file)) { - return true; - } - if (bs->backing_hd && bdrv_requests_pending(bs->backing_hd)) { - return true; - } - return false; -} - -static bool bdrv_drain_one(BlockDriverState *bs) -{ - bool bs_busy; - - bdrv_flush_io_queue(bs); - bdrv_start_throttled_reqs(bs); - bs_busy = bdrv_requests_pending(bs); - bs_busy |= aio_poll(bdrv_get_aio_context(bs), bs_busy); - return bs_busy; -} - -/* - * Wait for pending requests to complete on a single BlockDriverState subtree - * - * See the warning in bdrv_drain_all(). This function can only be called if - * you are sure nothing can generate I/O because you have op blockers - * installed. - * - * Note that unlike bdrv_drain_all(), the caller must hold the BlockDriverState - * AioContext. - */ -void bdrv_drain(BlockDriverState *bs) -{ - while (bdrv_drain_one(bs)) { - /* Keep iterating */ - } -} - -/* - * Wait for pending requests to complete across all BlockDriverStates - * - * This function does not flush data to disk, use bdrv_flush_all() for that - * after calling this function. 
- * - * Note that completion of an asynchronous I/O operation can trigger any - * number of other I/O operations on other devices---for example a coroutine - * can be arbitrarily complex and a constant flow of I/O can come until the - * coroutine is complete. Because of this, it is not possible to have a - * function to drain a single device's I/O queue. - */ -void bdrv_drain_all(void) -{ - /* Always run first iteration so any pending completion BHs run */ - bool busy = true; - BlockDriverState *bs; - - while (busy) { - busy = false; - - QTAILQ_FOREACH(bs, &bdrv_states, device_list) { - AioContext *aio_context = bdrv_get_aio_context(bs); - - aio_context_acquire(aio_context); - busy |= bdrv_drain_one(bs); - aio_context_release(aio_context); - } - } -} - /* make a BlockDriverState anonymous by removing from bdrv_state and * graph_bdrv_state list. Also, NULL terminate the device_name to prevent double remove */ @@ -2367,152 +2091,6 @@ int bdrv_commit_all(void) return 0; } -/** - * Remove an active request from the tracked requests list - * - * This function should be called when a tracked request is completing. - */ -static void tracked_request_end(BdrvTrackedRequest *req) -{ - if (req->serialising) { - req->bs->serialising_in_flight--; - } - - QLIST_REMOVE(req, list); - qemu_co_queue_restart_all(&req->wait_queue); -} - -/** - * Add an active request to the tracked requests list - */ -static void tracked_request_begin(BdrvTrackedRequest *req, - BlockDriverState *bs, - int64_t offset, - unsigned int bytes, bool is_write) -{ - *req = (BdrvTrackedRequest){ - .bs = bs, - .offset = offset, - .bytes = bytes, - .is_write = is_write, - .co = qemu_coroutine_self(), - .serialising = false, - .overlap_offset = offset, - .overlap_bytes = bytes, - }; - - qemu_co_queue_init(&req->wait_queue); - - QLIST_INSERT_HEAD(&bs->tracked_requests, req, list); -} - -static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align) -{ - int64_t overlap_offset = req->offset & ~(align - 1); - unsigned int overlap_bytes = ROUND_UP(req->offset + req->bytes, align) - - overlap_offset; - - if (!req->serialising) { - req->bs->serialising_in_flight++; - req->serialising = true; - } - - req->overlap_offset = MIN(req->overlap_offset, overlap_offset); - req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes); -} - -/** - * Round a region to cluster boundaries - */ -void bdrv_round_to_clusters(BlockDriverState *bs, - int64_t sector_num, int nb_sectors, - int64_t *cluster_sector_num, - int *cluster_nb_sectors) -{ - BlockDriverInfo bdi; - - if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) { - *cluster_sector_num = sector_num; - *cluster_nb_sectors = nb_sectors; - } else { - int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE; - *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c); - *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num + - nb_sectors, c); - } -} - -static int bdrv_get_cluster_size(BlockDriverState *bs) -{ - BlockDriverInfo bdi; - int ret; - - ret = bdrv_get_info(bs, &bdi); - if (ret < 0 || bdi.cluster_size == 0) { - return bs->request_alignment; - } else { - return bdi.cluster_size; - } -} - -static bool tracked_request_overlaps(BdrvTrackedRequest *req, - int64_t offset, unsigned int bytes) -{ - /* aaaa bbbb */ - if (offset >= req->overlap_offset + req->overlap_bytes) { - return false; - } - /* bbbb aaaa */ - if (req->overlap_offset >= offset + bytes) { - return false; - } - return true; -} - -static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self) -{ - 
BlockDriverState *bs = self->bs; - BdrvTrackedRequest *req; - bool retry; - bool waited = false; - - if (!bs->serialising_in_flight) { - return false; - } - - do { - retry = false; - QLIST_FOREACH(req, &bs->tracked_requests, list) { - if (req == self || (!req->serialising && !self->serialising)) { - continue; - } - if (tracked_request_overlaps(req, self->overlap_offset, - self->overlap_bytes)) - { - /* Hitting this means there was a reentrant request, for - * example, a block driver issuing nested requests. This must - * never happen since it means deadlock. - */ - assert(qemu_coroutine_self() != req->co); - - /* If the request is already (indirectly) waiting for us, or - * will wait for us as soon as it wakes up, then just go on - * (instead of producing a deadlock in the former case). */ - if (!req->waiting_for) { - self->waiting_for = req; - qemu_co_queue_wait(&req->wait_queue); - self->waiting_for = NULL; - retry = true; - waited = true; - break; - } - } - } - } while (retry); - - return waited; -} - /* * Return values: * 0 - success @@ -2681,879 +2259,6 @@ exit: return ret; } - -static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset, - size_t size) -{ - if (size > BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS) { - return -EIO; - } - - if (!bdrv_is_inserted(bs)) { - return -ENOMEDIUM; - } - - if (offset < 0) { - return -EIO; - } - - return 0; -} - -static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num, - int nb_sectors) -{ - if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) { - return -EIO; - } - - return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE, - nb_sectors * BDRV_SECTOR_SIZE); -} - -typedef struct RwCo { - BlockDriverState *bs; - int64_t offset; - QEMUIOVector *qiov; - bool is_write; - int ret; - BdrvRequestFlags flags; -} RwCo; - -static void coroutine_fn bdrv_rw_co_entry(void *opaque) -{ - RwCo *rwco = opaque; - - if (!rwco->is_write) { - rwco->ret = bdrv_co_do_preadv(rwco->bs, rwco->offset, - rwco->qiov->size, rwco->qiov, - rwco->flags); - } else { - rwco->ret = bdrv_co_do_pwritev(rwco->bs, rwco->offset, - rwco->qiov->size, rwco->qiov, - rwco->flags); - } -} - -/* - * Process a vectored synchronous request using coroutines - */ -static int bdrv_prwv_co(BlockDriverState *bs, int64_t offset, - QEMUIOVector *qiov, bool is_write, - BdrvRequestFlags flags) -{ - Coroutine *co; - RwCo rwco = { - .bs = bs, - .offset = offset, - .qiov = qiov, - .is_write = is_write, - .ret = NOT_DONE, - .flags = flags, - }; - - /** - * In sync call context, when the vcpu is blocked, this throttling timer - * will not fire; so the I/O throttling function has to be disabled here - * if it has been enabled. 
- */ - if (bs->io_limits_enabled) { - fprintf(stderr, "Disabling I/O throttling on '%s' due " - "to synchronous I/O.\n", bdrv_get_device_name(bs)); - bdrv_io_limits_disable(bs); - } - - if (qemu_in_coroutine()) { - /* Fast-path if already in coroutine context */ - bdrv_rw_co_entry(&rwco); - } else { - AioContext *aio_context = bdrv_get_aio_context(bs); - - co = qemu_coroutine_create(bdrv_rw_co_entry); - qemu_coroutine_enter(co, &rwco); - while (rwco.ret == NOT_DONE) { - aio_poll(aio_context, true); - } - } - return rwco.ret; -} - -/* - * Process a synchronous request using coroutines - */ -static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf, - int nb_sectors, bool is_write, BdrvRequestFlags flags) -{ - QEMUIOVector qiov; - struct iovec iov = { - .iov_base = (void *)buf, - .iov_len = nb_sectors * BDRV_SECTOR_SIZE, - }; - - if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) { - return -EINVAL; - } - - qemu_iovec_init_external(&qiov, &iov, 1); - return bdrv_prwv_co(bs, sector_num << BDRV_SECTOR_BITS, - &qiov, is_write, flags); -} - -/* return < 0 if error. See bdrv_write() for the return codes */ -int bdrv_read(BlockDriverState *bs, int64_t sector_num, - uint8_t *buf, int nb_sectors) -{ - return bdrv_rw_co(bs, sector_num, buf, nb_sectors, false, 0); -} - -/* Just like bdrv_read(), but with I/O throttling temporarily disabled */ -int bdrv_read_unthrottled(BlockDriverState *bs, int64_t sector_num, - uint8_t *buf, int nb_sectors) -{ - bool enabled; - int ret; - - enabled = bs->io_limits_enabled; - bs->io_limits_enabled = false; - ret = bdrv_read(bs, sector_num, buf, nb_sectors); - bs->io_limits_enabled = enabled; - return ret; -} - -/* Return < 0 if error. Important errors are: - -EIO generic I/O error (may happen for all errors) - -ENOMEDIUM No media inserted. - -EINVAL Invalid sector number or nb_sectors - -EACCES Trying to write a read-only device -*/ -int bdrv_write(BlockDriverState *bs, int64_t sector_num, - const uint8_t *buf, int nb_sectors) -{ - return bdrv_rw_co(bs, sector_num, (uint8_t *)buf, nb_sectors, true, 0); -} - -int bdrv_write_zeroes(BlockDriverState *bs, int64_t sector_num, - int nb_sectors, BdrvRequestFlags flags) -{ - return bdrv_rw_co(bs, sector_num, NULL, nb_sectors, true, - BDRV_REQ_ZERO_WRITE | flags); -} - -/* - * Completely zero out a block device with the help of bdrv_write_zeroes. - * The operation is sped up by checking the block status and only writing - * zeroes to the device if they currently do not return zeroes. Optional - * flags are passed through to bdrv_write_zeroes (e.g. BDRV_REQ_MAY_UNMAP). - * - * Returns < 0 on error, 0 on success. For error codes see bdrv_write(). 
- */ -int bdrv_make_zero(BlockDriverState *bs, BdrvRequestFlags flags) -{ - int64_t target_sectors, ret, nb_sectors, sector_num = 0; - int n; - - target_sectors = bdrv_nb_sectors(bs); - if (target_sectors < 0) { - return target_sectors; - } - - for (;;) { - nb_sectors = MIN(target_sectors - sector_num, BDRV_REQUEST_MAX_SECTORS); - if (nb_sectors <= 0) { - return 0; - } - ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &n); - if (ret < 0) { - error_report("error getting block status at sector %" PRId64 ": %s", - sector_num, strerror(-ret)); - return ret; - } - if (ret & BDRV_BLOCK_ZERO) { - sector_num += n; - continue; - } - ret = bdrv_write_zeroes(bs, sector_num, n, flags); - if (ret < 0) { - error_report("error writing zeroes at sector %" PRId64 ": %s", - sector_num, strerror(-ret)); - return ret; - } - sector_num += n; - } -} - -int bdrv_pread(BlockDriverState *bs, int64_t offset, void *buf, int bytes) -{ - QEMUIOVector qiov; - struct iovec iov = { - .iov_base = (void *)buf, - .iov_len = bytes, - }; - int ret; - - if (bytes < 0) { - return -EINVAL; - } - - qemu_iovec_init_external(&qiov, &iov, 1); - ret = bdrv_prwv_co(bs, offset, &qiov, false, 0); - if (ret < 0) { - return ret; - } - - return bytes; -} - -int bdrv_pwritev(BlockDriverState *bs, int64_t offset, QEMUIOVector *qiov) -{ - int ret; - - ret = bdrv_prwv_co(bs, offset, qiov, true, 0); - if (ret < 0) { - return ret; - } - - return qiov->size; -} - -int bdrv_pwrite(BlockDriverState *bs, int64_t offset, - const void *buf, int bytes) -{ - QEMUIOVector qiov; - struct iovec iov = { - .iov_base = (void *) buf, - .iov_len = bytes, - }; - - if (bytes < 0) { - return -EINVAL; - } - - qemu_iovec_init_external(&qiov, &iov, 1); - return bdrv_pwritev(bs, offset, &qiov); -} - -/* - * Writes to the file and ensures that no writes are reordered across this - * request (acts as a barrier) - * - * Returns 0 on success, -errno in error cases. - */ -int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset, - const void *buf, int count) -{ - int ret; - - ret = bdrv_pwrite(bs, offset, buf, count); - if (ret < 0) { - return ret; - } - - /* No flush needed for cache modes that already do it */ - if (bs->enable_write_cache) { - bdrv_flush(bs); - } - - return 0; -} - -static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs, - int64_t sector_num, int nb_sectors, QEMUIOVector *qiov) -{ - /* Perform I/O through a temporary buffer so that users who scribble over - * their read buffer while the operation is in progress do not end up - * modifying the image file. This is critical for zero-copy guest I/O - * where anything might happen inside guest memory. - */ - void *bounce_buffer; - - BlockDriver *drv = bs->drv; - struct iovec iov; - QEMUIOVector bounce_qiov; - int64_t cluster_sector_num; - int cluster_nb_sectors; - size_t skip_bytes; - int ret; - - /* Cover entire cluster so no additional backing file I/O is required when - * allocating cluster in the image file. 
- */ - bdrv_round_to_clusters(bs, sector_num, nb_sectors, - &cluster_sector_num, &cluster_nb_sectors); - - trace_bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, - cluster_sector_num, cluster_nb_sectors); - - iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE; - iov.iov_base = bounce_buffer = qemu_try_blockalign(bs, iov.iov_len); - if (bounce_buffer == NULL) { - ret = -ENOMEM; - goto err; - } - - qemu_iovec_init_external(&bounce_qiov, &iov, 1); - - ret = drv->bdrv_co_readv(bs, cluster_sector_num, cluster_nb_sectors, - &bounce_qiov); - if (ret < 0) { - goto err; - } - - if (drv->bdrv_co_write_zeroes && - buffer_is_zero(bounce_buffer, iov.iov_len)) { - ret = bdrv_co_do_write_zeroes(bs, cluster_sector_num, - cluster_nb_sectors, 0); - } else { - /* This does not change the data on the disk, it is not necessary - * to flush even in cache=writethrough mode. - */ - ret = drv->bdrv_co_writev(bs, cluster_sector_num, cluster_nb_sectors, - &bounce_qiov); - } - - if (ret < 0) { - /* It might be okay to ignore write errors for guest requests. If this - * is a deliberate copy-on-read then we don't want to ignore the error. - * Simply report it in all cases. - */ - goto err; - } - - skip_bytes = (sector_num - cluster_sector_num) * BDRV_SECTOR_SIZE; - qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes, - nb_sectors * BDRV_SECTOR_SIZE); - -err: - qemu_vfree(bounce_buffer); - return ret; -} - -/* - * Forwards an already correctly aligned request to the BlockDriver. This - * handles copy on read and zeroing after EOF; any other features must be - * implemented by the caller. - */ -static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs, - BdrvTrackedRequest *req, int64_t offset, unsigned int bytes, - int64_t align, QEMUIOVector *qiov, int flags) -{ - BlockDriver *drv = bs->drv; - int ret; - - int64_t sector_num = offset >> BDRV_SECTOR_BITS; - unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS; - - assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0); - assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0); - assert(!qiov || bytes == qiov->size); - - /* Handle Copy on Read and associated serialisation */ - if (flags & BDRV_REQ_COPY_ON_READ) { - /* If we touch the same cluster it counts as an overlap. This - * guarantees that allocating writes will be serialized and not race - * with each other for the same cluster. For example, in copy-on-read - * it ensures that the CoR read and write operations are atomic and - * guest writes cannot interleave between them. 
*/ - mark_request_serialising(req, bdrv_get_cluster_size(bs)); - } - - wait_serialising_requests(req); - - if (flags & BDRV_REQ_COPY_ON_READ) { - int pnum; - - ret = bdrv_is_allocated(bs, sector_num, nb_sectors, &pnum); - if (ret < 0) { - goto out; - } - - if (!ret || pnum != nb_sectors) { - ret = bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, qiov); - goto out; - } - } - - /* Forward the request to the BlockDriver */ - if (!bs->zero_beyond_eof) { - ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov); - } else { - /* Read zeros after EOF */ - int64_t total_sectors, max_nb_sectors; - - total_sectors = bdrv_nb_sectors(bs); - if (total_sectors < 0) { - ret = total_sectors; - goto out; - } - - max_nb_sectors = ROUND_UP(MAX(0, total_sectors - sector_num), - align >> BDRV_SECTOR_BITS); - if (nb_sectors < max_nb_sectors) { - ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov); - } else if (max_nb_sectors > 0) { - QEMUIOVector local_qiov; - - qemu_iovec_init(&local_qiov, qiov->niov); - qemu_iovec_concat(&local_qiov, qiov, 0, - max_nb_sectors * BDRV_SECTOR_SIZE); - - ret = drv->bdrv_co_readv(bs, sector_num, max_nb_sectors, - &local_qiov); - - qemu_iovec_destroy(&local_qiov); - } else { - ret = 0; - } - - /* Reading beyond end of file is supposed to produce zeroes */ - if (ret == 0 && total_sectors < sector_num + nb_sectors) { - uint64_t offset = MAX(0, total_sectors - sector_num); - uint64_t bytes = (sector_num + nb_sectors - offset) * - BDRV_SECTOR_SIZE; - qemu_iovec_memset(qiov, offset * BDRV_SECTOR_SIZE, 0, bytes); - } - } - -out: - return ret; -} - -static inline uint64_t bdrv_get_align(BlockDriverState *bs) -{ - /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */ - return MAX(BDRV_SECTOR_SIZE, bs->request_alignment); -} - -static inline bool bdrv_req_is_aligned(BlockDriverState *bs, - int64_t offset, size_t bytes) -{ - int64_t align = bdrv_get_align(bs); - return !(offset & (align - 1) || (bytes & (align - 1))); -} - -/* - * Handle a read request in coroutine context - */ -static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs, - int64_t offset, unsigned int bytes, QEMUIOVector *qiov, - BdrvRequestFlags flags) -{ - BlockDriver *drv = bs->drv; - BdrvTrackedRequest req; - - uint64_t align = bdrv_get_align(bs); - uint8_t *head_buf = NULL; - uint8_t *tail_buf = NULL; - QEMUIOVector local_qiov; - bool use_local_qiov = false; - int ret; - - if (!drv) { - return -ENOMEDIUM; - } - - ret = bdrv_check_byte_request(bs, offset, bytes); - if (ret < 0) { - return ret; - } - - if (bs->copy_on_read) { - flags |= BDRV_REQ_COPY_ON_READ; - } - - /* throttling disk I/O */ - if (bs->io_limits_enabled) { - bdrv_io_limits_intercept(bs, bytes, false); - } - - /* Align read if necessary by padding qiov */ - if (offset & (align - 1)) { - head_buf = qemu_blockalign(bs, align); - qemu_iovec_init(&local_qiov, qiov->niov + 2); - qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1)); - qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size); - use_local_qiov = true; - - bytes += offset & (align - 1); - offset = offset & ~(align - 1); - } - - if ((offset + bytes) & (align - 1)) { - if (!use_local_qiov) { - qemu_iovec_init(&local_qiov, qiov->niov + 1); - qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size); - use_local_qiov = true; - } - tail_buf = qemu_blockalign(bs, align); - qemu_iovec_add(&local_qiov, tail_buf, - align - ((offset + bytes) & (align - 1))); - - bytes = ROUND_UP(bytes, align); - } - - tracked_request_begin(&req, bs, offset, bytes, false); - ret = 
bdrv_aligned_preadv(bs, &req, offset, bytes, align, - use_local_qiov ? &local_qiov : qiov, - flags); - tracked_request_end(&req); - - if (use_local_qiov) { - qemu_iovec_destroy(&local_qiov); - qemu_vfree(head_buf); - qemu_vfree(tail_buf); - } - - return ret; -} - -static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs, - int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, - BdrvRequestFlags flags) -{ - if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) { - return -EINVAL; - } - - return bdrv_co_do_preadv(bs, sector_num << BDRV_SECTOR_BITS, - nb_sectors << BDRV_SECTOR_BITS, qiov, flags); -} - -int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num, - int nb_sectors, QEMUIOVector *qiov) -{ - trace_bdrv_co_readv(bs, sector_num, nb_sectors); - - return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, 0); -} - -int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs, - int64_t sector_num, int nb_sectors, QEMUIOVector *qiov) -{ - trace_bdrv_co_copy_on_readv(bs, sector_num, nb_sectors); - - return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, - BDRV_REQ_COPY_ON_READ); -} - -#define MAX_WRITE_ZEROES_BOUNCE_BUFFER 32768 - -static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs, - int64_t sector_num, int nb_sectors, BdrvRequestFlags flags) -{ - BlockDriver *drv = bs->drv; - QEMUIOVector qiov; - struct iovec iov = {0}; - int ret = 0; - - int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_write_zeroes, - BDRV_REQUEST_MAX_SECTORS); - - while (nb_sectors > 0 && !ret) { - int num = nb_sectors; - - /* Align request. Block drivers can expect the "bulk" of the request - * to be aligned. - */ - if (bs->bl.write_zeroes_alignment - && num > bs->bl.write_zeroes_alignment) { - if (sector_num % bs->bl.write_zeroes_alignment != 0) { - /* Make a small request up to the first aligned sector. */ - num = bs->bl.write_zeroes_alignment; - num -= sector_num % bs->bl.write_zeroes_alignment; - } else if ((sector_num + num) % bs->bl.write_zeroes_alignment != 0) { - /* Shorten the request to the last aligned sector. num cannot - * underflow because num > bs->bl.write_zeroes_alignment. - */ - num -= (sector_num + num) % bs->bl.write_zeroes_alignment; - } - } - - /* limit request size */ - if (num > max_write_zeroes) { - num = max_write_zeroes; - } - - ret = -ENOTSUP; - /* First try the efficient write zeroes operation */ - if (drv->bdrv_co_write_zeroes) { - ret = drv->bdrv_co_write_zeroes(bs, sector_num, num, flags); - } - - if (ret == -ENOTSUP) { - /* Fall back to bounce buffer if write zeroes is unsupported */ - int max_xfer_len = MIN_NON_ZERO(bs->bl.max_transfer_length, - MAX_WRITE_ZEROES_BOUNCE_BUFFER); - num = MIN(num, max_xfer_len); - iov.iov_len = num * BDRV_SECTOR_SIZE; - if (iov.iov_base == NULL) { - iov.iov_base = qemu_try_blockalign(bs, num * BDRV_SECTOR_SIZE); - if (iov.iov_base == NULL) { - ret = -ENOMEM; - goto fail; - } - memset(iov.iov_base, 0, num * BDRV_SECTOR_SIZE); - } - qemu_iovec_init_external(&qiov, &iov, 1); - - ret = drv->bdrv_co_writev(bs, sector_num, num, &qiov); - - /* Keep bounce buffer around if it is big enough for all - * all future requests. - */ - if (num < max_xfer_len) { - qemu_vfree(iov.iov_base); - iov.iov_base = NULL; - } - } - - sector_num += num; - nb_sectors -= num; - } - -fail: - qemu_vfree(iov.iov_base); - return ret; -} - -/* - * Forwards an already correctly aligned write request to the BlockDriver. 
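The head/tail padding applied to unaligned byte requests above boils down to a little masking arithmetic. A self-contained sketch, assuming a 4K request alignment (names and values are illustrative only):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t align = 4096;         /* assumed request alignment       */
    int64_t offset = 5000;         /* unaligned guest byte offset     */
    unsigned int bytes = 1000;     /* unaligned guest request length  */

    /* Pull the start back to the previous alignment boundary... */
    unsigned int head = offset & (align - 1);
    int64_t aligned_offset = offset & ~(align - 1);

    /* ...and round the length up so the end is aligned too. */
    unsigned int widened = bytes + head;
    unsigned int aligned_bytes = (widened + align - 1) & ~(align - 1);
    unsigned int tail = aligned_bytes - widened;

    /* Prints: offset=4096 bytes=4096 head=904 tail=2192 */
    printf("offset=%lld bytes=%u head=%u tail=%u\n",
           (long long)aligned_offset, aligned_bytes, head, tail);
    return 0;
}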
- */ -static int coroutine_fn bdrv_aligned_pwritev(BlockDriverState *bs, - BdrvTrackedRequest *req, int64_t offset, unsigned int bytes, - QEMUIOVector *qiov, int flags) -{ - BlockDriver *drv = bs->drv; - bool waited; - int ret; - - int64_t sector_num = offset >> BDRV_SECTOR_BITS; - unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS; - - assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0); - assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0); - assert(!qiov || bytes == qiov->size); - - waited = wait_serialising_requests(req); - assert(!waited || !req->serialising); - assert(req->overlap_offset <= offset); - assert(offset + bytes <= req->overlap_offset + req->overlap_bytes); - - ret = notifier_with_return_list_notify(&bs->before_write_notifiers, req); - - if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF && - !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_write_zeroes && - qemu_iovec_is_zero(qiov)) { - flags |= BDRV_REQ_ZERO_WRITE; - if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) { - flags |= BDRV_REQ_MAY_UNMAP; - } - } - - if (ret < 0) { - /* Do nothing, write notifier decided to fail this request */ - } else if (flags & BDRV_REQ_ZERO_WRITE) { - BLKDBG_EVENT(bs, BLKDBG_PWRITEV_ZERO); - ret = bdrv_co_do_write_zeroes(bs, sector_num, nb_sectors, flags); - } else { - BLKDBG_EVENT(bs, BLKDBG_PWRITEV); - ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov); - } - BLKDBG_EVENT(bs, BLKDBG_PWRITEV_DONE); - - if (ret == 0 && !bs->enable_write_cache) { - ret = bdrv_co_flush(bs); - } - - bdrv_set_dirty(bs, sector_num, nb_sectors); - - block_acct_highest_sector(&bs->stats, sector_num, nb_sectors); - - if (ret >= 0) { - bs->total_sectors = MAX(bs->total_sectors, sector_num + nb_sectors); - } - - return ret; -} - -/* - * Handle a write request in coroutine context - */ -static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs, - int64_t offset, unsigned int bytes, QEMUIOVector *qiov, - BdrvRequestFlags flags) -{ - BdrvTrackedRequest req; - uint64_t align = bdrv_get_align(bs); - uint8_t *head_buf = NULL; - uint8_t *tail_buf = NULL; - QEMUIOVector local_qiov; - bool use_local_qiov = false; - int ret; - - if (!bs->drv) { - return -ENOMEDIUM; - } - if (bs->read_only) { - return -EACCES; - } - - ret = bdrv_check_byte_request(bs, offset, bytes); - if (ret < 0) { - return ret; - } - - /* throttling disk I/O */ - if (bs->io_limits_enabled) { - bdrv_io_limits_intercept(bs, bytes, true); - } - - /* - * Align write if necessary by performing a read-modify-write cycle. - * Pad qiov with the read parts and be sure to have a tracked request not - * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle. 
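The read-modify-write cycle described above can be pictured with an in-memory toy: read the aligned region around the unaligned write, patch the new bytes in, and write the whole region back. Everything here (the byte array, the helper) is a stand-in, not block-layer code:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ALIGN 8   /* toy alignment so the example stays readable */

/* Toy backing store: two aligned blocks of data. */
static char disk[2 * ALIGN + 1] = "AAAAAAAABBBBBBBB";

/* RMW for an unaligned write: read the aligned region covering
 * [offset, offset + len), patch in the new bytes, write it back. */
static void unaligned_write(int64_t offset, const char *buf, size_t len)
{
    int64_t start = offset & ~(int64_t)(ALIGN - 1);
    int64_t end = (offset + (int64_t)len + ALIGN - 1) & ~(int64_t)(ALIGN - 1);
    char tmp[2 * ALIGN];

    memcpy(tmp, disk + start, end - start);       /* read   */
    memcpy(tmp + (offset - start), buf, len);     /* modify */
    memcpy(disk + start, tmp, end - start);       /* write  */
}

int main(void)
{
    unaligned_write(6, "xxxx", 4);   /* straddles the block boundary */
    printf("%s\n", disk);            /* AAAAAAxxxxBBBBBB */
    return 0;
}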
- */ - tracked_request_begin(&req, bs, offset, bytes, true); - - if (offset & (align - 1)) { - QEMUIOVector head_qiov; - struct iovec head_iov; - - mark_request_serialising(&req, align); - wait_serialising_requests(&req); - - head_buf = qemu_blockalign(bs, align); - head_iov = (struct iovec) { - .iov_base = head_buf, - .iov_len = align, - }; - qemu_iovec_init_external(&head_qiov, &head_iov, 1); - - BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_HEAD); - ret = bdrv_aligned_preadv(bs, &req, offset & ~(align - 1), align, - align, &head_qiov, 0); - if (ret < 0) { - goto fail; - } - BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD); - - qemu_iovec_init(&local_qiov, qiov->niov + 2); - qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1)); - qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size); - use_local_qiov = true; - - bytes += offset & (align - 1); - offset = offset & ~(align - 1); - } - - if ((offset + bytes) & (align - 1)) { - QEMUIOVector tail_qiov; - struct iovec tail_iov; - size_t tail_bytes; - bool waited; - - mark_request_serialising(&req, align); - waited = wait_serialising_requests(&req); - assert(!waited || !use_local_qiov); - - tail_buf = qemu_blockalign(bs, align); - tail_iov = (struct iovec) { - .iov_base = tail_buf, - .iov_len = align, - }; - qemu_iovec_init_external(&tail_qiov, &tail_iov, 1); - - BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_TAIL); - ret = bdrv_aligned_preadv(bs, &req, (offset + bytes) & ~(align - 1), align, - align, &tail_qiov, 0); - if (ret < 0) { - goto fail; - } - BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL); - - if (!use_local_qiov) { - qemu_iovec_init(&local_qiov, qiov->niov + 1); - qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size); - use_local_qiov = true; - } - - tail_bytes = (offset + bytes) & (align - 1); - qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes); - - bytes = ROUND_UP(bytes, align); - } - - if (use_local_qiov) { - /* Local buffer may have non-zero data. */ - flags &= ~BDRV_REQ_ZERO_WRITE; - } - ret = bdrv_aligned_pwritev(bs, &req, offset, bytes, - use_local_qiov ? 
&local_qiov : qiov, - flags); - -fail: - tracked_request_end(&req); - - if (use_local_qiov) { - qemu_iovec_destroy(&local_qiov); - } - qemu_vfree(head_buf); - qemu_vfree(tail_buf); - - return ret; -} - -static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs, - int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, - BdrvRequestFlags flags) -{ - if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) { - return -EINVAL; - } - - return bdrv_co_do_pwritev(bs, sector_num << BDRV_SECTOR_BITS, - nb_sectors << BDRV_SECTOR_BITS, qiov, flags); -} - -int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num, - int nb_sectors, QEMUIOVector *qiov) -{ - trace_bdrv_co_writev(bs, sector_num, nb_sectors); - - return bdrv_co_do_writev(bs, sector_num, nb_sectors, qiov, 0); -} - -int coroutine_fn bdrv_co_write_zeroes(BlockDriverState *bs, - int64_t sector_num, int nb_sectors, - BdrvRequestFlags flags) -{ - int ret; - - trace_bdrv_co_write_zeroes(bs, sector_num, nb_sectors, flags); - - if (!(bs->open_flags & BDRV_O_UNMAP)) { - flags &= ~BDRV_REQ_MAY_UNMAP; - } - if (bdrv_req_is_aligned(bs, sector_num << BDRV_SECTOR_BITS, - nb_sectors << BDRV_SECTOR_BITS)) { - ret = bdrv_co_do_writev(bs, sector_num, nb_sectors, NULL, - BDRV_REQ_ZERO_WRITE | flags); - } else { - uint8_t *buf; - QEMUIOVector local_qiov; - size_t bytes = nb_sectors << BDRV_SECTOR_BITS; - - buf = qemu_memalign(bdrv_opt_mem_align(bs), bytes); - memset(buf, 0, bytes); - qemu_iovec_init(&local_qiov, 1); - qemu_iovec_add(&local_qiov, buf, bytes); - - ret = bdrv_co_do_writev(bs, sector_num, nb_sectors, &local_qiov, - BDRV_REQ_ZERO_WRITE | flags); - qemu_vfree(buf); - } - return ret; -} - /** * Truncate file to 'offset' bytes (needed only for file protocols) */ @@ -3571,6 +2276,7 @@ int bdrv_truncate(BlockDriverState *bs, int64_t offset) ret = drv->bdrv_truncate(bs, offset); if (ret == 0) { ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS); + bdrv_dirty_bitmap_truncate(bs); if (bs->blk) { blk_dev_resize_cb(bs->blk); } @@ -3797,8 +2503,8 @@ void bdrv_add_key(BlockDriverState *bs, const char *key, Error **errp) { if (key) { if (!bdrv_is_encrypted(bs)) { - error_setg(errp, "Device '%s' is not encrypted", - bdrv_get_device_name(bs)); + error_setg(errp, "Node '%s' is not encrypted", + bdrv_get_device_or_node_name(bs)); } else if (bdrv_set_key(bs, key) < 0) { error_set(errp, QERR_INVALID_PASSWORD); } @@ -3806,7 +2512,7 @@ void bdrv_add_key(BlockDriverState *bs, const char *key, Error **errp) if (bdrv_key_required(bs)) { error_set(errp, ERROR_CLASS_DEVICE_ENCRYPTED, "'%s' (%s) is encrypted", - bdrv_get_device_name(bs), + bdrv_get_device_or_node_name(bs), bdrv_get_encrypted_filename(bs)); } } @@ -3870,15 +2576,20 @@ BlockDriverState *bdrv_find_node(const char *node_name) } /* Put this QMP function here so it can access the static graph_bdrv_states. */ -BlockDeviceInfoList *bdrv_named_nodes_list(void) +BlockDeviceInfoList *bdrv_named_nodes_list(Error **errp) { BlockDeviceInfoList *list, *entry; BlockDriverState *bs; list = NULL; QTAILQ_FOREACH(bs, &graph_bdrv_states, node_list) { + BlockDeviceInfo *info = bdrv_block_device_info(bs, errp); + if (!info) { + qapi_free_BlockDeviceInfoList(list); + return NULL; + } entry = g_malloc0(sizeof(*entry)); - entry->value = bdrv_block_device_info(bs); + entry->value = info; entry->next = list; list = entry; } @@ -3953,29 +2664,18 @@ const char *bdrv_get_device_name(const BlockDriverState *bs) return bs->blk ? 
blk_name(bs->blk) : ""; } -int bdrv_get_flags(BlockDriverState *bs) +/* This can be used to identify nodes that might not have a device + * name associated. Since node and device names live in the same + * namespace, the result is unambiguous. The exception is if both are + * absent, then this returns an empty (non-null) string. */ +const char *bdrv_get_device_or_node_name(const BlockDriverState *bs) { - return bs->open_flags; + return bs->blk ? blk_name(bs->blk) : bs->node_name; } -int bdrv_flush_all(void) +int bdrv_get_flags(BlockDriverState *bs) { - BlockDriverState *bs; - int result = 0; - - QTAILQ_FOREACH(bs, &bdrv_states, device_list) { - AioContext *aio_context = bdrv_get_aio_context(bs); - int ret; - - aio_context_acquire(aio_context); - ret = bdrv_flush(bs); - if (ret < 0 && !result) { - result = ret; - } - aio_context_release(aio_context); - } - - return result; + return bs->open_flags; } int bdrv_has_zero_init_1(BlockDriverState *bs) @@ -4030,222 +2730,6 @@ bool bdrv_can_write_zeroes_with_unmap(BlockDriverState *bs) return false; } -typedef struct BdrvCoGetBlockStatusData { - BlockDriverState *bs; - BlockDriverState *base; - int64_t sector_num; - int nb_sectors; - int *pnum; - int64_t ret; - bool done; -} BdrvCoGetBlockStatusData; - -/* - * Returns the allocation status of the specified sectors. - * Drivers not implementing the functionality are assumed to not support - * backing files, hence all their sectors are reported as allocated. - * - * If 'sector_num' is beyond the end of the disk image the return value is 0 - * and 'pnum' is set to 0. - * - * 'pnum' is set to the number of sectors (including and immediately following - * the specified sector) that are known to be in the same - * allocated/unallocated state. - * - * 'nb_sectors' is the max value 'pnum' should be set to. If nb_sectors goes - * beyond the end of the disk image it will be clamped. 
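Callers of the block-status query above typically loop over an image in runs, advancing by *pnum each time. A toy version with a static allocation map shows the contract; nothing here is the real driver interface:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define IMAGE_SECTORS 16

/* Toy allocation map, one flag per sector; a real driver would consult
 * its own metadata instead. */
static const bool alloc_map[IMAGE_SECTORS] = {
    1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1
};

/* Returns the state of sector_num and sets *pnum to the number of
 * sectors, starting there, that share that state -- clamped to
 * nb_sectors, matching the contract described above. */
static bool get_status(int64_t sector_num, int nb_sectors, int *pnum)
{
    bool state = alloc_map[sector_num];
    int n = 1;

    while (n < nb_sectors && sector_num + n < IMAGE_SECTORS &&
           alloc_map[sector_num + n] == state) {
        n++;
    }
    *pnum = n;
    return state;
}

int main(void)
{
    int64_t sector = 0;
    int remaining = IMAGE_SECTORS;

    /* Walk the whole image in runs, advancing by *pnum each time. */
    while (remaining > 0) {
        int pnum;
        bool allocated = get_status(sector, remaining, &pnum);

        printf("sectors %2lld..%2lld: %s\n",
               (long long)sector, (long long)(sector + pnum - 1),
               allocated ? "allocated" : "unallocated");
        sector += pnum;
        remaining -= pnum;
    }
    return 0;
}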
- */ -static int64_t coroutine_fn bdrv_co_get_block_status(BlockDriverState *bs, - int64_t sector_num, - int nb_sectors, int *pnum) -{ - int64_t total_sectors; - int64_t n; - int64_t ret, ret2; - - total_sectors = bdrv_nb_sectors(bs); - if (total_sectors < 0) { - return total_sectors; - } - - if (sector_num >= total_sectors) { - *pnum = 0; - return 0; - } - - n = total_sectors - sector_num; - if (n < nb_sectors) { - nb_sectors = n; - } - - if (!bs->drv->bdrv_co_get_block_status) { - *pnum = nb_sectors; - ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED; - if (bs->drv->protocol_name) { - ret |= BDRV_BLOCK_OFFSET_VALID | (sector_num * BDRV_SECTOR_SIZE); - } - return ret; - } - - ret = bs->drv->bdrv_co_get_block_status(bs, sector_num, nb_sectors, pnum); - if (ret < 0) { - *pnum = 0; - return ret; - } - - if (ret & BDRV_BLOCK_RAW) { - assert(ret & BDRV_BLOCK_OFFSET_VALID); - return bdrv_get_block_status(bs->file, ret >> BDRV_SECTOR_BITS, - *pnum, pnum); - } - - if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) { - ret |= BDRV_BLOCK_ALLOCATED; - } - - if (!(ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO)) { - if (bdrv_unallocated_blocks_are_zero(bs)) { - ret |= BDRV_BLOCK_ZERO; - } else if (bs->backing_hd) { - BlockDriverState *bs2 = bs->backing_hd; - int64_t nb_sectors2 = bdrv_nb_sectors(bs2); - if (nb_sectors2 >= 0 && sector_num >= nb_sectors2) { - ret |= BDRV_BLOCK_ZERO; - } - } - } - - if (bs->file && - (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) && - (ret & BDRV_BLOCK_OFFSET_VALID)) { - int file_pnum; - - ret2 = bdrv_co_get_block_status(bs->file, ret >> BDRV_SECTOR_BITS, - *pnum, &file_pnum); - if (ret2 >= 0) { - /* Ignore errors. This is just providing extra information, it - * is useful but not necessary. - */ - if (!file_pnum) { - /* !file_pnum indicates an offset at or beyond the EOF; it is - * perfectly valid for the format block driver to point to such - * offsets, so catch it and mark everything as zero */ - ret |= BDRV_BLOCK_ZERO; - } else { - /* Limit request to the range reported by the protocol driver */ - *pnum = file_pnum; - ret |= (ret2 & BDRV_BLOCK_ZERO); - } - } - } - - return ret; -} - -/* Coroutine wrapper for bdrv_get_block_status() */ -static void coroutine_fn bdrv_get_block_status_co_entry(void *opaque) -{ - BdrvCoGetBlockStatusData *data = opaque; - BlockDriverState *bs = data->bs; - - data->ret = bdrv_co_get_block_status(bs, data->sector_num, data->nb_sectors, - data->pnum); - data->done = true; -} - -/* - * Synchronous wrapper around bdrv_co_get_block_status(). - * - * See bdrv_co_get_block_status() for details. - */ -int64_t bdrv_get_block_status(BlockDriverState *bs, int64_t sector_num, - int nb_sectors, int *pnum) -{ - Coroutine *co; - BdrvCoGetBlockStatusData data = { - .bs = bs, - .sector_num = sector_num, - .nb_sectors = nb_sectors, - .pnum = pnum, - .done = false, - }; - - if (qemu_in_coroutine()) { - /* Fast-path if already in coroutine context */ - bdrv_get_block_status_co_entry(&data); - } else { - AioContext *aio_context = bdrv_get_aio_context(bs); - - co = qemu_coroutine_create(bdrv_get_block_status_co_entry); - qemu_coroutine_enter(co, &data); - while (!data.done) { - aio_poll(aio_context, true); - } - } - return data.ret; -} - -int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num, - int nb_sectors, int *pnum) -{ - int64_t ret = bdrv_get_block_status(bs, sector_num, nb_sectors, pnum); - if (ret < 0) { - return ret; - } - return !!(ret & BDRV_BLOCK_ALLOCATED); -} - -/* - * Given an image chain: ... 
-> [BASE] -> [INTER1] -> [INTER2] -> [TOP] - * - * Return true if the given sector is allocated in any image between - * BASE and TOP (inclusive). BASE can be NULL to check if the given - * sector is allocated in any image of the chain. Return false otherwise. - * - * 'pnum' is set to the number of sectors (including and immediately following - * the specified sector) that are known to be in the same - * allocated/unallocated state. - * - */ -int bdrv_is_allocated_above(BlockDriverState *top, - BlockDriverState *base, - int64_t sector_num, - int nb_sectors, int *pnum) -{ - BlockDriverState *intermediate; - int ret, n = nb_sectors; - - intermediate = top; - while (intermediate && intermediate != base) { - int pnum_inter; - ret = bdrv_is_allocated(intermediate, sector_num, nb_sectors, - &pnum_inter); - if (ret < 0) { - return ret; - } else if (ret) { - *pnum = pnum_inter; - return 1; - } - - /* - * [sector_num, nb_sectors] is unallocated on top but intermediate - * might have - * - * [sector_num+x, nr_sectors] allocated. - */ - if (n > pnum_inter && - (intermediate == top || - sector_num + pnum_inter < intermediate->total_sectors)) { - n = pnum_inter; - } - - intermediate = intermediate->backing_hd; - } - - *pnum = n; - return 0; -} - const char *bdrv_get_encrypted_filename(BlockDriverState *bs) { if (bs->backing_hd && bs->backing_hd->encrypted) @@ -4262,28 +2746,6 @@ void bdrv_get_backing_filename(BlockDriverState *bs, pstrcpy(filename, filename_size, bs->backing_file); } -int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num, - const uint8_t *buf, int nb_sectors) -{ - BlockDriver *drv = bs->drv; - int ret; - - if (!drv) { - return -ENOMEDIUM; - } - if (!drv->bdrv_write_compressed) { - return -ENOTSUP; - } - ret = bdrv_check_request(bs, sector_num, nb_sectors); - if (ret < 0) { - return ret; - } - - assert(QLIST_EMPTY(&bs->dirty_bitmaps)); - - return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors); -} - int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi) { BlockDriver *drv = bs->drv; @@ -4304,47 +2766,6 @@ ImageInfoSpecific *bdrv_get_specific_info(BlockDriverState *bs) return NULL; } -int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf, - int64_t pos, int size) -{ - QEMUIOVector qiov; - struct iovec iov = { - .iov_base = (void *) buf, - .iov_len = size, - }; - - qemu_iovec_init_external(&qiov, &iov, 1); - return bdrv_writev_vmstate(bs, &qiov, pos); -} - -int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos) -{ - BlockDriver *drv = bs->drv; - - if (!drv) { - return -ENOMEDIUM; - } else if (drv->bdrv_save_vmstate) { - return drv->bdrv_save_vmstate(bs, qiov, pos); - } else if (bs->file) { - return bdrv_writev_vmstate(bs->file, qiov, pos); - } - - return -ENOTSUP; -} - -int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf, - int64_t pos, int size) -{ - BlockDriver *drv = bs->drv; - if (!drv) - return -ENOMEDIUM; - if (drv->bdrv_load_vmstate) - return drv->bdrv_load_vmstate(bs, buf, pos, size); - if (bs->file) - return bdrv_load_vmstate(bs->file, buf, pos, size); - return -ENOTSUP; -} - void bdrv_debug_event(BlockDriverState *bs, BlkDebugEvent event) { if (!bs || !bs->drv || !bs->drv->bdrv_debug_event) { @@ -4491,452 +2912,6 @@ int bdrv_get_backing_file_depth(BlockDriverState *bs) return 1 + bdrv_get_backing_file_depth(bs->backing_hd); } -/**************************************************************/ -/* async I/Os */ - -BlockAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num, - QEMUIOVector *qiov, int 
nb_sectors, - BlockCompletionFunc *cb, void *opaque) -{ - trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque); - - return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0, - cb, opaque, false); -} - -BlockAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num, - QEMUIOVector *qiov, int nb_sectors, - BlockCompletionFunc *cb, void *opaque) -{ - trace_bdrv_aio_writev(bs, sector_num, nb_sectors, opaque); - - return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0, - cb, opaque, true); -} - -BlockAIOCB *bdrv_aio_write_zeroes(BlockDriverState *bs, - int64_t sector_num, int nb_sectors, BdrvRequestFlags flags, - BlockCompletionFunc *cb, void *opaque) -{ - trace_bdrv_aio_write_zeroes(bs, sector_num, nb_sectors, flags, opaque); - - return bdrv_co_aio_rw_vector(bs, sector_num, NULL, nb_sectors, - BDRV_REQ_ZERO_WRITE | flags, - cb, opaque, true); -} - - -typedef struct MultiwriteCB { - int error; - int num_requests; - int num_callbacks; - struct { - BlockCompletionFunc *cb; - void *opaque; - QEMUIOVector *free_qiov; - } callbacks[]; -} MultiwriteCB; - -static void multiwrite_user_cb(MultiwriteCB *mcb) -{ - int i; - - for (i = 0; i < mcb->num_callbacks; i++) { - mcb->callbacks[i].cb(mcb->callbacks[i].opaque, mcb->error); - if (mcb->callbacks[i].free_qiov) { - qemu_iovec_destroy(mcb->callbacks[i].free_qiov); - } - g_free(mcb->callbacks[i].free_qiov); - } -} - -static void multiwrite_cb(void *opaque, int ret) -{ - MultiwriteCB *mcb = opaque; - - trace_multiwrite_cb(mcb, ret); - - if (ret < 0 && !mcb->error) { - mcb->error = ret; - } - - mcb->num_requests--; - if (mcb->num_requests == 0) { - multiwrite_user_cb(mcb); - g_free(mcb); - } -} - -static int multiwrite_req_compare(const void *a, const void *b) -{ - const BlockRequest *req1 = a, *req2 = b; - - /* - * Note that we can't simply subtract req2->sector from req1->sector - * here as that could overflow the return value. - */ - if (req1->sector > req2->sector) { - return 1; - } else if (req1->sector < req2->sector) { - return -1; - } else { - return 0; - } -} - -/* - * Takes a bunch of requests and tries to merge them. Returns the number of - * requests that remain after merging. - */ -static int multiwrite_merge(BlockDriverState *bs, BlockRequest *reqs, - int num_reqs, MultiwriteCB *mcb) -{ - int i, outidx; - - // Sort requests by start sector - qsort(reqs, num_reqs, sizeof(*reqs), &multiwrite_req_compare); - - // Check if adjacent requests touch the same clusters. If so, combine them, - // filling up gaps with zero sectors. - outidx = 0; - for (i = 1; i < num_reqs; i++) { - int merge = 0; - int64_t oldreq_last = reqs[outidx].sector + reqs[outidx].nb_sectors; - - // Handle exactly sequential writes and overlapping writes. - if (reqs[i].sector <= oldreq_last) { - merge = 1; - } - - if (reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1 > IOV_MAX) { - merge = 0; - } - - if (bs->bl.max_transfer_length && reqs[outidx].nb_sectors + - reqs[i].nb_sectors > bs->bl.max_transfer_length) { - merge = 0; - } - - if (merge) { - size_t size; - QEMUIOVector *qiov = g_malloc0(sizeof(*qiov)); - qemu_iovec_init(qiov, - reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1); - - // Add the first request to the merged one. If the requests are - // overlapping, drop the last sectors of the first request. 
- size = (reqs[i].sector - reqs[outidx].sector) << 9; - qemu_iovec_concat(qiov, reqs[outidx].qiov, 0, size); - - // We should need to add any zeros between the two requests - assert (reqs[i].sector <= oldreq_last); - - // Add the second request - qemu_iovec_concat(qiov, reqs[i].qiov, 0, reqs[i].qiov->size); - - // Add tail of first request, if necessary - if (qiov->size < reqs[outidx].qiov->size) { - qemu_iovec_concat(qiov, reqs[outidx].qiov, qiov->size, - reqs[outidx].qiov->size - qiov->size); - } - - reqs[outidx].nb_sectors = qiov->size >> 9; - reqs[outidx].qiov = qiov; - - mcb->callbacks[i].free_qiov = reqs[outidx].qiov; - } else { - outidx++; - reqs[outidx].sector = reqs[i].sector; - reqs[outidx].nb_sectors = reqs[i].nb_sectors; - reqs[outidx].qiov = reqs[i].qiov; - } - } - - block_acct_merge_done(&bs->stats, BLOCK_ACCT_WRITE, num_reqs - outidx - 1); - - return outidx + 1; -} - -/* - * Submit multiple AIO write requests at once. - * - * On success, the function returns 0 and all requests in the reqs array have - * been submitted. In error case this function returns -1, and any of the - * requests may or may not be submitted yet. In particular, this means that the - * callback will be called for some of the requests, for others it won't. The - * caller must check the error field of the BlockRequest to wait for the right - * callbacks (if error != 0, no callback will be called). - * - * The implementation may modify the contents of the reqs array, e.g. to merge - * requests. However, the fields opaque and error are left unmodified as they - * are used to signal failure for a single request to the caller. - */ -int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, int num_reqs) -{ - MultiwriteCB *mcb; - int i; - - /* don't submit writes if we don't have a medium */ - if (bs->drv == NULL) { - for (i = 0; i < num_reqs; i++) { - reqs[i].error = -ENOMEDIUM; - } - return -1; - } - - if (num_reqs == 0) { - return 0; - } - - // Create MultiwriteCB structure - mcb = g_malloc0(sizeof(*mcb) + num_reqs * sizeof(*mcb->callbacks)); - mcb->num_requests = 0; - mcb->num_callbacks = num_reqs; - - for (i = 0; i < num_reqs; i++) { - mcb->callbacks[i].cb = reqs[i].cb; - mcb->callbacks[i].opaque = reqs[i].opaque; - } - - // Check for mergable requests - num_reqs = multiwrite_merge(bs, reqs, num_reqs, mcb); - - trace_bdrv_aio_multiwrite(mcb, mcb->num_callbacks, num_reqs); - - /* Run the aio requests. */ - mcb->num_requests = num_reqs; - for (i = 0; i < num_reqs; i++) { - bdrv_co_aio_rw_vector(bs, reqs[i].sector, reqs[i].qiov, - reqs[i].nb_sectors, reqs[i].flags, - multiwrite_cb, mcb, - true); - } - - return 0; -} - -void bdrv_aio_cancel(BlockAIOCB *acb) -{ - qemu_aio_ref(acb); - bdrv_aio_cancel_async(acb); - while (acb->refcnt > 1) { - if (acb->aiocb_info->get_aio_context) { - aio_poll(acb->aiocb_info->get_aio_context(acb), true); - } else if (acb->bs) { - aio_poll(bdrv_get_aio_context(acb->bs), true); - } else { - abort(); - } - } - qemu_aio_unref(acb); -} - -/* Async version of aio cancel. The caller is not blocked if the acb implements - * cancel_async, otherwise we do nothing and let the request normally complete. - * In either case the completion callback must be called. 
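The synchronous cancel above follows a reference-counting pattern: take an extra reference, request an asynchronous cancel, and poll until only that reference remains. A toy model of the pattern, with stand-ins for the AIO control block and the poll loop:

#include <stdbool.h>
#include <stdio.h>

/* Toy AIO control block: one reference is held by the in-flight
 * request itself and dropped when its completion runs. */
struct toy_aiocb {
    int refcnt;
    bool done;
};

static void toy_cancel_async(struct toy_aiocb *acb)
{
    /* Ask for cancellation; the completion still runs eventually. */
    acb->done = true;
}

static void toy_poll(struct toy_aiocb *acb)
{
    /* Stand-in for one iteration of the event loop: once the request
     * completes, its own reference is dropped (exactly once). */
    if (acb->done && acb->refcnt > 1) {
        acb->refcnt--;
    }
}

static void toy_cancel_sync(struct toy_aiocb *acb)
{
    acb->refcnt++;                /* keep acb alive across the wait */
    toy_cancel_async(acb);
    while (acb->refcnt > 1) {     /* wait until only we hold it */
        toy_poll(acb);
    }
    acb->refcnt--;                /* drop our reference */
}

int main(void)
{
    struct toy_aiocb acb = { .refcnt = 1, .done = false };

    toy_cancel_sync(&acb);
    printf("refcnt=%d done=%d\n", acb.refcnt, acb.done);
    return 0;
}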
*/ -void bdrv_aio_cancel_async(BlockAIOCB *acb) -{ - if (acb->aiocb_info->cancel_async) { - acb->aiocb_info->cancel_async(acb); - } -} - -/**************************************************************/ -/* async block device emulation */ - -typedef struct BlockAIOCBSync { - BlockAIOCB common; - QEMUBH *bh; - int ret; - /* vector translation state */ - QEMUIOVector *qiov; - uint8_t *bounce; - int is_write; -} BlockAIOCBSync; - -static const AIOCBInfo bdrv_em_aiocb_info = { - .aiocb_size = sizeof(BlockAIOCBSync), -}; - -static void bdrv_aio_bh_cb(void *opaque) -{ - BlockAIOCBSync *acb = opaque; - - if (!acb->is_write && acb->ret >= 0) { - qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size); - } - qemu_vfree(acb->bounce); - acb->common.cb(acb->common.opaque, acb->ret); - qemu_bh_delete(acb->bh); - acb->bh = NULL; - qemu_aio_unref(acb); -} - -static BlockAIOCB *bdrv_aio_rw_vector(BlockDriverState *bs, - int64_t sector_num, - QEMUIOVector *qiov, - int nb_sectors, - BlockCompletionFunc *cb, - void *opaque, - int is_write) - -{ - BlockAIOCBSync *acb; - - acb = qemu_aio_get(&bdrv_em_aiocb_info, bs, cb, opaque); - acb->is_write = is_write; - acb->qiov = qiov; - acb->bounce = qemu_try_blockalign(bs, qiov->size); - acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_aio_bh_cb, acb); - - if (acb->bounce == NULL) { - acb->ret = -ENOMEM; - } else if (is_write) { - qemu_iovec_to_buf(acb->qiov, 0, acb->bounce, qiov->size); - acb->ret = bs->drv->bdrv_write(bs, sector_num, acb->bounce, nb_sectors); - } else { - acb->ret = bs->drv->bdrv_read(bs, sector_num, acb->bounce, nb_sectors); - } - - qemu_bh_schedule(acb->bh); - - return &acb->common; -} - -static BlockAIOCB *bdrv_aio_readv_em(BlockDriverState *bs, - int64_t sector_num, QEMUIOVector *qiov, int nb_sectors, - BlockCompletionFunc *cb, void *opaque) -{ - return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 0); -} - -static BlockAIOCB *bdrv_aio_writev_em(BlockDriverState *bs, - int64_t sector_num, QEMUIOVector *qiov, int nb_sectors, - BlockCompletionFunc *cb, void *opaque) -{ - return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 1); -} - - -typedef struct BlockAIOCBCoroutine { - BlockAIOCB common; - BlockRequest req; - bool is_write; - bool *done; - QEMUBH* bh; -} BlockAIOCBCoroutine; - -static const AIOCBInfo bdrv_em_co_aiocb_info = { - .aiocb_size = sizeof(BlockAIOCBCoroutine), -}; - -static void bdrv_co_em_bh(void *opaque) -{ - BlockAIOCBCoroutine *acb = opaque; - - acb->common.cb(acb->common.opaque, acb->req.error); - - qemu_bh_delete(acb->bh); - qemu_aio_unref(acb); -} - -/* Invoke bdrv_co_do_readv/bdrv_co_do_writev */ -static void coroutine_fn bdrv_co_do_rw(void *opaque) -{ - BlockAIOCBCoroutine *acb = opaque; - BlockDriverState *bs = acb->common.bs; - - if (!acb->is_write) { - acb->req.error = bdrv_co_do_readv(bs, acb->req.sector, - acb->req.nb_sectors, acb->req.qiov, acb->req.flags); - } else { - acb->req.error = bdrv_co_do_writev(bs, acb->req.sector, - acb->req.nb_sectors, acb->req.qiov, acb->req.flags); - } - - acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb); - qemu_bh_schedule(acb->bh); -} - -static BlockAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs, - int64_t sector_num, - QEMUIOVector *qiov, - int nb_sectors, - BdrvRequestFlags flags, - BlockCompletionFunc *cb, - void *opaque, - bool is_write) -{ - Coroutine *co; - BlockAIOCBCoroutine *acb; - - acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque); - acb->req.sector = sector_num; - acb->req.nb_sectors = nb_sectors; 
- acb->req.qiov = qiov; - acb->req.flags = flags; - acb->is_write = is_write; - - co = qemu_coroutine_create(bdrv_co_do_rw); - qemu_coroutine_enter(co, acb); - - return &acb->common; -} - -static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque) -{ - BlockAIOCBCoroutine *acb = opaque; - BlockDriverState *bs = acb->common.bs; - - acb->req.error = bdrv_co_flush(bs); - acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb); - qemu_bh_schedule(acb->bh); -} - -BlockAIOCB *bdrv_aio_flush(BlockDriverState *bs, - BlockCompletionFunc *cb, void *opaque) -{ - trace_bdrv_aio_flush(bs, opaque); - - Coroutine *co; - BlockAIOCBCoroutine *acb; - - acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque); - - co = qemu_coroutine_create(bdrv_aio_flush_co_entry); - qemu_coroutine_enter(co, acb); - - return &acb->common; -} - -static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque) -{ - BlockAIOCBCoroutine *acb = opaque; - BlockDriverState *bs = acb->common.bs; - - acb->req.error = bdrv_co_discard(bs, acb->req.sector, acb->req.nb_sectors); - acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb); - qemu_bh_schedule(acb->bh); -} - -BlockAIOCB *bdrv_aio_discard(BlockDriverState *bs, - int64_t sector_num, int nb_sectors, - BlockCompletionFunc *cb, void *opaque) -{ - Coroutine *co; - BlockAIOCBCoroutine *acb; - - trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque); - - acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque); - acb->req.sector = sector_num; - acb->req.nb_sectors = nb_sectors; - co = qemu_coroutine_create(bdrv_aio_discard_co_entry); - qemu_coroutine_enter(co, acb); - - return &acb->common; -} - void bdrv_init(void) { module_call_init(MODULE_INIT_BLOCK); @@ -4948,161 +2923,6 @@ void bdrv_init_with_whitelist(void) bdrv_init(); } -void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs, - BlockCompletionFunc *cb, void *opaque) -{ - BlockAIOCB *acb; - - acb = g_slice_alloc(aiocb_info->aiocb_size); - acb->aiocb_info = aiocb_info; - acb->bs = bs; - acb->cb = cb; - acb->opaque = opaque; - acb->refcnt = 1; - return acb; -} - -void qemu_aio_ref(void *p) -{ - BlockAIOCB *acb = p; - acb->refcnt++; -} - -void qemu_aio_unref(void *p) -{ - BlockAIOCB *acb = p; - assert(acb->refcnt > 0); - if (--acb->refcnt == 0) { - g_slice_free1(acb->aiocb_info->aiocb_size, acb); - } -} - -/**************************************************************/ -/* Coroutine block device emulation */ - -typedef struct CoroutineIOCompletion { - Coroutine *coroutine; - int ret; -} CoroutineIOCompletion; - -static void bdrv_co_io_em_complete(void *opaque, int ret) -{ - CoroutineIOCompletion *co = opaque; - - co->ret = ret; - qemu_coroutine_enter(co->coroutine, NULL); -} - -static int coroutine_fn bdrv_co_io_em(BlockDriverState *bs, int64_t sector_num, - int nb_sectors, QEMUIOVector *iov, - bool is_write) -{ - CoroutineIOCompletion co = { - .coroutine = qemu_coroutine_self(), - }; - BlockAIOCB *acb; - - if (is_write) { - acb = bs->drv->bdrv_aio_writev(bs, sector_num, iov, nb_sectors, - bdrv_co_io_em_complete, &co); - } else { - acb = bs->drv->bdrv_aio_readv(bs, sector_num, iov, nb_sectors, - bdrv_co_io_em_complete, &co); - } - - trace_bdrv_co_io_em(bs, sector_num, nb_sectors, is_write, acb); - if (!acb) { - return -EIO; - } - qemu_coroutine_yield(); - - return co.ret; -} - -static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs, - int64_t sector_num, int nb_sectors, - QEMUIOVector *iov) -{ - return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, false); -} - -static int 
coroutine_fn bdrv_co_writev_em(BlockDriverState *bs, - int64_t sector_num, int nb_sectors, - QEMUIOVector *iov) -{ - return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, true); -} - -static void coroutine_fn bdrv_flush_co_entry(void *opaque) -{ - RwCo *rwco = opaque; - - rwco->ret = bdrv_co_flush(rwco->bs); -} - -int coroutine_fn bdrv_co_flush(BlockDriverState *bs) -{ - int ret; - - if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs)) { - return 0; - } - - /* Write back cached data to the OS even with cache=unsafe */ - BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS); - if (bs->drv->bdrv_co_flush_to_os) { - ret = bs->drv->bdrv_co_flush_to_os(bs); - if (ret < 0) { - return ret; - } - } - - /* But don't actually force it to the disk with cache=unsafe */ - if (bs->open_flags & BDRV_O_NO_FLUSH) { - goto flush_parent; - } - - BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK); - if (bs->drv->bdrv_co_flush_to_disk) { - ret = bs->drv->bdrv_co_flush_to_disk(bs); - } else if (bs->drv->bdrv_aio_flush) { - BlockAIOCB *acb; - CoroutineIOCompletion co = { - .coroutine = qemu_coroutine_self(), - }; - - acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co); - if (acb == NULL) { - ret = -EIO; - } else { - qemu_coroutine_yield(); - ret = co.ret; - } - } else { - /* - * Some block drivers always operate in either writethrough or unsafe - * mode and don't support bdrv_flush therefore. Usually qemu doesn't - * know how the server works (because the behaviour is hardcoded or - * depends on server-side configuration), so we can't ensure that - * everything is safe on disk. Returning an error doesn't work because - * that would break guests even if the server operates in writethrough - * mode. - * - * Let's hope the user knows what he's doing. - */ - ret = 0; - } - if (ret < 0) { - return ret; - } - - /* Now flush the underlying protocol. It will also have BDRV_O_NO_FLUSH - * in the case of cache=unsafe, so there are no useless flushes. - */ -flush_parent: - return bdrv_co_flush(bs->file); -} - void bdrv_invalidate_cache(BlockDriverState *bs, Error **errp) { Error *local_err = NULL; @@ -5152,143 +2972,6 @@ void bdrv_invalidate_cache_all(Error **errp) } } -int bdrv_flush(BlockDriverState *bs) -{ - Coroutine *co; - RwCo rwco = { - .bs = bs, - .ret = NOT_DONE, - }; - - if (qemu_in_coroutine()) { - /* Fast-path if already in coroutine context */ - bdrv_flush_co_entry(&rwco); - } else { - AioContext *aio_context = bdrv_get_aio_context(bs); - - co = qemu_coroutine_create(bdrv_flush_co_entry); - qemu_coroutine_enter(co, &rwco); - while (rwco.ret == NOT_DONE) { - aio_poll(aio_context, true); - } - } - - return rwco.ret; -} - -typedef struct DiscardCo { - BlockDriverState *bs; - int64_t sector_num; - int nb_sectors; - int ret; -} DiscardCo; -static void coroutine_fn bdrv_discard_co_entry(void *opaque) -{ - DiscardCo *rwco = opaque; - - rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors); -} - -int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num, - int nb_sectors) -{ - int max_discard, ret; - - if (!bs->drv) { - return -ENOMEDIUM; - } - - ret = bdrv_check_request(bs, sector_num, nb_sectors); - if (ret < 0) { - return ret; - } else if (bs->read_only) { - return -EROFS; - } - - bdrv_reset_dirty(bs, sector_num, nb_sectors); - - /* Do nothing if disabled. 
*/ - if (!(bs->open_flags & BDRV_O_UNMAP)) { - return 0; - } - - if (!bs->drv->bdrv_co_discard && !bs->drv->bdrv_aio_discard) { - return 0; - } - - max_discard = MIN_NON_ZERO(bs->bl.max_discard, BDRV_REQUEST_MAX_SECTORS); - while (nb_sectors > 0) { - int ret; - int num = nb_sectors; - - /* align request */ - if (bs->bl.discard_alignment && - num >= bs->bl.discard_alignment && - sector_num % bs->bl.discard_alignment) { - if (num > bs->bl.discard_alignment) { - num = bs->bl.discard_alignment; - } - num -= sector_num % bs->bl.discard_alignment; - } - - /* limit request size */ - if (num > max_discard) { - num = max_discard; - } - - if (bs->drv->bdrv_co_discard) { - ret = bs->drv->bdrv_co_discard(bs, sector_num, num); - } else { - BlockAIOCB *acb; - CoroutineIOCompletion co = { - .coroutine = qemu_coroutine_self(), - }; - - acb = bs->drv->bdrv_aio_discard(bs, sector_num, nb_sectors, - bdrv_co_io_em_complete, &co); - if (acb == NULL) { - return -EIO; - } else { - qemu_coroutine_yield(); - ret = co.ret; - } - } - if (ret && ret != -ENOTSUP) { - return ret; - } - - sector_num += num; - nb_sectors -= num; - } - return 0; -} - -int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors) -{ - Coroutine *co; - DiscardCo rwco = { - .bs = bs, - .sector_num = sector_num, - .nb_sectors = nb_sectors, - .ret = NOT_DONE, - }; - - if (qemu_in_coroutine()) { - /* Fast-path if already in coroutine context */ - bdrv_discard_co_entry(&rwco); - } else { - AioContext *aio_context = bdrv_get_aio_context(bs); - - co = qemu_coroutine_create(bdrv_discard_co_entry); - qemu_coroutine_enter(co, &rwco); - while (rwco.ret == NOT_DONE) { - aio_poll(aio_context, true); - } - } - - return rwco.ret; -} - /**************************************************************/ /* removable device support */ @@ -5354,107 +3037,171 @@ void bdrv_lock_medium(BlockDriverState *bs, bool locked) } } -/* needed for generic scsi interface */ - -int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf) +void bdrv_set_guest_block_size(BlockDriverState *bs, int align) { - BlockDriver *drv = bs->drv; - - if (drv && drv->bdrv_ioctl) - return drv->bdrv_ioctl(bs, req, buf); - return -ENOTSUP; + bs->guest_block_size = align; } -BlockAIOCB *bdrv_aio_ioctl(BlockDriverState *bs, - unsigned long int req, void *buf, - BlockCompletionFunc *cb, void *opaque) +BdrvDirtyBitmap *bdrv_find_dirty_bitmap(BlockDriverState *bs, const char *name) { - BlockDriver *drv = bs->drv; + BdrvDirtyBitmap *bm; - if (drv && drv->bdrv_aio_ioctl) - return drv->bdrv_aio_ioctl(bs, req, buf, cb, opaque); + assert(name); + QLIST_FOREACH(bm, &bs->dirty_bitmaps, list) { + if (bm->name && !strcmp(name, bm->name)) { + return bm; + } + } return NULL; } -void bdrv_set_guest_block_size(BlockDriverState *bs, int align) +void bdrv_dirty_bitmap_make_anon(BdrvDirtyBitmap *bitmap) { - bs->guest_block_size = align; + assert(!bdrv_dirty_bitmap_frozen(bitmap)); + g_free(bitmap->name); + bitmap->name = NULL; +} + +BdrvDirtyBitmap *bdrv_create_dirty_bitmap(BlockDriverState *bs, + uint32_t granularity, + const char *name, + Error **errp) +{ + int64_t bitmap_size; + BdrvDirtyBitmap *bitmap; + uint32_t sector_granularity; + + assert((granularity & (granularity - 1)) == 0); + + if (name && bdrv_find_dirty_bitmap(bs, name)) { + error_setg(errp, "Bitmap already exists: %s", name); + return NULL; + } + sector_granularity = granularity >> BDRV_SECTOR_BITS; + assert(sector_granularity); + bitmap_size = bdrv_nb_sectors(bs); + if (bitmap_size < 0) { + error_setg_errno(errp, 
-bitmap_size, "could not get length of device"); + errno = -bitmap_size; + return NULL; + } + bitmap = g_new0(BdrvDirtyBitmap, 1); + bitmap->bitmap = hbitmap_alloc(bitmap_size, ctz32(sector_granularity)); + bitmap->size = bitmap_size; + bitmap->name = g_strdup(name); + bitmap->disabled = false; + QLIST_INSERT_HEAD(&bs->dirty_bitmaps, bitmap, list); + return bitmap; } -void *qemu_blockalign(BlockDriverState *bs, size_t size) +bool bdrv_dirty_bitmap_frozen(BdrvDirtyBitmap *bitmap) { - return qemu_memalign(bdrv_opt_mem_align(bs), size); + return bitmap->successor; } -void *qemu_blockalign0(BlockDriverState *bs, size_t size) +bool bdrv_dirty_bitmap_enabled(BdrvDirtyBitmap *bitmap) { - return memset(qemu_blockalign(bs, size), 0, size); + return !(bitmap->disabled || bitmap->successor); } -void *qemu_try_blockalign(BlockDriverState *bs, size_t size) +/** + * Create a successor bitmap destined to replace this bitmap after an operation. + * Requires that the bitmap is not frozen and has no successor. + */ +int bdrv_dirty_bitmap_create_successor(BlockDriverState *bs, + BdrvDirtyBitmap *bitmap, Error **errp) { - size_t align = bdrv_opt_mem_align(bs); + uint64_t granularity; + BdrvDirtyBitmap *child; + + if (bdrv_dirty_bitmap_frozen(bitmap)) { + error_setg(errp, "Cannot create a successor for a bitmap that is " + "currently frozen"); + return -1; + } + assert(!bitmap->successor); - /* Ensure that NULL is never returned on success */ - assert(align > 0); - if (size == 0) { - size = align; + /* Create an anonymous successor */ + granularity = bdrv_dirty_bitmap_granularity(bitmap); + child = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp); + if (!child) { + return -1; } - return qemu_try_memalign(align, size); + /* Successor will be on or off based on our current state. */ + child->disabled = bitmap->disabled; + + /* Install the successor and freeze the parent */ + bitmap->successor = child; + return 0; } -void *qemu_try_blockalign0(BlockDriverState *bs, size_t size) +/** + * For a bitmap with a successor, yield our name to the successor, + * delete the old bitmap, and return a handle to the new bitmap. + */ +BdrvDirtyBitmap *bdrv_dirty_bitmap_abdicate(BlockDriverState *bs, + BdrvDirtyBitmap *bitmap, + Error **errp) { - void *mem = qemu_try_blockalign(bs, size); + char *name; + BdrvDirtyBitmap *successor = bitmap->successor; - if (mem) { - memset(mem, 0, size); + if (successor == NULL) { + error_setg(errp, "Cannot relinquish control if " + "there's no successor present"); + return NULL; } - return mem; + name = bitmap->name; + bitmap->name = NULL; + successor->name = name; + bitmap->successor = NULL; + bdrv_release_dirty_bitmap(bs, bitmap); + + return successor; } -/* - * Check if all memory in this vector is sector aligned. +/** + * In cases of failure where we can no longer safely delete the parent, + * we may wish to re-join the parent and child/successor. + * The merged parent will be un-frozen, but not explicitly re-enabled. 
*/ -bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov) +BdrvDirtyBitmap *bdrv_reclaim_dirty_bitmap(BlockDriverState *bs, + BdrvDirtyBitmap *parent, + Error **errp) { - int i; - size_t alignment = bdrv_opt_mem_align(bs); + BdrvDirtyBitmap *successor = parent->successor; - for (i = 0; i < qiov->niov; i++) { - if ((uintptr_t) qiov->iov[i].iov_base % alignment) { - return false; - } - if (qiov->iov[i].iov_len % alignment) { - return false; - } + if (!successor) { + error_setg(errp, "Cannot reclaim a successor when none is present"); + return NULL; } - return true; + if (!hbitmap_merge(parent->bitmap, successor->bitmap)) { + error_setg(errp, "Merging of parent and successor bitmap failed"); + return NULL; + } + bdrv_release_dirty_bitmap(bs, successor); + parent->successor = NULL; + + return parent; } -BdrvDirtyBitmap *bdrv_create_dirty_bitmap(BlockDriverState *bs, int granularity, - Error **errp) +/** + * Truncates _all_ bitmaps attached to a BDS. + */ +static void bdrv_dirty_bitmap_truncate(BlockDriverState *bs) { - int64_t bitmap_size; BdrvDirtyBitmap *bitmap; + uint64_t size = bdrv_nb_sectors(bs); - assert((granularity & (granularity - 1)) == 0); - - granularity >>= BDRV_SECTOR_BITS; - assert(granularity); - bitmap_size = bdrv_nb_sectors(bs); - if (bitmap_size < 0) { - error_setg_errno(errp, -bitmap_size, "could not get length of device"); - errno = -bitmap_size; - return NULL; + QLIST_FOREACH(bitmap, &bs->dirty_bitmaps, list) { + if (bdrv_dirty_bitmap_frozen(bitmap)) { + continue; + } + hbitmap_truncate(bitmap->bitmap, size); } - bitmap = g_new0(BdrvDirtyBitmap, 1); - bitmap->bitmap = hbitmap_alloc(bitmap_size, ffs(granularity) - 1); - QLIST_INSERT_HEAD(&bs->dirty_bitmaps, bitmap, list); - return bitmap; } void bdrv_release_dirty_bitmap(BlockDriverState *bs, BdrvDirtyBitmap *bitmap) @@ -5462,14 +3209,28 @@ void bdrv_release_dirty_bitmap(BlockDriverState *bs, BdrvDirtyBitmap *bitmap) BdrvDirtyBitmap *bm, *next; QLIST_FOREACH_SAFE(bm, &bs->dirty_bitmaps, list, next) { if (bm == bitmap) { + assert(!bdrv_dirty_bitmap_frozen(bm)); QLIST_REMOVE(bitmap, list); hbitmap_free(bitmap->bitmap); + g_free(bitmap->name); g_free(bitmap); return; } } } +void bdrv_disable_dirty_bitmap(BdrvDirtyBitmap *bitmap) +{ + assert(!bdrv_dirty_bitmap_frozen(bitmap)); + bitmap->disabled = true; +} + +void bdrv_enable_dirty_bitmap(BdrvDirtyBitmap *bitmap) +{ + assert(!bdrv_dirty_bitmap_frozen(bitmap)); + bitmap->disabled = false; +} + BlockDirtyInfoList *bdrv_query_dirty_bitmaps(BlockDriverState *bs) { BdrvDirtyBitmap *bm; @@ -5479,9 +3240,11 @@ BlockDirtyInfoList *bdrv_query_dirty_bitmaps(BlockDriverState *bs) QLIST_FOREACH(bm, &bs->dirty_bitmaps, list) { BlockDirtyInfo *info = g_new0(BlockDirtyInfo, 1); BlockDirtyInfoList *entry = g_new0(BlockDirtyInfoList, 1); - info->count = bdrv_get_dirty_count(bs, bm); - info->granularity = - ((int64_t) BDRV_SECTOR_SIZE << hbitmap_granularity(bm->bitmap)); + info->count = bdrv_get_dirty_count(bm); + info->granularity = bdrv_dirty_bitmap_granularity(bm); + info->has_name = !!bm->name; + info->name = g_strdup(bm->name); + info->frozen = bdrv_dirty_bitmap_frozen(bm); entry->value = info; *plist = entry; plist = &entry->next; @@ -5499,43 +3262,90 @@ int bdrv_get_dirty(BlockDriverState *bs, BdrvDirtyBitmap *bitmap, int64_t sector } } -void bdrv_dirty_iter_init(BlockDriverState *bs, - BdrvDirtyBitmap *bitmap, HBitmapIter *hbi) +/** + * Chooses a default granularity based on the existing cluster size, + * but clamped between [4K, 64K]. 
Defaults to 64K in the case that there + * is no cluster size information available. + */ +uint32_t bdrv_get_default_bitmap_granularity(BlockDriverState *bs) +{ + BlockDriverInfo bdi; + uint32_t granularity; + + if (bdrv_get_info(bs, &bdi) >= 0 && bdi.cluster_size > 0) { + granularity = MAX(4096, bdi.cluster_size); + granularity = MIN(65536, granularity); + } else { + granularity = 65536; + } + + return granularity; +} + +uint32_t bdrv_dirty_bitmap_granularity(BdrvDirtyBitmap *bitmap) +{ + return BDRV_SECTOR_SIZE << hbitmap_granularity(bitmap->bitmap); +} + +void bdrv_dirty_iter_init(BdrvDirtyBitmap *bitmap, HBitmapIter *hbi) { hbitmap_iter_init(hbi, bitmap->bitmap, 0); } -void bdrv_set_dirty_bitmap(BlockDriverState *bs, BdrvDirtyBitmap *bitmap, +void bdrv_set_dirty_bitmap(BdrvDirtyBitmap *bitmap, int64_t cur_sector, int nr_sectors) { + assert(bdrv_dirty_bitmap_enabled(bitmap)); hbitmap_set(bitmap->bitmap, cur_sector, nr_sectors); } -void bdrv_reset_dirty_bitmap(BlockDriverState *bs, BdrvDirtyBitmap *bitmap, +void bdrv_reset_dirty_bitmap(BdrvDirtyBitmap *bitmap, int64_t cur_sector, int nr_sectors) { + assert(bdrv_dirty_bitmap_enabled(bitmap)); hbitmap_reset(bitmap->bitmap, cur_sector, nr_sectors); } -static void bdrv_set_dirty(BlockDriverState *bs, int64_t cur_sector, - int nr_sectors) +void bdrv_clear_dirty_bitmap(BdrvDirtyBitmap *bitmap) +{ + assert(bdrv_dirty_bitmap_enabled(bitmap)); + hbitmap_reset(bitmap->bitmap, 0, bitmap->size); +} + +void bdrv_set_dirty(BlockDriverState *bs, int64_t cur_sector, + int nr_sectors) { BdrvDirtyBitmap *bitmap; QLIST_FOREACH(bitmap, &bs->dirty_bitmaps, list) { + if (!bdrv_dirty_bitmap_enabled(bitmap)) { + continue; + } hbitmap_set(bitmap->bitmap, cur_sector, nr_sectors); } } -static void bdrv_reset_dirty(BlockDriverState *bs, int64_t cur_sector, - int nr_sectors) +void bdrv_reset_dirty(BlockDriverState *bs, int64_t cur_sector, + int nr_sectors) { BdrvDirtyBitmap *bitmap; QLIST_FOREACH(bitmap, &bs->dirty_bitmaps, list) { + if (!bdrv_dirty_bitmap_enabled(bitmap)) { + continue; + } hbitmap_reset(bitmap->bitmap, cur_sector, nr_sectors); } } -int64_t bdrv_get_dirty_count(BlockDriverState *bs, BdrvDirtyBitmap *bitmap) +/** + * Advance an HBitmapIter to an arbitrary offset. 
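The default-granularity choice above (use the cluster size, clamped to [4K, 64K], defaulting to 64K when unknown) reduces to a pair of MIN/MAX operations. A standalone sketch with hypothetical names:

#include <stdint.h>
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

/* Default dirty-bitmap granularity: the image cluster size clamped to
 * [4K, 64K], or 64K when no cluster size is known (0 here). */
static uint32_t default_granularity(uint32_t cluster_size)
{
    if (cluster_size > 0) {
        return MIN(65536u, MAX(4096u, cluster_size));
    }
    return 65536;
}

int main(void)
{
    printf("%u\n", default_granularity(512));      /* 4096  */
    printf("%u\n", default_granularity(8192));     /* 8192  */
    printf("%u\n", default_granularity(1 << 20));  /* 65536 */
    printf("%u\n", default_granularity(0));        /* 65536 */
    return 0;
}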
+ */ +void bdrv_set_dirty_iter(HBitmapIter *hbi, int64_t offset) +{ + assert(hbi->hb); + hbitmap_iter_init(hbi, hbi->hb, offset); +} + +int64_t bdrv_get_dirty_count(BdrvDirtyBitmap *bitmap) { return hbitmap_count(bitmap->bitmap); } @@ -5572,8 +3382,8 @@ bool bdrv_op_is_blocked(BlockDriverState *bs, BlockOpType op, Error **errp) if (!QLIST_EMPTY(&bs->op_blockers[op])) { blocker = QLIST_FIRST(&bs->op_blockers[op]); if (errp) { - error_setg(errp, "Device '%s' is busy: %s", - bdrv_get_device_name(bs), + error_setg(errp, "Node '%s' is busy: %s", + bdrv_get_device_or_node_name(bs), error_get_pretty(blocker->reason)); } return true; @@ -5953,12 +3763,6 @@ void bdrv_remove_aio_context_notifier(BlockDriverState *bs, abort(); } -void bdrv_add_before_write_notifier(BlockDriverState *bs, - NotifierWithReturn *notifier) -{ - notifier_with_return_list_add(&bs->before_write_notifiers, notifier); -} - int bdrv_amend_options(BlockDriverState *bs, QemuOpts *opts, BlockDriverAmendStatusCB *status_cb) { @@ -6059,36 +3863,6 @@ out: return to_replace_bs; } -void bdrv_io_plug(BlockDriverState *bs) -{ - BlockDriver *drv = bs->drv; - if (drv && drv->bdrv_io_plug) { - drv->bdrv_io_plug(bs); - } else if (bs->file) { - bdrv_io_plug(bs->file); - } -} - -void bdrv_io_unplug(BlockDriverState *bs) -{ - BlockDriver *drv = bs->drv; - if (drv && drv->bdrv_io_unplug) { - drv->bdrv_io_unplug(bs); - } else if (bs->file) { - bdrv_io_unplug(bs->file); - } -} - -void bdrv_flush_io_queue(BlockDriverState *bs) -{ - BlockDriver *drv = bs->drv; - if (drv && drv->bdrv_flush_io_queue) { - drv->bdrv_flush_io_queue(bs); - } else if (bs->file) { - bdrv_flush_io_queue(bs->file); - } -} - static bool append_open_options(QDict *d, BlockDriverState *bs) { const QDictEntry *entry; diff --git a/block/Makefile.objs b/block/Makefile.objs index db2933e469..0d8c2a4ab6 100644 --- a/block/Makefile.objs +++ b/block/Makefile.objs @@ -1,4 +1,4 @@ -block-obj-y += raw_bsd.o qcow.o vdi.o vmdk.o cloop.o dmg.o bochs.o vpc.o vvfat.o +block-obj-y += raw_bsd.o qcow.o vdi.o vmdk.o cloop.o bochs.o vpc.o vvfat.o block-obj-y += qcow2.o qcow2-refcount.o qcow2-cluster.o qcow2-snapshot.o qcow2-cache.o block-obj-y += qed.o qed-gencb.o qed-l2-cache.o qed-table.o qed-cluster.o block-obj-y += qed-check.o @@ -9,7 +9,7 @@ block-obj-y += block-backend.o snapshot.o qapi.o block-obj-$(CONFIG_WIN32) += raw-win32.o win32-aio.o block-obj-$(CONFIG_POSIX) += raw-posix.o block-obj-$(CONFIG_LINUX_AIO) += linux-aio.o -block-obj-y += null.o mirror.o +block-obj-y += null.o mirror.o io.o block-obj-y += nbd.o nbd-client.o sheepdog.o block-obj-$(CONFIG_LIBISCSI) += iscsi.o @@ -37,6 +37,7 @@ gluster.o-libs := $(GLUSTERFS_LIBS) ssh.o-cflags := $(LIBSSH2_CFLAGS) ssh.o-libs := $(LIBSSH2_LIBS) archipelago.o-libs := $(ARCHIPELAGO_LIBS) +block-obj-m += dmg.o dmg.o-libs := $(BZIP2_LIBS) qcow.o-libs := -lz linux-aio.o-libs := -laio diff --git a/block/backup.c b/block/backup.c index 1c535b1ab9..d3f648ddd7 100644 --- a/block/backup.c +++ b/block/backup.c @@ -37,6 +37,8 @@ typedef struct CowRequest { typedef struct BackupBlockJob { BlockJob common; BlockDriverState *target; + /* bitmap for sync=dirty-bitmap */ + BdrvDirtyBitmap *sync_bitmap; MirrorSyncMode sync_mode; RateLimit limit; BlockdevOnError on_source_error; @@ -242,6 +244,91 @@ static void backup_complete(BlockJob *job, void *opaque) g_free(data); } +static bool coroutine_fn yield_and_check(BackupBlockJob *job) +{ + if (block_job_is_cancelled(&job->common)) { + return true; + } + + /* we need to yield so that bdrv_drain_all() returns. 
+ * (without, VM does not reboot) + */ + if (job->common.speed) { + uint64_t delay_ns = ratelimit_calculate_delay(&job->limit, + job->sectors_read); + job->sectors_read = 0; + block_job_sleep_ns(&job->common, QEMU_CLOCK_REALTIME, delay_ns); + } else { + block_job_sleep_ns(&job->common, QEMU_CLOCK_REALTIME, 0); + } + + if (block_job_is_cancelled(&job->common)) { + return true; + } + + return false; +} + +static int coroutine_fn backup_run_incremental(BackupBlockJob *job) +{ + bool error_is_read; + int ret = 0; + int clusters_per_iter; + uint32_t granularity; + int64_t sector; + int64_t cluster; + int64_t end; + int64_t last_cluster = -1; + BlockDriverState *bs = job->common.bs; + HBitmapIter hbi; + + granularity = bdrv_dirty_bitmap_granularity(job->sync_bitmap); + clusters_per_iter = MAX((granularity / BACKUP_CLUSTER_SIZE), 1); + bdrv_dirty_iter_init(job->sync_bitmap, &hbi); + + /* Find the next dirty sector(s) */ + while ((sector = hbitmap_iter_next(&hbi)) != -1) { + cluster = sector / BACKUP_SECTORS_PER_CLUSTER; + + /* Fake progress updates for any clusters we skipped */ + if (cluster != last_cluster + 1) { + job->common.offset += ((cluster - last_cluster - 1) * + BACKUP_CLUSTER_SIZE); + } + + for (end = cluster + clusters_per_iter; cluster < end; cluster++) { + do { + if (yield_and_check(job)) { + return ret; + } + ret = backup_do_cow(bs, cluster * BACKUP_SECTORS_PER_CLUSTER, + BACKUP_SECTORS_PER_CLUSTER, &error_is_read); + if ((ret < 0) && + backup_error_action(job, error_is_read, -ret) == + BLOCK_ERROR_ACTION_REPORT) { + return ret; + } + } while (ret < 0); + } + + /* If the bitmap granularity is smaller than the backup granularity, + * we need to advance the iterator pointer to the next cluster. */ + if (granularity < BACKUP_CLUSTER_SIZE) { + bdrv_set_dirty_iter(&hbi, cluster * BACKUP_SECTORS_PER_CLUSTER); + } + + last_cluster = cluster - 1; + } + + /* Play some final catchup with the progress meter */ + end = DIV_ROUND_UP(job->common.len, BACKUP_CLUSTER_SIZE); + if (last_cluster + 1 < end) { + job->common.offset += ((end - last_cluster - 1) * BACKUP_CLUSTER_SIZE); + } + + return ret; +} + static void coroutine_fn backup_run(void *opaque) { BackupBlockJob *job = opaque; @@ -259,8 +346,7 @@ static void coroutine_fn backup_run(void *opaque) qemu_co_rwlock_init(&job->flush_rwlock); start = 0; - end = DIV_ROUND_UP(job->common.len / BDRV_SECTOR_SIZE, - BACKUP_SECTORS_PER_CLUSTER); + end = DIV_ROUND_UP(job->common.len, BACKUP_CLUSTER_SIZE); job->bitmap = hbitmap_alloc(end, 0); @@ -278,28 +364,13 @@ static void coroutine_fn backup_run(void *opaque) qemu_coroutine_yield(); job->common.busy = true; } + } else if (job->sync_mode == MIRROR_SYNC_MODE_DIRTY_BITMAP) { + ret = backup_run_incremental(job); } else { /* Both FULL and TOP SYNC_MODE's require copying.. */ for (; start < end; start++) { bool error_is_read; - - if (block_job_is_cancelled(&job->common)) { - break; - } - - /* we need to yield so that qemu_aio_flush() returns. 
- * (without, VM does not reboot) - */ - if (job->common.speed) { - uint64_t delay_ns = ratelimit_calculate_delay( - &job->limit, job->sectors_read); - job->sectors_read = 0; - block_job_sleep_ns(&job->common, QEMU_CLOCK_REALTIME, delay_ns); - } else { - block_job_sleep_ns(&job->common, QEMU_CLOCK_REALTIME, 0); - } - - if (block_job_is_cancelled(&job->common)) { + if (yield_and_check(job)) { break; } @@ -357,6 +428,18 @@ static void coroutine_fn backup_run(void *opaque) qemu_co_rwlock_wrlock(&job->flush_rwlock); qemu_co_rwlock_unlock(&job->flush_rwlock); + if (job->sync_bitmap) { + BdrvDirtyBitmap *bm; + if (ret < 0) { + /* Merge the successor back into the parent, delete nothing. */ + bm = bdrv_reclaim_dirty_bitmap(bs, job->sync_bitmap, NULL); + assert(bm); + } else { + /* Everything is fine, delete this bitmap and install the backup. */ + bm = bdrv_dirty_bitmap_abdicate(bs, job->sync_bitmap, NULL); + assert(bm); + } + } hbitmap_free(job->bitmap); bdrv_iostatus_disable(target); @@ -369,6 +452,7 @@ static void coroutine_fn backup_run(void *opaque) void backup_start(BlockDriverState *bs, BlockDriverState *target, int64_t speed, MirrorSyncMode sync_mode, + BdrvDirtyBitmap *sync_bitmap, BlockdevOnError on_source_error, BlockdevOnError on_target_error, BlockCompletionFunc *cb, void *opaque, @@ -412,17 +496,36 @@ void backup_start(BlockDriverState *bs, BlockDriverState *target, return; } + if (sync_mode == MIRROR_SYNC_MODE_DIRTY_BITMAP) { + if (!sync_bitmap) { + error_setg(errp, "must provide a valid bitmap name for " + "\"dirty-bitmap\" sync mode"); + return; + } + + /* Create a new bitmap, and freeze/disable this one. */ + if (bdrv_dirty_bitmap_create_successor(bs, sync_bitmap, errp) < 0) { + return; + } + } else if (sync_bitmap) { + error_setg(errp, + "a sync_bitmap was provided to backup_run, " + "but received an incompatible sync_mode (%s)", + MirrorSyncMode_lookup[sync_mode]); + return; + } + len = bdrv_getlength(bs); if (len < 0) { error_setg_errno(errp, -len, "unable to get length for '%s'", bdrv_get_device_name(bs)); - return; + goto error; } BackupBlockJob *job = block_job_create(&backup_job_driver, bs, speed, cb, opaque, errp); if (!job) { - return; + goto error; } bdrv_op_block_all(target, job->common.blocker); @@ -431,7 +534,15 @@ void backup_start(BlockDriverState *bs, BlockDriverState *target, job->on_target_error = on_target_error; job->target = target; job->sync_mode = sync_mode; + job->sync_bitmap = sync_mode == MIRROR_SYNC_MODE_DIRTY_BITMAP ? 
+ sync_bitmap : NULL; job->common.len = len; job->common.co = qemu_coroutine_create(backup_run); qemu_coroutine_enter(job->common.co, job); + return; + + error: + if (sync_bitmap) { + bdrv_reclaim_dirty_bitmap(bs, sync_bitmap, NULL); + } } diff --git a/block/blkdebug.c b/block/blkdebug.c index 63611e0a33..3c30edba73 100644 --- a/block/blkdebug.c +++ b/block/blkdebug.c @@ -721,6 +721,11 @@ static int64_t blkdebug_getlength(BlockDriverState *bs) return bdrv_getlength(bs->file); } +static int blkdebug_truncate(BlockDriverState *bs, int64_t offset) +{ + return bdrv_truncate(bs->file, offset); +} + static void blkdebug_refresh_filename(BlockDriverState *bs) { QDict *opts; @@ -779,6 +784,7 @@ static BlockDriver bdrv_blkdebug = { .bdrv_file_open = blkdebug_open, .bdrv_close = blkdebug_close, .bdrv_getlength = blkdebug_getlength, + .bdrv_truncate = blkdebug_truncate, .bdrv_refresh_filename = blkdebug_refresh_filename, .bdrv_aio_readv = blkdebug_aio_readv, diff --git a/block/block-backend.c b/block/block-backend.c index 48b6e4c05c..93e46f376a 100644 --- a/block/block-backend.c +++ b/block/block-backend.c @@ -515,6 +515,17 @@ int blk_write(BlockBackend *blk, int64_t sector_num, const uint8_t *buf, return bdrv_write(blk->bs, sector_num, buf, nb_sectors); } +int blk_write_zeroes(BlockBackend *blk, int64_t sector_num, + int nb_sectors, BdrvRequestFlags flags) +{ + int ret = blk_check_request(blk, sector_num, nb_sectors); + if (ret < 0) { + return ret; + } + + return bdrv_write_zeroes(blk->bs, sector_num, nb_sectors, flags); +} + static void error_callback_bh(void *opaque) { struct BlockBackendAIOCB *acb = opaque; diff --git a/block/io.c b/block/io.c new file mode 100644 index 0000000000..1ce62c4fbc --- /dev/null +++ b/block/io.c @@ -0,0 +1,2540 @@ +/* + * Block layer I/O functions + * + * Copyright (c) 2003 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "trace.h" +#include "sysemu/qtest.h" +#include "block/blockjob.h" +#include "block/block_int.h" + +#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */ + +static BlockAIOCB *bdrv_aio_readv_em(BlockDriverState *bs, + int64_t sector_num, QEMUIOVector *qiov, int nb_sectors, + BlockCompletionFunc *cb, void *opaque); +static BlockAIOCB *bdrv_aio_writev_em(BlockDriverState *bs, + int64_t sector_num, QEMUIOVector *qiov, int nb_sectors, + BlockCompletionFunc *cb, void *opaque); +static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs, + int64_t sector_num, int nb_sectors, + QEMUIOVector *iov); +static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs, + int64_t sector_num, int nb_sectors, + QEMUIOVector *iov); +static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs, + int64_t offset, unsigned int bytes, QEMUIOVector *qiov, + BdrvRequestFlags flags); +static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs, + int64_t offset, unsigned int bytes, QEMUIOVector *qiov, + BdrvRequestFlags flags); +static BlockAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs, + int64_t sector_num, + QEMUIOVector *qiov, + int nb_sectors, + BdrvRequestFlags flags, + BlockCompletionFunc *cb, + void *opaque, + bool is_write); +static void coroutine_fn bdrv_co_do_rw(void *opaque); +static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs, + int64_t sector_num, int nb_sectors, BdrvRequestFlags flags); + +/* throttling disk I/O limits */ +void bdrv_set_io_limits(BlockDriverState *bs, + ThrottleConfig *cfg) +{ + int i; + + throttle_config(&bs->throttle_state, cfg); + + for (i = 0; i < 2; i++) { + qemu_co_enter_next(&bs->throttled_reqs[i]); + } +} + +/* this function drain all the throttled IOs */ +static bool bdrv_start_throttled_reqs(BlockDriverState *bs) +{ + bool drained = false; + bool enabled = bs->io_limits_enabled; + int i; + + bs->io_limits_enabled = false; + + for (i = 0; i < 2; i++) { + while (qemu_co_enter_next(&bs->throttled_reqs[i])) { + drained = true; + } + } + + bs->io_limits_enabled = enabled; + + return drained; +} + +void bdrv_io_limits_disable(BlockDriverState *bs) +{ + bs->io_limits_enabled = false; + + bdrv_start_throttled_reqs(bs); + + throttle_destroy(&bs->throttle_state); +} + +static void bdrv_throttle_read_timer_cb(void *opaque) +{ + BlockDriverState *bs = opaque; + qemu_co_enter_next(&bs->throttled_reqs[0]); +} + +static void bdrv_throttle_write_timer_cb(void *opaque) +{ + BlockDriverState *bs = opaque; + qemu_co_enter_next(&bs->throttled_reqs[1]); +} + +/* should be called before bdrv_set_io_limits if a limit is set */ +void bdrv_io_limits_enable(BlockDriverState *bs) +{ + int clock_type = QEMU_CLOCK_REALTIME; + + if (qtest_enabled()) { + /* For testing block IO throttling only */ + clock_type = QEMU_CLOCK_VIRTUAL; + } + assert(!bs->io_limits_enabled); + throttle_init(&bs->throttle_state, + bdrv_get_aio_context(bs), + clock_type, + bdrv_throttle_read_timer_cb, + bdrv_throttle_write_timer_cb, + bs); + bs->io_limits_enabled = true; +} + +/* This function makes an IO wait if needed + * + * @nb_sectors: the number of sectors of the IO + * @is_write: is the IO a write + */ +static void bdrv_io_limits_intercept(BlockDriverState *bs, + unsigned int bytes, + bool is_write) +{ + /* does this io must wait */ + bool must_wait = throttle_schedule_timer(&bs->throttle_state, is_write); + + /* if must wait or any request of this type throttled queue the IO */ + if (must_wait || + !qemu_co_queue_empty(&bs->throttled_reqs[is_write])) 
{ + qemu_co_queue_wait(&bs->throttled_reqs[is_write]); + } + + /* the IO will be executed, do the accounting */ + throttle_account(&bs->throttle_state, is_write, bytes); + + + /* if the next request must wait -> do nothing */ + if (throttle_schedule_timer(&bs->throttle_state, is_write)) { + return; + } + + /* else queue next request for execution */ + qemu_co_queue_next(&bs->throttled_reqs[is_write]); +} + +void bdrv_setup_io_funcs(BlockDriver *bdrv) +{ + /* Block drivers without coroutine functions need emulation */ + if (!bdrv->bdrv_co_readv) { + bdrv->bdrv_co_readv = bdrv_co_readv_em; + bdrv->bdrv_co_writev = bdrv_co_writev_em; + + /* bdrv_co_readv_em()/brdv_co_writev_em() work in terms of aio, so if + * the block driver lacks aio we need to emulate that too. + */ + if (!bdrv->bdrv_aio_readv) { + /* add AIO emulation layer */ + bdrv->bdrv_aio_readv = bdrv_aio_readv_em; + bdrv->bdrv_aio_writev = bdrv_aio_writev_em; + } + } +} + +void bdrv_refresh_limits(BlockDriverState *bs, Error **errp) +{ + BlockDriver *drv = bs->drv; + Error *local_err = NULL; + + memset(&bs->bl, 0, sizeof(bs->bl)); + + if (!drv) { + return; + } + + /* Take some limits from the children as a default */ + if (bs->file) { + bdrv_refresh_limits(bs->file, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + bs->bl.opt_transfer_length = bs->file->bl.opt_transfer_length; + bs->bl.max_transfer_length = bs->file->bl.max_transfer_length; + bs->bl.opt_mem_alignment = bs->file->bl.opt_mem_alignment; + } else { + bs->bl.opt_mem_alignment = 512; + } + + if (bs->backing_hd) { + bdrv_refresh_limits(bs->backing_hd, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + bs->bl.opt_transfer_length = + MAX(bs->bl.opt_transfer_length, + bs->backing_hd->bl.opt_transfer_length); + bs->bl.max_transfer_length = + MIN_NON_ZERO(bs->bl.max_transfer_length, + bs->backing_hd->bl.max_transfer_length); + bs->bl.opt_mem_alignment = + MAX(bs->bl.opt_mem_alignment, + bs->backing_hd->bl.opt_mem_alignment); + } + + /* Then let the driver override it */ + if (drv->bdrv_refresh_limits) { + drv->bdrv_refresh_limits(bs, errp); + } +} + +/** + * The copy-on-read flag is actually a reference count so multiple users may + * use the feature without worrying about clobbering its previous state. + * Copy-on-read stays enabled until all users have called to disable it. + */ +void bdrv_enable_copy_on_read(BlockDriverState *bs) +{ + bs->copy_on_read++; +} + +void bdrv_disable_copy_on_read(BlockDriverState *bs) +{ + assert(bs->copy_on_read > 0); + bs->copy_on_read--; +} + +/* Check if any requests are in-flight (including throttled requests) */ +static bool bdrv_requests_pending(BlockDriverState *bs) +{ + if (!QLIST_EMPTY(&bs->tracked_requests)) { + return true; + } + if (!qemu_co_queue_empty(&bs->throttled_reqs[0])) { + return true; + } + if (!qemu_co_queue_empty(&bs->throttled_reqs[1])) { + return true; + } + if (bs->file && bdrv_requests_pending(bs->file)) { + return true; + } + if (bs->backing_hd && bdrv_requests_pending(bs->backing_hd)) { + return true; + } + return false; +} + +static bool bdrv_drain_one(BlockDriverState *bs) +{ + bool bs_busy; + + bdrv_flush_io_queue(bs); + bdrv_start_throttled_reqs(bs); + bs_busy = bdrv_requests_pending(bs); + bs_busy |= aio_poll(bdrv_get_aio_context(bs), bs_busy); + return bs_busy; +} + +/* + * Wait for pending requests to complete on a single BlockDriverState subtree + * + * See the warning in bdrv_drain_all(). 
This function can only be called if + * you are sure nothing can generate I/O because you have op blockers + * installed. + * + * Note that unlike bdrv_drain_all(), the caller must hold the BlockDriverState + * AioContext. + */ +void bdrv_drain(BlockDriverState *bs) +{ + while (bdrv_drain_one(bs)) { + /* Keep iterating */ + } +} + +/* + * Wait for pending requests to complete across all BlockDriverStates + * + * This function does not flush data to disk, use bdrv_flush_all() for that + * after calling this function. + * + * Note that completion of an asynchronous I/O operation can trigger any + * number of other I/O operations on other devices---for example a coroutine + * can be arbitrarily complex and a constant flow of I/O can come until the + * coroutine is complete. Because of this, it is not possible to have a + * function to drain a single device's I/O queue. + */ +void bdrv_drain_all(void) +{ + /* Always run first iteration so any pending completion BHs run */ + bool busy = true; + BlockDriverState *bs = NULL; + + while ((bs = bdrv_next(bs))) { + AioContext *aio_context = bdrv_get_aio_context(bs); + + aio_context_acquire(aio_context); + if (bs->job) { + block_job_pause(bs->job); + } + aio_context_release(aio_context); + } + + while (busy) { + busy = false; + bs = NULL; + + while ((bs = bdrv_next(bs))) { + AioContext *aio_context = bdrv_get_aio_context(bs); + + aio_context_acquire(aio_context); + busy |= bdrv_drain_one(bs); + aio_context_release(aio_context); + } + } + + bs = NULL; + while ((bs = bdrv_next(bs))) { + AioContext *aio_context = bdrv_get_aio_context(bs); + + aio_context_acquire(aio_context); + if (bs->job) { + block_job_resume(bs->job); + } + aio_context_release(aio_context); + } +} + +/** + * Remove an active request from the tracked requests list + * + * This function should be called when a tracked request is completing. 
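As an aside on the drain helpers above (a minimal sketch; quiesce_one_bs_example() and its detach scenario are invented for illustration and are not part of the patch): bdrv_drain_all() iterates every BlockDriverState and acquires each AioContext itself, while bdrv_drain() expects the caller to already hold the AioContext of the single BDS it quiesces. A typical calling pattern would look like this:

static void quiesce_one_bs_example(BlockDriverState *bs)
{
    AioContext *ctx = bdrv_get_aio_context(bs);

    aio_context_acquire(ctx);   /* bdrv_drain() requires the ctx to be held */
    bdrv_drain(bs);             /* loops until no request is in flight */
    /* ... safe point: reconfigure or detach the device here ... */
    aio_context_release(ctx);
}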
+ */ +static void tracked_request_end(BdrvTrackedRequest *req) +{ + if (req->serialising) { + req->bs->serialising_in_flight--; + } + + QLIST_REMOVE(req, list); + qemu_co_queue_restart_all(&req->wait_queue); +} + +/** + * Add an active request to the tracked requests list + */ +static void tracked_request_begin(BdrvTrackedRequest *req, + BlockDriverState *bs, + int64_t offset, + unsigned int bytes, bool is_write) +{ + *req = (BdrvTrackedRequest){ + .bs = bs, + .offset = offset, + .bytes = bytes, + .is_write = is_write, + .co = qemu_coroutine_self(), + .serialising = false, + .overlap_offset = offset, + .overlap_bytes = bytes, + }; + + qemu_co_queue_init(&req->wait_queue); + + QLIST_INSERT_HEAD(&bs->tracked_requests, req, list); +} + +static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align) +{ + int64_t overlap_offset = req->offset & ~(align - 1); + unsigned int overlap_bytes = ROUND_UP(req->offset + req->bytes, align) + - overlap_offset; + + if (!req->serialising) { + req->bs->serialising_in_flight++; + req->serialising = true; + } + + req->overlap_offset = MIN(req->overlap_offset, overlap_offset); + req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes); +} + +/** + * Round a region to cluster boundaries + */ +void bdrv_round_to_clusters(BlockDriverState *bs, + int64_t sector_num, int nb_sectors, + int64_t *cluster_sector_num, + int *cluster_nb_sectors) +{ + BlockDriverInfo bdi; + + if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) { + *cluster_sector_num = sector_num; + *cluster_nb_sectors = nb_sectors; + } else { + int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE; + *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c); + *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num + + nb_sectors, c); + } +} + +static int bdrv_get_cluster_size(BlockDriverState *bs) +{ + BlockDriverInfo bdi; + int ret; + + ret = bdrv_get_info(bs, &bdi); + if (ret < 0 || bdi.cluster_size == 0) { + return bs->request_alignment; + } else { + return bdi.cluster_size; + } +} + +static bool tracked_request_overlaps(BdrvTrackedRequest *req, + int64_t offset, unsigned int bytes) +{ + /* aaaa bbbb */ + if (offset >= req->overlap_offset + req->overlap_bytes) { + return false; + } + /* bbbb aaaa */ + if (req->overlap_offset >= offset + bytes) { + return false; + } + return true; +} + +static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self) +{ + BlockDriverState *bs = self->bs; + BdrvTrackedRequest *req; + bool retry; + bool waited = false; + + if (!bs->serialising_in_flight) { + return false; + } + + do { + retry = false; + QLIST_FOREACH(req, &bs->tracked_requests, list) { + if (req == self || (!req->serialising && !self->serialising)) { + continue; + } + if (tracked_request_overlaps(req, self->overlap_offset, + self->overlap_bytes)) + { + /* Hitting this means there was a reentrant request, for + * example, a block driver issuing nested requests. This must + * never happen since it means deadlock. + */ + assert(qemu_coroutine_self() != req->co); + + /* If the request is already (indirectly) waiting for us, or + * will wait for us as soon as it wakes up, then just go on + * (instead of producing a deadlock in the former case). 
*/ + if (!req->waiting_for) { + self->waiting_for = req; + qemu_co_queue_wait(&req->wait_queue); + self->waiting_for = NULL; + retry = true; + waited = true; + break; + } + } + } + } while (retry); + + return waited; +} + +static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset, + size_t size) +{ + if (size > BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS) { + return -EIO; + } + + if (!bdrv_is_inserted(bs)) { + return -ENOMEDIUM; + } + + if (offset < 0) { + return -EIO; + } + + return 0; +} + +static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num, + int nb_sectors) +{ + if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) { + return -EIO; + } + + return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE, + nb_sectors * BDRV_SECTOR_SIZE); +} + +typedef struct RwCo { + BlockDriverState *bs; + int64_t offset; + QEMUIOVector *qiov; + bool is_write; + int ret; + BdrvRequestFlags flags; +} RwCo; + +static void coroutine_fn bdrv_rw_co_entry(void *opaque) +{ + RwCo *rwco = opaque; + + if (!rwco->is_write) { + rwco->ret = bdrv_co_do_preadv(rwco->bs, rwco->offset, + rwco->qiov->size, rwco->qiov, + rwco->flags); + } else { + rwco->ret = bdrv_co_do_pwritev(rwco->bs, rwco->offset, + rwco->qiov->size, rwco->qiov, + rwco->flags); + } +} + +/* + * Process a vectored synchronous request using coroutines + */ +static int bdrv_prwv_co(BlockDriverState *bs, int64_t offset, + QEMUIOVector *qiov, bool is_write, + BdrvRequestFlags flags) +{ + Coroutine *co; + RwCo rwco = { + .bs = bs, + .offset = offset, + .qiov = qiov, + .is_write = is_write, + .ret = NOT_DONE, + .flags = flags, + }; + + /** + * In sync call context, when the vcpu is blocked, this throttling timer + * will not fire; so the I/O throttling function has to be disabled here + * if it has been enabled. + */ + if (bs->io_limits_enabled) { + fprintf(stderr, "Disabling I/O throttling on '%s' due " + "to synchronous I/O.\n", bdrv_get_device_name(bs)); + bdrv_io_limits_disable(bs); + } + + if (qemu_in_coroutine()) { + /* Fast-path if already in coroutine context */ + bdrv_rw_co_entry(&rwco); + } else { + AioContext *aio_context = bdrv_get_aio_context(bs); + + co = qemu_coroutine_create(bdrv_rw_co_entry); + qemu_coroutine_enter(co, &rwco); + while (rwco.ret == NOT_DONE) { + aio_poll(aio_context, true); + } + } + return rwco.ret; +} + +/* + * Process a synchronous request using coroutines + */ +static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf, + int nb_sectors, bool is_write, BdrvRequestFlags flags) +{ + QEMUIOVector qiov; + struct iovec iov = { + .iov_base = (void *)buf, + .iov_len = nb_sectors * BDRV_SECTOR_SIZE, + }; + + if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) { + return -EINVAL; + } + + qemu_iovec_init_external(&qiov, &iov, 1); + return bdrv_prwv_co(bs, sector_num << BDRV_SECTOR_BITS, + &qiov, is_write, flags); +} + +/* return < 0 if error. See bdrv_write() for the return codes */ +int bdrv_read(BlockDriverState *bs, int64_t sector_num, + uint8_t *buf, int nb_sectors) +{ + return bdrv_rw_co(bs, sector_num, buf, nb_sectors, false, 0); +} + +/* Just like bdrv_read(), but with I/O throttling temporarily disabled */ +int bdrv_read_unthrottled(BlockDriverState *bs, int64_t sector_num, + uint8_t *buf, int nb_sectors) +{ + bool enabled; + int ret; + + enabled = bs->io_limits_enabled; + bs->io_limits_enabled = false; + ret = bdrv_read(bs, sector_num, buf, nb_sectors); + bs->io_limits_enabled = enabled; + return ret; +} + +/* Return < 0 if error. 
Important errors are: + -EIO generic I/O error (may happen for all errors) + -ENOMEDIUM No media inserted. + -EINVAL Invalid sector number or nb_sectors + -EACCES Trying to write a read-only device +*/ +int bdrv_write(BlockDriverState *bs, int64_t sector_num, + const uint8_t *buf, int nb_sectors) +{ + return bdrv_rw_co(bs, sector_num, (uint8_t *)buf, nb_sectors, true, 0); +} + +int bdrv_write_zeroes(BlockDriverState *bs, int64_t sector_num, + int nb_sectors, BdrvRequestFlags flags) +{ + return bdrv_rw_co(bs, sector_num, NULL, nb_sectors, true, + BDRV_REQ_ZERO_WRITE | flags); +} + +/* + * Completely zero out a block device with the help of bdrv_write_zeroes. + * The operation is sped up by checking the block status and only writing + * zeroes to the device if they currently do not return zeroes. Optional + * flags are passed through to bdrv_write_zeroes (e.g. BDRV_REQ_MAY_UNMAP). + * + * Returns < 0 on error, 0 on success. For error codes see bdrv_write(). + */ +int bdrv_make_zero(BlockDriverState *bs, BdrvRequestFlags flags) +{ + int64_t target_sectors, ret, nb_sectors, sector_num = 0; + int n; + + target_sectors = bdrv_nb_sectors(bs); + if (target_sectors < 0) { + return target_sectors; + } + + for (;;) { + nb_sectors = MIN(target_sectors - sector_num, BDRV_REQUEST_MAX_SECTORS); + if (nb_sectors <= 0) { + return 0; + } + ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &n); + if (ret < 0) { + error_report("error getting block status at sector %" PRId64 ": %s", + sector_num, strerror(-ret)); + return ret; + } + if (ret & BDRV_BLOCK_ZERO) { + sector_num += n; + continue; + } + ret = bdrv_write_zeroes(bs, sector_num, n, flags); + if (ret < 0) { + error_report("error writing zeroes at sector %" PRId64 ": %s", + sector_num, strerror(-ret)); + return ret; + } + sector_num += n; + } +} + +int bdrv_pread(BlockDriverState *bs, int64_t offset, void *buf, int bytes) +{ + QEMUIOVector qiov; + struct iovec iov = { + .iov_base = (void *)buf, + .iov_len = bytes, + }; + int ret; + + if (bytes < 0) { + return -EINVAL; + } + + qemu_iovec_init_external(&qiov, &iov, 1); + ret = bdrv_prwv_co(bs, offset, &qiov, false, 0); + if (ret < 0) { + return ret; + } + + return bytes; +} + +int bdrv_pwritev(BlockDriverState *bs, int64_t offset, QEMUIOVector *qiov) +{ + int ret; + + ret = bdrv_prwv_co(bs, offset, qiov, true, 0); + if (ret < 0) { + return ret; + } + + return qiov->size; +} + +int bdrv_pwrite(BlockDriverState *bs, int64_t offset, + const void *buf, int bytes) +{ + QEMUIOVector qiov; + struct iovec iov = { + .iov_base = (void *) buf, + .iov_len = bytes, + }; + + if (bytes < 0) { + return -EINVAL; + } + + qemu_iovec_init_external(&qiov, &iov, 1); + return bdrv_pwritev(bs, offset, &qiov); +} + +/* + * Writes to the file and ensures that no writes are reordered across this + * request (acts as a barrier) + * + * Returns 0 on success, -errno in error cases. + */ +int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset, + const void *buf, int count) +{ + int ret; + + ret = bdrv_pwrite(bs, offset, buf, count); + if (ret < 0) { + return ret; + } + + /* No flush needed for cache modes that already do it */ + if (bs->enable_write_cache) { + bdrv_flush(bs); + } + + return 0; +} + +static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs, + int64_t sector_num, int nb_sectors, QEMUIOVector *qiov) +{ + /* Perform I/O through a temporary buffer so that users who scribble over + * their read buffer while the operation is in progress do not end up + * modifying the image file. 
This is critical for zero-copy guest I/O + * where anything might happen inside guest memory. + */ + void *bounce_buffer; + + BlockDriver *drv = bs->drv; + struct iovec iov; + QEMUIOVector bounce_qiov; + int64_t cluster_sector_num; + int cluster_nb_sectors; + size_t skip_bytes; + int ret; + + /* Cover entire cluster so no additional backing file I/O is required when + * allocating cluster in the image file. + */ + bdrv_round_to_clusters(bs, sector_num, nb_sectors, + &cluster_sector_num, &cluster_nb_sectors); + + trace_bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, + cluster_sector_num, cluster_nb_sectors); + + iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE; + iov.iov_base = bounce_buffer = qemu_try_blockalign(bs, iov.iov_len); + if (bounce_buffer == NULL) { + ret = -ENOMEM; + goto err; + } + + qemu_iovec_init_external(&bounce_qiov, &iov, 1); + + ret = drv->bdrv_co_readv(bs, cluster_sector_num, cluster_nb_sectors, + &bounce_qiov); + if (ret < 0) { + goto err; + } + + if (drv->bdrv_co_write_zeroes && + buffer_is_zero(bounce_buffer, iov.iov_len)) { + ret = bdrv_co_do_write_zeroes(bs, cluster_sector_num, + cluster_nb_sectors, 0); + } else { + /* This does not change the data on the disk, it is not necessary + * to flush even in cache=writethrough mode. + */ + ret = drv->bdrv_co_writev(bs, cluster_sector_num, cluster_nb_sectors, + &bounce_qiov); + } + + if (ret < 0) { + /* It might be okay to ignore write errors for guest requests. If this + * is a deliberate copy-on-read then we don't want to ignore the error. + * Simply report it in all cases. + */ + goto err; + } + + skip_bytes = (sector_num - cluster_sector_num) * BDRV_SECTOR_SIZE; + qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes, + nb_sectors * BDRV_SECTOR_SIZE); + +err: + qemu_vfree(bounce_buffer); + return ret; +} + +/* + * Forwards an already correctly aligned request to the BlockDriver. This + * handles copy on read and zeroing after EOF; any other features must be + * implemented by the caller. + */ +static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs, + BdrvTrackedRequest *req, int64_t offset, unsigned int bytes, + int64_t align, QEMUIOVector *qiov, int flags) +{ + BlockDriver *drv = bs->drv; + int ret; + + int64_t sector_num = offset >> BDRV_SECTOR_BITS; + unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS; + + assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0); + assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0); + assert(!qiov || bytes == qiov->size); + + /* Handle Copy on Read and associated serialisation */ + if (flags & BDRV_REQ_COPY_ON_READ) { + /* If we touch the same cluster it counts as an overlap. This + * guarantees that allocating writes will be serialized and not race + * with each other for the same cluster. For example, in copy-on-read + * it ensures that the CoR read and write operations are atomic and + * guest writes cannot interleave between them. 
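/* Illustration with hypothetical numbers (not part of the patch): if
 * bdrv_get_info() reports a 64 KiB cluster size, bdrv_get_cluster_size()
 * returns 65536, so a copy-on-read request for 10 sectors starting at
 * sector 130 is serialised over its whole cluster; bdrv_co_do_copy_on_readv()
 * then widens the actual read and write to sectors 128..255 via
 * bdrv_round_to_clusters(), and a concurrent guest write touching that
 * cluster waits until the copy-on-read write has completed. */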
*/ + mark_request_serialising(req, bdrv_get_cluster_size(bs)); + } + + wait_serialising_requests(req); + + if (flags & BDRV_REQ_COPY_ON_READ) { + int pnum; + + ret = bdrv_is_allocated(bs, sector_num, nb_sectors, &pnum); + if (ret < 0) { + goto out; + } + + if (!ret || pnum != nb_sectors) { + ret = bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, qiov); + goto out; + } + } + + /* Forward the request to the BlockDriver */ + if (!bs->zero_beyond_eof) { + ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov); + } else { + /* Read zeros after EOF */ + int64_t total_sectors, max_nb_sectors; + + total_sectors = bdrv_nb_sectors(bs); + if (total_sectors < 0) { + ret = total_sectors; + goto out; + } + + max_nb_sectors = ROUND_UP(MAX(0, total_sectors - sector_num), + align >> BDRV_SECTOR_BITS); + if (nb_sectors < max_nb_sectors) { + ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov); + } else if (max_nb_sectors > 0) { + QEMUIOVector local_qiov; + + qemu_iovec_init(&local_qiov, qiov->niov); + qemu_iovec_concat(&local_qiov, qiov, 0, + max_nb_sectors * BDRV_SECTOR_SIZE); + + ret = drv->bdrv_co_readv(bs, sector_num, max_nb_sectors, + &local_qiov); + + qemu_iovec_destroy(&local_qiov); + } else { + ret = 0; + } + + /* Reading beyond end of file is supposed to produce zeroes */ + if (ret == 0 && total_sectors < sector_num + nb_sectors) { + uint64_t offset = MAX(0, total_sectors - sector_num); + uint64_t bytes = (sector_num + nb_sectors - offset) * + BDRV_SECTOR_SIZE; + qemu_iovec_memset(qiov, offset * BDRV_SECTOR_SIZE, 0, bytes); + } + } + +out: + return ret; +} + +static inline uint64_t bdrv_get_align(BlockDriverState *bs) +{ + /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */ + return MAX(BDRV_SECTOR_SIZE, bs->request_alignment); +} + +static inline bool bdrv_req_is_aligned(BlockDriverState *bs, + int64_t offset, size_t bytes) +{ + int64_t align = bdrv_get_align(bs); + return !(offset & (align - 1) || (bytes & (align - 1))); +} + +/* + * Handle a read request in coroutine context + */ +static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs, + int64_t offset, unsigned int bytes, QEMUIOVector *qiov, + BdrvRequestFlags flags) +{ + BlockDriver *drv = bs->drv; + BdrvTrackedRequest req; + + uint64_t align = bdrv_get_align(bs); + uint8_t *head_buf = NULL; + uint8_t *tail_buf = NULL; + QEMUIOVector local_qiov; + bool use_local_qiov = false; + int ret; + + if (!drv) { + return -ENOMEDIUM; + } + + ret = bdrv_check_byte_request(bs, offset, bytes); + if (ret < 0) { + return ret; + } + + if (bs->copy_on_read) { + flags |= BDRV_REQ_COPY_ON_READ; + } + + /* throttling disk I/O */ + if (bs->io_limits_enabled) { + bdrv_io_limits_intercept(bs, bytes, false); + } + + /* Align read if necessary by padding qiov */ + if (offset & (align - 1)) { + head_buf = qemu_blockalign(bs, align); + qemu_iovec_init(&local_qiov, qiov->niov + 2); + qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1)); + qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size); + use_local_qiov = true; + + bytes += offset & (align - 1); + offset = offset & ~(align - 1); + } + + if ((offset + bytes) & (align - 1)) { + if (!use_local_qiov) { + qemu_iovec_init(&local_qiov, qiov->niov + 1); + qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size); + use_local_qiov = true; + } + tail_buf = qemu_blockalign(bs, align); + qemu_iovec_add(&local_qiov, tail_buf, + align - ((offset + bytes) & (align - 1))); + + bytes = ROUND_UP(bytes, align); + } + + tracked_request_begin(&req, bs, offset, bytes, false); + ret = 
bdrv_aligned_preadv(bs, &req, offset, bytes, align, + use_local_qiov ? &local_qiov : qiov, + flags); + tracked_request_end(&req); + + if (use_local_qiov) { + qemu_iovec_destroy(&local_qiov); + qemu_vfree(head_buf); + qemu_vfree(tail_buf); + } + + return ret; +} + +static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs, + int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, + BdrvRequestFlags flags) +{ + if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) { + return -EINVAL; + } + + return bdrv_co_do_preadv(bs, sector_num << BDRV_SECTOR_BITS, + nb_sectors << BDRV_SECTOR_BITS, qiov, flags); +} + +int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num, + int nb_sectors, QEMUIOVector *qiov) +{ + trace_bdrv_co_readv(bs, sector_num, nb_sectors); + + return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, 0); +} + +int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs, + int64_t sector_num, int nb_sectors, QEMUIOVector *qiov) +{ + trace_bdrv_co_copy_on_readv(bs, sector_num, nb_sectors); + + return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, + BDRV_REQ_COPY_ON_READ); +} + +#define MAX_WRITE_ZEROES_BOUNCE_BUFFER 32768 + +static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs, + int64_t sector_num, int nb_sectors, BdrvRequestFlags flags) +{ + BlockDriver *drv = bs->drv; + QEMUIOVector qiov; + struct iovec iov = {0}; + int ret = 0; + + int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_write_zeroes, + BDRV_REQUEST_MAX_SECTORS); + + while (nb_sectors > 0 && !ret) { + int num = nb_sectors; + + /* Align request. Block drivers can expect the "bulk" of the request + * to be aligned. + */ + if (bs->bl.write_zeroes_alignment + && num > bs->bl.write_zeroes_alignment) { + if (sector_num % bs->bl.write_zeroes_alignment != 0) { + /* Make a small request up to the first aligned sector. */ + num = bs->bl.write_zeroes_alignment; + num -= sector_num % bs->bl.write_zeroes_alignment; + } else if ((sector_num + num) % bs->bl.write_zeroes_alignment != 0) { + /* Shorten the request to the last aligned sector. num cannot + * underflow because num > bs->bl.write_zeroes_alignment. + */ + num -= (sector_num + num) % bs->bl.write_zeroes_alignment; + } + } + + /* limit request size */ + if (num > max_write_zeroes) { + num = max_write_zeroes; + } + + ret = -ENOTSUP; + /* First try the efficient write zeroes operation */ + if (drv->bdrv_co_write_zeroes) { + ret = drv->bdrv_co_write_zeroes(bs, sector_num, num, flags); + } + + if (ret == -ENOTSUP) { + /* Fall back to bounce buffer if write zeroes is unsupported */ + int max_xfer_len = MIN_NON_ZERO(bs->bl.max_transfer_length, + MAX_WRITE_ZEROES_BOUNCE_BUFFER); + num = MIN(num, max_xfer_len); + iov.iov_len = num * BDRV_SECTOR_SIZE; + if (iov.iov_base == NULL) { + iov.iov_base = qemu_try_blockalign(bs, num * BDRV_SECTOR_SIZE); + if (iov.iov_base == NULL) { + ret = -ENOMEM; + goto fail; + } + memset(iov.iov_base, 0, num * BDRV_SECTOR_SIZE); + } + qemu_iovec_init_external(&qiov, &iov, 1); + + ret = drv->bdrv_co_writev(bs, sector_num, num, &qiov); + + /* Keep bounce buffer around if it is big enough for all + * all future requests. + */ + if (num < max_xfer_len) { + qemu_vfree(iov.iov_base); + iov.iov_base = NULL; + } + } + + sector_num += num; + nb_sectors -= num; + } + +fail: + qemu_vfree(iov.iov_base); + return ret; +} + +/* + * Forwards an already correctly aligned write request to the BlockDriver. 
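A worked example of the splitting done by the write-zeroes loop above (numbers are hypothetical): with bs->bl.write_zeroes_alignment set to 8 sectors, a request for 30 sectors starting at sector 5 reaches the driver as three calls, first 3 sectors (5..7) up to the alignment boundary, then 24 aligned sectors (8..31), and finally the 3-sector remainder (32..34); only a piece for which the driver returns -ENOTSUP falls back to the bounce-buffer path.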
+ */ +static int coroutine_fn bdrv_aligned_pwritev(BlockDriverState *bs, + BdrvTrackedRequest *req, int64_t offset, unsigned int bytes, + QEMUIOVector *qiov, int flags) +{ + BlockDriver *drv = bs->drv; + bool waited; + int ret; + + int64_t sector_num = offset >> BDRV_SECTOR_BITS; + unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS; + + assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0); + assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0); + assert(!qiov || bytes == qiov->size); + + waited = wait_serialising_requests(req); + assert(!waited || !req->serialising); + assert(req->overlap_offset <= offset); + assert(offset + bytes <= req->overlap_offset + req->overlap_bytes); + + ret = notifier_with_return_list_notify(&bs->before_write_notifiers, req); + + if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF && + !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_write_zeroes && + qemu_iovec_is_zero(qiov)) { + flags |= BDRV_REQ_ZERO_WRITE; + if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) { + flags |= BDRV_REQ_MAY_UNMAP; + } + } + + if (ret < 0) { + /* Do nothing, write notifier decided to fail this request */ + } else if (flags & BDRV_REQ_ZERO_WRITE) { + BLKDBG_EVENT(bs, BLKDBG_PWRITEV_ZERO); + ret = bdrv_co_do_write_zeroes(bs, sector_num, nb_sectors, flags); + } else { + BLKDBG_EVENT(bs, BLKDBG_PWRITEV); + ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov); + } + BLKDBG_EVENT(bs, BLKDBG_PWRITEV_DONE); + + if (ret == 0 && !bs->enable_write_cache) { + ret = bdrv_co_flush(bs); + } + + bdrv_set_dirty(bs, sector_num, nb_sectors); + + block_acct_highest_sector(&bs->stats, sector_num, nb_sectors); + + if (ret >= 0) { + bs->total_sectors = MAX(bs->total_sectors, sector_num + nb_sectors); + } + + return ret; +} + +/* + * Handle a write request in coroutine context + */ +static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs, + int64_t offset, unsigned int bytes, QEMUIOVector *qiov, + BdrvRequestFlags flags) +{ + BdrvTrackedRequest req; + uint64_t align = bdrv_get_align(bs); + uint8_t *head_buf = NULL; + uint8_t *tail_buf = NULL; + QEMUIOVector local_qiov; + bool use_local_qiov = false; + int ret; + + if (!bs->drv) { + return -ENOMEDIUM; + } + if (bs->read_only) { + return -EACCES; + } + + ret = bdrv_check_byte_request(bs, offset, bytes); + if (ret < 0) { + return ret; + } + + /* throttling disk I/O */ + if (bs->io_limits_enabled) { + bdrv_io_limits_intercept(bs, bytes, true); + } + + /* + * Align write if necessary by performing a read-modify-write cycle. + * Pad qiov with the read parts and be sure to have a tracked request not + * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle. 
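/* Illustration with hypothetical numbers (not part of the patch): with a
 * request_alignment of 4096, a 512-byte guest write at offset 4608 becomes a
 * single aligned 4096-byte write at offset 4096 below: the head path reads
 * the surrounding block and prepends bytes 4096..4607, the tail path appends
 * bytes 5120..8191, and bytes is rounded up to 4096 before
 * bdrv_aligned_pwritev() is called. */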
+ */ + tracked_request_begin(&req, bs, offset, bytes, true); + + if (offset & (align - 1)) { + QEMUIOVector head_qiov; + struct iovec head_iov; + + mark_request_serialising(&req, align); + wait_serialising_requests(&req); + + head_buf = qemu_blockalign(bs, align); + head_iov = (struct iovec) { + .iov_base = head_buf, + .iov_len = align, + }; + qemu_iovec_init_external(&head_qiov, &head_iov, 1); + + BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_HEAD); + ret = bdrv_aligned_preadv(bs, &req, offset & ~(align - 1), align, + align, &head_qiov, 0); + if (ret < 0) { + goto fail; + } + BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD); + + qemu_iovec_init(&local_qiov, qiov->niov + 2); + qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1)); + qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size); + use_local_qiov = true; + + bytes += offset & (align - 1); + offset = offset & ~(align - 1); + } + + if ((offset + bytes) & (align - 1)) { + QEMUIOVector tail_qiov; + struct iovec tail_iov; + size_t tail_bytes; + bool waited; + + mark_request_serialising(&req, align); + waited = wait_serialising_requests(&req); + assert(!waited || !use_local_qiov); + + tail_buf = qemu_blockalign(bs, align); + tail_iov = (struct iovec) { + .iov_base = tail_buf, + .iov_len = align, + }; + qemu_iovec_init_external(&tail_qiov, &tail_iov, 1); + + BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_TAIL); + ret = bdrv_aligned_preadv(bs, &req, (offset + bytes) & ~(align - 1), align, + align, &tail_qiov, 0); + if (ret < 0) { + goto fail; + } + BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL); + + if (!use_local_qiov) { + qemu_iovec_init(&local_qiov, qiov->niov + 1); + qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size); + use_local_qiov = true; + } + + tail_bytes = (offset + bytes) & (align - 1); + qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes); + + bytes = ROUND_UP(bytes, align); + } + + if (use_local_qiov) { + /* Local buffer may have non-zero data. */ + flags &= ~BDRV_REQ_ZERO_WRITE; + } + ret = bdrv_aligned_pwritev(bs, &req, offset, bytes, + use_local_qiov ? 
&local_qiov : qiov, + flags); + +fail: + tracked_request_end(&req); + + if (use_local_qiov) { + qemu_iovec_destroy(&local_qiov); + } + qemu_vfree(head_buf); + qemu_vfree(tail_buf); + + return ret; +} + +static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs, + int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, + BdrvRequestFlags flags) +{ + if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) { + return -EINVAL; + } + + return bdrv_co_do_pwritev(bs, sector_num << BDRV_SECTOR_BITS, + nb_sectors << BDRV_SECTOR_BITS, qiov, flags); +} + +int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num, + int nb_sectors, QEMUIOVector *qiov) +{ + trace_bdrv_co_writev(bs, sector_num, nb_sectors); + + return bdrv_co_do_writev(bs, sector_num, nb_sectors, qiov, 0); +} + +int coroutine_fn bdrv_co_write_zeroes(BlockDriverState *bs, + int64_t sector_num, int nb_sectors, + BdrvRequestFlags flags) +{ + int ret; + + trace_bdrv_co_write_zeroes(bs, sector_num, nb_sectors, flags); + + if (!(bs->open_flags & BDRV_O_UNMAP)) { + flags &= ~BDRV_REQ_MAY_UNMAP; + } + if (bdrv_req_is_aligned(bs, sector_num << BDRV_SECTOR_BITS, + nb_sectors << BDRV_SECTOR_BITS)) { + ret = bdrv_co_do_writev(bs, sector_num, nb_sectors, NULL, + BDRV_REQ_ZERO_WRITE | flags); + } else { + uint8_t *buf; + QEMUIOVector local_qiov; + size_t bytes = nb_sectors << BDRV_SECTOR_BITS; + + buf = qemu_memalign(bdrv_opt_mem_align(bs), bytes); + memset(buf, 0, bytes); + qemu_iovec_init(&local_qiov, 1); + qemu_iovec_add(&local_qiov, buf, bytes); + + ret = bdrv_co_do_writev(bs, sector_num, nb_sectors, &local_qiov, + BDRV_REQ_ZERO_WRITE | flags); + qemu_vfree(buf); + } + return ret; +} + +int bdrv_flush_all(void) +{ + BlockDriverState *bs = NULL; + int result = 0; + + while ((bs = bdrv_next(bs))) { + AioContext *aio_context = bdrv_get_aio_context(bs); + int ret; + + aio_context_acquire(aio_context); + ret = bdrv_flush(bs); + if (ret < 0 && !result) { + result = ret; + } + aio_context_release(aio_context); + } + + return result; +} + +typedef struct BdrvCoGetBlockStatusData { + BlockDriverState *bs; + BlockDriverState *base; + int64_t sector_num; + int nb_sectors; + int *pnum; + int64_t ret; + bool done; +} BdrvCoGetBlockStatusData; + +/* + * Returns the allocation status of the specified sectors. + * Drivers not implementing the functionality are assumed to not support + * backing files, hence all their sectors are reported as allocated. + * + * If 'sector_num' is beyond the end of the disk image the return value is 0 + * and 'pnum' is set to 0. + * + * 'pnum' is set to the number of sectors (including and immediately following + * the specified sector) that are known to be in the same + * allocated/unallocated state. + * + * 'nb_sectors' is the max value 'pnum' should be set to. If nb_sectors goes + * beyond the end of the disk image it will be clamped. 
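A minimal sketch of a caller walking an image with the status interface described above (scan_example() and its printf() reporting are invented for illustration and assume the usual QEMU headers):

static void scan_example(BlockDriverState *bs)
{
    int64_t sector = 0;
    int64_t total = bdrv_nb_sectors(bs);
    int n;

    while (sector < total) {
        int64_t ret = bdrv_get_block_status(bs, sector,
                                            MIN(total - sector,
                                                BDRV_REQUEST_MAX_SECTORS),
                                            &n);
        if (ret < 0) {
            break;                                  /* -errno from the driver */
        }
        printf("%10" PRId64 " +%d: %s%s\n", sector, n,
               (ret & BDRV_BLOCK_ZERO) ? "zero " : "",
               (ret & BDRV_BLOCK_ALLOCATED) ? "allocated" : "unallocated");
        sector += n;             /* advance by the homogeneous range length */
    }
}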
+ */ +static int64_t coroutine_fn bdrv_co_get_block_status(BlockDriverState *bs, + int64_t sector_num, + int nb_sectors, int *pnum) +{ + int64_t total_sectors; + int64_t n; + int64_t ret, ret2; + + total_sectors = bdrv_nb_sectors(bs); + if (total_sectors < 0) { + return total_sectors; + } + + if (sector_num >= total_sectors) { + *pnum = 0; + return 0; + } + + n = total_sectors - sector_num; + if (n < nb_sectors) { + nb_sectors = n; + } + + if (!bs->drv->bdrv_co_get_block_status) { + *pnum = nb_sectors; + ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED; + if (bs->drv->protocol_name) { + ret |= BDRV_BLOCK_OFFSET_VALID | (sector_num * BDRV_SECTOR_SIZE); + } + return ret; + } + + ret = bs->drv->bdrv_co_get_block_status(bs, sector_num, nb_sectors, pnum); + if (ret < 0) { + *pnum = 0; + return ret; + } + + if (ret & BDRV_BLOCK_RAW) { + assert(ret & BDRV_BLOCK_OFFSET_VALID); + return bdrv_get_block_status(bs->file, ret >> BDRV_SECTOR_BITS, + *pnum, pnum); + } + + if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) { + ret |= BDRV_BLOCK_ALLOCATED; + } + + if (!(ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO)) { + if (bdrv_unallocated_blocks_are_zero(bs)) { + ret |= BDRV_BLOCK_ZERO; + } else if (bs->backing_hd) { + BlockDriverState *bs2 = bs->backing_hd; + int64_t nb_sectors2 = bdrv_nb_sectors(bs2); + if (nb_sectors2 >= 0 && sector_num >= nb_sectors2) { + ret |= BDRV_BLOCK_ZERO; + } + } + } + + if (bs->file && + (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) && + (ret & BDRV_BLOCK_OFFSET_VALID)) { + int file_pnum; + + ret2 = bdrv_co_get_block_status(bs->file, ret >> BDRV_SECTOR_BITS, + *pnum, &file_pnum); + if (ret2 >= 0) { + /* Ignore errors. This is just providing extra information, it + * is useful but not necessary. + */ + if (!file_pnum) { + /* !file_pnum indicates an offset at or beyond the EOF; it is + * perfectly valid for the format block driver to point to such + * offsets, so catch it and mark everything as zero */ + ret |= BDRV_BLOCK_ZERO; + } else { + /* Limit request to the range reported by the protocol driver */ + *pnum = file_pnum; + ret |= (ret2 & BDRV_BLOCK_ZERO); + } + } + } + + return ret; +} + +/* Coroutine wrapper for bdrv_get_block_status() */ +static void coroutine_fn bdrv_get_block_status_co_entry(void *opaque) +{ + BdrvCoGetBlockStatusData *data = opaque; + BlockDriverState *bs = data->bs; + + data->ret = bdrv_co_get_block_status(bs, data->sector_num, data->nb_sectors, + data->pnum); + data->done = true; +} + +/* + * Synchronous wrapper around bdrv_co_get_block_status(). + * + * See bdrv_co_get_block_status() for details. + */ +int64_t bdrv_get_block_status(BlockDriverState *bs, int64_t sector_num, + int nb_sectors, int *pnum) +{ + Coroutine *co; + BdrvCoGetBlockStatusData data = { + .bs = bs, + .sector_num = sector_num, + .nb_sectors = nb_sectors, + .pnum = pnum, + .done = false, + }; + + if (qemu_in_coroutine()) { + /* Fast-path if already in coroutine context */ + bdrv_get_block_status_co_entry(&data); + } else { + AioContext *aio_context = bdrv_get_aio_context(bs); + + co = qemu_coroutine_create(bdrv_get_block_status_co_entry); + qemu_coroutine_enter(co, &data); + while (!data.done) { + aio_poll(aio_context, true); + } + } + return data.ret; +} + +int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num, + int nb_sectors, int *pnum) +{ + int64_t ret = bdrv_get_block_status(bs, sector_num, nb_sectors, pnum); + if (ret < 0) { + return ret; + } + return !!(ret & BDRV_BLOCK_ALLOCATED); +} + +/* + * Given an image chain: ... 
-> [BASE] -> [INTER1] -> [INTER2] -> [TOP] + * + * Return true if the given sector is allocated in any image between + * BASE and TOP (inclusive). BASE can be NULL to check if the given + * sector is allocated in any image of the chain. Return false otherwise. + * + * 'pnum' is set to the number of sectors (including and immediately following + * the specified sector) that are known to be in the same + * allocated/unallocated state. + * + */ +int bdrv_is_allocated_above(BlockDriverState *top, + BlockDriverState *base, + int64_t sector_num, + int nb_sectors, int *pnum) +{ + BlockDriverState *intermediate; + int ret, n = nb_sectors; + + intermediate = top; + while (intermediate && intermediate != base) { + int pnum_inter; + ret = bdrv_is_allocated(intermediate, sector_num, nb_sectors, + &pnum_inter); + if (ret < 0) { + return ret; + } else if (ret) { + *pnum = pnum_inter; + return 1; + } + + /* + * [sector_num, nb_sectors] is unallocated on top but intermediate + * might have + * + * [sector_num+x, nr_sectors] allocated. + */ + if (n > pnum_inter && + (intermediate == top || + sector_num + pnum_inter < intermediate->total_sectors)) { + n = pnum_inter; + } + + intermediate = intermediate->backing_hd; + } + + *pnum = n; + return 0; +} + +int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num, + const uint8_t *buf, int nb_sectors) +{ + BlockDriver *drv = bs->drv; + int ret; + + if (!drv) { + return -ENOMEDIUM; + } + if (!drv->bdrv_write_compressed) { + return -ENOTSUP; + } + ret = bdrv_check_request(bs, sector_num, nb_sectors); + if (ret < 0) { + return ret; + } + + assert(QLIST_EMPTY(&bs->dirty_bitmaps)); + + return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors); +} + +int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf, + int64_t pos, int size) +{ + QEMUIOVector qiov; + struct iovec iov = { + .iov_base = (void *) buf, + .iov_len = size, + }; + + qemu_iovec_init_external(&qiov, &iov, 1); + return bdrv_writev_vmstate(bs, &qiov, pos); +} + +int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos) +{ + BlockDriver *drv = bs->drv; + + if (!drv) { + return -ENOMEDIUM; + } else if (drv->bdrv_save_vmstate) { + return drv->bdrv_save_vmstate(bs, qiov, pos); + } else if (bs->file) { + return bdrv_writev_vmstate(bs->file, qiov, pos); + } + + return -ENOTSUP; +} + +int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf, + int64_t pos, int size) +{ + BlockDriver *drv = bs->drv; + if (!drv) + return -ENOMEDIUM; + if (drv->bdrv_load_vmstate) + return drv->bdrv_load_vmstate(bs, buf, pos, size); + if (bs->file) + return bdrv_load_vmstate(bs->file, buf, pos, size); + return -ENOTSUP; +} + +/**************************************************************/ +/* async I/Os */ + +BlockAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num, + QEMUIOVector *qiov, int nb_sectors, + BlockCompletionFunc *cb, void *opaque) +{ + trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque); + + return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0, + cb, opaque, false); +} + +BlockAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num, + QEMUIOVector *qiov, int nb_sectors, + BlockCompletionFunc *cb, void *opaque) +{ + trace_bdrv_aio_writev(bs, sector_num, nb_sectors, opaque); + + return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0, + cb, opaque, true); +} + +BlockAIOCB *bdrv_aio_write_zeroes(BlockDriverState *bs, + int64_t sector_num, int nb_sectors, BdrvRequestFlags flags, + BlockCompletionFunc *cb, void *opaque) +{ + 
trace_bdrv_aio_write_zeroes(bs, sector_num, nb_sectors, flags, opaque); + + return bdrv_co_aio_rw_vector(bs, sector_num, NULL, nb_sectors, + BDRV_REQ_ZERO_WRITE | flags, + cb, opaque, true); +} + + +typedef struct MultiwriteCB { + int error; + int num_requests; + int num_callbacks; + struct { + BlockCompletionFunc *cb; + void *opaque; + QEMUIOVector *free_qiov; + } callbacks[]; +} MultiwriteCB; + +static void multiwrite_user_cb(MultiwriteCB *mcb) +{ + int i; + + for (i = 0; i < mcb->num_callbacks; i++) { + mcb->callbacks[i].cb(mcb->callbacks[i].opaque, mcb->error); + if (mcb->callbacks[i].free_qiov) { + qemu_iovec_destroy(mcb->callbacks[i].free_qiov); + } + g_free(mcb->callbacks[i].free_qiov); + } +} + +static void multiwrite_cb(void *opaque, int ret) +{ + MultiwriteCB *mcb = opaque; + + trace_multiwrite_cb(mcb, ret); + + if (ret < 0 && !mcb->error) { + mcb->error = ret; + } + + mcb->num_requests--; + if (mcb->num_requests == 0) { + multiwrite_user_cb(mcb); + g_free(mcb); + } +} + +static int multiwrite_req_compare(const void *a, const void *b) +{ + const BlockRequest *req1 = a, *req2 = b; + + /* + * Note that we can't simply subtract req2->sector from req1->sector + * here as that could overflow the return value. + */ + if (req1->sector > req2->sector) { + return 1; + } else if (req1->sector < req2->sector) { + return -1; + } else { + return 0; + } +} + +/* + * Takes a bunch of requests and tries to merge them. Returns the number of + * requests that remain after merging. + */ +static int multiwrite_merge(BlockDriverState *bs, BlockRequest *reqs, + int num_reqs, MultiwriteCB *mcb) +{ + int i, outidx; + + // Sort requests by start sector + qsort(reqs, num_reqs, sizeof(*reqs), &multiwrite_req_compare); + + // Check if adjacent requests touch the same clusters. If so, combine them, + // filling up gaps with zero sectors. + outidx = 0; + for (i = 1; i < num_reqs; i++) { + int merge = 0; + int64_t oldreq_last = reqs[outidx].sector + reqs[outidx].nb_sectors; + + // Handle exactly sequential writes and overlapping writes. + if (reqs[i].sector <= oldreq_last) { + merge = 1; + } + + if (reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1 > IOV_MAX) { + merge = 0; + } + + if (bs->bl.max_transfer_length && reqs[outidx].nb_sectors + + reqs[i].nb_sectors > bs->bl.max_transfer_length) { + merge = 0; + } + + if (merge) { + size_t size; + QEMUIOVector *qiov = g_malloc0(sizeof(*qiov)); + qemu_iovec_init(qiov, + reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1); + + // Add the first request to the merged one. If the requests are + // overlapping, drop the last sectors of the first request. 
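/* Illustration with hypothetical numbers (not part of the patch): suppose
 * reqs[outidx] covers sectors 0..7 and reqs[i] covers sectors 6..13, each
 * backed by a 4 KiB qiov. oldreq_last is 8 and reqs[i].sector (6) <= 8, so
 * they merge: size below is (6 - 0) << 9 = 3072, i.e. sectors 0..5 of the
 * first request are kept, the whole second request is appended, and the
 * merged request ends up as 14 sectors (0..13) with no tail left to re-add. */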
+ size = (reqs[i].sector - reqs[outidx].sector) << 9; + qemu_iovec_concat(qiov, reqs[outidx].qiov, 0, size); + + // We should need to add any zeros between the two requests + assert (reqs[i].sector <= oldreq_last); + + // Add the second request + qemu_iovec_concat(qiov, reqs[i].qiov, 0, reqs[i].qiov->size); + + // Add tail of first request, if necessary + if (qiov->size < reqs[outidx].qiov->size) { + qemu_iovec_concat(qiov, reqs[outidx].qiov, qiov->size, + reqs[outidx].qiov->size - qiov->size); + } + + reqs[outidx].nb_sectors = qiov->size >> 9; + reqs[outidx].qiov = qiov; + + mcb->callbacks[i].free_qiov = reqs[outidx].qiov; + } else { + outidx++; + reqs[outidx].sector = reqs[i].sector; + reqs[outidx].nb_sectors = reqs[i].nb_sectors; + reqs[outidx].qiov = reqs[i].qiov; + } + } + + block_acct_merge_done(&bs->stats, BLOCK_ACCT_WRITE, num_reqs - outidx - 1); + + return outidx + 1; +} + +/* + * Submit multiple AIO write requests at once. + * + * On success, the function returns 0 and all requests in the reqs array have + * been submitted. In error case this function returns -1, and any of the + * requests may or may not be submitted yet. In particular, this means that the + * callback will be called for some of the requests, for others it won't. The + * caller must check the error field of the BlockRequest to wait for the right + * callbacks (if error != 0, no callback will be called). + * + * The implementation may modify the contents of the reqs array, e.g. to merge + * requests. However, the fields opaque and error are left unmodified as they + * are used to signal failure for a single request to the caller. + */ +int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, int num_reqs) +{ + MultiwriteCB *mcb; + int i; + + /* don't submit writes if we don't have a medium */ + if (bs->drv == NULL) { + for (i = 0; i < num_reqs; i++) { + reqs[i].error = -ENOMEDIUM; + } + return -1; + } + + if (num_reqs == 0) { + return 0; + } + + // Create MultiwriteCB structure + mcb = g_malloc0(sizeof(*mcb) + num_reqs * sizeof(*mcb->callbacks)); + mcb->num_requests = 0; + mcb->num_callbacks = num_reqs; + + for (i = 0; i < num_reqs; i++) { + mcb->callbacks[i].cb = reqs[i].cb; + mcb->callbacks[i].opaque = reqs[i].opaque; + } + + // Check for mergable requests + num_reqs = multiwrite_merge(bs, reqs, num_reqs, mcb); + + trace_bdrv_aio_multiwrite(mcb, mcb->num_callbacks, num_reqs); + + /* Run the aio requests. */ + mcb->num_requests = num_reqs; + for (i = 0; i < num_reqs; i++) { + bdrv_co_aio_rw_vector(bs, reqs[i].sector, reqs[i].qiov, + reqs[i].nb_sectors, reqs[i].flags, + multiwrite_cb, mcb, + true); + } + + return 0; +} + +void bdrv_aio_cancel(BlockAIOCB *acb) +{ + qemu_aio_ref(acb); + bdrv_aio_cancel_async(acb); + while (acb->refcnt > 1) { + if (acb->aiocb_info->get_aio_context) { + aio_poll(acb->aiocb_info->get_aio_context(acb), true); + } else if (acb->bs) { + aio_poll(bdrv_get_aio_context(acb->bs), true); + } else { + abort(); + } + } + qemu_aio_unref(acb); +} + +/* Async version of aio cancel. The caller is not blocked if the acb implements + * cancel_async, otherwise we do nothing and let the request normally complete. + * In either case the completion callback must be called. 
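As an illustration of the two cancellation flavours described above (cancel_example() and my_read_cb() are hypothetical and error handling is omitted):

static void my_read_cb(void *opaque, int ret)
{
    /* Always reached eventually: ret is 0 on success, negative on error
     * or cancellation. */
}

static void cancel_example(BlockDriverState *bs, QEMUIOVector *qiov)
{
    BlockAIOCB *acb = bdrv_aio_readv(bs, 0, qiov, 8, my_read_cb, NULL);

    /* Synchronous cancel: polls the request's AioContext until the request
     * has completed, so my_read_cb() has run by the time this returns. */
    bdrv_aio_cancel(acb);

    /* The asynchronous variant, bdrv_aio_cancel_async(acb), would instead
     * return immediately and let my_read_cb() fire later. */
}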
*/ +void bdrv_aio_cancel_async(BlockAIOCB *acb) +{ + if (acb->aiocb_info->cancel_async) { + acb->aiocb_info->cancel_async(acb); + } +} + +/**************************************************************/ +/* async block device emulation */ + +typedef struct BlockAIOCBSync { + BlockAIOCB common; + QEMUBH *bh; + int ret; + /* vector translation state */ + QEMUIOVector *qiov; + uint8_t *bounce; + int is_write; +} BlockAIOCBSync; + +static const AIOCBInfo bdrv_em_aiocb_info = { + .aiocb_size = sizeof(BlockAIOCBSync), +}; + +static void bdrv_aio_bh_cb(void *opaque) +{ + BlockAIOCBSync *acb = opaque; + + if (!acb->is_write && acb->ret >= 0) { + qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size); + } + qemu_vfree(acb->bounce); + acb->common.cb(acb->common.opaque, acb->ret); + qemu_bh_delete(acb->bh); + acb->bh = NULL; + qemu_aio_unref(acb); +} + +static BlockAIOCB *bdrv_aio_rw_vector(BlockDriverState *bs, + int64_t sector_num, + QEMUIOVector *qiov, + int nb_sectors, + BlockCompletionFunc *cb, + void *opaque, + int is_write) + +{ + BlockAIOCBSync *acb; + + acb = qemu_aio_get(&bdrv_em_aiocb_info, bs, cb, opaque); + acb->is_write = is_write; + acb->qiov = qiov; + acb->bounce = qemu_try_blockalign(bs, qiov->size); + acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_aio_bh_cb, acb); + + if (acb->bounce == NULL) { + acb->ret = -ENOMEM; + } else if (is_write) { + qemu_iovec_to_buf(acb->qiov, 0, acb->bounce, qiov->size); + acb->ret = bs->drv->bdrv_write(bs, sector_num, acb->bounce, nb_sectors); + } else { + acb->ret = bs->drv->bdrv_read(bs, sector_num, acb->bounce, nb_sectors); + } + + qemu_bh_schedule(acb->bh); + + return &acb->common; +} + +static BlockAIOCB *bdrv_aio_readv_em(BlockDriverState *bs, + int64_t sector_num, QEMUIOVector *qiov, int nb_sectors, + BlockCompletionFunc *cb, void *opaque) +{ + return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 0); +} + +static BlockAIOCB *bdrv_aio_writev_em(BlockDriverState *bs, + int64_t sector_num, QEMUIOVector *qiov, int nb_sectors, + BlockCompletionFunc *cb, void *opaque) +{ + return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 1); +} + + +typedef struct BlockAIOCBCoroutine { + BlockAIOCB common; + BlockRequest req; + bool is_write; + bool need_bh; + bool *done; + QEMUBH* bh; +} BlockAIOCBCoroutine; + +static const AIOCBInfo bdrv_em_co_aiocb_info = { + .aiocb_size = sizeof(BlockAIOCBCoroutine), +}; + +static void bdrv_co_complete(BlockAIOCBCoroutine *acb) +{ + if (!acb->need_bh) { + acb->common.cb(acb->common.opaque, acb->req.error); + qemu_aio_unref(acb); + } +} + +static void bdrv_co_em_bh(void *opaque) +{ + BlockAIOCBCoroutine *acb = opaque; + + assert(!acb->need_bh); + qemu_bh_delete(acb->bh); + bdrv_co_complete(acb); +} + +static void bdrv_co_maybe_schedule_bh(BlockAIOCBCoroutine *acb) +{ + acb->need_bh = false; + if (acb->req.error != -EINPROGRESS) { + BlockDriverState *bs = acb->common.bs; + + acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb); + qemu_bh_schedule(acb->bh); + } +} + +/* Invoke bdrv_co_do_readv/bdrv_co_do_writev */ +static void coroutine_fn bdrv_co_do_rw(void *opaque) +{ + BlockAIOCBCoroutine *acb = opaque; + BlockDriverState *bs = acb->common.bs; + + if (!acb->is_write) { + acb->req.error = bdrv_co_do_readv(bs, acb->req.sector, + acb->req.nb_sectors, acb->req.qiov, acb->req.flags); + } else { + acb->req.error = bdrv_co_do_writev(bs, acb->req.sector, + acb->req.nb_sectors, acb->req.qiov, acb->req.flags); + } + + bdrv_co_complete(acb); +} + +static BlockAIOCB 
*bdrv_co_aio_rw_vector(BlockDriverState *bs, + int64_t sector_num, + QEMUIOVector *qiov, + int nb_sectors, + BdrvRequestFlags flags, + BlockCompletionFunc *cb, + void *opaque, + bool is_write) +{ + Coroutine *co; + BlockAIOCBCoroutine *acb; + + acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque); + acb->need_bh = true; + acb->req.error = -EINPROGRESS; + acb->req.sector = sector_num; + acb->req.nb_sectors = nb_sectors; + acb->req.qiov = qiov; + acb->req.flags = flags; + acb->is_write = is_write; + + co = qemu_coroutine_create(bdrv_co_do_rw); + qemu_coroutine_enter(co, acb); + + bdrv_co_maybe_schedule_bh(acb); + return &acb->common; +} + +static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque) +{ + BlockAIOCBCoroutine *acb = opaque; + BlockDriverState *bs = acb->common.bs; + + acb->req.error = bdrv_co_flush(bs); + bdrv_co_complete(acb); +} + +BlockAIOCB *bdrv_aio_flush(BlockDriverState *bs, + BlockCompletionFunc *cb, void *opaque) +{ + trace_bdrv_aio_flush(bs, opaque); + + Coroutine *co; + BlockAIOCBCoroutine *acb; + + acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque); + acb->need_bh = true; + acb->req.error = -EINPROGRESS; + + co = qemu_coroutine_create(bdrv_aio_flush_co_entry); + qemu_coroutine_enter(co, acb); + + bdrv_co_maybe_schedule_bh(acb); + return &acb->common; +} + +static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque) +{ + BlockAIOCBCoroutine *acb = opaque; + BlockDriverState *bs = acb->common.bs; + + acb->req.error = bdrv_co_discard(bs, acb->req.sector, acb->req.nb_sectors); + bdrv_co_complete(acb); +} + +BlockAIOCB *bdrv_aio_discard(BlockDriverState *bs, + int64_t sector_num, int nb_sectors, + BlockCompletionFunc *cb, void *opaque) +{ + Coroutine *co; + BlockAIOCBCoroutine *acb; + + trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque); + + acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque); + acb->need_bh = true; + acb->req.error = -EINPROGRESS; + acb->req.sector = sector_num; + acb->req.nb_sectors = nb_sectors; + co = qemu_coroutine_create(bdrv_aio_discard_co_entry); + qemu_coroutine_enter(co, acb); + + bdrv_co_maybe_schedule_bh(acb); + return &acb->common; +} + +void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs, + BlockCompletionFunc *cb, void *opaque) +{ + BlockAIOCB *acb; + + acb = g_slice_alloc(aiocb_info->aiocb_size); + acb->aiocb_info = aiocb_info; + acb->bs = bs; + acb->cb = cb; + acb->opaque = opaque; + acb->refcnt = 1; + return acb; +} + +void qemu_aio_ref(void *p) +{ + BlockAIOCB *acb = p; + acb->refcnt++; +} + +void qemu_aio_unref(void *p) +{ + BlockAIOCB *acb = p; + assert(acb->refcnt > 0); + if (--acb->refcnt == 0) { + g_slice_free1(acb->aiocb_info->aiocb_size, acb); + } +} + +/**************************************************************/ +/* Coroutine block device emulation */ + +typedef struct CoroutineIOCompletion { + Coroutine *coroutine; + int ret; +} CoroutineIOCompletion; + +static void bdrv_co_io_em_complete(void *opaque, int ret) +{ + CoroutineIOCompletion *co = opaque; + + co->ret = ret; + qemu_coroutine_enter(co->coroutine, NULL); +} + +static int coroutine_fn bdrv_co_io_em(BlockDriverState *bs, int64_t sector_num, + int nb_sectors, QEMUIOVector *iov, + bool is_write) +{ + CoroutineIOCompletion co = { + .coroutine = qemu_coroutine_self(), + }; + BlockAIOCB *acb; + + if (is_write) { + acb = bs->drv->bdrv_aio_writev(bs, sector_num, iov, nb_sectors, + bdrv_co_io_em_complete, &co); + } else { + acb = bs->drv->bdrv_aio_readv(bs, sector_num, iov, nb_sectors, + bdrv_co_io_em_complete, &co); + 
} + + trace_bdrv_co_io_em(bs, sector_num, nb_sectors, is_write, acb); + if (!acb) { + return -EIO; + } + qemu_coroutine_yield(); + + return co.ret; +} + +static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs, + int64_t sector_num, int nb_sectors, + QEMUIOVector *iov) +{ + return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, false); +} + +static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs, + int64_t sector_num, int nb_sectors, + QEMUIOVector *iov) +{ + return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, true); +} + +static void coroutine_fn bdrv_flush_co_entry(void *opaque) +{ + RwCo *rwco = opaque; + + rwco->ret = bdrv_co_flush(rwco->bs); +} + +int coroutine_fn bdrv_co_flush(BlockDriverState *bs) +{ + int ret; + + if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs)) { + return 0; + } + + /* Write back cached data to the OS even with cache=unsafe */ + BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS); + if (bs->drv->bdrv_co_flush_to_os) { + ret = bs->drv->bdrv_co_flush_to_os(bs); + if (ret < 0) { + return ret; + } + } + + /* But don't actually force it to the disk with cache=unsafe */ + if (bs->open_flags & BDRV_O_NO_FLUSH) { + goto flush_parent; + } + + BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK); + if (bs->drv->bdrv_co_flush_to_disk) { + ret = bs->drv->bdrv_co_flush_to_disk(bs); + } else if (bs->drv->bdrv_aio_flush) { + BlockAIOCB *acb; + CoroutineIOCompletion co = { + .coroutine = qemu_coroutine_self(), + }; + + acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co); + if (acb == NULL) { + ret = -EIO; + } else { + qemu_coroutine_yield(); + ret = co.ret; + } + } else { + /* + * Some block drivers always operate in either writethrough or unsafe + * mode and don't support bdrv_flush therefore. Usually qemu doesn't + * know how the server works (because the behaviour is hardcoded or + * depends on server-side configuration), so we can't ensure that + * everything is safe on disk. Returning an error doesn't work because + * that would break guests even if the server operates in writethrough + * mode. + * + * Let's hope the user knows what he's doing. + */ + ret = 0; + } + if (ret < 0) { + return ret; + } + + /* Now flush the underlying protocol. It will also have BDRV_O_NO_FLUSH + * in the case of cache=unsafe, so there are no useless flushes. + */ +flush_parent: + return bdrv_co_flush(bs->file); +} + +int bdrv_flush(BlockDriverState *bs) +{ + Coroutine *co; + RwCo rwco = { + .bs = bs, + .ret = NOT_DONE, + }; + + if (qemu_in_coroutine()) { + /* Fast-path if already in coroutine context */ + bdrv_flush_co_entry(&rwco); + } else { + AioContext *aio_context = bdrv_get_aio_context(bs); + + co = qemu_coroutine_create(bdrv_flush_co_entry); + qemu_coroutine_enter(co, &rwco); + while (rwco.ret == NOT_DONE) { + aio_poll(aio_context, true); + } + } + + return rwco.ret; +} + +typedef struct DiscardCo { + BlockDriverState *bs; + int64_t sector_num; + int nb_sectors; + int ret; +} DiscardCo; +static void coroutine_fn bdrv_discard_co_entry(void *opaque) +{ + DiscardCo *rwco = opaque; + + rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors); +} + +int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num, + int nb_sectors) +{ + int max_discard, ret; + + if (!bs->drv) { + return -ENOMEDIUM; + } + + ret = bdrv_check_request(bs, sector_num, nb_sectors); + if (ret < 0) { + return ret; + } else if (bs->read_only) { + return -EROFS; + } + + bdrv_reset_dirty(bs, sector_num, nb_sectors); + + /* Do nothing if disabled. 
*/ + if (!(bs->open_flags & BDRV_O_UNMAP)) { + return 0; + } + + if (!bs->drv->bdrv_co_discard && !bs->drv->bdrv_aio_discard) { + return 0; + } + + max_discard = MIN_NON_ZERO(bs->bl.max_discard, BDRV_REQUEST_MAX_SECTORS); + while (nb_sectors > 0) { + int ret; + int num = nb_sectors; + + /* align request */ + if (bs->bl.discard_alignment && + num >= bs->bl.discard_alignment && + sector_num % bs->bl.discard_alignment) { + if (num > bs->bl.discard_alignment) { + num = bs->bl.discard_alignment; + } + num -= sector_num % bs->bl.discard_alignment; + } + + /* limit request size */ + if (num > max_discard) { + num = max_discard; + } + + if (bs->drv->bdrv_co_discard) { + ret = bs->drv->bdrv_co_discard(bs, sector_num, num); + } else { + BlockAIOCB *acb; + CoroutineIOCompletion co = { + .coroutine = qemu_coroutine_self(), + }; + + acb = bs->drv->bdrv_aio_discard(bs, sector_num, nb_sectors, + bdrv_co_io_em_complete, &co); + if (acb == NULL) { + return -EIO; + } else { + qemu_coroutine_yield(); + ret = co.ret; + } + } + if (ret && ret != -ENOTSUP) { + return ret; + } + + sector_num += num; + nb_sectors -= num; + } + return 0; +} + +int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors) +{ + Coroutine *co; + DiscardCo rwco = { + .bs = bs, + .sector_num = sector_num, + .nb_sectors = nb_sectors, + .ret = NOT_DONE, + }; + + if (qemu_in_coroutine()) { + /* Fast-path if already in coroutine context */ + bdrv_discard_co_entry(&rwco); + } else { + AioContext *aio_context = bdrv_get_aio_context(bs); + + co = qemu_coroutine_create(bdrv_discard_co_entry); + qemu_coroutine_enter(co, &rwco); + while (rwco.ret == NOT_DONE) { + aio_poll(aio_context, true); + } + } + + return rwco.ret; +} + +/* needed for generic scsi interface */ + +int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf) +{ + BlockDriver *drv = bs->drv; + + if (drv && drv->bdrv_ioctl) + return drv->bdrv_ioctl(bs, req, buf); + return -ENOTSUP; +} + +BlockAIOCB *bdrv_aio_ioctl(BlockDriverState *bs, + unsigned long int req, void *buf, + BlockCompletionFunc *cb, void *opaque) +{ + BlockDriver *drv = bs->drv; + + if (drv && drv->bdrv_aio_ioctl) + return drv->bdrv_aio_ioctl(bs, req, buf, cb, opaque); + return NULL; +} + +void *qemu_blockalign(BlockDriverState *bs, size_t size) +{ + return qemu_memalign(bdrv_opt_mem_align(bs), size); +} + +void *qemu_blockalign0(BlockDriverState *bs, size_t size) +{ + return memset(qemu_blockalign(bs, size), 0, size); +} + +void *qemu_try_blockalign(BlockDriverState *bs, size_t size) +{ + size_t align = bdrv_opt_mem_align(bs); + + /* Ensure that NULL is never returned on success */ + assert(align > 0); + if (size == 0) { + size = align; + } + + return qemu_try_memalign(align, size); +} + +void *qemu_try_blockalign0(BlockDriverState *bs, size_t size) +{ + void *mem = qemu_try_blockalign(bs, size); + + if (mem) { + memset(mem, 0, size); + } + + return mem; +} + +/* + * Check if all memory in this vector is sector aligned. 
+ */ +bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov) +{ + int i; + size_t alignment = bdrv_opt_mem_align(bs); + + for (i = 0; i < qiov->niov; i++) { + if ((uintptr_t) qiov->iov[i].iov_base % alignment) { + return false; + } + if (qiov->iov[i].iov_len % alignment) { + return false; + } + } + + return true; +} + +void bdrv_add_before_write_notifier(BlockDriverState *bs, + NotifierWithReturn *notifier) +{ + notifier_with_return_list_add(&bs->before_write_notifiers, notifier); +} + +void bdrv_io_plug(BlockDriverState *bs) +{ + BlockDriver *drv = bs->drv; + if (drv && drv->bdrv_io_plug) { + drv->bdrv_io_plug(bs); + } else if (bs->file) { + bdrv_io_plug(bs->file); + } +} + +void bdrv_io_unplug(BlockDriverState *bs) +{ + BlockDriver *drv = bs->drv; + if (drv && drv->bdrv_io_unplug) { + drv->bdrv_io_unplug(bs); + } else if (bs->file) { + bdrv_io_unplug(bs->file); + } +} + +void bdrv_flush_io_queue(BlockDriverState *bs) +{ + BlockDriver *drv = bs->drv; + if (drv && drv->bdrv_flush_io_queue) { + drv->bdrv_flush_io_queue(bs); + } else if (bs->file) { + bdrv_flush_io_queue(bs->file); + } +} diff --git a/block/iscsi.c b/block/iscsi.c index ba33290000..8fca1d32cb 100644 --- a/block/iscsi.c +++ b/block/iscsi.c @@ -2,7 +2,7 @@ * QEMU Block driver for iSCSI images * * Copyright (c) 2010-2011 Ronnie Sahlberg <ronniesahlberg@gmail.com> - * Copyright (c) 2012-2014 Peter Lieven <pl@kamp.de> + * Copyright (c) 2012-2015 Peter Lieven <pl@kamp.de> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal @@ -57,9 +57,6 @@ typedef struct IscsiLun { int events; QEMUTimer *nop_timer; QEMUTimer *event_timer; - uint8_t lbpme; - uint8_t lbprz; - uint8_t has_write_same; struct scsi_inquiry_logical_block_provisioning lbp; struct scsi_inquiry_block_limits bl; unsigned char *zeroblock; @@ -67,6 +64,11 @@ typedef struct IscsiLun { int cluster_sectors; bool use_16_for_rw; bool write_protected; + bool lbpme; + bool lbprz; + bool dpofua; + bool has_write_same; + bool force_next_flush; } IscsiLun; typedef struct IscsiTask { @@ -79,6 +81,7 @@ typedef struct IscsiTask { QEMUBH *bh; IscsiLun *iscsilun; QEMUTimer retry_timer; + bool force_next_flush; } IscsiTask; typedef struct IscsiAIOCB { @@ -100,7 +103,7 @@ typedef struct IscsiAIOCB { #define NOP_INTERVAL 5000 #define MAX_NOP_FAILURES 3 #define ISCSI_CMD_RETRIES ARRAY_SIZE(iscsi_retry_times) -static const unsigned iscsi_retry_times[] = {8, 32, 128, 512, 2048}; +static const unsigned iscsi_retry_times[] = {8, 32, 128, 512, 2048, 8192, 32768}; /* this threshold is a trade-off knob to choose between * the potential additional overhead of an extra GET_LBA_STATUS request @@ -183,10 +186,13 @@ iscsi_co_generic_cb(struct iscsi_context *iscsi, int status, iTask->do_retry = 1; goto out; } - if (status == SCSI_STATUS_BUSY) { + /* status 0x28 is SCSI_TASK_SET_FULL. It was first introduced + * in libiscsi 1.10.0. 
Hardcode this value here to avoid + * the need to bump the libiscsi requirement to 1.10.0 */ + if (status == SCSI_STATUS_BUSY || status == 0x28) { unsigned retry_time = exp_random(iscsi_retry_times[iTask->retries - 1]); - error_report("iSCSI Busy (retry #%u in %u ms): %s", + error_report("iSCSI Busy/TaskSetFull (retry #%u in %u ms): %s", iTask->retries, retry_time, iscsi_get_error(iscsi)); aio_timer_init(iTask->iscsilun->aio_context, @@ -199,6 +205,8 @@ iscsi_co_generic_cb(struct iscsi_context *iscsi, int status, } } error_report("iSCSI Failure: %s", iscsi_get_error(iscsi)); + } else { + iTask->iscsilun->force_next_flush |= iTask->force_next_flush; } out: @@ -369,6 +377,7 @@ static int coroutine_fn iscsi_co_writev(BlockDriverState *bs, struct IscsiTask iTask; uint64_t lba; uint32_t num_sectors; + int fua; if (!is_request_lun_aligned(sector_num, nb_sectors, iscsilun)) { return -EINVAL; @@ -384,15 +393,17 @@ static int coroutine_fn iscsi_co_writev(BlockDriverState *bs, num_sectors = sector_qemu2lun(nb_sectors, iscsilun); iscsi_co_init_iscsitask(iscsilun, &iTask); retry: + fua = iscsilun->dpofua && !bs->enable_write_cache; + iTask.force_next_flush = !fua; if (iscsilun->use_16_for_rw) { iTask.task = iscsi_write16_task(iscsilun->iscsi, iscsilun->lun, lba, NULL, num_sectors * iscsilun->block_size, - iscsilun->block_size, 0, 0, 0, 0, 0, + iscsilun->block_size, 0, 0, fua, 0, 0, iscsi_co_generic_cb, &iTask); } else { iTask.task = iscsi_write10_task(iscsilun->iscsi, iscsilun->lun, lba, NULL, num_sectors * iscsilun->block_size, - iscsilun->block_size, 0, 0, 0, 0, 0, + iscsilun->block_size, 0, 0, fua, 0, 0, iscsi_co_generic_cb, &iTask); } if (iTask.task == NULL) { @@ -460,7 +471,7 @@ static int64_t coroutine_fn iscsi_co_get_block_status(BlockDriverState *bs, *pnum = nb_sectors; /* LUN does not support logical block provisioning */ - if (iscsilun->lbpme == 0) { + if (!iscsilun->lbpme) { goto out; } @@ -620,8 +631,12 @@ static int coroutine_fn iscsi_co_flush(BlockDriverState *bs) return 0; } - iscsi_co_init_iscsitask(iscsilun, &iTask); + if (!iscsilun->force_next_flush) { + return 0; + } + iscsilun->force_next_flush = false; + iscsi_co_init_iscsitask(iscsilun, &iTask); retry: if (iscsi_synchronizecache10_task(iscsilun->iscsi, iscsilun->lun, 0, 0, 0, 0, iscsi_co_generic_cb, &iTask) == NULL) { @@ -917,6 +932,7 @@ coroutine_fn iscsi_co_write_zeroes(BlockDriverState *bs, int64_t sector_num, } iscsi_co_init_iscsitask(iscsilun, &iTask); + iTask.force_next_flush = true; retry: if (use_16_for_ws) { iTask.task = iscsi_writesame16_task(iscsilun->iscsi, iscsilun->lun, lba, @@ -1121,8 +1137,8 @@ static void iscsi_readcapacity_sync(IscsiLun *iscsilun, Error **errp) } else { iscsilun->block_size = rc16->block_length; iscsilun->num_blocks = rc16->returned_lba + 1; - iscsilun->lbpme = rc16->lbpme; - iscsilun->lbprz = rc16->lbprz; + iscsilun->lbpme = !!rc16->lbpme; + iscsilun->lbprz = !!rc16->lbprz; iscsilun->use_16_for_rw = (rc16->returned_lba > 0xffffffff); } } @@ -1253,11 +1269,12 @@ static void iscsi_attach_aio_context(BlockDriverState *bs, iscsi_timed_set_events, iscsilun); } -static bool iscsi_is_write_protected(IscsiLun *iscsilun) +static void iscsi_modesense_sync(IscsiLun *iscsilun) { struct scsi_task *task; struct scsi_mode_sense *ms = NULL; - bool wrprotected = false; + iscsilun->write_protected = false; + iscsilun->dpofua = false; task = iscsi_modesense6_sync(iscsilun->iscsi, iscsilun->lun, 1, SCSI_MODESENSE_PC_CURRENT, @@ -1278,13 +1295,13 @@ static bool iscsi_is_write_protected(IscsiLun *iscsilun) 
iscsi_get_error(iscsilun->iscsi)); goto out; } - wrprotected = ms->device_specific_parameter & 0x80; + iscsilun->write_protected = ms->device_specific_parameter & 0x80; + iscsilun->dpofua = ms->device_specific_parameter & 0x10; out: if (task) { scsi_free_scsi_task(task); } - return wrprotected; } /* @@ -1403,7 +1420,8 @@ static int iscsi_open(BlockDriverState *bs, QDict *options, int flags, scsi_free_scsi_task(task); task = NULL; - iscsilun->write_protected = iscsi_is_write_protected(iscsilun); + iscsi_modesense_sync(iscsilun); + /* Check the write protect flag of the LUN if we want to write */ if (iscsilun->type == TYPE_DISK && (flags & BDRV_O_RDWR) && iscsilun->write_protected) { @@ -1481,7 +1499,7 @@ static int iscsi_open(BlockDriverState *bs, QDict *options, int flags, iscsilun->bl.opt_unmap_gran * iscsilun->block_size <= 16 * 1024 * 1024) { iscsilun->cluster_sectors = (iscsilun->bl.opt_unmap_gran * iscsilun->block_size) >> BDRV_SECTOR_BITS; - if (iscsilun->lbprz && !(bs->open_flags & BDRV_O_NOCACHE)) { + if (iscsilun->lbprz) { iscsilun->allocationmap = iscsi_allocationmap_init(iscsilun); if (iscsilun->allocationmap == NULL) { ret = -ENOMEM; @@ -1501,6 +1519,9 @@ out: if (ret) { if (iscsi != NULL) { + if (iscsi_is_logged_in(iscsi)) { + iscsi_logout_sync(iscsi); + } iscsi_destroy_context(iscsi); } memset(iscsilun, 0, sizeof(IscsiLun)); @@ -1514,6 +1535,9 @@ static void iscsi_close(BlockDriverState *bs) struct iscsi_context *iscsi = iscsilun->iscsi; iscsi_detach_aio_context(bs); + if (iscsi_is_logged_in(iscsi)) { + iscsi_logout_sync(iscsi); + } iscsi_destroy_context(iscsi); g_free(iscsilun->zeroblock); g_free(iscsilun->allocationmap); @@ -1649,7 +1673,7 @@ out: static int iscsi_get_info(BlockDriverState *bs, BlockDriverInfo *bdi) { IscsiLun *iscsilun = bs->opaque; - bdi->unallocated_blocks_are_zero = !!iscsilun->lbprz; + bdi->unallocated_blocks_are_zero = iscsilun->lbprz; bdi->can_write_zeroes_with_unmap = iscsilun->lbprz && iscsilun->lbp.lbpws; bdi->cluster_size = iscsilun->cluster_sectors * BDRV_SECTOR_SIZE; return 0; diff --git a/block/mirror.c b/block/mirror.c index 405616422b..58f391a6d6 100644 --- a/block/mirror.c +++ b/block/mirror.c @@ -125,11 +125,9 @@ static void mirror_write_complete(void *opaque, int ret) MirrorOp *op = opaque; MirrorBlockJob *s = op->s; if (ret < 0) { - BlockDriverState *source = s->common.bs; BlockErrorAction action; - bdrv_set_dirty_bitmap(source, s->dirty_bitmap, op->sector_num, - op->nb_sectors); + bdrv_set_dirty_bitmap(s->dirty_bitmap, op->sector_num, op->nb_sectors); action = mirror_error_action(s, false, -ret); if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) { s->ret = ret; @@ -143,11 +141,9 @@ static void mirror_read_complete(void *opaque, int ret) MirrorOp *op = opaque; MirrorBlockJob *s = op->s; if (ret < 0) { - BlockDriverState *source = s->common.bs; BlockErrorAction action; - bdrv_set_dirty_bitmap(source, s->dirty_bitmap, op->sector_num, - op->nb_sectors); + bdrv_set_dirty_bitmap(s->dirty_bitmap, op->sector_num, op->nb_sectors); action = mirror_error_action(s, true, -ret); if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) { s->ret = ret; @@ -170,10 +166,9 @@ static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s) s->sector_num = hbitmap_iter_next(&s->hbi); if (s->sector_num < 0) { - bdrv_dirty_iter_init(source, s->dirty_bitmap, &s->hbi); + bdrv_dirty_iter_init(s->dirty_bitmap, &s->hbi); s->sector_num = hbitmap_iter_next(&s->hbi); - trace_mirror_restart_iter(s, - bdrv_get_dirty_count(source, s->dirty_bitmap)); + 
trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap)); assert(s->sector_num >= 0); } @@ -288,8 +283,7 @@ static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s) next_sector += sectors_per_chunk; } - bdrv_reset_dirty_bitmap(source, s->dirty_bitmap, sector_num, - nb_sectors); + bdrv_reset_dirty_bitmap(s->dirty_bitmap, sector_num, nb_sectors); /* Copy the dirty cluster. */ s->in_flight++; @@ -446,7 +440,7 @@ static void coroutine_fn mirror_run(void *opaque) assert(n > 0); if (ret == 1) { - bdrv_set_dirty_bitmap(bs, s->dirty_bitmap, sector_num, n); + bdrv_set_dirty_bitmap(s->dirty_bitmap, sector_num, n); sector_num = next; } else { sector_num += n; @@ -454,7 +448,7 @@ static void coroutine_fn mirror_run(void *opaque) } } - bdrv_dirty_iter_init(bs, s->dirty_bitmap, &s->hbi); + bdrv_dirty_iter_init(s->dirty_bitmap, &s->hbi); last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); for (;;) { uint64_t delay_ns = 0; @@ -466,7 +460,7 @@ static void coroutine_fn mirror_run(void *opaque) goto immediate_exit; } - cnt = bdrv_get_dirty_count(bs, s->dirty_bitmap); + cnt = bdrv_get_dirty_count(s->dirty_bitmap); /* s->common.offset contains the number of bytes already processed so * far, cnt is the number of dirty sectors remaining and * s->sectors_in_flight is the number of sectors currently being @@ -475,7 +469,7 @@ static void coroutine_fn mirror_run(void *opaque) (cnt + s->sectors_in_flight) * BDRV_SECTOR_SIZE; /* Note that even when no rate limit is applied we need to yield - * periodically with no pending I/O so that qemu_aio_flush() returns. + * periodically with no pending I/O so that bdrv_drain_all() returns. * We do so every SLICE_TIME nanoseconds, or when there is an error, * or when the source is clean, whichever comes first. */ @@ -488,9 +482,6 @@ static void coroutine_fn mirror_run(void *opaque) continue; } else if (cnt != 0) { delay_ns = mirror_iteration(s); - if (delay_ns == 0) { - continue; - } } } @@ -516,7 +507,7 @@ static void coroutine_fn mirror_run(void *opaque) should_complete = s->should_complete || block_job_is_cancelled(&s->common); - cnt = bdrv_get_dirty_count(bs, s->dirty_bitmap); + cnt = bdrv_get_dirty_count(s->dirty_bitmap); } } @@ -531,7 +522,7 @@ static void coroutine_fn mirror_run(void *opaque) */ trace_mirror_before_drain(s, cnt); bdrv_drain(bs); - cnt = bdrv_get_dirty_count(bs, s->dirty_bitmap); + cnt = bdrv_get_dirty_count(s->dirty_bitmap); } ret = 0; @@ -634,7 +625,7 @@ static void mirror_complete(BlockJob *job, Error **errp) } s->should_complete = true; - block_job_resume(job); + block_job_enter(&s->common); } static const BlockJobDriver mirror_job_driver = { @@ -656,7 +647,7 @@ static const BlockJobDriver commit_active_job_driver = { static void mirror_start_job(BlockDriverState *bs, BlockDriverState *target, const char *replaces, - int64_t speed, int64_t granularity, + int64_t speed, uint32_t granularity, int64_t buf_size, BlockdevOnError on_source_error, BlockdevOnError on_target_error, @@ -668,15 +659,7 @@ static void mirror_start_job(BlockDriverState *bs, BlockDriverState *target, MirrorBlockJob *s; if (granularity == 0) { - /* Choose the default granularity based on the target file's cluster - * size, clamped between 4k and 64k. 
*/ - BlockDriverInfo bdi; - if (bdrv_get_info(target, &bdi) >= 0 && bdi.cluster_size != 0) { - granularity = MAX(4096, bdi.cluster_size); - granularity = MIN(65536, granularity); - } else { - granularity = 65536; - } + granularity = bdrv_get_default_bitmap_granularity(target); } assert ((granularity & (granularity - 1)) == 0); @@ -703,7 +686,7 @@ static void mirror_start_job(BlockDriverState *bs, BlockDriverState *target, s->granularity = granularity; s->buf_size = MAX(buf_size, granularity); - s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, errp); + s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp); if (!s->dirty_bitmap) { return; } @@ -717,7 +700,7 @@ static void mirror_start_job(BlockDriverState *bs, BlockDriverState *target, void mirror_start(BlockDriverState *bs, BlockDriverState *target, const char *replaces, - int64_t speed, int64_t granularity, int64_t buf_size, + int64_t speed, uint32_t granularity, int64_t buf_size, MirrorSyncMode mode, BlockdevOnError on_source_error, BlockdevOnError on_target_error, BlockCompletionFunc *cb, @@ -726,6 +709,10 @@ void mirror_start(BlockDriverState *bs, BlockDriverState *target, bool is_none_mode; BlockDriverState *base; + if (mode == MIRROR_SYNC_MODE_DIRTY_BITMAP) { + error_setg(errp, "Sync mode 'dirty-bitmap' not supported"); + return; + } is_none_mode = mode == MIRROR_SYNC_MODE_NONE; base = mode == MIRROR_SYNC_MODE_TOP ? bs->backing_hd : NULL; mirror_start_job(bs, target, replaces, diff --git a/block/null.c b/block/null.c index ec2bd27a4b..7d083233fb 100644 --- a/block/null.c +++ b/block/null.c @@ -12,8 +12,11 @@ #include "block/block_int.h" +#define NULL_OPT_LATENCY "latency-ns" + typedef struct { int64_t length; + int64_t latency_ns; } BDRVNullState; static QemuOptsList runtime_opts = { @@ -30,6 +33,12 @@ static QemuOptsList runtime_opts = { .type = QEMU_OPT_SIZE, .help = "size of the null block", }, + { + .name = NULL_OPT_LATENCY, + .type = QEMU_OPT_NUMBER, + .help = "nanoseconds (approximated) to wait " + "before completing request", + }, { /* end of list */ } }, }; @@ -39,13 +48,20 @@ static int null_file_open(BlockDriverState *bs, QDict *options, int flags, { QemuOpts *opts; BDRVNullState *s = bs->opaque; + int ret = 0; opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort); qemu_opts_absorb_qdict(opts, options, &error_abort); s->length = qemu_opt_get_size(opts, BLOCK_OPT_SIZE, 1 << 30); + s->latency_ns = + qemu_opt_get_number(opts, NULL_OPT_LATENCY, 0); + if (s->latency_ns < 0) { + error_setg(errp, "latency-ns is invalid"); + ret = -EINVAL; + } qemu_opts_del(opts); - return 0; + return ret; } static void null_close(BlockDriverState *bs) @@ -58,28 +74,40 @@ static int64_t null_getlength(BlockDriverState *bs) return s->length; } +static coroutine_fn int null_co_common(BlockDriverState *bs) +{ + BDRVNullState *s = bs->opaque; + + if (s->latency_ns) { + co_aio_sleep_ns(bdrv_get_aio_context(bs), QEMU_CLOCK_REALTIME, + s->latency_ns); + } + return 0; +} + static coroutine_fn int null_co_readv(BlockDriverState *bs, int64_t sector_num, int nb_sectors, QEMUIOVector *qiov) { - return 0; + return null_co_common(bs); } static coroutine_fn int null_co_writev(BlockDriverState *bs, int64_t sector_num, int nb_sectors, QEMUIOVector *qiov) { - return 0; + return null_co_common(bs); } static coroutine_fn int null_co_flush(BlockDriverState *bs) { - return 0; + return null_co_common(bs); } typedef struct { BlockAIOCB common; QEMUBH *bh; + QEMUTimer timer; } NullAIOCB; static const AIOCBInfo null_aiocb_info = { @@ 
-94,15 +122,33 @@ static void null_bh_cb(void *opaque) qemu_aio_unref(acb); } +static void null_timer_cb(void *opaque) +{ + NullAIOCB *acb = opaque; + acb->common.cb(acb->common.opaque, 0); + timer_deinit(&acb->timer); + qemu_aio_unref(acb); +} + static inline BlockAIOCB *null_aio_common(BlockDriverState *bs, BlockCompletionFunc *cb, void *opaque) { NullAIOCB *acb; + BDRVNullState *s = bs->opaque; acb = qemu_aio_get(&null_aiocb_info, bs, cb, opaque); - acb->bh = aio_bh_new(bdrv_get_aio_context(bs), null_bh_cb, acb); - qemu_bh_schedule(acb->bh); + /* Only emulate latency after vcpu is running. */ + if (s->latency_ns) { + aio_timer_init(bdrv_get_aio_context(bs), &acb->timer, + QEMU_CLOCK_REALTIME, SCALE_NS, + null_timer_cb, acb); + timer_mod_ns(&acb->timer, + qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + s->latency_ns); + } else { + acb->bh = aio_bh_new(bdrv_get_aio_context(bs), null_bh_cb, acb); + qemu_bh_schedule(acb->bh); + } return &acb->common; } @@ -131,6 +177,12 @@ static BlockAIOCB *null_aio_flush(BlockDriverState *bs, return null_aio_common(bs, cb, opaque); } +static int null_reopen_prepare(BDRVReopenState *reopen_state, + BlockReopenQueue *queue, Error **errp) +{ + return 0; +} + static BlockDriver bdrv_null_co = { .format_name = "null-co", .protocol_name = "null-co", @@ -143,6 +195,7 @@ static BlockDriver bdrv_null_co = { .bdrv_co_readv = null_co_readv, .bdrv_co_writev = null_co_writev, .bdrv_co_flush_to_disk = null_co_flush, + .bdrv_reopen_prepare = null_reopen_prepare, }; static BlockDriver bdrv_null_aio = { @@ -157,6 +210,7 @@ static BlockDriver bdrv_null_aio = { .bdrv_aio_readv = null_aio_readv, .bdrv_aio_writev = null_aio_writev, .bdrv_aio_flush = null_aio_flush, + .bdrv_reopen_prepare = null_reopen_prepare, }; static void bdrv_null_init(void) diff --git a/block/qapi.c b/block/qapi.c index 8a19aed446..063dd1bc1f 100644 --- a/block/qapi.c +++ b/block/qapi.c @@ -31,8 +31,10 @@ #include "qapi/qmp/types.h" #include "sysemu/block-backend.h" -BlockDeviceInfo *bdrv_block_device_info(BlockDriverState *bs) +BlockDeviceInfo *bdrv_block_device_info(BlockDriverState *bs, Error **errp) { + ImageInfo **p_image_info; + BlockDriverState *bs0; BlockDeviceInfo *info = g_malloc0(sizeof(*info)); info->file = g_strdup(bs->filename); @@ -92,6 +94,25 @@ BlockDeviceInfo *bdrv_block_device_info(BlockDriverState *bs) info->write_threshold = bdrv_write_threshold_get(bs); + bs0 = bs; + p_image_info = &info->image; + while (1) { + Error *local_err = NULL; + bdrv_query_image_info(bs0, p_image_info, &local_err); + if (local_err) { + error_propagate(errp, local_err); + qapi_free_BlockDeviceInfo(info); + return NULL; + } + if (bs0->drv && bs0->backing_hd) { + bs0 = bs0->backing_hd; + (*p_image_info)->has_backing_image = true; + p_image_info = &((*p_image_info)->backing_image); + } else { + break; + } + } + return info; } @@ -264,9 +285,6 @@ static void bdrv_query_info(BlockBackend *blk, BlockInfo **p_info, { BlockInfo *info = g_malloc0(sizeof(*info)); BlockDriverState *bs = blk_bs(blk); - BlockDriverState *bs0; - ImageInfo **p_image_info; - Error *local_err = NULL; info->device = g_strdup(blk_name(blk)); info->type = g_strdup("unknown"); info->locked = blk_dev_is_medium_locked(blk); @@ -289,23 +307,9 @@ static void bdrv_query_info(BlockBackend *blk, BlockInfo **p_info, if (bs->drv) { info->has_inserted = true; - info->inserted = bdrv_block_device_info(bs); - - bs0 = bs; - p_image_info = &info->inserted->image; - while (1) { - bdrv_query_image_info(bs0, p_image_info, &local_err); - if (local_err) { - 
error_propagate(errp, local_err); - goto err; - } - if (bs0->drv && bs0->backing_hd) { - bs0 = bs0->backing_hd; - (*p_image_info)->has_backing_image = true; - p_image_info = &((*p_image_info)->backing_image); - } else { - break; - } + info->inserted = bdrv_block_device_info(bs, errp); + if (info->inserted == NULL) { + goto err; } } diff --git a/block/qcow.c b/block/qcow.c index 055896910e..ab893284d2 100644 --- a/block/qcow.c +++ b/block/qcow.c @@ -124,7 +124,7 @@ static int qcow_open(BlockDriverState *bs, QDict *options, int flags, snprintf(version, sizeof(version), "QCOW version %" PRIu32, header.version); error_set(errp, QERR_UNKNOWN_BLOCK_FORMAT_FEATURE, - bdrv_get_device_name(bs), "qcow", version); + bdrv_get_device_or_node_name(bs), "qcow", version); ret = -ENOTSUP; goto fail; } @@ -229,9 +229,9 @@ static int qcow_open(BlockDriverState *bs, QDict *options, int flags, } /* Disable migration when qcow images are used */ - error_set(&s->migration_blocker, - QERR_BLOCK_FORMAT_FEATURE_NOT_SUPPORTED, - "qcow", bdrv_get_device_name(bs), "live migration"); + error_setg(&s->migration_blocker, "The qcow format used by node '%s' " + "does not support live migration", + bdrv_get_device_or_node_name(bs)); migrate_add_blocker(s->migration_blocker); qemu_co_mutex_init(&s->lock); diff --git a/block/qcow2-refcount.c b/block/qcow2-refcount.c index 6cbae1d205..f47260b808 100644 --- a/block/qcow2-refcount.c +++ b/block/qcow2-refcount.c @@ -2450,7 +2450,7 @@ int qcow2_pre_write_overlap_check(BlockDriverState *bs, int ign, int64_t offset, if (ret < 0) { return ret; } else if (ret > 0) { - int metadata_ol_bitnr = ffs(ret) - 1; + int metadata_ol_bitnr = ctz32(ret); assert(metadata_ol_bitnr < QCOW2_OL_MAX_BITNR); qcow2_signal_corruption(bs, true, offset, size, "Preventing invalid " diff --git a/block/qcow2-snapshot.c b/block/qcow2-snapshot.c index 2aa9dcb1d1..17bb2119b2 100644 --- a/block/qcow2-snapshot.c +++ b/block/qcow2-snapshot.c @@ -351,10 +351,8 @@ int qcow2_snapshot_create(BlockDriverState *bs, QEMUSnapshotInfo *sn_info) memset(sn, 0, sizeof(*sn)); - /* Generate an ID if it wasn't passed */ - if (sn_info->id_str[0] == '\0') { - find_new_snapshot_id(bs, sn_info->id_str, sizeof(sn_info->id_str)); - } + /* Generate an ID */ + find_new_snapshot_id(bs, sn_info->id_str, sizeof(sn_info->id_str)); /* Check that the ID is unique */ if (find_snapshot_by_id_and_name(bs, sn_info->id_str, NULL) >= 0) { diff --git a/block/qcow2.c b/block/qcow2.c index 316a8db22b..b9a72e39d4 100644 --- a/block/qcow2.c +++ b/block/qcow2.c @@ -208,7 +208,7 @@ static void GCC_FMT_ATTR(3, 4) report_unsupported(BlockDriverState *bs, va_end(ap); error_set(errp, QERR_UNKNOWN_BLOCK_FORMAT_FEATURE, - bdrv_get_device_name(bs), "qcow2", msg); + bdrv_get_device_or_node_name(bs), "qcow2", msg); } static void report_unsupported_feature(BlockDriverState *bs, @@ -1802,7 +1802,7 @@ static int qcow2_create2(const char *filename, int64_t total_size, { /* Calculate cluster_bits */ int cluster_bits; - cluster_bits = ffs(cluster_size) - 1; + cluster_bits = ctz32(cluster_size); if (cluster_bits < MIN_CLUSTER_BITS || cluster_bits > MAX_CLUSTER_BITS || (1 << cluster_bits) != cluster_size) { @@ -2110,7 +2110,7 @@ static int qcow2_create(const char *filename, QemuOpts *opts, Error **errp) goto finish; } - refcount_order = ffs(refcount_bits) - 1; + refcount_order = ctz32(refcount_bits); ret = qcow2_create2(filename, size, backing_file, backing_fmt, flags, cluster_size, prealloc, opts, version, refcount_order, @@ -2824,6 +2824,7 @@ void 
qcow2_signal_corruption(BlockDriverState *bs, bool fatal, int64_t offset, int64_t size, const char *message_format, ...) { BDRVQcowState *s = bs->opaque; + const char *node_name; char *message; va_list ap; @@ -2847,8 +2848,11 @@ void qcow2_signal_corruption(BlockDriverState *bs, bool fatal, int64_t offset, "corruption events will be suppressed\n", message); } - qapi_event_send_block_image_corrupted(bdrv_get_device_name(bs), message, - offset >= 0, offset, size >= 0, size, + node_name = bdrv_get_node_name(bs); + qapi_event_send_block_image_corrupted(bdrv_get_device_name(bs), + *node_name != '\0', node_name, + message, offset >= 0, offset, + size >= 0, size, fatal, &error_abort); g_free(message); diff --git a/block/qed.c b/block/qed.c index 892b13c806..5bbe069ce9 100644 --- a/block/qed.c +++ b/block/qed.c @@ -408,7 +408,7 @@ static int bdrv_qed_open(BlockDriverState *bs, QDict *options, int flags, snprintf(buf, sizeof(buf), "%" PRIx64, s->header.features & ~QED_FEATURE_MASK); error_set(errp, QERR_UNKNOWN_BLOCK_FORMAT_FEATURE, - bdrv_get_device_name(bs), "QED", buf); + bdrv_get_device_or_node_name(bs), "QED", buf); return -ENOTSUP; } if (!qed_is_cluster_size_valid(s->header.cluster_size)) { @@ -436,9 +436,9 @@ static int bdrv_qed_open(BlockDriverState *bs, QDict *options, int flags, s->table_nelems = (s->header.cluster_size * s->header.table_size) / sizeof(uint64_t); - s->l2_shift = ffs(s->header.cluster_size) - 1; + s->l2_shift = ctz32(s->header.cluster_size); s->l2_mask = s->table_nelems - 1; - s->l1_shift = s->l2_shift + ffs(s->table_nelems) - 1; + s->l1_shift = s->l2_shift + ctz32(s->table_nelems); /* Header size calculation must not overflow uint32_t */ if (s->header.header_size > UINT32_MAX / s->header.cluster_size) { diff --git a/block/quorum.c b/block/quorum.c index 437b12251d..f91ef75a84 100644 --- a/block/quorum.c +++ b/block/quorum.c @@ -226,10 +226,7 @@ static void quorum_report_bad(QuorumAIOCB *acb, char *node_name, int ret) static void quorum_report_failure(QuorumAIOCB *acb) { - const char *reference = bdrv_get_device_name(acb->common.bs)[0] ? - bdrv_get_device_name(acb->common.bs) : - acb->common.bs->node_name; - + const char *reference = bdrv_get_device_or_node_name(acb->common.bs); qapi_event_send_quorum_failure(reference, acb->sector_num, acb->nb_sectors, &error_abort); } diff --git a/block/rbd.c b/block/rbd.c index f3ab2ddd5a..fbe87e035b 100644 --- a/block/rbd.c +++ b/block/rbd.c @@ -325,7 +325,7 @@ static int qemu_rbd_create(const char *filename, QemuOpts *opts, Error **errp) error_setg(errp, "obj size too small"); return -EINVAL; } - obj_order = ffs(objsize) - 1; + obj_order = ctz32(objsize); } clientname = qemu_rbd_parse_clientname(conf, clientname_buf); diff --git a/block/sheepdog.c b/block/sheepdog.c index c14172cfa6..bd7cbed048 100644 --- a/block/sheepdog.c +++ b/block/sheepdog.c @@ -1716,7 +1716,7 @@ static int parse_block_size_shift(BDRVSheepdogState *s, QemuOpts *opt) if ((object_size - 1) & object_size) { /* not a power of 2? 
*/ return -EINVAL; } - obj_order = ffs(object_size) - 1; + obj_order = ctz32(object_size); if (obj_order < 20 || obj_order > 31) { return -EINVAL; } @@ -2341,6 +2341,7 @@ static int sd_snapshot_create(BlockDriverState *bs, QEMUSnapshotInfo *sn_info) if (ret < 0) { error_report("failed to create inode for snapshot: %s", error_get_pretty(local_err)); + error_free(local_err); goto cleanup; } diff --git a/block/snapshot.c b/block/snapshot.c index 698e1a1d58..50ae610139 100644 --- a/block/snapshot.c +++ b/block/snapshot.c @@ -246,9 +246,9 @@ int bdrv_snapshot_delete(BlockDriverState *bs, if (bs->file) { return bdrv_snapshot_delete(bs->file, snapshot_id, name, errp); } - error_set(errp, QERR_BLOCK_FORMAT_FEATURE_NOT_SUPPORTED, - drv->format_name, bdrv_get_device_name(bs), - "internal snapshot deletion"); + error_setg(errp, "Block format '%s' used by device '%s' " + "does not support internal snapshot deletion", + drv->format_name, bdrv_get_device_name(bs)); return -ENOTSUP; } @@ -329,9 +329,9 @@ int bdrv_snapshot_load_tmp(BlockDriverState *bs, if (drv->bdrv_snapshot_load_tmp) { return drv->bdrv_snapshot_load_tmp(bs, snapshot_id, name, errp); } - error_set(errp, QERR_BLOCK_FORMAT_FEATURE_NOT_SUPPORTED, - drv->format_name, bdrv_get_device_name(bs), - "temporarily load internal snapshot"); + error_setg(errp, "Block format '%s' used by device '%s' " + "does not support temporarily loading internal snapshots", + drv->format_name, bdrv_get_device_name(bs)); return -ENOTSUP; } diff --git a/block/vdi.c b/block/vdi.c index 53bd02fe22..7642ef3597 100644 --- a/block/vdi.c +++ b/block/vdi.c @@ -502,9 +502,9 @@ static int vdi_open(BlockDriverState *bs, QDict *options, int flags, } /* Disable migration when vdi images are used */ - error_set(&s->migration_blocker, - QERR_BLOCK_FORMAT_FEATURE_NOT_SUPPORTED, - "vdi", bdrv_get_device_name(bs), "live migration"); + error_setg(&s->migration_blocker, "The vdi format used by node '%s' " + "does not support live migration", + bdrv_get_device_or_node_name(bs)); migrate_add_blocker(s->migration_blocker); qemu_co_mutex_init(&s->write_lock); diff --git a/block/vhdx.c b/block/vhdx.c index bb3ed45d5c..0776de7174 100644 --- a/block/vhdx.c +++ b/block/vhdx.c @@ -1002,9 +1002,9 @@ static int vhdx_open(BlockDriverState *bs, QDict *options, int flags, /* TODO: differencing files */ /* Disable migration when VHDX images are used */ - error_set(&s->migration_blocker, - QERR_BLOCK_FORMAT_FEATURE_NOT_SUPPORTED, - "vhdx", bdrv_get_device_name(bs), "live migration"); + error_setg(&s->migration_blocker, "The vhdx format used by node '%s' " + "does not support live migration", + bdrv_get_device_or_node_name(bs)); migrate_add_blocker(s->migration_blocker); return 0; @@ -1269,7 +1269,7 @@ static coroutine_fn int vhdx_co_writev(BlockDriverState *bs, int64_t sector_num, iov1.iov_base = qemu_blockalign(bs, iov1.iov_len); memset(iov1.iov_base, 0, iov1.iov_len); qemu_iovec_concat_iov(&hd_qiov, &iov1, 1, 0, - sinfo.block_offset); + iov1.iov_len); sectors_to_write += iov1.iov_len >> BDRV_SECTOR_BITS; } @@ -1285,7 +1285,7 @@ static coroutine_fn int vhdx_co_writev(BlockDriverState *bs, int64_t sector_num, iov2.iov_base = qemu_blockalign(bs, iov2.iov_len); memset(iov2.iov_base, 0, iov2.iov_len); qemu_iovec_concat_iov(&hd_qiov, &iov2, 1, 0, - sinfo.block_offset); + iov2.iov_len); sectors_to_write += iov2.iov_len >> BDRV_SECTOR_BITS; } } diff --git a/block/vmdk.c b/block/vmdk.c index 8410a158a2..1c5e2ef1b3 100644 --- a/block/vmdk.c +++ b/block/vmdk.c @@ -523,7 +523,7 @@ static int 
vmdk_open_vmfs_sparse(BlockDriverState *bs, } ret = vmdk_add_extent(bs, file, false, le32_to_cpu(header.disk_sectors), - le32_to_cpu(header.l1dir_offset) << 9, + (int64_t)le32_to_cpu(header.l1dir_offset) << 9, 0, le32_to_cpu(header.l1dir_size), 4096, @@ -669,7 +669,7 @@ static int vmdk_open_vmdk4(BlockDriverState *bs, snprintf(buf, sizeof(buf), "VMDK version %" PRId32, le32_to_cpu(header.version)); error_set(errp, QERR_UNKNOWN_BLOCK_FORMAT_FEATURE, - bdrv_get_device_name(bs), "vmdk", buf); + bdrv_get_device_or_node_name(bs), "vmdk", buf); return -ENOTSUP; } else if (le32_to_cpu(header.version) == 3 && (flags & BDRV_O_RDWR)) { /* VMware KB 2064959 explains that version 3 added support for @@ -962,9 +962,9 @@ static int vmdk_open(BlockDriverState *bs, QDict *options, int flags, qemu_co_mutex_init(&s->lock); /* Disable migration when VMDK images are used */ - error_set(&s->migration_blocker, - QERR_BLOCK_FORMAT_FEATURE_NOT_SUPPORTED, - "vmdk", bdrv_get_device_name(bs), "live migration"); + error_setg(&s->migration_blocker, "The vmdk format used by node '%s' " + "does not support live migration", + bdrv_get_device_or_node_name(bs)); migrate_add_blocker(s->migration_blocker); g_free(buf); return 0; diff --git a/block/vpc.c b/block/vpc.c index 43e768ee76..37572bab86 100644 --- a/block/vpc.c +++ b/block/vpc.c @@ -318,9 +318,9 @@ static int vpc_open(BlockDriverState *bs, QDict *options, int flags, qemu_co_mutex_init(&s->lock); /* Disable migration when VHD images are used */ - error_set(&s->migration_blocker, - QERR_BLOCK_FORMAT_FEATURE_NOT_SUPPORTED, - "vpc", bdrv_get_device_name(bs), "live migration"); + error_setg(&s->migration_blocker, "The vpc format used by node '%s' " + "does not support live migration", + bdrv_get_device_or_node_name(bs)); migrate_add_blocker(s->migration_blocker); return 0; diff --git a/block/vvfat.c b/block/vvfat.c index 9be632f404..e803589675 100644 --- a/block/vvfat.c +++ b/block/vvfat.c @@ -1180,9 +1180,10 @@ static int vvfat_open(BlockDriverState *bs, QDict *options, int flags, /* Disable migration when vvfat is used rw */ if (s->qcow) { - error_set(&s->migration_blocker, - QERR_BLOCK_FORMAT_FEATURE_NOT_SUPPORTED, - "vvfat (rw)", bdrv_get_device_name(bs), "live migration"); + error_setg(&s->migration_blocker, + "The vvfat (rw) format used by node '%s' " + "does not support live migration", + bdrv_get_device_or_node_name(bs)); migrate_add_blocker(s->migration_blocker); } diff --git a/blockdev.c b/blockdev.c index fbb3a79978..5eaf77e599 100644 --- a/blockdev.c +++ b/blockdev.c @@ -1164,6 +1164,68 @@ out_aio_context: return NULL; } +/** + * block_dirty_bitmap_lookup: + * Return a dirty bitmap (if present), after validating + * the node reference and bitmap names. + * + * @node: The name of the BDS node to search for bitmaps + * @name: The name of the bitmap to search for + * @pbs: Output pointer for BDS lookup, if desired. Can be NULL. + * @paio: Output pointer for aio_context acquisition, if desired. Can be NULL. + * @errp: Output pointer for error information. Can be NULL. + * + * @return: A bitmap object on success, or NULL on failure. 
+ */ +static BdrvDirtyBitmap *block_dirty_bitmap_lookup(const char *node, + const char *name, + BlockDriverState **pbs, + AioContext **paio, + Error **errp) +{ + BlockDriverState *bs; + BdrvDirtyBitmap *bitmap; + AioContext *aio_context; + + if (!node) { + error_setg(errp, "Node cannot be NULL"); + return NULL; + } + if (!name) { + error_setg(errp, "Bitmap name cannot be NULL"); + return NULL; + } + bs = bdrv_lookup_bs(node, node, NULL); + if (!bs) { + error_setg(errp, "Node '%s' not found", node); + return NULL; + } + + aio_context = bdrv_get_aio_context(bs); + aio_context_acquire(aio_context); + + bitmap = bdrv_find_dirty_bitmap(bs, name); + if (!bitmap) { + error_setg(errp, "Dirty bitmap '%s' not found", name); + goto fail; + } + + if (pbs) { + *pbs = bs; + } + if (paio) { + *paio = aio_context; + } else { + aio_context_release(aio_context); + } + + return bitmap; + + fail: + aio_context_release(aio_context); + return NULL; +} + /* New and old BlockDriverState structs for atomic group operations */ typedef struct BlkTransactionState BlkTransactionState; @@ -1248,13 +1310,14 @@ static void internal_snapshot_prepare(BlkTransactionState *common, } if (bdrv_is_read_only(bs)) { - error_set(errp, QERR_DEVICE_IS_READ_ONLY, device); + error_setg(errp, "Device '%s' is read only", device); return; } if (!bdrv_can_snapshot(bs)) { - error_set(errp, QERR_BLOCK_FORMAT_FEATURE_NOT_SUPPORTED, - bs->drv->format_name, device, "internal snapshot"); + error_setg(errp, "Block format '%s' used by device '%s' " + "does not support internal snapshots", + bs->drv->format_name, device); return; } @@ -1522,6 +1585,7 @@ static void drive_backup_prepare(BlkTransactionState *common, Error **errp) backup->sync, backup->has_mode, backup->mode, backup->has_speed, backup->speed, + backup->has_bitmap, backup->bitmap, backup->has_on_source_error, backup->on_source_error, backup->has_on_target_error, backup->on_target_error, &local_err); @@ -1953,6 +2017,102 @@ void qmp_block_set_io_throttle(const char *device, int64_t bps, int64_t bps_rd, aio_context_release(aio_context); } +void qmp_block_dirty_bitmap_add(const char *node, const char *name, + bool has_granularity, uint32_t granularity, + Error **errp) +{ + AioContext *aio_context; + BlockDriverState *bs; + + if (!name || name[0] == '\0') { + error_setg(errp, "Bitmap name cannot be empty"); + return; + } + + bs = bdrv_lookup_bs(node, node, errp); + if (!bs) { + return; + } + + aio_context = bdrv_get_aio_context(bs); + aio_context_acquire(aio_context); + + if (has_granularity) { + if (granularity < 512 || !is_power_of_2(granularity)) { + error_setg(errp, "Granularity must be power of 2 " + "and at least 512"); + goto out; + } + } else { + /* Default to cluster size, if available: */ + granularity = bdrv_get_default_bitmap_granularity(bs); + } + + bdrv_create_dirty_bitmap(bs, granularity, name, errp); + + out: + aio_context_release(aio_context); +} + +void qmp_block_dirty_bitmap_remove(const char *node, const char *name, + Error **errp) +{ + AioContext *aio_context; + BlockDriverState *bs; + BdrvDirtyBitmap *bitmap; + + bitmap = block_dirty_bitmap_lookup(node, name, &bs, &aio_context, errp); + if (!bitmap || !bs) { + return; + } + + if (bdrv_dirty_bitmap_frozen(bitmap)) { + error_setg(errp, + "Bitmap '%s' is currently frozen and cannot be removed", + name); + goto out; + } + bdrv_dirty_bitmap_make_anon(bitmap); + bdrv_release_dirty_bitmap(bs, bitmap); + + out: + aio_context_release(aio_context); +} + +/** + * Completely clear a bitmap, for the purposes of synchronizing a 
bitmap + * immediately after a full backup operation. + */ +void qmp_block_dirty_bitmap_clear(const char *node, const char *name, + Error **errp) +{ + AioContext *aio_context; + BdrvDirtyBitmap *bitmap; + BlockDriverState *bs; + + bitmap = block_dirty_bitmap_lookup(node, name, &bs, &aio_context, errp); + if (!bitmap || !bs) { + return; + } + + if (bdrv_dirty_bitmap_frozen(bitmap)) { + error_setg(errp, + "Bitmap '%s' is currently frozen and cannot be modified", + name); + goto out; + } else if (!bdrv_dirty_bitmap_enabled(bitmap)) { + error_setg(errp, + "Bitmap '%s' is currently disabled and cannot be cleared", + name); + goto out; + } + + bdrv_clear_dirty_bitmap(bitmap); + + out: + aio_context_release(aio_context); +} + int hmp_drive_del(Monitor *mon, const QDict *qdict, QObject **ret_data) { const char *id = qdict_get_str(qdict, "id"); @@ -2055,7 +2215,7 @@ void qmp_block_resize(bool has_device, const char *device, error_set(errp, QERR_UNSUPPORTED); break; case -EACCES: - error_set(errp, QERR_DEVICE_IS_READ_ONLY, device); + error_setg(errp, "Device '%s' is read only", device); break; case -EBUSY: error_set(errp, QERR_DEVICE_IN_USE, device); @@ -2270,6 +2430,7 @@ void qmp_drive_backup(const char *device, const char *target, enum MirrorSyncMode sync, bool has_mode, enum NewImageMode mode, bool has_speed, int64_t speed, + bool has_bitmap, const char *bitmap, bool has_on_source_error, BlockdevOnError on_source_error, bool has_on_target_error, BlockdevOnError on_target_error, Error **errp) @@ -2278,6 +2439,7 @@ void qmp_drive_backup(const char *device, const char *target, BlockDriverState *bs; BlockDriverState *target_bs; BlockDriverState *source = NULL; + BdrvDirtyBitmap *bmap = NULL; AioContext *aio_context; BlockDriver *drv = NULL; Error *local_err = NULL; @@ -2377,7 +2539,16 @@ void qmp_drive_backup(const char *device, const char *target, bdrv_set_aio_context(target_bs, aio_context); - backup_start(bs, target_bs, speed, sync, on_source_error, on_target_error, + if (has_bitmap) { + bmap = bdrv_find_dirty_bitmap(bs, bitmap); + if (!bmap) { + error_setg(errp, "Bitmap '%s' could not be found", bitmap); + goto out; + } + } + + backup_start(bs, target_bs, speed, sync, bmap, + on_source_error, on_target_error, block_job_cb, bs, &local_err); if (local_err != NULL) { bdrv_unref(target_bs); @@ -2391,7 +2562,7 @@ out: BlockDeviceInfoList *qmp_query_named_block_nodes(Error **errp) { - return bdrv_named_nodes_list(); + return bdrv_named_nodes_list(errp); } void qmp_blockdev_backup(const char *device, const char *target, @@ -2438,8 +2609,8 @@ void qmp_blockdev_backup(const char *device, const char *target, bdrv_ref(target_bs); bdrv_set_aio_context(target_bs, aio_context); - backup_start(bs, target_bs, speed, sync, on_source_error, on_target_error, - block_job_cb, bs, &local_err); + backup_start(bs, target_bs, speed, sync, NULL, on_source_error, + on_target_error, block_job_cb, bs, &local_err); if (local_err != NULL) { bdrv_unref(target_bs); error_propagate(errp, local_err); @@ -2699,7 +2870,7 @@ void qmp_block_job_cancel(const char *device, force = false; } - if (job->paused && !force) { + if (job->user_paused && !force) { error_setg(errp, "The block job for device '%s' is currently paused", device); goto out; @@ -2716,10 +2887,11 @@ void qmp_block_job_pause(const char *device, Error **errp) AioContext *aio_context; BlockJob *job = find_block_job(device, &aio_context, errp); - if (!job) { + if (!job || job->user_paused) { return; } + job->user_paused = true; trace_qmp_block_job_pause(job); 
block_job_pause(job); aio_context_release(aio_context); @@ -2730,10 +2902,11 @@ void qmp_block_job_resume(const char *device, Error **errp) AioContext *aio_context; BlockJob *job = find_block_job(device, &aio_context, errp); - if (!job) { + if (!job || !job->user_paused) { return; } + job->user_paused = false; trace_qmp_block_job_resume(job); block_job_resume(job); aio_context_release(aio_context); diff --git a/blockjob.c b/blockjob.c index ba2255d91f..2755465259 100644 --- a/blockjob.c +++ b/blockjob.c @@ -107,7 +107,7 @@ void block_job_set_speed(BlockJob *job, int64_t speed, Error **errp) void block_job_complete(BlockJob *job, Error **errp) { - if (job->paused || job->cancelled || !job->driver->complete) { + if (job->pause_count || job->cancelled || !job->driver->complete) { error_set(errp, QERR_BLOCK_JOB_NOT_READY, bdrv_get_device_name(job->bs)); return; @@ -118,17 +118,26 @@ void block_job_complete(BlockJob *job, Error **errp) void block_job_pause(BlockJob *job) { - job->paused = true; + job->pause_count++; } bool block_job_is_paused(BlockJob *job) { - return job->paused; + return job->pause_count > 0; } void block_job_resume(BlockJob *job) { - job->paused = false; + assert(job->pause_count > 0); + job->pause_count--; + if (job->pause_count) { + return; + } + block_job_enter(job); +} + +void block_job_enter(BlockJob *job) +{ block_job_iostatus_reset(job); if (job->co && !job->busy) { qemu_coroutine_enter(job->co, NULL); @@ -138,7 +147,7 @@ void block_job_resume(BlockJob *job) void block_job_cancel(BlockJob *job) { job->cancelled = true; - block_job_resume(job); + block_job_enter(job); } bool block_job_is_cancelled(BlockJob *job) @@ -258,7 +267,7 @@ BlockJobInfo *block_job_query(BlockJob *job) info->device = g_strdup(bdrv_get_device_name(job->bs)); info->len = job->len; info->busy = job->busy; - info->paused = job->paused; + info->paused = job->pause_count > 0; info->offset = job->offset; info->speed = job->speed; info->io_status = job->iostatus; @@ -335,6 +344,8 @@ BlockErrorAction block_job_error_action(BlockJob *job, BlockDriverState *bs, IO_OPERATION_TYPE_WRITE, action, &error_abort); if (action == BLOCK_ERROR_ACTION_STOP) { + /* make the pause user visible, which will be resumed from QMP. 
*/ + job->user_paused = true; block_job_pause(job); block_job_iostatus_set_err(job, error); if (bs != job->bs) { diff --git a/bsd-user/main.c b/bsd-user/main.c index 1bb27548f2..5bfaf5c421 100644 --- a/bsd-user/main.c +++ b/bsd-user/main.c @@ -905,7 +905,6 @@ int main(int argc, char **argv) #endif } tcg_exec_init(0); - cpu_exec_init_all(); /* NOTE: we need to init the CPU at this stage to get qemu_host_page_size */ cpu = cpu_init(cpu_model); @@ -336,6 +336,7 @@ libssh2="" vhdx="" quorum="" numa="" +tcmalloc="no" # parse CC options first for opt do @@ -1134,6 +1135,10 @@ for opt do ;; --enable-numa) numa="yes" ;; + --disable-tcmalloc) tcmalloc="no" + ;; + --enable-tcmalloc) tcmalloc="yes" + ;; *) echo "ERROR: unknown option $opt" echo "Try '$0 --help' for more information" @@ -1407,6 +1412,8 @@ Advanced options (experts only): --enable-quorum enable quorum block filter support --disable-numa disable libnuma support --enable-numa enable libnuma support + --disable-tcmalloc disable tcmalloc support + --enable-tcmalloc enable tcmalloc support NOTE: The object files are built at the place where configure is launched EOF @@ -1549,6 +1556,17 @@ if test "$static" = "yes" ; then fi fi +# Unconditional check for compiler __thread support + cat > $TMPC << EOF +static __thread int tls_var; +int main(void) { return tls_var; } +EOF + +if ! compile_prog "-Werror" "" ; then + error_exit "Your compiler does not support the __thread specifier for " \ + "Thread-Local Storage (TLS). Please upgrade to a version that does." +fi + if test "$pie" = ""; then case "$cpu-$targetos" in i386-Linux|x86_64-Linux|x32-Linux|i386-OpenBSD|x86_64-OpenBSD) @@ -3135,7 +3153,7 @@ else fi if test "$opengl" != "no" ; then - opengl_pkgs="gl" + opengl_pkgs="gl glesv2" if $pkg_config $opengl_pkgs x11 && test "$have_glx" = "yes"; then opengl_cflags="$($pkg_config --cflags $opengl_pkgs) $x11_cflags" opengl_libs="$($pkg_config --libs $opengl_pkgs) $x11_libs" @@ -3331,6 +3349,22 @@ EOF fi ########################################## +# tcmalloc probe + +if test "$tcmalloc" = "yes" ; then + cat > $TMPC << EOF +#include <stdlib.h> +int main(void) { malloc(1); return 0; } +EOF + + if compile_prog "" "-ltcmalloc" ; then + LIBS="-ltcmalloc $LIBS" + else + feature_not_found "tcmalloc" "install gperftools devel" + fi +fi + +########################################## # signalfd probe signalfd="no" cat > $TMPC << EOF @@ -4441,6 +4475,7 @@ echo "lzo support $lzo" echo "snappy support $snappy" echo "bzip2 support $bzip2" echo "NUMA host support $numa" +echo "tcmalloc support $tcmalloc" if test "$sdl_too_old" = "yes"; then echo "-> Your SDL version is too old - please upgrade to have SDL support" @@ -5169,8 +5204,6 @@ case "$target_name" in TARGET_BASE_ARCH=mips echo "TARGET_ABI_MIPSN64=y" >> $config_target_mak ;; - tricore) - ;; moxie) ;; or32) @@ -5221,6 +5254,8 @@ case "$target_name" in s390x) gdb_xml_files="s390x-core64.xml s390-acr.xml s390-fpr.xml" ;; + tricore) + ;; unicore32) ;; xtensa|xtensaeb) @@ -1016,7 +1016,7 @@ static void *qemu_tcg_cpu_thread_fn(void *arg) qemu_cond_signal(&qemu_cpu_cond); /* wait for initial kick-off after machine start */ - while (QTAILQ_FIRST(&cpus)->stopped) { + while (first_cpu->stopped) { qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex); /* process any pending work */ @@ -249,9 +249,9 @@ static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr, * Called from TCG-generated code, which is under an RCU read-side * critical section. 
*/ -void tlb_set_page(CPUState *cpu, target_ulong vaddr, - hwaddr paddr, int prot, - int mmu_idx, target_ulong size) +void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, + hwaddr paddr, MemTxAttrs attrs, int prot, + int mmu_idx, target_ulong size) { CPUArchState *env = cpu->env_ptr; MemoryRegionSection *section; @@ -301,7 +301,8 @@ void tlb_set_page(CPUState *cpu, target_ulong vaddr, env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index]; /* refill the tlb */ - env->iotlb[mmu_idx][index] = iotlb - vaddr; + env->iotlb[mmu_idx][index].addr = iotlb - vaddr; + env->iotlb[mmu_idx][index].attrs = attrs; te->addend = addend - vaddr; if (prot & PAGE_READ) { te->addr_read = address; @@ -331,6 +332,17 @@ void tlb_set_page(CPUState *cpu, target_ulong vaddr, } } +/* Add a new TLB entry, but without specifying the memory + * transaction attributes to be used. + */ +void tlb_set_page(CPUState *cpu, target_ulong vaddr, + hwaddr paddr, int prot, + int mmu_idx, target_ulong size) +{ + tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED, + prot, mmu_idx, size); +} + /* NOTE: this function can trigger an exception */ /* NOTE2: the returned address is not exactly the physical address: it * is actually a ram_addr_t (in system mode; the user mode emulation @@ -349,7 +361,7 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr) (addr & TARGET_PAGE_MASK))) { cpu_ldub_code(env1, addr); } - pd = env1->iotlb[mmu_idx][page_index] & ~TARGET_PAGE_MASK; + pd = env1->iotlb[mmu_idx][page_index].addr & ~TARGET_PAGE_MASK; mr = iotlb_to_region(cpu, pd); if (memory_region_is_unassigned(mr)) { CPUClass *cc = CPU_GET_CLASS(cpu); diff --git a/default-configs/microblazeel-softmmu.mak b/default-configs/microblazeel-softmmu.mak index acf22c5bb3..2fcf442fc7 100644 --- a/default-configs/microblazeel-softmmu.mak +++ b/default-configs/microblazeel-softmmu.mak @@ -1,11 +1,3 @@ # Default configuration for microblazeel-softmmu -CONFIG_PTIMER=y -CONFIG_PFLASH_CFI01=y -CONFIG_SERIAL=y -CONFIG_XILINX=y -CONFIG_XILINX_AXI=y -CONFIG_XILINX_SPI=y -CONFIG_XILINX_ETHLITE=y -CONFIG_SSI=y -CONFIG_SSI_M25P80=y +include microblaze-softmmu.mak diff --git a/dma-helpers.c b/dma-helpers.c index 6918572e18..4faec5d0ca 100644 --- a/dma-helpers.c +++ b/dma-helpers.c @@ -10,7 +10,6 @@ #include "sysemu/block-backend.h" #include "sysemu/dma.h" #include "trace.h" -#include "qemu/range.h" #include "qemu/thread.h" #include "qemu/main-loop.h" @@ -28,7 +27,8 @@ int dma_memory_set(AddressSpace *as, dma_addr_t addr, uint8_t c, dma_addr_t len) memset(fillbuf, c, FILLBUF_SIZE); while (len > 0) { l = len < FILLBUF_SIZE ? 
len : FILLBUF_SIZE; - error |= address_space_rw(as, addr, fillbuf, l, true); + error |= address_space_rw(as, addr, MEMTXATTRS_UNSPECIFIED, + fillbuf, l, true); len -= l; addr += l; } @@ -92,14 +92,6 @@ static void reschedule_dma(void *opaque) dma_blk_cb(dbs, 0); } -static void continue_after_map_failure(void *opaque) -{ - DMAAIOCB *dbs = (DMAAIOCB *)opaque; - - dbs->bh = qemu_bh_new(reschedule_dma, dbs); - qemu_bh_schedule(dbs->bh); -} - static void dma_blk_unmap(DMAAIOCB *dbs) { int i; @@ -161,7 +153,9 @@ static void dma_blk_cb(void *opaque, int ret) if (dbs->iov.size == 0) { trace_dma_map_wait(dbs); - cpu_register_map_client(dbs, continue_after_map_failure); + dbs->bh = aio_bh_new(blk_get_aio_context(dbs->blk), + reschedule_dma, dbs); + cpu_register_map_client(dbs->bh); return; } @@ -183,6 +177,11 @@ static void dma_aio_cancel(BlockAIOCB *acb) if (dbs->acb) { blk_aio_cancel_async(dbs->acb); } + if (dbs->bh) { + cpu_unregister_map_client(dbs->bh); + qemu_bh_delete(dbs->bh); + dbs->bh = NULL; + } } diff --git a/docs/atomics.txt b/docs/atomics.txt index 6f2997bc65..ef285e3c2a 100644 --- a/docs/atomics.txt +++ b/docs/atomics.txt @@ -281,7 +281,7 @@ note that the other barrier may actually be in a driver that runs in the guest! For the purposes of pairing, smp_read_barrier_depends() and smp_rmb() -both count as read barriers. A read barriers shall pair with a write +both count as read barriers. A read barrier shall pair with a write barrier or a full barrier; a write barrier shall pair with a read barrier or a full barrier. A full barrier can pair with anything. For example: @@ -294,7 +294,7 @@ For example: smp_rmb(); y = a; -Note that the "writing" thread are accessing the variables in the +Note that the "writing" thread is accessing the variables in the opposite order as the "reading" thread. This is expected: stores before the write barrier will normally match the loads after the read barrier, and vice versa. The same is true for more than 2 diff --git a/docs/bitmaps.md b/docs/bitmaps.md new file mode 100644 index 0000000000..f066b48aa5 --- /dev/null +++ b/docs/bitmaps.md @@ -0,0 +1,352 @@ +<!-- +Copyright 2015 John Snow <jsnow@redhat.com> and Red Hat, Inc. +All rights reserved. + +This file is licensed via The FreeBSD Documentation License, the full text of +which is included at the end of this document. +--> + +# Dirty Bitmaps and Incremental Backup + +* Dirty Bitmaps are objects that track which data needs to be backed up for the + next incremental backup. + +* Dirty bitmaps can be created at any time and attached to any node + (not just complete drives.) + +## Dirty Bitmap Names + +* A dirty bitmap's name is unique to the node, but bitmaps attached to different + nodes can share the same name. + +## Bitmap Modes + +* A Bitmap can be "frozen," which means that it is currently in-use by a backup + operation and cannot be deleted, renamed, written to, reset, + etc. + +## Basic QMP Usage + +### Supported Commands ### + +* block-dirty-bitmap-add +* block-dirty-bitmap-remove +* block-dirty-bitmap-clear + +### Creation + +* To create a new bitmap, enabled, on the drive with id=drive0: + +```json +{ "execute": "block-dirty-bitmap-add", + "arguments": { + "node": "drive0", + "name": "bitmap0" + } +} +``` + +* This bitmap will have a default granularity that matches the cluster size of + its associated drive, if available, clamped to between [4KiB, 64KiB]. + The current default for qcow2 is 64KiB. 
+ +* To create a new bitmap that tracks changes in 32KiB segments: + +```json +{ "execute": "block-dirty-bitmap-add", + "arguments": { + "node": "drive0", + "name": "bitmap0", + "granularity": 32768 + } +} +``` + +### Deletion + +* Bitmaps that are frozen cannot be deleted. + +* Deleting the bitmap does not impact any other bitmaps attached to the same + node, nor does it affect any backups already created from this node. + +* Because bitmaps are only unique to the node to which they are attached, + you must specify the node/drive name here, too. + +```json +{ "execute": "block-dirty-bitmap-remove", + "arguments": { + "node": "drive0", + "name": "bitmap0" + } +} +``` + +### Resetting + +* Resetting a bitmap will clear all information it holds. + +* An incremental backup created from an empty bitmap will copy no data, + as if nothing has changed. + +```json +{ "execute": "block-dirty-bitmap-clear", + "arguments": { + "node": "drive0", + "name": "bitmap0" + } +} +``` + +## Transactions (Not yet implemented) + +* Transactional commands are forthcoming in a future version, + and are not yet available for use. This section serves as + documentation of intent for their design and usage. + +### Justification + +Bitmaps can be safely modified when the VM is paused or halted by using +the basic QMP commands. For instance, you might perform the following actions: + +1. Boot the VM in a paused state. +2. Create a full drive backup of drive0. +3. Create a new bitmap attached to drive0. +4. Resume execution of the VM. +5. Incremental backups are ready to be created. + +At this point, the bitmap and drive backup would be correctly in sync, +and incremental backups made from this point forward would be correctly aligned +to the full drive backup. + +This is not particularly useful if we decide we want to start incremental +backups after the VM has been running for a while, for which we will need to +perform actions such as the following: + +1. Boot the VM and begin execution. +2. Using a single transaction, perform the following operations: + * Create bitmap0. + * Create a full drive backup of drive0. +3. Incremental backups are now ready to be created. + +### Supported Bitmap Transactions + +* block-dirty-bitmap-add +* block-dirty-bitmap-clear + +The usages are identical to their respective QMP commands, but see below +for examples. + +### Example: New Incremental Backup + +As outlined in the justification, perhaps we want to create a new incremental +backup chain attached to a drive. + +```json +{ "execute": "transaction", + "arguments": { + "actions": [ + {"type": "block-dirty-bitmap-add", + "data": {"node": "drive0", "name": "bitmap0"} }, + {"type": "drive-backup", + "data": {"device": "drive0", "target": "/path/to/full_backup.img", + "sync": "full", "format": "qcow2"} } + ] + } +} +``` + +### Example: New Incremental Backup Anchor Point + +Maybe we just want to create a new full backup with an existing bitmap and +want to reset the bitmap to track the new chain. + +```json +{ "execute": "transaction", + "arguments": { + "actions": [ + {"type": "block-dirty-bitmap-clear", + "data": {"node": "drive0", "name": "bitmap0"} }, + {"type": "drive-backup", + "data": {"device": "drive0", "target": "/path/to/new_full_backup.img", + "sync": "full", "format": "qcow2"} } + ] + } +} +``` + +## Incremental Backups + +The star of the show. + +**Nota Bene!** Only incremental backups of entire drives are supported for now. 
+So despite the fact that you can attach a bitmap to any arbitrary node, they are +only currently useful when attached to the root node. This is because +drive-backup only supports drives/devices instead of arbitrary nodes. + +### Example: First Incremental Backup + +1. Create a full backup and sync it to the dirty bitmap, as in the transactional +examples above; or with the VM offline, manually create a full copy and then +create a new bitmap before the VM begins execution. + + * Let's assume the full backup is named 'full_backup.img'. + * Let's assume the bitmap you created is 'bitmap0' attached to 'drive0'. + +2. Create a destination image for the incremental backup that utilizes the +full backup as a backing image. + + * Let's assume it is named 'incremental.0.img'. + + ```sh + # qemu-img create -f qcow2 incremental.0.img -b full_backup.img -F qcow2 + ``` + +3. Issue the incremental backup command: + + ```json + { "execute": "drive-backup", + "arguments": { + "device": "drive0", + "bitmap": "bitmap0", + "target": "incremental.0.img", + "format": "qcow2", + "sync": "dirty-bitmap", + "mode": "existing" + } + } + ``` + +### Example: Second Incremental Backup + +1. Create a new destination image for the incremental backup that points to the + previous one, e.g.: 'incremental.1.img' + + ```sh + # qemu-img create -f qcow2 incremental.1.img -b incremental.0.img -F qcow2 + ``` + +2. Issue a new incremental backup command. The only difference here is that we + have changed the target image below. + + ```json + { "execute": "drive-backup", + "arguments": { + "device": "drive0", + "bitmap": "bitmap0", + "target": "incremental.1.img", + "format": "qcow2", + "sync": "dirty-bitmap", + "mode": "existing" + } + } + ``` + +## Errors + +* In the event of an error that occurs after a backup job is successfully + launched, either by a direct QMP command or a QMP transaction, the user + will receive a BLOCK_JOB_COMPLETE event with a failure message, accompanied + by a BLOCK_JOB_ERROR event. + +* In the case of an event being cancelled, the user will receive a + BLOCK_JOB_CANCELLED event instead of a pair of COMPLETE and ERROR events. + +* In either case, the incremental backup data contained within the bitmap is + safely rolled back, and the data within the bitmap is not lost. The image + file created for the failed attempt can be safely deleted. + +* Once the underlying problem is fixed (e.g. more storage space is freed up), + you can simply retry the incremental backup command with the same bitmap. + +### Example + +1. Create a target image: + + ```sh + # qemu-img create -f qcow2 incremental.0.img -b full_backup.img -F qcow2 + ``` + +2. Attempt to create an incremental backup via QMP: + + ```json + { "execute": "drive-backup", + "arguments": { + "device": "drive0", + "bitmap": "bitmap0", + "target": "incremental.0.img", + "format": "qcow2", + "sync": "dirty-bitmap", + "mode": "existing" + } + } + ``` + +3. Receive an event notifying us of failure: + + ```json + { "timestamp": { "seconds": 1424709442, "microseconds": 844524 }, + "data": { "speed": 0, "offset": 0, "len": 67108864, + "error": "No space left on device", + "device": "drive1", "type": "backup" }, + "event": "BLOCK_JOB_COMPLETED" } + ``` + +4. Delete the failed incremental, and re-create the image. + + ```sh + # rm incremental.0.img + # qemu-img create -f qcow2 incremental.0.img -b full_backup.img -F qcow2 + ``` + +5. 
Retry the command after fixing the underlying problem, + such as freeing up space on the backup volume: + + ```json + { "execute": "drive-backup", + "arguments": { + "device": "drive0", + "bitmap": "bitmap0", + "target": "incremental.0.img", + "format": "qcow2", + "sync": "dirty-bitmap", + "mode": "existing" + } + } + ``` + +6. Receive confirmation that the job completed successfully: + + ```json + { "timestamp": { "seconds": 1424709668, "microseconds": 526525 }, + "data": { "device": "drive1", "type": "backup", + "speed": 0, "len": 67108864, "offset": 67108864}, + "event": "BLOCK_JOB_COMPLETED" } + ``` + +<!-- +The FreeBSD Documentation License + +Redistribution and use in source (Markdown) and 'compiled' forms (SGML, HTML, +PDF, PostScript, RTF and so forth) with or without modification, are permitted +provided that the following conditions are met: + +Redistributions of source code (Markdown) must retain the above copyright +notice, this list of conditions and the following disclaimer of this file +unmodified. + +Redistributions in compiled form (transformed to other DTDs, converted to PDF, +PostScript, RTF and other formats) must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation and/or +other materials provided with the distribution. + +THIS DOCUMENTATION IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +THIS DOCUMENTATION, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +--> diff --git a/docs/multi-thread-compression.txt b/docs/multi-thread-compression.txt new file mode 100644 index 0000000000..3d477c3bd2 --- /dev/null +++ b/docs/multi-thread-compression.txt @@ -0,0 +1,149 @@ +Use multiple thread (de)compression in live migration +===================================================== +Copyright (C) 2015 Intel Corporation +Author: Liang Li <liang.z.li@intel.com> + +This work is licensed under the terms of the GNU GPLv2 or later. See +the COPYING file in the top-level directory. + +Contents: +========= +* Introduction +* When to use +* Performance +* Usage +* TODO + +Introduction +============ +Instead of sending the guest memory directly, this solution will +compress the RAM page before sending; after receiving, the data will +be decompressed. Using compression in live migration can help +to reduce the data transferred about 60%, this is very useful when the +bandwidth is limited, and the total migration time can also be reduced +about 70% in a typical case. In addition to this, the VM downtime can be +reduced about 50%. The benefit depends on data's compressibility in VM. + +The process of compression will consume additional CPU cycles, and the +extra CPU cycles will increase the migration time. On the other hand, +the amount of data transferred will decrease; this factor can reduce +the total migration time. 
If the process of the compression is quick +enough, then the total migration time can be reduced, and multiple +thread compression can be used to accelerate the compression process. + +The decompression speed of Zlib is at least 4 times as quick as +compression, if the source and destination CPU have equal speed, +keeping the compression thread count 4 times the decompression +thread count can avoid resource waste. + +Compression level can be used to control the compression speed and the +compression ratio. High compression ratio will take more time, level 0 +stands for no compression, level 1 stands for the best compression +speed, and level 9 stands for the best compression ratio. Users can +select a level number between 0 and 9. + + +When to use the multiple thread compression in live migration +============================================================= +Compression of data will consume extra CPU cycles; so in a system with +high overhead of CPU, avoid using this feature. When the network +bandwidth is very limited and the CPU resource is adequate, use of +multiple thread compression will be very helpful. If both the CPU and +the network bandwidth are adequate, use of multiple thread compression +can still help to reduce the migration time. + +Performance +=========== +Test environment: + +CPU: Intel(R) Xeon(R) CPU E5-2680 0 @ 2.70GHz +Socket Count: 2 +RAM: 128G +NIC: Intel I350 (10/100/1000Mbps) +Host OS: CentOS 7 64-bit +Guest OS: RHEL 6.5 64-bit +Parameter: qemu-system-x86_64 -enable-kvm -smp 4 -m 4096 + /share/ia32e_rhel6u5.qcow -monitor stdio + +There is no additional application is running on the guest when doing +the test. + + +Speed limit: 1000Gb/s +--------------------------------------------------------------- + | original | compress thread: 8 + | way | decompress thread: 2 + | | compression level: 1 +--------------------------------------------------------------- +total time(msec): | 3333 | 1833 +--------------------------------------------------------------- +downtime(msec): | 100 | 27 +--------------------------------------------------------------- +transferred ram(kB):| 363536 | 107819 +--------------------------------------------------------------- +throughput(mbps): | 893.73 | 482.22 +--------------------------------------------------------------- +total ram(kB): | 4211524 | 4211524 +--------------------------------------------------------------- + +There is an application running on the guest which write random numbers +to RAM block areas periodically. + +Speed limit: 1000Gb/s +--------------------------------------------------------------- + | original | compress thread: 8 + | way | decompress thread: 2 + | | compression level: 1 +--------------------------------------------------------------- +total time(msec): | 37369 | 15989 +--------------------------------------------------------------- +downtime(msec): | 337 | 173 +--------------------------------------------------------------- +transferred ram(kB):| 4274143 | 1699824 +--------------------------------------------------------------- +throughput(mbps): | 936.99 | 870.95 +--------------------------------------------------------------- +total ram(kB): | 4211524 | 4211524 +--------------------------------------------------------------- + +Usage +===== +1. Verify both the source and destination QEMU are able +to support the multiple thread compression migration: + {qemu} info_migrate_capabilities + {qemu} ... compress: off ... + +2. 
Activate compression on the source: + {qemu} migrate_set_capability compress on + +3. Set the compression thread count on source: + {qemu} migrate_set_parameter compress_threads 12 + +4. Set the compression level on the source: + {qemu} migrate_set_parameter compress_level 1 + +5. Set the decompression thread count on destination: + {qemu} migrate_set_parameter decompress_threads 3 + +6. Start outgoing migration: + {qemu} migrate -d tcp:destination.host:4444 + {qemu} info migrate + Capabilities: ... compress: on + ... + +The following are the default settings: + compress: off + compress_threads: 8 + decompress_threads: 2 + compress_level: 1 (which means best speed) + +So, only the first two steps are required to use the multiple +thread compression in migration. You can do more if the default +settings are not appropriate. + +TODO +==== +Some faster (de)compression method such as LZ4 and Quicklz can help +to reduce the CPU consumption when doing (de)compression. If using +these faster (de)compression method, less (de)compression threads +are needed when doing the migration. diff --git a/docs/qapi-code-gen.txt b/docs/qapi-code-gen.txt index 8313ba6af8..269a1f3d27 100644 --- a/docs/qapi-code-gen.txt +++ b/docs/qapi-code-gen.txt @@ -1,61 +1,193 @@ = How to use the QAPI code generator = -QAPI is a native C API within QEMU which provides management-level -functionality to internal/external users. For external -users/processes, this interface is made available by a JSON-based -QEMU Monitor protocol that is provided by the QMP server. - -To map QMP-defined interfaces to the native C QAPI implementations, -a JSON-based schema is used to define types and function -signatures, and a set of scripts is used to generate types/signatures, -and marshaling/dispatch code. The QEMU Guest Agent also uses these -scripts, paired with a separate schema, to generate -marshaling/dispatch code for the guest agent server running in the -guest. +Copyright IBM Corp. 2011 +Copyright (C) 2012-2015 Red Hat, Inc. -This document will describe how the schemas, scripts, and resulting -code are used. +This work is licensed under the terms of the GNU GPL, version 2 or +later. See the COPYING file in the top-level directory. +== Introduction == -== QMP/Guest agent schema == - -This file defines the types, commands, and events used by QMP. It should -fully describe the interface used by QMP. +QAPI is a native C API within QEMU which provides management-level +functionality to internal and external users. For external +users/processes, this interface is made available by a JSON-based wire +format for the QEMU Monitor Protocol (QMP) for controlling qemu, as +well as the QEMU Guest Agent (QGA) for communicating with the guest. +The remainder of this document uses "Client JSON Protocol" when +referring to the wire contents of a QMP or QGA connection. -This file is designed to be loosely based on JSON although it's technically -executable Python. While dictionaries are used, they are parsed as -OrderedDicts so that ordering is preserved. +To map Client JSON Protocol interfaces to the native C QAPI +implementations, a JSON-based schema is used to define types and +function signatures, and a set of scripts is used to generate types, +signatures, and marshaling/dispatch code. This document will describe +how the schemas, scripts, and resulting code are used. -There are two basic syntaxes used, type definitions and command definitions. -The first syntax defines a type and is represented by a dictionary. 
There are -three kinds of user-defined types that are supported: complex types, -enumeration types and union types. +== QMP/Guest agent schema == -Generally speaking, types definitions should always use CamelCase for the type -names. Command names should be all lower case with words separated by a hyphen. +A QAPI schema file is designed to be loosely based on JSON +(http://www.ietf.org/rfc/rfc7159.txt) with changes for quoting style +and the use of comments; a QAPI schema file is then parsed by a python +code generation program. A valid QAPI schema consists of a series of +top-level expressions, with no commas between them. Where +dictionaries (JSON objects) are used, they are parsed as python +OrderedDicts so that ordering is preserved (for predictable layout of +generated C structs and parameter lists). Ordering doesn't matter +between top-level expressions or the keys within an expression, but +does matter within dictionary values for 'data' and 'returns' members +of a single expression. QAPI schema input is written using 'single +quotes' instead of JSON's "double quotes" (in contrast, Client JSON +Protocol uses no comments, and while input accepts 'single quotes' as +an extension, output is strict JSON using only "double quotes"). As +in JSON, trailing commas are not permitted in arrays or dictionaries. +Input must be ASCII (although QMP supports full Unicode strings, the +QAPI parser does not). At present, there is no place where a QAPI +schema requires the use of JSON numbers or null. + +Comments are allowed; anything between an unquoted # and the following +newline is ignored. Although there is not yet a documentation +generator, a form of stylized comments has developed for consistently +documenting details about an expression and when it was added to the +schema. The documentation is delimited between two lines of ##, then +the first line names the expression, an optional overview is provided, +then individual documentation about each member of 'data' is provided, +and finally, a 'Since: x.y.z' tag lists the release that introduced +the expression. Optional fields are tagged with the phrase +'#optional', often with their default value; and extensions added +after the expression was first released are also given a '(since +x.y.z)' comment. For example: + + ## + # @BlockStats: + # + # Statistics of a virtual block device or a block backing device. + # + # @device: #optional If the stats are for a virtual block device, the name + # corresponding to the virtual block device. + # + # @stats: A @BlockDeviceStats for the device. + # + # @parent: #optional This describes the file block device if it has one. + # + # @backing: #optional This describes the backing block device if it has one. + # (Since 2.0) + # + # Since: 0.14.0 + ## + { 'struct': 'BlockStats', + 'data': {'*device': 'str', 'stats': 'BlockDeviceStats', + '*parent': 'BlockStats', + '*backing': 'BlockStats'} } + +The schema sets up a series of types, as well as commands and events +that will use those types. Forward references are allowed: the parser +scans in two passes, where the first pass learns all type names, and +the second validates the schema and generates the code. This allows +the definition of complex structs that can have mutually recursive +types, and allows for indefinite nesting of Client JSON Protocol that +satisfies the schema. A type name should not be defined more than +once. 
It is permissible for the schema to contain additional types +not used by any commands or events in the Client JSON Protocol, for +the side effect of generated C code used internally. + +There are seven top-level expressions recognized by the parser: +'include', 'command', 'struct', 'enum', 'union', 'alternate', and +'event'. There are several groups of types: simple types (a number of +built-in types, such as 'int' and 'str'; as well as enumerations), +complex types (structs and two flavors of unions), and alternate types +(a choice between other types). The 'command' and 'event' expressions +can refer to existing types by name, or list an anonymous type as a +dictionary. Listing a type name inside an array refers to a +single-dimension array of that type; multi-dimension arrays are not +directly supported (although an array of a complex struct that +contains an array member is possible). + +Types, commands, and events share a common namespace. Therefore, +generally speaking, type definitions should always use CamelCase for +user-defined type names, while built-in types are lowercase. Type +definitions should not end in 'Kind', as this namespace is used for +creating implicit C enums for visiting union types. Command names, +and field names within a type, should be all lower case with words +separated by a hyphen. However, some existing older commands and +complex types use underscore; when extending such expressions, +consistency is preferred over blindly avoiding underscore. Event +names should be ALL_CAPS with words separated by underscore. The +special string '**' appears for some commands that manually perform +their own type checking rather than relying on the type-safe code +produced by the qapi code generators. + +Any name (command, event, type, field, or enum value) beginning with +"x-" is marked experimental, and may be withdrawn or changed +incompatibly in a future release. Downstream vendors may add +extensions; such extensions should begin with a prefix matching +"__RFQDN_" (for the reverse-fully-qualified-domain-name of the +vendor), even if the rest of the name uses dash (example: +__com.redhat_drive-mirror). Other than downstream extensions (with +leading underscore and the use of dots), all names should begin with a +letter, and contain only ASCII letters, digits, dash, and underscore. +It is okay to reuse names that match C keywords; the generator will +rename a field named "default" in the QAPI to "q_default" in the +generated C code. + +In the rest of this document, usage lines are given for each +expression type, with literal strings written in lower case and +placeholders written in capitals. If a literal string includes a +prefix of '*', that key/value pair can be omitted from the expression. +For example, a usage statement that includes '*base':STRUCT-NAME +means that an expression has an optional key 'base', which if present +must have a value that forms a struct name. 
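As a concrete illustration of the naming rules above, consider a hypothetical 'ExampleOptions' struct (not part of any real schema) whose members exercise both the keyword renaming and the optional-member convention. A rough sketch of the C type the generator would emit, assuming current generator behaviour, is:

    /* Hypothetical schema:
     *   { 'struct': 'ExampleOptions',
     *     'data': { 'default': 'bool', '*limit': 'int' } }
     */
    typedef struct ExampleOptions ExampleOptions;

    struct ExampleOptions {
        bool q_default;    /* "default" is a C keyword, so it is renamed */
        bool has_limit;    /* optional members are paired with a has_ flag */
        int64_t limit;     /* 'int' maps to a 64-bit signed integer */
    };

The exact layout may differ between QEMU versions; the point is only that schema names survive into C with predictable, mechanical adjustments.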
+ + +=== Built-in Types === + +The following types are built-in to the parser: + 'str' - arbitrary UTF-8 string + 'int' - 64-bit signed integer (although the C code may place further + restrictions on acceptable range) + 'number' - floating point number + 'bool' - JSON value of true or false + 'int8', 'int16', 'int32', 'int64' - like 'int', but enforce maximum + bit size + 'uint8', 'uint16', 'uint32', 'uint64' - unsigned counterparts + 'size' - like 'uint64', but allows scaled suffix from command line + visitor === Includes === +Usage: { 'include': STRING } + The QAPI schema definitions can be modularized using the 'include' directive: - { 'include': 'path/to/file.json'} + { 'include': 'path/to/file.json' } The directive is evaluated recursively, and include paths are relative to the -file using the directive. Multiple includes of the same file are safe. +file using the directive. Multiple includes of the same file are +safe. No other keys should appear in the expression, and the include +value should be a string. + +As a matter of style, it is a good idea to have all files be +self-contained, but at the moment, nothing prevents an included file +from making a forward reference to a type that is only introduced by +an outer file. The parser may be made stricter in the future to +prevent incomplete include files. -=== Complex types === +=== Struct types === -A complex type is a dictionary containing a single key whose value is a -dictionary. This corresponds to a struct in C or an Object in JSON. An -example of a complex type is: +Usage: { 'struct': STRING, 'data': DICT, '*base': STRUCT-NAME } - { 'type': 'MyType', +A struct is a dictionary containing a single 'data' key whose +value is a dictionary. This corresponds to a struct in C or an Object +in JSON. Each value of the 'data' dictionary must be the name of a +type, or a one-element array containing a type name. An example of a +struct is: + + { 'struct': 'MyType', 'data': { 'member1': 'str', 'member2': 'int', '*member3': 'str' } } -The use of '*' as a prefix to the name means the member is optional. +The use of '*' as a prefix to the name means the member is optional in +the corresponding JSON protocol usage. The default initialization value of an optional argument should not be changed between versions of QEMU unless the new default maintains backward @@ -84,13 +216,13 @@ A structure that is used in both input and output of various commands must consider the backwards compatibility constraints of both directions of use. -A complex type definition can specify another complex type as its base. +A struct definition can specify another struct as its base. In this case, the fields of the base type are included as top-level fields -of the new complex type's dictionary in the QMP wire format. An example -definition is: +of the new struct's dictionary in the Client JSON Protocol wire +format. An example definition is: - { 'type': 'BlockdevOptionsGenericFormat', 'data': { 'file': 'str' } } - { 'type': 'BlockdevOptionsGenericCOWFormat', + { 'struct': 'BlockdevOptionsGenericFormat', 'data': { 'file': 'str' } } + { 'struct': 'BlockdevOptionsGenericCOWFormat', 'base': 'BlockdevOptionsGenericFormat', 'data': { '*backing': 'str' } } @@ -100,97 +232,158 @@ both fields like this: { "file": "/some/place/my-image", "backing": "/some/place/my-backing-file" } + === Enumeration types === -An enumeration type is a dictionary containing a single key whose value is a -list of strings. 
An example enumeration is: +Usage: { 'enum': STRING, 'data': ARRAY-OF-STRING } + +An enumeration type is a dictionary containing a single 'data' key +whose value is a list of strings. An example enumeration is: { 'enum': 'MyEnum', 'data': [ 'value1', 'value2', 'value3' ] } +Nothing prevents an empty enumeration, although it is probably not +useful. The list of strings should be lower case; if an enum name +represents multiple words, use '-' between words. The string 'max' is +not allowed as an enum value, and values should not be repeated. + +The enumeration values are passed as strings over the Client JSON +Protocol, but are encoded as C enum integral values in generated code. +While the C code starts numbering at 0, it is better to use explicit +comparisons to enum values than implicit comparisons to 0; the C code +will also include a generated enum member ending in _MAX for tracking +the size of the enum, useful when using common functions for +converting between strings and enum values. Since the wire format +always passes by name, it is acceptable to reorder or add new +enumeration members in any location without breaking clients of Client +JSON Protocol; however, removing enum values would break +compatibility. For any struct that has a field that will only contain +a finite set of string values, using an enum type for that field is +better than open-coding the field to be type 'str'. + + === Union types === -Union types are used to let the user choose between several different data -types. A union type is defined using a dictionary as explained in the -following paragraphs. +Usage: { 'union': STRING, 'data': DICT } +or: { 'union': STRING, 'data': DICT, 'base': STRUCT-NAME, + 'discriminator': ENUM-MEMBER-OF-BASE } +Union types are used to let the user choose between several different +variants for an object. There are two flavors: simple (no +discriminator or base), flat (both discriminator and base). A union +type is defined using a data dictionary as explained in the following +paragraphs. -A simple union type defines a mapping from discriminator values to data types -like in this example: +A simple union type defines a mapping from automatic discriminator +values to data types like in this example: - { 'type': 'FileOptions', 'data': { 'filename': 'str' } } - { 'type': 'Qcow2Options', + { 'struct': 'FileOptions', 'data': { 'filename': 'str' } } + { 'struct': 'Qcow2Options', 'data': { 'backing-file': 'str', 'lazy-refcounts': 'bool' } } { 'union': 'BlockdevOptions', 'data': { 'file': 'FileOptions', 'qcow2': 'Qcow2Options' } } -In the QMP wire format, a simple union is represented by a dictionary that -contains the 'type' field as a discriminator, and a 'data' field that is of the -specified data type corresponding to the discriminator value: +In the Client JSON Protocol, a simple union is represented by a +dictionary that contains the 'type' field as a discriminator, and a +'data' field that is of the specified data type corresponding to the +discriminator value, as in these examples: + { "type": "file", "data" : { "filename": "/some/place/my-image" } } { "type": "qcow2", "data" : { "backing-file": "/some/place/my-image", "lazy-refcounts": true } } +The generated C code uses a struct containing a union. Additionally, +an implicit C enum 'NameKind' is created, corresponding to the union +'Name', for accessing the various branches of the union. No branch of +the union can be named 'max', as this would collide with the implicit +enum. The value for each branch can be of any type. 
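To make the struct-plus-union shape concrete, the simple union above would generate C along roughly these lines (names follow the BlockdevOptions example; details such as an extra generic data pointer or a named union member vary between generator versions):

    typedef enum BlockdevOptionsKind {
        BLOCKDEV_OPTIONS_KIND_FILE,
        BLOCKDEV_OPTIONS_KIND_QCOW2,
        BLOCKDEV_OPTIONS_KIND_MAX,       /* implicit _MAX member */
    } BlockdevOptionsKind;

    struct BlockdevOptions {
        BlockdevOptionsKind kind;        /* implicit discriminator */
        union {                          /* one branch per 'data' key */
            FileOptions *file;
            Qcow2Options *qcow2;
        };
    };

Client code switches on 'kind' and then dereferences the matching branch pointer; the implicit enum (and its _MAX member) is also why no branch of the union may be named 'max'.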
-A union definition can specify a complex type as its base. In this case, the -fields of the complex type are included as top-level fields of the union -dictionary in the QMP wire format. An example definition is: - { 'type': 'BlockdevCommonOptions', 'data': { 'readonly': 'bool' } } - { 'union': 'BlockdevOptions', - 'base': 'BlockdevCommonOptions', - 'data': { 'raw': 'RawOptions', - 'qcow2': 'Qcow2Options' } } +A flat union definition specifies a struct as its base, and +avoids nesting on the wire. All branches of the union must be +complex types, and the top-level fields of the union dictionary on +the wire will be combination of fields from both the base type and the +appropriate branch type (when merging two dictionaries, there must be +no keys in common). The 'discriminator' field must be the name of an +enum-typed member of the base struct. -And it looks like this on the wire: - - { "type": "qcow2", - "readonly": false, - "data" : { "backing-file": "/some/place/my-image", - "lazy-refcounts": true } } - - -Flat union types avoid the nesting on the wire. They are used whenever a -specific field of the base type is declared as the discriminator ('type' is -then no longer generated). The discriminator must be of enumeration type. -The above example can then be modified as follows: +The following example enhances the above simple union example by +adding a common field 'readonly', renaming the discriminator to +something more applicable, and reducing the number of {} required on +the wire: { 'enum': 'BlockdevDriver', 'data': [ 'raw', 'qcow2' ] } - { 'type': 'BlockdevCommonOptions', + { 'struct': 'BlockdevCommonOptions', 'data': { 'driver': 'BlockdevDriver', 'readonly': 'bool' } } { 'union': 'BlockdevOptions', 'base': 'BlockdevCommonOptions', 'discriminator': 'driver', - 'data': { 'raw': 'RawOptions', + 'data': { 'file': 'FileOptions', 'qcow2': 'Qcow2Options' } } -Resulting in this JSON object: +Resulting in these JSON objects: + + { "driver": "file", "readonly": true, + "filename": "/some/place/my-image" } + { "driver": "qcow2", "readonly": false, + "backing-file": "/some/place/my-image", "lazy-refcounts": true } + +Notice that in a flat union, the discriminator name is controlled by +the user, but because it must map to a base member with enum type, the +code generator can ensure that branches exist for all values of the +enum (although the order of the keys need not match the declaration of +the enum). In the resulting generated C data types, a flat union is +represented as a struct with the base member fields included directly, +and then a union of structures for each branch of the struct. + +A simple union can always be re-written as a flat union where the base +class has a single member named 'type', and where each branch of the +union has a struct with a single member named 'data'. That is, - { "driver": "qcow2", - "readonly": false, - "backing-file": "/some/place/my-image", - "lazy-refcounts": true } + { 'union': 'Simple', 'data': { 'one': 'str', 'two': 'int' } } +is identical on the wire to: -A special type of unions are anonymous unions. They don't form a dictionary in -the wire format but allow the direct use of different types in their place. As -they aren't structured, they don't have any explicit discriminator but use -the (QObject) data type of their value as an implicit discriminator. This means -that they are restricted to using only one discriminator value per QObject -type. 
For example, you cannot have two different complex types in an anonymous -union, or two different integer types. + { 'enum': 'Enum', 'data': ['one', 'two'] } + { 'struct': 'Base', 'data': { 'type': 'Enum' } } + { 'struct': 'Branch1', 'data': { 'data': 'str' } } + { 'struct': 'Branch2', 'data': { 'data': 'int' } } + { 'union': 'Flat': 'base': 'Base', 'discriminator': 'type', + 'data': { 'one': 'Branch1', 'two': 'Branch2' } } -Anonymous unions are declared using an empty dictionary as their discriminator. -The discriminator values never appear on the wire, they are only used in the -generated C code. Anonymous unions cannot have a base type. - { 'union': 'BlockRef', - 'discriminator': {}, +=== Alternate types === + +Usage: { 'alternate': STRING, 'data': DICT } + +An alternate type is one that allows a choice between two or more JSON +data types (string, integer, number, or object, but currently not +array) on the wire. The definition is similar to a simple union type, +where each branch of the union names a QAPI type. For example: + + { 'alternate': 'BlockRef', 'data': { 'definition': 'BlockdevOptions', 'reference': 'str' } } -This example allows using both of the following example objects: +Just like for a simple union, an implicit C enum 'NameKind' is created +to enumerate the branches for the alternate 'Name'. + +Unlike a union, the discriminator string is never passed on the wire +for the Client JSON Protocol. Instead, the value's JSON type serves +as an implicit discriminator, which in turn means that an alternate +can only express a choice between types represented differently in +JSON. If a branch is typed as the 'bool' built-in, the alternate +accepts true and false; if it is typed as any of the various numeric +built-ins, it accepts a JSON number; if it is typed as a 'str' +built-in or named enum type, it accepts a JSON string; and if it is +typed as a complex type (struct or union), it accepts a JSON object. +Two different complex types, for instance, aren't permitted, because +both are represented as a JSON object. + +The example alternate declaration above allows using both of the +following example objects: { "file": "my_existing_block_device_id" } { "file": { "driver": "file", @@ -200,23 +393,95 @@ This example allows using both of the following example objects: === Commands === -Commands are defined by using a list containing three members. The first -member is the command name, the second member is a dictionary containing -arguments, and the third member is the return type. - -An example command is: +Usage: { 'command': STRING, '*data': COMPLEX-TYPE-NAME-OR-DICT, + '*returns': TYPE-NAME-OR-DICT, + '*gen': false, '*success-response': false } + +Commands are defined by using a dictionary containing several members, +where three members are most common. The 'command' member is a +mandatory string, and determines the "execute" value passed in a +Client JSON Protocol command exchange. + +The 'data' argument maps to the "arguments" dictionary passed in as +part of a Client JSON Protocol command. The 'data' member is optional +and defaults to {} (an empty dictionary). If present, it must be the +string name of a complex type, a one-element array containing the name +of a complex type, or a dictionary that declares an anonymous type +with the same semantics as a 'struct' expression, with one exception +noted below when 'gen' is used. + +The 'returns' member describes what will appear in the "return" field +of a Client JSON Protocol reply on successful completion of a command. 
+The member is optional from the command declaration; if absent, the +"return" field will be an empty dictionary. If 'returns' is present, +it must be the string name of a complex or built-in type, a +one-element array containing the name of a complex or built-in type, +or a dictionary that declares an anonymous type with the same +semantics as a 'struct' expression, with one exception noted below +when 'gen' is used. Although it is permitted to have the 'returns' +member name a built-in type or an array of built-in types, any command +that does this cannot be extended to return additional information in +the future; thus, new commands should strongly consider returning a +dictionary-based type or an array of dictionaries, even if the +dictionary only contains one field at the present. + +All commands in Client JSON Protocol use a dictionary to report +failure, with no way to specify that in QAPI. Where the error return +is different than the usual GenericError class in order to help the +client react differently to certain error conditions, it is worth +documenting this in the comments before the command declaration. + +Some example commands: + + { 'command': 'my-first-command', + 'data': { 'arg1': 'str', '*arg2': 'str' } } + { 'struct': 'MyType', 'data': { '*value': 'str' } } + { 'command': 'my-second-command', + 'returns': [ 'MyType' ] } + +which would validate this Client JSON Protocol transaction: + + => { "execute": "my-first-command", + "arguments": { "arg1": "hello" } } + <= { "return": { } } + => { "execute": "my-second-command" } + <= { "return": [ { "value": "one" }, { } ] } + +In rare cases, QAPI cannot express a type-safe representation of a +corresponding Client JSON Protocol command. In these cases, if the +command expression includes the key 'gen' with boolean value false, +then the 'data' or 'returns' member that intends to bypass generated +type-safety and do its own manual validation should use an inline +dictionary definition, with a value of '**' rather than a valid type +name for the keys that the generated code will not validate. Please +try to avoid adding new commands that rely on this, and instead use +type-safe unions. For an example of bypass usage: + + { 'command': 'netdev_add', + 'data': {'type': 'str', 'id': 'str', '*props': '**'}, + 'gen': false } + +Normally, the QAPI schema is used to describe synchronous exchanges, +where a response is expected. But in some cases, the action of a +command is expected to change state in a way that a successful +response is not possible (although the command will still return a +normal dictionary error on failure). When a successful reply is not +possible, the command expression should include the optional key +'success-response' with boolean value false. So far, only QGA makes +use of this field. - { 'command': 'my-command', - 'data': { 'arg1': 'str', '*arg2': 'str' }, - 'returns': 'str' } === Events === -Events are defined with the keyword 'event'. When 'data' is also specified, -additional info will be included in the event. Finally there will be C API -generated in qapi-event.h; when called by QEMU code, a message with timestamp -will be emitted on the wire. If timestamp is -1, it means failure to retrieve -host time. +Usage: { 'event': STRING, '*data': COMPLEX-TYPE-NAME-OR-DICT } + +Events are defined with the keyword 'event'. It is not allowed to +name an event 'MAX', since the generator also produces a C enumeration +of all event names with a generated _MAX value at the end. 
When +'data' is also specified, additional info will be included in the +event, with similar semantics to a 'struct' expression. Finally there +will be C API generated in qapi-event.h; when called by QEMU code, a +message with timestamp will be emitted on the wire. An example event is: @@ -234,9 +499,9 @@ Resulting in this JSON object: Schemas are fed into 3 scripts to generate all the code/files that, paired with the core QAPI libraries, comprise everything required to take JSON -commands read in by a QMP/guest agent server, unmarshal the arguments into +commands read in by a Client JSON Protocol server, unmarshal the arguments into the underlying C types, call into the corresponding C function, and map the -response back to a QMP/guest agent response to be returned to the user. +response back to a Client JSON Protocol response to be returned to the user. As an example, we'll use the following schema, which describes a single complex user-defined type (which will produce a C struct, along with a list @@ -245,7 +510,7 @@ case we want to accept/return a list of this type with a command), and a command which takes that type as a parameter and returns the same type: $ cat example-schema.json - { 'type': 'UserDefOne', + { 'struct': 'UserDefOne', 'data': { 'integer': 'int', 'string': 'str' } } { 'command': 'my-command', @@ -311,7 +576,7 @@ Example: #ifndef EXAMPLE_QAPI_TYPES_H #define EXAMPLE_QAPI_TYPES_H -[Builtin types omitted...] +[Built-in types omitted...] typedef struct UserDefOne UserDefOne; @@ -324,7 +589,7 @@ Example: struct UserDefOneList *next; } UserDefOneList; -[Functions on builtin types omitted...] +[Functions on built-in types omitted...] struct UserDefOne { @@ -423,7 +688,7 @@ Example: #ifndef EXAMPLE_QAPI_VISIT_H #define EXAMPLE_QAPI_VISIT_H -[Visitors for builtin types omitted...] +[Visitors for built-in types omitted...] void visit_type_UserDefOne(Visitor *m, UserDefOne **obj, const char *name, Error **errp); void visit_type_UserDefOneList(Visitor *m, UserDefOneList **obj, const char *name, Error **errp); diff --git a/docs/qmp/qmp-events.txt b/docs/qmp/qmp-events.txt index 3be468f6d6..6dc2cca7de 100644 --- a/docs/qmp/qmp-events.txt +++ b/docs/qmp/qmp-events.txt @@ -31,21 +31,27 @@ Example: BLOCK_IMAGE_CORRUPTED --------------------- -Emitted when a disk image is being marked corrupt. +Emitted when a disk image is being marked corrupt. The image can be +identified by its device or node name. The 'device' field is always +present for compatibility reasons, but it can be empty ("") if the +image does not have a device name associated. 
Data: -- "device": Device name (json-string) -- "msg": Informative message (e.g., reason for the corruption) (json-string) -- "offset": If the corruption resulted from an image access, this is the access - offset into the image (json-int) -- "size": If the corruption resulted from an image access, this is the access - size (json-int) +- "device": Device name (json-string) +- "node-name": Node name (json-string, optional) +- "msg": Informative message (e.g., reason for the corruption) + (json-string) +- "offset": If the corruption resulted from an image access, this + is the host's access offset into the image + (json-int, optional) +- "size": If the corruption resulted from an image access, this + is the access size (json-int, optional) Example: { "event": "BLOCK_IMAGE_CORRUPTED", - "data": { "device": "ide0-hd0", + "data": { "device": "ide0-hd0", "node-name": "node0", "msg": "Prevented active L1 table overwrite", "offset": 196608, "size": 65536 }, "timestamp": { "seconds": 1378126126, "microseconds": 966463 } } diff --git a/docs/qmp/qmp-spec.txt b/docs/qmp/qmp-spec.txt index 22568c644e..4c28cd9438 100644 --- a/docs/qmp/qmp-spec.txt +++ b/docs/qmp/qmp-spec.txt @@ -1,10 +1,21 @@ QEMU Machine Protocol Specification +0. About This Document +====================== + +Copyright (C) 2009-2015 Red Hat, Inc. + +This work is licensed under the terms of the GNU GPL, version 2 or +later. See the COPYING file in the top-level directory. + 1. Introduction =============== -This document specifies the QEMU Machine Protocol (QMP), a JSON-based protocol -which is available for applications to operate QEMU at the machine-level. +This document specifies the QEMU Machine Protocol (QMP), a JSON-based +protocol which is available for applications to operate QEMU at the +machine-level. It is also in use by the QEMU Guest Agent (QGA), which +is available for host applications to interact with the guest +operating system. 2. Protocol Specification ========================= @@ -18,14 +29,27 @@ following format: json-DATA-STRUCTURE-NAME -Where DATA-STRUCTURE-NAME is any valid JSON data structure, as defined by -the JSON standard: +Where DATA-STRUCTURE-NAME is any valid JSON data structure, as defined +by the JSON standard: + +http://www.ietf.org/rfc/rfc7159.txt -http://www.ietf.org/rfc/rfc4627.txt +The protocol is always encoded in UTF-8 except for synchronization +bytes (documented below); although thanks to json-string escape +sequences, the server will reply using only the strict ASCII subset. -For convenience, json-object members and json-array elements mentioned in -this document will be in a certain order. However, in real protocol usage -they can be in ANY order, thus no particular order should be assumed. +For convenience, json-object members mentioned in this document will +be in a certain order. However, in real protocol usage they can be in +ANY order, thus no particular order should be assumed. On the other +hand, use of json-array elements presumes that preserving order is +important unless specifically documented otherwise. Repeating a key +within a json-object gives unpredictable results. + +Also for convenience, the server will accept an extension of +'single-quoted' strings in place of the usual "double-quoted" +json-string, and both input forms of strings understand an additional +escape sequence of "\'" for a single quote. The server will only use +double quoting on output. 
2.1 General Definitions ----------------------- @@ -52,7 +76,16 @@ The greeting message format is: - The "version" member contains the Server's version information (the format is the same of the query-version command) - The "capabilities" member specify the availability of features beyond the - baseline specification + baseline specification; the order of elements in this array has no + particular significance, so a client must search the entire array + when looking for a particular capability + +2.2.1 Capabilities +------------------ + +As of the date this document was last revised, no server or client +capability strings have been defined. + 2.3 Issuing Commands -------------------- @@ -65,10 +98,14 @@ The format for command execution is: - The "execute" member identifies the command to be executed by the Server - The "arguments" member is used to pass any arguments required for the - execution of the command, it is optional when no arguments are required + execution of the command, it is optional when no arguments are + required. Each command documents what contents will be considered + valid when handling the json-argument - The "id" member is a transaction identification associated with the command execution, it is optional and will be part of the response if - provided + provided. The "id" member can be any json-value, although most + clients merely use a json-number incremented for each successive + command 2.4 Commands Responses ---------------------- @@ -81,13 +118,15 @@ of a command execution: success or error. The format of a success response is: -{ "return": json-object, "id": json-value } +{ "return": json-value, "id": json-value } Where, -- The "return" member contains the command returned data, which is defined - in a per-command basis or an empty json-object if the command does not - return data +- The "return" member contains the data returned by the command, which + is defined on a per-command basis (usually a json-object or + json-array of json-objects, but sometimes a json-number, json-string, + or json-array of json-strings); it is an empty json-object if the + command does not return data - The "id" member contains the transaction identification associated with the command execution if issued by the Client @@ -114,7 +153,8 @@ if provided by the client. ----------------------- As a result of state changes, the Server may send messages unilaterally -to the Client at any time. They are called "asynchronous events". +to the Client at any time, when not in the middle of any other +response. They are called "asynchronous events". The format of asynchronous events is: @@ -126,13 +166,27 @@ The format of asynchronous events is: - The "event" member contains the event's name - The "data" member contains event specific data, which is defined in a per-event basis, it is optional -- The "timestamp" member contains the exact time of when the event occurred - in the Server. It is a fixed json-object with time in seconds and - microseconds +- The "timestamp" member contains the exact time of when the event + occurred in the Server. It is a fixed json-object with time in + seconds and microseconds relative to the Unix Epoch (1 Jan 1970); if + there is a failure to retrieve host time, both members of the + timestamp will be set to -1. For a listing of supported asynchronous events, please, refer to the qmp-events.txt file. +2.5 QGA Synchronization +----------------------- + +When using QGA, an additional synchronization feature is built into +the protocol. 
If the Client sends a raw 0xFF sentinel byte (not valid +JSON), then the Server will reset its state and discard all pending +data prior to the sentinel. Conversely, if the Client makes use of +the 'guest-sync-delimited' command, the Server will send a raw 0xFF +sentinel byte prior to its response, to aid the Client in discarding +any data prior to the sentinel. + + 3. QMP Examples =============== @@ -145,32 +199,37 @@ This section provides some examples of real QMP usage, in all of them S: { "QMP": { "version": { "qemu": { "micro": 50, "minor": 6, "major": 1 }, "package": ""}, "capabilities": []}} -3.2 Simple 'stop' execution +3.2 Client QMP negotiation +-------------------------- +C: { "execute": "qmp_capabilities" } +S: { "return": {}} + +3.3 Simple 'stop' execution --------------------------- C: { "execute": "stop" } S: { "return": {} } -3.3 KVM information +3.4 KVM information ------------------- C: { "execute": "query-kvm", "id": "example" } S: { "return": { "enabled": true, "present": true }, "id": "example"} -3.4 Parsing error +3.5 Parsing error ------------------ C: { "execute": } S: { "error": { "class": "GenericError", "desc": "Invalid JSON syntax" } } -3.5 Powerdown event +3.6 Powerdown event ------------------- S: { "timestamp": { "seconds": 1258551470, "microseconds": 802384 }, "event": "POWERDOWN" } 4. Capabilities Negotiation ----------------------------- +=========================== When a Client successfully establishes a connection, the Server is in Capabilities Negotiation mode. @@ -189,7 +248,7 @@ effect, all commands (except qmp_capabilities) are allowed and asynchronous messages are delivered. 5 Compatibility Considerations ------------------------------- +============================== All protocol changes or new features which modify the protocol format in an incompatible way are disabled by default and will be advertised by the @@ -213,12 +272,16 @@ However, Clients must not assume any particular: - Amount of errors generated by a command, that is, new errors can be added to any existing command in newer versions of the Server +Any command or field name beginning with "x-" is deemed experimental, +and may be withdrawn or changed in an incompatible manner in a future +release. + Of course, the Server does guarantee to send valid JSON. But apart from this, a Client should be "conservative in what they send, and liberal in what they accept". 6. Downstream extension of QMP ------------------------------- +============================== We recommend that downstream consumers of QEMU do *not* modify QMP. 
Management tools should be able to support both upstream and downstream @@ -373,6 +373,7 @@ static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write) return false; } +/* Called from RCU critical section */ MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr, hwaddr *xlat, hwaddr *plen, bool is_write) @@ -380,9 +381,7 @@ MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr, IOMMUTLBEntry iotlb; MemoryRegionSection *section; MemoryRegion *mr; - hwaddr len = *plen; - rcu_read_lock(); for (;;) { AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch); section = address_space_translate_internal(d, addr, &addr, plen, true); @@ -395,7 +394,7 @@ MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr, iotlb = mr->iommu_ops->translate(mr, addr, is_write); addr = ((iotlb.translated_addr & ~iotlb.addr_mask) | (addr & iotlb.addr_mask)); - len = MIN(len, (addr | iotlb.addr_mask) - addr + 1); + *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1); if (!(iotlb.perm & (1 << is_write))) { mr = &io_mem_unassigned; break; @@ -406,12 +405,10 @@ MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr, if (xen_enabled() && memory_access_is_direct(mr, is_write)) { hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr; - len = MIN(page, len); + *plen = MIN(page, *plen); } - *plen = len; *xlat = addr; - rcu_read_unlock(); return mr; } @@ -429,15 +426,6 @@ address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr, } #endif -void cpu_exec_init_all(void) -{ -#if !defined(CONFIG_USER_ONLY) - qemu_mutex_init(&ram_list.mutex); - memory_map_init(); - io_mem_init(); -#endif -} - #if !defined(CONFIG_USER_ONLY) static int cpu_common_post_load(void *opaque, int version_id) @@ -1858,7 +1846,7 @@ static const MemoryRegionOps notdirty_mem_ops = { }; /* Generate a debug exception if a watchpoint has been hit. */ -static void check_watchpoint(int offset, int len, int flags) +static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags) { CPUState *cpu = current_cpu; CPUArchState *env = cpu->env_ptr; @@ -1884,6 +1872,7 @@ static void check_watchpoint(int offset, int len, int flags) wp->flags |= BP_WATCHPOINT_HIT_WRITE; } wp->hitaddr = vaddr; + wp->hitattrs = attrs; if (!cpu->watchpoint_hit) { cpu->watchpoint_hit = wp; tb_check_watchpoint(cpu); @@ -1905,69 +1894,93 @@ static void check_watchpoint(int offset, int len, int flags) /* Watchpoint access routines. Watchpoints are inserted using TLB tricks, so these check for a hit then pass through to the normal out-of-line phys routines. 
*/ -static uint64_t watch_mem_read(void *opaque, hwaddr addr, - unsigned size) +static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata, + unsigned size, MemTxAttrs attrs) { - check_watchpoint(addr & ~TARGET_PAGE_MASK, size, BP_MEM_READ); + MemTxResult res; + uint64_t data; + + check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ); switch (size) { - case 1: return ldub_phys(&address_space_memory, addr); - case 2: return lduw_phys(&address_space_memory, addr); - case 4: return ldl_phys(&address_space_memory, addr); + case 1: + data = address_space_ldub(&address_space_memory, addr, attrs, &res); + break; + case 2: + data = address_space_lduw(&address_space_memory, addr, attrs, &res); + break; + case 4: + data = address_space_ldl(&address_space_memory, addr, attrs, &res); + break; default: abort(); } + *pdata = data; + return res; } -static void watch_mem_write(void *opaque, hwaddr addr, - uint64_t val, unsigned size) +static MemTxResult watch_mem_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size, + MemTxAttrs attrs) { - check_watchpoint(addr & ~TARGET_PAGE_MASK, size, BP_MEM_WRITE); + MemTxResult res; + + check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE); switch (size) { case 1: - stb_phys(&address_space_memory, addr, val); + address_space_stb(&address_space_memory, addr, val, attrs, &res); break; case 2: - stw_phys(&address_space_memory, addr, val); + address_space_stw(&address_space_memory, addr, val, attrs, &res); break; case 4: - stl_phys(&address_space_memory, addr, val); + address_space_stl(&address_space_memory, addr, val, attrs, &res); break; default: abort(); } + return res; } static const MemoryRegionOps watch_mem_ops = { - .read = watch_mem_read, - .write = watch_mem_write, + .read_with_attrs = watch_mem_read, + .write_with_attrs = watch_mem_write, .endianness = DEVICE_NATIVE_ENDIAN, }; -static uint64_t subpage_read(void *opaque, hwaddr addr, - unsigned len) +static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data, + unsigned len, MemTxAttrs attrs) { subpage_t *subpage = opaque; uint8_t buf[8]; + MemTxResult res; #if defined(DEBUG_SUBPAGE) printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__, subpage, len, addr); #endif - address_space_read(subpage->as, addr + subpage->base, buf, len); + res = address_space_read(subpage->as, addr + subpage->base, + attrs, buf, len); + if (res) { + return res; + } switch (len) { case 1: - return ldub_p(buf); + *data = ldub_p(buf); + return MEMTX_OK; case 2: - return lduw_p(buf); + *data = lduw_p(buf); + return MEMTX_OK; case 4: - return ldl_p(buf); + *data = ldl_p(buf); + return MEMTX_OK; case 8: - return ldq_p(buf); + *data = ldq_p(buf); + return MEMTX_OK; default: abort(); } } -static void subpage_write(void *opaque, hwaddr addr, - uint64_t value, unsigned len) +static MemTxResult subpage_write(void *opaque, hwaddr addr, + uint64_t value, unsigned len, MemTxAttrs attrs) { subpage_t *subpage = opaque; uint8_t buf[8]; @@ -1993,7 +2006,8 @@ static void subpage_write(void *opaque, hwaddr addr, default: abort(); } - address_space_write(subpage->as, addr + subpage->base, buf, len); + return address_space_write(subpage->as, addr + subpage->base, + attrs, buf, len); } static bool subpage_accepts(void *opaque, hwaddr addr, @@ -2010,8 +2024,8 @@ static bool subpage_accepts(void *opaque, hwaddr addr, } static const MemoryRegionOps subpage_ops = { - .read = subpage_read, - .write = subpage_write, + .read_with_attrs = subpage_read, + .write_with_attrs = 
subpage_write, .impl.min_access_size = 1, .impl.max_access_size = 8, .valid.min_access_size = 1, @@ -2304,16 +2318,17 @@ static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr) return l; } -bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf, - int len, bool is_write) +MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs, + uint8_t *buf, int len, bool is_write) { hwaddr l; uint8_t *ptr; uint64_t val; hwaddr addr1; MemoryRegion *mr; - bool error = false; + MemTxResult result = MEMTX_OK; + rcu_read_lock(); while (len > 0) { l = len; mr = address_space_translate(as, addr, &addr1, &l, is_write); @@ -2327,22 +2342,26 @@ bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf, case 8: /* 64 bit write access */ val = ldq_p(buf); - error |= io_mem_write(mr, addr1, val, 8); + result |= memory_region_dispatch_write(mr, addr1, val, 8, + attrs); break; case 4: /* 32 bit write access */ val = ldl_p(buf); - error |= io_mem_write(mr, addr1, val, 4); + result |= memory_region_dispatch_write(mr, addr1, val, 4, + attrs); break; case 2: /* 16 bit write access */ val = lduw_p(buf); - error |= io_mem_write(mr, addr1, val, 2); + result |= memory_region_dispatch_write(mr, addr1, val, 2, + attrs); break; case 1: /* 8 bit write access */ val = ldub_p(buf); - error |= io_mem_write(mr, addr1, val, 1); + result |= memory_region_dispatch_write(mr, addr1, val, 1, + attrs); break; default: abort(); @@ -2361,22 +2380,26 @@ bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf, switch (l) { case 8: /* 64 bit read access */ - error |= io_mem_read(mr, addr1, &val, 8); + result |= memory_region_dispatch_read(mr, addr1, &val, 8, + attrs); stq_p(buf, val); break; case 4: /* 32 bit read access */ - error |= io_mem_read(mr, addr1, &val, 4); + result |= memory_region_dispatch_read(mr, addr1, &val, 4, + attrs); stl_p(buf, val); break; case 2: /* 16 bit read access */ - error |= io_mem_read(mr, addr1, &val, 2); + result |= memory_region_dispatch_read(mr, addr1, &val, 2, + attrs); stw_p(buf, val); break; case 1: /* 8 bit read access */ - error |= io_mem_read(mr, addr1, &val, 1); + result |= memory_region_dispatch_read(mr, addr1, &val, 1, + attrs); stb_p(buf, val); break; default: @@ -2392,26 +2415,29 @@ bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf, buf += l; addr += l; } + rcu_read_unlock(); - return error; + return result; } -bool address_space_write(AddressSpace *as, hwaddr addr, - const uint8_t *buf, int len) +MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs, + const uint8_t *buf, int len) { - return address_space_rw(as, addr, (uint8_t *)buf, len, true); + return address_space_rw(as, addr, attrs, (uint8_t *)buf, len, true); } -bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len) +MemTxResult address_space_read(AddressSpace *as, hwaddr addr, MemTxAttrs attrs, + uint8_t *buf, int len) { - return address_space_rw(as, addr, buf, len, false); + return address_space_rw(as, addr, attrs, buf, len, false); } void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf, int len, int is_write) { - address_space_rw(&address_space_memory, addr, buf, len, is_write); + address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED, + buf, len, is_write); } enum write_rom_type { @@ -2427,6 +2453,7 @@ static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as, hwaddr addr1; MemoryRegion *mr; + rcu_read_lock(); while (len > 0) { l = len; mr = address_space_translate(as, addr, 
&addr1, &l, true); @@ -2452,6 +2479,7 @@ static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as, buf += l; addr += l; } + rcu_read_unlock(); } /* used for ROM loading : can write in RAM and ROM */ @@ -2482,46 +2510,77 @@ typedef struct { void *buffer; hwaddr addr; hwaddr len; + bool in_use; } BounceBuffer; static BounceBuffer bounce; typedef struct MapClient { - void *opaque; - void (*callback)(void *opaque); + QEMUBH *bh; QLIST_ENTRY(MapClient) link; } MapClient; +QemuMutex map_client_list_lock; static QLIST_HEAD(map_client_list, MapClient) map_client_list = QLIST_HEAD_INITIALIZER(map_client_list); -void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque)) +static void cpu_unregister_map_client_do(MapClient *client) +{ + QLIST_REMOVE(client, link); + g_free(client); +} + +static void cpu_notify_map_clients_locked(void) +{ + MapClient *client; + + while (!QLIST_EMPTY(&map_client_list)) { + client = QLIST_FIRST(&map_client_list); + qemu_bh_schedule(client->bh); + cpu_unregister_map_client_do(client); + } +} + +void cpu_register_map_client(QEMUBH *bh) { MapClient *client = g_malloc(sizeof(*client)); - client->opaque = opaque; - client->callback = callback; + qemu_mutex_lock(&map_client_list_lock); + client->bh = bh; QLIST_INSERT_HEAD(&map_client_list, client, link); - return client; + if (!atomic_read(&bounce.in_use)) { + cpu_notify_map_clients_locked(); + } + qemu_mutex_unlock(&map_client_list_lock); } -static void cpu_unregister_map_client(void *_client) +void cpu_exec_init_all(void) { - MapClient *client = (MapClient *)_client; - - QLIST_REMOVE(client, link); - g_free(client); + qemu_mutex_init(&ram_list.mutex); + memory_map_init(); + io_mem_init(); + qemu_mutex_init(&map_client_list_lock); } -static void cpu_notify_map_clients(void) +void cpu_unregister_map_client(QEMUBH *bh) { MapClient *client; - while (!QLIST_EMPTY(&map_client_list)) { - client = QLIST_FIRST(&map_client_list); - client->callback(client->opaque); - cpu_unregister_map_client(client); + qemu_mutex_lock(&map_client_list_lock); + QLIST_FOREACH(client, &map_client_list, link) { + if (client->bh == bh) { + cpu_unregister_map_client_do(client); + break; + } } + qemu_mutex_unlock(&map_client_list_lock); +} + +static void cpu_notify_map_clients(void) +{ + qemu_mutex_lock(&map_client_list_lock); + cpu_notify_map_clients_locked(); + qemu_mutex_unlock(&map_client_list_lock); } bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write) @@ -2529,6 +2588,7 @@ bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_ MemoryRegion *mr; hwaddr l, xlat; + rcu_read_lock(); while (len > 0) { l = len; mr = address_space_translate(as, addr, &xlat, &l, is_write); @@ -2542,6 +2602,7 @@ bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_ len -= l; addr += l; } + rcu_read_unlock(); return true; } @@ -2568,9 +2629,12 @@ void *address_space_map(AddressSpace *as, } l = len; + rcu_read_lock(); mr = address_space_translate(as, addr, &xlat, &l, is_write); + if (!memory_access_is_direct(mr, is_write)) { - if (bounce.buffer) { + if (atomic_xchg(&bounce.in_use, true)) { + rcu_read_unlock(); return NULL; } /* Avoid unbounded allocations */ @@ -2582,9 +2646,11 @@ void *address_space_map(AddressSpace *as, memory_region_ref(mr); bounce.mr = mr; if (!is_write) { - address_space_read(as, addr, bounce.buffer, l); + address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED, + bounce.buffer, l); } + rcu_read_unlock(); *plen = l; return bounce.buffer; } 
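(Editorial aside, not part of the patch series.) The exec.c hunks above make two API changes that callers see: the bool-returning io_mem_read/write path becomes MemTxAttrs/MemTxResult-aware dispatch with RCU locking pushed down into the accessors, and cpu_register_map_client() is reworked around a QEMUBH plus an atomic bounce.in_use flag. Below is a minimal sketch, assuming a hypothetical device model; MyDMAState, my_dma_run and the chosen headers are invented for illustration, only the address_space_*/cpu_register_map_client calls come from the series itself.

    /* Hypothetical device code, not from this series. Headers are approximate:
     * address_space_* live in "exec/memory.h", QEMUBH in "qemu/main-loop.h". */
    #include "qemu-common.h"
    #include "qemu/main-loop.h"
    #include "exec/memory.h"

    typedef struct MyDMAState {      /* invented per-device state */
        AddressSpace *as;
        hwaddr addr;
        hwaddr len;
        QEMUBH *bh;                  /* created with qemu_bh_new(my_dma_run, s) */
    } MyDMAState;

    static void my_dma_run(void *opaque)
    {
        MyDMAState *s = opaque;
        hwaddr l = s->len;
        uint8_t buf[4];
        MemTxResult res;
        void *p;

        /* address_space_map() can fail while the single bounce buffer is busy;
         * with the new API we hand it a bottom half instead of the removed
         * (opaque, callback) pair, and it is scheduled once bounce.in_use
         * clears. */
        p = address_space_map(s->as, s->addr, &l, false);
        if (!p) {
            cpu_register_map_client(s->bh);
            return;
        }

        /* Attribute-aware accessor: MEMTXATTRS_UNSPECIFIED keeps the old
         * behaviour, and the MemTxResult replaces the old bool error flag. */
        res = address_space_read(s->as, s->addr, MEMTXATTRS_UNSPECIFIED,
                                 buf, sizeof(buf));
        if (res != MEMTX_OK) {
            /* the target region reported a transaction failure */
        }

        address_space_unmap(s->as, p, l, false, 0);
    }

In such a caller, s->bh would be created once at device init with qemu_bh_new(my_dma_run, s), and a still-pending registration could be dropped in the cleanup path with cpu_unregister_map_client(s->bh), matching the new registration/unregistration pair introduced above.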
@@ -2608,6 +2674,7 @@ void *address_space_map(AddressSpace *as, } memory_region_ref(mr); + rcu_read_unlock(); *plen = done; return qemu_ram_ptr_length(raddr + base, plen); } @@ -2635,11 +2702,13 @@ void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len, return; } if (is_write) { - address_space_write(as, bounce.addr, bounce.buffer, access_len); + address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED, + bounce.buffer, access_len); } qemu_vfree(bounce.buffer); bounce.buffer = NULL; memory_region_unref(bounce.mr); + atomic_mb_set(&bounce.in_use, false); cpu_notify_map_clients(); } @@ -2657,19 +2726,23 @@ void cpu_physical_memory_unmap(void *buffer, hwaddr len, } /* warning: addr must be aligned */ -static inline uint32_t ldl_phys_internal(AddressSpace *as, hwaddr addr, - enum device_endian endian) +static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr, + MemTxAttrs attrs, + MemTxResult *result, + enum device_endian endian) { uint8_t *ptr; uint64_t val; MemoryRegion *mr; hwaddr l = 4; hwaddr addr1; + MemTxResult r; + rcu_read_lock(); mr = address_space_translate(as, addr, &addr1, &l, false); if (l < 4 || !memory_access_is_direct(mr, false)) { /* I/O case */ - io_mem_read(mr, addr1, &val, 4); + r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs); #if defined(TARGET_WORDS_BIGENDIAN) if (endian == DEVICE_LITTLE_ENDIAN) { val = bswap32(val); @@ -2695,40 +2768,70 @@ static inline uint32_t ldl_phys_internal(AddressSpace *as, hwaddr addr, val = ldl_p(ptr); break; } + r = MEMTX_OK; + } + if (result) { + *result = r; } + rcu_read_unlock(); return val; } +uint32_t address_space_ldl(AddressSpace *as, hwaddr addr, + MemTxAttrs attrs, MemTxResult *result) +{ + return address_space_ldl_internal(as, addr, attrs, result, + DEVICE_NATIVE_ENDIAN); +} + +uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr, + MemTxAttrs attrs, MemTxResult *result) +{ + return address_space_ldl_internal(as, addr, attrs, result, + DEVICE_LITTLE_ENDIAN); +} + +uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr, + MemTxAttrs attrs, MemTxResult *result) +{ + return address_space_ldl_internal(as, addr, attrs, result, + DEVICE_BIG_ENDIAN); +} + uint32_t ldl_phys(AddressSpace *as, hwaddr addr) { - return ldl_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN); + return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL); } uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr) { - return ldl_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN); + return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL); } uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr) { - return ldl_phys_internal(as, addr, DEVICE_BIG_ENDIAN); + return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL); } /* warning: addr must be aligned */ -static inline uint64_t ldq_phys_internal(AddressSpace *as, hwaddr addr, - enum device_endian endian) +static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr, + MemTxAttrs attrs, + MemTxResult *result, + enum device_endian endian) { uint8_t *ptr; uint64_t val; MemoryRegion *mr; hwaddr l = 8; hwaddr addr1; + MemTxResult r; + rcu_read_lock(); mr = address_space_translate(as, addr, &addr1, &l, false); if (l < 8 || !memory_access_is_direct(mr, false)) { /* I/O case */ - io_mem_read(mr, addr1, &val, 8); + r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs); #if defined(TARGET_WORDS_BIGENDIAN) if (endian == DEVICE_LITTLE_ENDIAN) { val = bswap64(val); @@ -2754,48 +2857,90 @@ static inline uint64_t ldq_phys_internal(AddressSpace 
*as, hwaddr addr, val = ldq_p(ptr); break; } + r = MEMTX_OK; } + if (result) { + *result = r; + } + rcu_read_unlock(); return val; } +uint64_t address_space_ldq(AddressSpace *as, hwaddr addr, + MemTxAttrs attrs, MemTxResult *result) +{ + return address_space_ldq_internal(as, addr, attrs, result, + DEVICE_NATIVE_ENDIAN); +} + +uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr, + MemTxAttrs attrs, MemTxResult *result) +{ + return address_space_ldq_internal(as, addr, attrs, result, + DEVICE_LITTLE_ENDIAN); +} + +uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr, + MemTxAttrs attrs, MemTxResult *result) +{ + return address_space_ldq_internal(as, addr, attrs, result, + DEVICE_BIG_ENDIAN); +} + uint64_t ldq_phys(AddressSpace *as, hwaddr addr) { - return ldq_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN); + return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL); } uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr) { - return ldq_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN); + return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL); } uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr) { - return ldq_phys_internal(as, addr, DEVICE_BIG_ENDIAN); + return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL); } /* XXX: optimize */ -uint32_t ldub_phys(AddressSpace *as, hwaddr addr) +uint32_t address_space_ldub(AddressSpace *as, hwaddr addr, + MemTxAttrs attrs, MemTxResult *result) { uint8_t val; - address_space_rw(as, addr, &val, 1, 0); + MemTxResult r; + + r = address_space_rw(as, addr, attrs, &val, 1, 0); + if (result) { + *result = r; + } return val; } +uint32_t ldub_phys(AddressSpace *as, hwaddr addr) +{ + return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL); +} + /* warning: addr must be aligned */ -static inline uint32_t lduw_phys_internal(AddressSpace *as, hwaddr addr, - enum device_endian endian) +static inline uint32_t address_space_lduw_internal(AddressSpace *as, + hwaddr addr, + MemTxAttrs attrs, + MemTxResult *result, + enum device_endian endian) { uint8_t *ptr; uint64_t val; MemoryRegion *mr; hwaddr l = 2; hwaddr addr1; + MemTxResult r; + rcu_read_lock(); mr = address_space_translate(as, addr, &addr1, &l, false); if (l < 2 || !memory_access_is_direct(mr, false)) { /* I/O case */ - io_mem_read(mr, addr1, &val, 2); + r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs); #if defined(TARGET_WORDS_BIGENDIAN) if (endian == DEVICE_LITTLE_ENDIAN) { val = bswap16(val); @@ -2821,39 +2966,68 @@ static inline uint32_t lduw_phys_internal(AddressSpace *as, hwaddr addr, val = lduw_p(ptr); break; } + r = MEMTX_OK; + } + if (result) { + *result = r; } + rcu_read_unlock(); return val; } +uint32_t address_space_lduw(AddressSpace *as, hwaddr addr, + MemTxAttrs attrs, MemTxResult *result) +{ + return address_space_lduw_internal(as, addr, attrs, result, + DEVICE_NATIVE_ENDIAN); +} + +uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr, + MemTxAttrs attrs, MemTxResult *result) +{ + return address_space_lduw_internal(as, addr, attrs, result, + DEVICE_LITTLE_ENDIAN); +} + +uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr, + MemTxAttrs attrs, MemTxResult *result) +{ + return address_space_lduw_internal(as, addr, attrs, result, + DEVICE_BIG_ENDIAN); +} + uint32_t lduw_phys(AddressSpace *as, hwaddr addr) { - return lduw_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN); + return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL); } uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr) { - return lduw_phys_internal(as, addr, 
DEVICE_LITTLE_ENDIAN); + return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL); } uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr) { - return lduw_phys_internal(as, addr, DEVICE_BIG_ENDIAN); + return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL); } /* warning: addr must be aligned. The ram page is not masked as dirty and the code inside is not invalidated. It is useful if the dirty bits are used to track modified PTEs */ -void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val) +void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val, + MemTxAttrs attrs, MemTxResult *result) { uint8_t *ptr; MemoryRegion *mr; hwaddr l = 4; hwaddr addr1; + MemTxResult r; + rcu_read_lock(); mr = address_space_translate(as, addr, &addr1, &l, true); if (l < 4 || !memory_access_is_direct(mr, true)) { - io_mem_write(mr, addr1, val, 4); + r = memory_region_dispatch_write(mr, addr1, val, 4, attrs); } else { addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK; ptr = qemu_get_ram_ptr(addr1); @@ -2867,19 +3041,33 @@ void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val) cpu_physical_memory_set_dirty_range_nocode(addr1, 4); } } + r = MEMTX_OK; + } + if (result) { + *result = r; } + rcu_read_unlock(); +} + +void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val) +{ + address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL); } /* warning: addr must be aligned */ -static inline void stl_phys_internal(AddressSpace *as, - hwaddr addr, uint32_t val, - enum device_endian endian) +static inline void address_space_stl_internal(AddressSpace *as, + hwaddr addr, uint32_t val, + MemTxAttrs attrs, + MemTxResult *result, + enum device_endian endian) { uint8_t *ptr; MemoryRegion *mr; hwaddr l = 4; hwaddr addr1; + MemTxResult r; + rcu_read_lock(); mr = address_space_translate(as, addr, &addr1, &l, true); if (l < 4 || !memory_access_is_direct(mr, true)) { @@ -2892,7 +3080,7 @@ static inline void stl_phys_internal(AddressSpace *as, val = bswap32(val); } #endif - io_mem_write(mr, addr1, val, 4); + r = memory_region_dispatch_write(mr, addr1, val, 4, attrs); } else { /* RAM case */ addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK; @@ -2909,41 +3097,82 @@ static inline void stl_phys_internal(AddressSpace *as, break; } invalidate_and_set_dirty(addr1, 4); + r = MEMTX_OK; + } + if (result) { + *result = r; } + rcu_read_unlock(); +} + +void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val, + MemTxAttrs attrs, MemTxResult *result) +{ + address_space_stl_internal(as, addr, val, attrs, result, + DEVICE_NATIVE_ENDIAN); +} + +void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val, + MemTxAttrs attrs, MemTxResult *result) +{ + address_space_stl_internal(as, addr, val, attrs, result, + DEVICE_LITTLE_ENDIAN); +} + +void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val, + MemTxAttrs attrs, MemTxResult *result) +{ + address_space_stl_internal(as, addr, val, attrs, result, + DEVICE_BIG_ENDIAN); } void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val) { - stl_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN); + address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL); } void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val) { - stl_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN); + address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL); } void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val) { - stl_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN); + 
address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL); } /* XXX: optimize */ -void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val) +void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val, + MemTxAttrs attrs, MemTxResult *result) { uint8_t v = val; - address_space_rw(as, addr, &v, 1, 1); + MemTxResult r; + + r = address_space_rw(as, addr, attrs, &v, 1, 1); + if (result) { + *result = r; + } +} + +void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val) +{ + address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL); } /* warning: addr must be aligned */ -static inline void stw_phys_internal(AddressSpace *as, - hwaddr addr, uint32_t val, - enum device_endian endian) +static inline void address_space_stw_internal(AddressSpace *as, + hwaddr addr, uint32_t val, + MemTxAttrs attrs, + MemTxResult *result, + enum device_endian endian) { uint8_t *ptr; MemoryRegion *mr; hwaddr l = 2; hwaddr addr1; + MemTxResult r; + rcu_read_lock(); mr = address_space_translate(as, addr, &addr1, &l, true); if (l < 2 || !memory_access_is_direct(mr, true)) { #if defined(TARGET_WORDS_BIGENDIAN) @@ -2955,7 +3184,7 @@ static inline void stw_phys_internal(AddressSpace *as, val = bswap16(val); } #endif - io_mem_write(mr, addr1, val, 2); + r = memory_region_dispatch_write(mr, addr1, val, 2, attrs); } else { /* RAM case */ addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK; @@ -2972,41 +3201,96 @@ static inline void stw_phys_internal(AddressSpace *as, break; } invalidate_and_set_dirty(addr1, 2); + r = MEMTX_OK; } + if (result) { + *result = r; + } + rcu_read_unlock(); +} + +void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val, + MemTxAttrs attrs, MemTxResult *result) +{ + address_space_stw_internal(as, addr, val, attrs, result, + DEVICE_NATIVE_ENDIAN); +} + +void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val, + MemTxAttrs attrs, MemTxResult *result) +{ + address_space_stw_internal(as, addr, val, attrs, result, + DEVICE_LITTLE_ENDIAN); +} + +void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val, + MemTxAttrs attrs, MemTxResult *result) +{ + address_space_stw_internal(as, addr, val, attrs, result, + DEVICE_BIG_ENDIAN); } void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val) { - stw_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN); + address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL); } void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val) { - stw_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN); + address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL); } void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val) { - stw_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN); + address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL); } /* XXX: optimize */ -void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val) +void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val, + MemTxAttrs attrs, MemTxResult *result) { + MemTxResult r; val = tswap64(val); - address_space_rw(as, addr, (void *) &val, 8, 1); + r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1); + if (result) { + *result = r; + } } -void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val) +void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val, + MemTxAttrs attrs, MemTxResult *result) { + MemTxResult r; val = cpu_to_le64(val); - address_space_rw(as, addr, (void *) &val, 8, 1); + r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1); + if (result) { + *result = r; + } +} +void 
address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val, + MemTxAttrs attrs, MemTxResult *result) +{ + MemTxResult r; + val = cpu_to_be64(val); + r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1); + if (result) { + *result = r; + } +} + +void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val) +{ + address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL); +} + +void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val) +{ + address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL); } void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val) { - val = cpu_to_be64(val); - address_space_rw(as, addr, (void *) &val, 8, 1); + address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL); } /* virtual memory access for debug (includes writing to ROM) */ @@ -3030,7 +3314,8 @@ int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr, if (is_write) { cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l); } else { - address_space_rw(cpu->as, phys_addr, buf, l, 0); + address_space_rw(cpu->as, phys_addr, MEMTXATTRS_UNSPECIFIED, + buf, l, 0); } len -= l; buf += l; @@ -3059,12 +3344,15 @@ bool cpu_physical_memory_is_io(hwaddr phys_addr) { MemoryRegion*mr; hwaddr l = 1; + bool res; + rcu_read_lock(); mr = address_space_translate(&address_space_memory, phys_addr, &phys_addr, &l, false); - return !(memory_region_is_ram(mr) || - memory_region_is_romd(mr)); + res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr)); + rcu_read_unlock(); + return res; } void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque) diff --git a/hmp-commands.hx b/hmp-commands.hx index 3089533a4f..e864a6ca81 100644 --- a/hmp-commands.hx +++ b/hmp-commands.hx @@ -993,13 +993,27 @@ Enable/Disable the usage of a capability @var{capability} for migration. ETEXI { + .name = "migrate_set_parameter", + .args_type = "parameter:s,value:i", + .params = "parameter value", + .help = "Set the parameter for migration", + .mhandler.cmd = hmp_migrate_set_parameter, + .command_completion = migrate_set_parameter_completion, + }, + +STEXI +@item migrate_set_parameter @var{parameter} @var{value} +@findex migrate_set_parameter +Set the parameter @var{parameter} for migration. 
+ETEXI + + { .name = "client_migrate_info", .args_type = "protocol:s,hostname:s,port:i?,tls-port:i?,cert-subject:s?", .params = "protocol hostname port tls-port cert-subject", .help = "send migration info to spice/vnc client", .user_print = monitor_user_noop, - .mhandler.cmd_async = client_migrate_info, - .flags = MONITOR_CMD_ASYNC, + .mhandler.cmd_new = client_migrate_info, }, STEXI @@ -1762,6 +1776,8 @@ show user network stack connection states show migration status @item info migrate_capabilities show current migration capabilities +@item info migrate_parameters +show current migration parameters @item info migrate_cache_size show current migration XBZRLE cache size @item info balloon @@ -60,7 +60,7 @@ void hmp_info_version(Monitor *mon, const QDict *qdict) info = qmp_query_version(NULL); monitor_printf(mon, "%" PRId64 ".%" PRId64 ".%" PRId64 "%s\n", - info->qemu.major, info->qemu.minor, info->qemu.micro, + info->qemu->major, info->qemu->minor, info->qemu->micro, info->package); qapi_free_VersionInfo(info); @@ -252,6 +252,29 @@ void hmp_info_migrate_capabilities(Monitor *mon, const QDict *qdict) qapi_free_MigrationCapabilityStatusList(caps); } +void hmp_info_migrate_parameters(Monitor *mon, const QDict *qdict) +{ + MigrationParameters *params; + + params = qmp_query_migrate_parameters(NULL); + + if (params) { + monitor_printf(mon, "parameters:"); + monitor_printf(mon, " %s: %" PRId64, + MigrationParameter_lookup[MIGRATION_PARAMETER_COMPRESS_LEVEL], + params->compress_level); + monitor_printf(mon, " %s: %" PRId64, + MigrationParameter_lookup[MIGRATION_PARAMETER_COMPRESS_THREADS], + params->compress_threads); + monitor_printf(mon, " %s: %" PRId64, + MigrationParameter_lookup[MIGRATION_PARAMETER_DECOMPRESS_THREADS], + params->decompress_threads); + monitor_printf(mon, "\n"); + } + + qapi_free_MigrationParameters(params); +} + void hmp_info_migrate_cache_size(Monitor *mon, const QDict *qdict) { monitor_printf(mon, "xbzrel cache size: %" PRId64 " kbytes\n", @@ -391,8 +414,7 @@ static void print_block_info(Monitor *mon, BlockInfo *info, inserted->iops_size); } - /* TODO: inserted->image should never be null */ - if (verbose && inserted->image) { + if (verbose) { monitor_printf(mon, "\nImages:\n"); image_info = inserted->image; while (1) { @@ -649,14 +671,14 @@ static void hmp_info_pci_device(Monitor *mon, const PciDeviceInfo *dev) dev->slot, dev->function); monitor_printf(mon, " "); - if (dev->class_info.has_desc) { - monitor_printf(mon, "%s", dev->class_info.desc); + if (dev->class_info->has_desc) { + monitor_printf(mon, "%s", dev->class_info->desc); } else { - monitor_printf(mon, "Class %04" PRId64, dev->class_info.q_class); + monitor_printf(mon, "Class %04" PRId64, dev->class_info->q_class); } monitor_printf(mon, ": PCI device %04" PRIx64 ":%04" PRIx64 "\n", - dev->id.vendor, dev->id.device); + dev->id->vendor, dev->id->device); if (dev->has_irq) { monitor_printf(mon, " IRQ %" PRId64 ".\n", dev->irq); @@ -664,25 +686,25 @@ static void hmp_info_pci_device(Monitor *mon, const PciDeviceInfo *dev) if (dev->has_pci_bridge) { monitor_printf(mon, " BUS %" PRId64 ".\n", - dev->pci_bridge->bus.number); + dev->pci_bridge->bus->number); monitor_printf(mon, " secondary bus %" PRId64 ".\n", - dev->pci_bridge->bus.secondary); + dev->pci_bridge->bus->secondary); monitor_printf(mon, " subordinate bus %" PRId64 ".\n", - dev->pci_bridge->bus.subordinate); + dev->pci_bridge->bus->subordinate); monitor_printf(mon, " IO range [0x%04"PRIx64", 0x%04"PRIx64"]\n", - dev->pci_bridge->bus.io_range->base, - 
dev->pci_bridge->bus.io_range->limit); + dev->pci_bridge->bus->io_range->base, + dev->pci_bridge->bus->io_range->limit); monitor_printf(mon, " memory range [0x%08"PRIx64", 0x%08"PRIx64"]\n", - dev->pci_bridge->bus.memory_range->base, - dev->pci_bridge->bus.memory_range->limit); + dev->pci_bridge->bus->memory_range->base, + dev->pci_bridge->bus->memory_range->limit); monitor_printf(mon, " prefetchable memory range " "[0x%08"PRIx64", 0x%08"PRIx64"]\n", - dev->pci_bridge->bus.prefetchable_range->base, - dev->pci_bridge->bus.prefetchable_range->limit); + dev->pci_bridge->bus->prefetchable_range->base, + dev->pci_bridge->bus->prefetchable_range->limit); } for (region = dev->regions; region; region = region->next) { @@ -1062,7 +1084,8 @@ void hmp_drive_backup(Monitor *mon, const QDict *qdict) qmp_drive_backup(device, filename, !!format, format, full ? MIRROR_SYNC_MODE_FULL : MIRROR_SYNC_MODE_TOP, - true, mode, false, 0, false, 0, false, 0, &err); + true, mode, false, 0, false, NULL, + false, 0, false, 0, &err); hmp_handle_error(mon, &err); } @@ -1185,6 +1208,48 @@ void hmp_migrate_set_capability(Monitor *mon, const QDict *qdict) } } +void hmp_migrate_set_parameter(Monitor *mon, const QDict *qdict) +{ + const char *param = qdict_get_str(qdict, "parameter"); + int value = qdict_get_int(qdict, "value"); + Error *err = NULL; + bool has_compress_level = false; + bool has_compress_threads = false; + bool has_decompress_threads = false; + int i; + + for (i = 0; i < MIGRATION_PARAMETER_MAX; i++) { + if (strcmp(param, MigrationParameter_lookup[i]) == 0) { + switch (i) { + case MIGRATION_PARAMETER_COMPRESS_LEVEL: + has_compress_level = true; + break; + case MIGRATION_PARAMETER_COMPRESS_THREADS: + has_compress_threads = true; + break; + case MIGRATION_PARAMETER_DECOMPRESS_THREADS: + has_decompress_threads = true; + break; + } + qmp_migrate_set_parameters(has_compress_level, value, + has_compress_threads, value, + has_decompress_threads, value, + &err); + break; + } + } + + if (i == MIGRATION_PARAMETER_MAX) { + error_set(&err, QERR_INVALID_PARAMETER, param); + } + + if (err) { + monitor_printf(mon, "migrate_set_parameter: %s\n", + error_get_pretty(err)); + error_free(err); + } +} + void hmp_set_password(Monitor *mon, const QDict *qdict) { const char *protocol = qdict_get_str(qdict, "protocol"); @@ -28,6 +28,7 @@ void hmp_info_chardev(Monitor *mon, const QDict *qdict); void hmp_info_mice(Monitor *mon, const QDict *qdict); void hmp_info_migrate(Monitor *mon, const QDict *qdict); void hmp_info_migrate_capabilities(Monitor *mon, const QDict *qdict); +void hmp_info_migrate_parameters(Monitor *mon, const QDict *qdict); void hmp_info_migrate_cache_size(Monitor *mon, const QDict *qdict); void hmp_info_cpus(Monitor *mon, const QDict *qdict); void hmp_info_block(Monitor *mon, const QDict *qdict); @@ -64,6 +65,7 @@ void hmp_migrate_incoming(Monitor *mon, const QDict *qdict); void hmp_migrate_set_downtime(Monitor *mon, const QDict *qdict); void hmp_migrate_set_speed(Monitor *mon, const QDict *qdict); void hmp_migrate_set_capability(Monitor *mon, const QDict *qdict); +void hmp_migrate_set_parameter(Monitor *mon, const QDict *qdict); void hmp_migrate_set_cache_size(Monitor *mon, const QDict *qdict); void hmp_set_password(Monitor *mon, const QDict *qdict); void hmp_expire_password(Monitor *mon, const QDict *qdict); @@ -109,11 +111,12 @@ void set_link_completion(ReadLineState *rs, int nb_args, const char *str); void netdev_add_completion(ReadLineState *rs, int nb_args, const char *str); void 
netdev_del_completion(ReadLineState *rs, int nb_args, const char *str); void ringbuf_write_completion(ReadLineState *rs, int nb_args, const char *str); -void ringbuf_read_completion(ReadLineState *rs, int nb_args, const char *str); void watchdog_action_completion(ReadLineState *rs, int nb_args, const char *str); void migrate_set_capability_completion(ReadLineState *rs, int nb_args, const char *str); +void migrate_set_parameter_completion(ReadLineState *rs, int nb_args, + const char *str); void host_net_add_completion(ReadLineState *rs, int nb_args, const char *str); void host_net_remove_completion(ReadLineState *rs, int nb_args, const char *str); diff --git a/hw/acpi/pcihp.c b/hw/acpi/pcihp.c index 612fec03ee..1e11af906d 100644 --- a/hw/acpi/pcihp.c +++ b/hw/acpi/pcihp.c @@ -31,7 +31,6 @@ #include "hw/pci/pci.h" #include "hw/acpi/acpi.h" #include "sysemu/sysemu.h" -#include "qemu/range.h" #include "exec/ioport.h" #include "exec/address-spaces.h" #include "hw/pci/pci_bus.h" @@ -120,7 +119,7 @@ static bool acpi_pcihp_pc_no_hotplug(AcpiPciHpState *s, PCIDevice *dev) static void acpi_pcihp_eject_slot(AcpiPciHpState *s, unsigned bsel, unsigned slots) { BusChild *kid, *next; - int slot = ffs(slots) - 1; + int slot = ctz32(slots); PCIBus *bus = acpi_pcihp_find_hotplug_bus(s, bsel); if (!bus) { diff --git a/hw/alpha/dp264.c b/hw/alpha/dp264.c index e82d61d28c..9fe7e8b5cb 100644 --- a/hw/alpha/dp264.c +++ b/hw/alpha/dp264.c @@ -157,9 +157,12 @@ static void clipper_init(MachineState *machine) load_image_targphys(initrd_filename, initrd_base, ram_size - initrd_base); - stq_phys(&address_space_memory, - param_offset + 0x100, initrd_base + 0xfffffc0000000000ULL); - stq_phys(&address_space_memory, param_offset + 0x108, initrd_size); + address_space_stq(&address_space_memory, param_offset + 0x100, + initrd_base + 0xfffffc0000000000ULL, + MEMTXATTRS_UNSPECIFIED, + NULL); + address_space_stq(&address_space_memory, param_offset + 0x108, + initrd_size, MEMTXATTRS_UNSPECIFIED, NULL); } } } diff --git a/hw/alpha/typhoon.c b/hw/alpha/typhoon.c index a6044f28c3..7df842dff7 100644 --- a/hw/alpha/typhoon.c +++ b/hw/alpha/typhoon.c @@ -613,7 +613,8 @@ static bool make_iommu_tlbe(hwaddr taddr, hwaddr mask, IOMMUTLBEntry *ret) translation, given the address of the PTE. */ static bool pte_translate(hwaddr pte_addr, IOMMUTLBEntry *ret) { - uint64_t pte = ldq_phys(&address_space_memory, pte_addr); + uint64_t pte = address_space_ldq(&address_space_memory, pte_addr, + MEMTXATTRS_UNSPECIFIED, NULL); /* Check valid bit. 
*/ if ((pte & 1) == 0) { diff --git a/hw/arm/boot.c b/hw/arm/boot.c index a48d1b28d4..fa6950352c 100644 --- a/hw/arm/boot.c +++ b/hw/arm/boot.c @@ -170,7 +170,8 @@ static void default_reset_secondary(ARMCPU *cpu, { CPUARMState *env = &cpu->env; - stl_phys_notdirty(&address_space_memory, info->smp_bootreg_addr, 0); + address_space_stl_notdirty(&address_space_memory, info->smp_bootreg_addr, + 0, MEMTXATTRS_UNSPECIFIED, NULL); env->regs[15] = info->smp_loader_start; } @@ -180,7 +181,8 @@ static inline bool have_dtb(const struct arm_boot_info *info) } #define WRITE_WORD(p, value) do { \ - stl_phys_notdirty(&address_space_memory, p, value); \ + address_space_stl_notdirty(&address_space_memory, p, value, \ + MEMTXATTRS_UNSPECIFIED, NULL); \ p += 4; \ } while (0) diff --git a/hw/arm/highbank.c b/hw/arm/highbank.c index dd2a67bcf0..b2d048b911 100644 --- a/hw/arm/highbank.c +++ b/hw/arm/highbank.c @@ -69,11 +69,17 @@ static void hb_reset_secondary(ARMCPU *cpu, const struct arm_boot_info *info) switch (info->nb_cpus) { case 4: - stl_phys_notdirty(&address_space_memory, SMP_BOOT_REG + 0x30, 0); + address_space_stl_notdirty(&address_space_memory, + SMP_BOOT_REG + 0x30, 0, + MEMTXATTRS_UNSPECIFIED, NULL); case 3: - stl_phys_notdirty(&address_space_memory, SMP_BOOT_REG + 0x20, 0); + address_space_stl_notdirty(&address_space_memory, + SMP_BOOT_REG + 0x20, 0, + MEMTXATTRS_UNSPECIFIED, NULL); case 2: - stl_phys_notdirty(&address_space_memory, SMP_BOOT_REG + 0x10, 0); + address_space_stl_notdirty(&address_space_memory, + SMP_BOOT_REG + 0x10, 0, + MEMTXATTRS_UNSPECIFIED, NULL); env->regs[15] = SMP_BOOT_ADDR; break; default: diff --git a/hw/arm/nseries.c b/hw/arm/nseries.c index 2a5406d98d..d243159664 100644 --- a/hw/arm/nseries.c +++ b/hw/arm/nseries.c @@ -579,7 +579,10 @@ static uint32_t mipid_txrx(void *opaque, uint32_t cmd, int len) case 0x26: /* GAMSET */ if (!s->pm) { - s->gamma = ffs(s->param[0] & 0xf) - 1; + s->gamma = ctz32(s->param[0] & 0xf); + if (s->gamma == 32) { + s->gamma = -1; /* XXX: should this be 0? 
*/ + } } else if (s->pm < 0) { s->pm = 1; } diff --git a/hw/arm/omap1.c b/hw/arm/omap1.c index 91ffb589e5..de2b289257 100644 --- a/hw/arm/omap1.c +++ b/hw/arm/omap1.c @@ -2004,8 +2004,7 @@ static void omap_mpuio_write(void *opaque, hwaddr addr, case 0x04: /* OUTPUT_REG */ diff = (s->outputs ^ value) & ~s->dir; s->outputs = value; - while ((ln = ffs(diff))) { - ln --; + while ((ln = ctz32(diff)) != 32) { if (s->handler[ln]) qemu_set_irq(s->handler[ln], (value >> ln) & 1); diff &= ~(1 << ln); @@ -2017,8 +2016,7 @@ static void omap_mpuio_write(void *opaque, hwaddr addr, s->dir = value; value = s->outputs & ~s->dir; - while ((ln = ffs(diff))) { - ln --; + while ((ln = ctz32(diff)) != 32) { if (s->handler[ln]) qemu_set_irq(s->handler[ln], (value >> ln) & 1); diff &= ~(1 << ln); diff --git a/hw/arm/pxa2xx.c b/hw/arm/pxa2xx.c index 165ba2a169..f921a5680c 100644 --- a/hw/arm/pxa2xx.c +++ b/hw/arm/pxa2xx.c @@ -274,7 +274,7 @@ static void pxa2xx_pwrmode_write(CPUARMState *env, const ARMCPRegInfo *ri, s->cpu->env.uncached_cpsr = ARM_CPU_MODE_SVC; s->cpu->env.daif = PSTATE_A | PSTATE_F | PSTATE_I; s->cpu->env.cp15.sctlr_ns = 0; - s->cpu->env.cp15.c1_coproc = 0; + s->cpu->env.cp15.cpacr_el1 = 0; s->cpu->env.cp15.ttbr0_el[1] = 0; s->cpu->env.cp15.dacr_ns = 0; s->pm_regs[PSSR >> 2] |= 0x8; /* Set STS */ diff --git a/hw/arm/pxa2xx_gpio.c b/hw/arm/pxa2xx_gpio.c index 354ccf1ea1..c89c8045c3 100644 --- a/hw/arm/pxa2xx_gpio.c +++ b/hw/arm/pxa2xx_gpio.c @@ -137,7 +137,7 @@ static void pxa2xx_gpio_handler_update(PXA2xxGPIOInfo *s) { level = s->olevel[i] & s->dir[i]; for (diff = s->prev_level[i] ^ level; diff; diff ^= 1 << bit) { - bit = ffs(diff) - 1; + bit = ctz32(diff); line = bit + 32 * i; qemu_set_irq(s->handler[line], (level >> bit) & 1); } diff --git a/hw/arm/strongarm.c b/hw/arm/strongarm.c index 1ddea6d89c..da9fc1d51b 100644 --- a/hw/arm/strongarm.c +++ b/hw/arm/strongarm.c @@ -528,7 +528,7 @@ static void strongarm_gpio_handler_update(StrongARMGPIOInfo *s) level = s->olevel & s->dir; for (diff = s->prev_level ^ level; diff; diff ^= 1 << bit) { - bit = ffs(diff) - 1; + bit = ctz32(diff); qemu_set_irq(s->handler[bit], (level >> bit) & 1); } @@ -745,7 +745,7 @@ static void strongarm_ppc_handler_update(StrongARMPPCInfo *s) level = s->olevel & s->dir; for (diff = s->prev_level ^ level; diff; diff ^= 1 << bit) { - bit = ffs(diff) - 1; + bit = ctz32(diff); qemu_set_irq(s->handler[bit], (level >> bit) & 1); } diff --git a/hw/audio/gus.c b/hw/audio/gus.c index 4a43ce7adf..86223a9544 100644 --- a/hw/audio/gus.c +++ b/hw/audio/gus.c @@ -71,13 +71,6 @@ IO_READ_PROTO (gus_readb) return gus_read (&s->emu, nport, 1); } -IO_READ_PROTO (gus_readw) -{ - GUSState *s = opaque; - - return gus_read (&s->emu, nport, 2); -} - IO_WRITE_PROTO (gus_writeb) { GUSState *s = opaque; @@ -85,13 +78,6 @@ IO_WRITE_PROTO (gus_writeb) gus_write (&s->emu, nport, 1, val); } -IO_WRITE_PROTO (gus_writew) -{ - GUSState *s = opaque; - - gus_write (&s->emu, nport, 2, val); -} - static int write_audio (GUSState *s, int samples) { int net = 0; @@ -236,17 +222,13 @@ static const VMStateDescription vmstate_gus = { static const MemoryRegionPortio gus_portio_list1[] = { {0x000, 1, 1, .write = gus_writeb }, - {0x000, 1, 2, .write = gus_writew }, {0x006, 10, 1, .read = gus_readb, .write = gus_writeb }, - {0x006, 10, 2, .read = gus_readw, .write = gus_writew }, {0x100, 8, 1, .read = gus_readb, .write = gus_writeb }, - {0x100, 8, 2, .read = gus_readw, .write = gus_writew }, PORTIO_END_OF_LIST (), }; static const MemoryRegionPortio gus_portio_list2[] = { - 
{0, 1, 1, .read = gus_readb }, - {0, 1, 2, .read = gus_readw }, + {0, 2, 1, .read = gus_readb }, PORTIO_END_OF_LIST (), }; diff --git a/hw/audio/sb16.c b/hw/audio/sb16.c index 444eb9e419..b052de5f7d 100644 --- a/hw/audio/sb16.c +++ b/hw/audio/sb16.c @@ -1121,12 +1121,6 @@ static IO_WRITE_PROTO (mixer_write_datab) s->mixer_regs[s->mixer_nreg] = val; } -static IO_WRITE_PROTO (mixer_write_indexw) -{ - mixer_write_indexb (opaque, nport, val & 0xff); - mixer_write_datab (opaque, nport, (val >> 8) & 0xff); -} - static IO_READ_PROTO (mixer_read) { SB16State *s = opaque; @@ -1345,7 +1339,6 @@ static const VMStateDescription vmstate_sb16 = { static const MemoryRegionPortio sb16_ioport_list[] = { { 4, 1, 1, .write = mixer_write_indexb }, - { 4, 1, 2, .write = mixer_write_indexw }, { 5, 1, 1, .read = mixer_read, .write = mixer_write_datab }, { 6, 1, 1, .read = dsp_read, .write = dsp_write }, { 10, 1, 1, .read = dsp_read }, diff --git a/hw/block/fdc.c b/hw/block/fdc.c index 2bf87c9eea..f72a392163 100644 --- a/hw/block/fdc.c +++ b/hw/block/fdc.c @@ -535,8 +535,6 @@ struct FDCtrl { uint8_t pwrd; /* Floppy drives */ uint8_t num_floppies; - /* Sun4m quirks? */ - int sun4m; FDrive drives[MAX_FD]; int reset_sensei; uint32_t check_media_rate; @@ -885,13 +883,6 @@ static void fdctrl_reset_irq(FDCtrl *fdctrl) static void fdctrl_raise_irq(FDCtrl *fdctrl) { - /* Sparc mutation */ - if (fdctrl->sun4m && (fdctrl->msr & FD_MSR_CMDBUSY)) { - /* XXX: not sure */ - fdctrl->msr &= ~FD_MSR_CMDBUSY; - fdctrl->msr |= FD_MSR_RQM | FD_MSR_DIO; - return; - } if (!(fdctrl->sra & FD_SRA_INTPEND)) { qemu_set_irq(fdctrl->irq, 1); fdctrl->sra |= FD_SRA_INTPEND; @@ -1080,12 +1071,6 @@ static uint32_t fdctrl_read_main_status(FDCtrl *fdctrl) fdctrl->dsr &= ~FD_DSR_PWRDOWN; fdctrl->dor |= FD_DOR_nRESET; - /* Sparc mutation */ - if (fdctrl->sun4m) { - retval |= FD_MSR_DIO; - fdctrl_reset_irq(fdctrl); - }; - FLOPPY_DPRINTF("main status register: 0x%02x\n", retval); return retval; @@ -2241,8 +2226,6 @@ static void sun4m_fdc_initfn(Object *obj) FDCtrlSysBus *sys = SYSBUS_FDC(obj); FDCtrl *fdctrl = &sys->state; - fdctrl->sun4m = 1; - memory_region_init_io(&fdctrl->iomem, obj, &fdctrl_mem_strict_ops, fdctrl, "fdctrl", 0x08); sysbus_init_mmio(sbd, &fdctrl->iomem); diff --git a/hw/block/m25p80.c b/hw/block/m25p80.c index afe243b811..efc43dde6a 100644 --- a/hw/block/m25p80.c +++ b/hw/block/m25p80.c @@ -621,7 +621,6 @@ static int m25p80_init(SSISlave *ss) s->size = s->pi->sector_size * s->pi->n_sectors; s->dirty_page = -1; - s->storage = blk_blockalign(s->blk, s->size); /* FIXME use a qdev drive property instead of drive_get_next() */ dinfo = drive_get_next(IF_MTD); @@ -629,6 +628,9 @@ static int m25p80_init(SSISlave *ss) if (dinfo) { DB_PRINT_L(0, "Binding to IF_MTD drive\n"); s->blk = blk_by_legacy_dinfo(dinfo); + blk_attach_dev_nofail(s->blk, s); + + s->storage = blk_blockalign(s->blk, s->size); /* FIXME: Move to late init */ if (blk_read(s->blk, 0, s->storage, @@ -638,6 +640,7 @@ static int m25p80_init(SSISlave *ss) } } else { DB_PRINT_L(0, "No BDRV - binding to RAM\n"); + s->storage = blk_blockalign(NULL, s->size); memset(s->storage, 0xFF, s->size); } diff --git a/hw/block/nvme.c b/hw/block/nvme.c index 1e071662d2..ad988d7c24 100644 --- a/hw/block/nvme.c +++ b/hw/block/nvme.c @@ -615,6 +615,13 @@ static void nvme_write_bar(NvmeCtrl *n, hwaddr offset, uint64_t data, n->bar.intmc = n->bar.intms; break; case 0x14: + /* Windows first sends data, then sends enable bit */ + if (!NVME_CC_EN(data) && !NVME_CC_EN(n->bar.cc) && + !NVME_CC_SHN(data) 
&& !NVME_CC_SHN(n->bar.cc)) + { + n->bar.cc = data; + } + if (NVME_CC_EN(data) && !NVME_CC_EN(n->bar.cc)) { n->bar.cc = data; if (nvme_start_ctrl(n)) { diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c index 9546fd2919..e6afe9763d 100644 --- a/hw/block/virtio-blk.c +++ b/hw/block/virtio-blk.c @@ -515,7 +515,7 @@ void virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb) type = virtio_ldl_p(VIRTIO_DEVICE(req->dev), &req->out.type); /* VIRTIO_BLK_T_OUT defines the command direction. VIRTIO_BLK_T_BARRIER - * is an optional flag. Altough a guest should not send this flag if + * is an optional flag. Although a guest should not send this flag if * not negotiated we ignored it in the past. So keep ignoring it. */ switch (type & ~(VIRTIO_BLK_T_OUT | VIRTIO_BLK_T_BARRIER)) { case VIRTIO_BLK_T_IN: diff --git a/hw/bt/sdp.c b/hw/bt/sdp.c index 218e075df7..c903747952 100644 --- a/hw/bt/sdp.c +++ b/hw/bt/sdp.c @@ -707,7 +707,7 @@ static void sdp_service_record_build(struct sdp_service_record_s *record, len += sdp_attr_max_size(&def->attributes[record->attributes ++].data, &record->uuids); } - record->uuids = 1 << ffs(record->uuids - 1); + record->uuids = pow2ceil(record->uuids); record->attribute_list = g_malloc0(record->attributes * sizeof(*record->attribute_list)); record->uuid = diff --git a/hw/char/sclpconsole-lm.c b/hw/char/sclpconsole-lm.c index a9f5e62f24..02ac80b650 100644 --- a/hw/char/sclpconsole-lm.c +++ b/hw/char/sclpconsole-lm.c @@ -364,6 +364,7 @@ static void console_class_init(ObjectClass *klass, void *data) ec->can_handle_event = can_handle_event; ec->read_event_data = read_event_data; ec->write_event_data = write_event_data; + set_bit(DEVICE_CATEGORY_INPUT, dc->categories); } static const TypeInfo sclp_console_info = { diff --git a/hw/char/sclpconsole.c b/hw/char/sclpconsole.c index 79891dfc58..b014c7f522 100644 --- a/hw/char/sclpconsole.c +++ b/hw/char/sclpconsole.c @@ -266,6 +266,7 @@ static void console_class_init(ObjectClass *klass, void *data) ec->can_handle_event = can_handle_event; ec->read_event_data = read_event_data; ec->write_event_data = write_event_data; + set_bit(DEVICE_CATEGORY_INPUT, dc->categories); } static const TypeInfo sclp_console_info = { diff --git a/hw/char/virtio-serial-bus.c b/hw/char/virtio-serial-bus.c index e336bdb4a9..6e2ad8221b 100644 --- a/hw/char/virtio-serial-bus.c +++ b/hw/char/virtio-serial-bus.c @@ -814,12 +814,12 @@ static uint32_t find_free_port_id(VirtIOSerial *vser) max_nr_ports = vser->serial.max_virtserial_ports; for (i = 0; i < (max_nr_ports + 31) / 32; i++) { - uint32_t map, bit; + uint32_t map, zeroes; map = vser->ports_map[i]; - bit = ffs(~map); - if (bit) { - return (bit - 1) + i * 32; + zeroes = ctz32(~map); + if (zeroes != 32) { + return zeroes + i * 32; } } return VIRTIO_CONSOLE_BAD_ID; diff --git a/hw/core/loader.c b/hw/core/loader.c index d4c441fd18..7ee675c1df 100644 --- a/hw/core/loader.c +++ b/hw/core/loader.c @@ -835,12 +835,12 @@ err: return -1; } -ram_addr_t rom_add_blob(const char *name, const void *blob, size_t len, +MemoryRegion *rom_add_blob(const char *name, const void *blob, size_t len, size_t max_len, hwaddr addr, const char *fw_file_name, FWCfgReadCallback fw_callback, void *callback_opaque) { Rom *rom; - ram_addr_t ret = RAM_ADDR_MAX; + MemoryRegion *mr = NULL; rom = g_malloc0(sizeof(*rom)); rom->name = g_strdup(name); @@ -858,7 +858,7 @@ ram_addr_t rom_add_blob(const char *name, const void *blob, size_t len, if (rom_file_has_mr) { data = rom_set_mr(rom, OBJECT(fw_cfg), devpath); - ret = 
memory_region_get_ram_addr(rom->mr); + mr = rom->mr; } else { data = rom->data; } @@ -867,7 +867,7 @@ ram_addr_t rom_add_blob(const char *name, const void *blob, size_t len, fw_callback, callback_opaque, data, rom->datasize); } - return ret; + return mr; } /* This function is specific for elf program because we don't need to allocate diff --git a/hw/core/ptimer.c b/hw/core/ptimer.c index 2abad1fa3d..8437bd6e8b 100644 --- a/hw/core/ptimer.c +++ b/hw/core/ptimer.c @@ -189,7 +189,7 @@ void ptimer_set_limit(ptimer_state *s, uint64_t limit, int reload) * on the current generation of host machines. */ - if (limit * s->period < 10000 && s->period) { + if (!use_icount && limit * s->period < 10000 && s->period) { limit = 10000 / s->period; } diff --git a/hw/display/Makefile.objs b/hw/display/Makefile.objs index e73cb7d8ec..3ea106d9f3 100644 --- a/hw/display/Makefile.objs +++ b/hw/display/Makefile.objs @@ -21,7 +21,7 @@ common-obj-$(CONFIG_ZAURUS) += tc6393xb.o ifeq ($(CONFIG_MILKYMIST_TMU2),y) common-obj-y += milkymist-tmu2.o milkymist-tmu2.o-cflags := $(OPENGL_CFLAGS) -libs_softmmu += $(OPENGL_LIBS) +milkymist-tmu2.o-libs += $(OPENGL_LIBS) endif obj-$(CONFIG_OMAP) += omap_dss.o diff --git a/hw/display/qxl.c b/hw/display/qxl.c index b6d65b9487..0cd314c931 100644 --- a/hw/display/qxl.c +++ b/hw/display/qxl.c @@ -696,7 +696,7 @@ static inline void qxl_push_free_res(PCIQXLDevice *d, int flush) /* called from spice server thread context only */ static void interface_release_resource(QXLInstance *sin, - struct QXLReleaseInfoExt ext) + QXLReleaseInfoExt ext) { PCIQXLDevice *qxl = container_of(sin, PCIQXLDevice, ssd.qxl); QXLReleaseRing *ring; diff --git a/hw/display/tc6393xb.c b/hw/display/tc6393xb.c index 4306adc959..66b7ade8da 100644 --- a/hw/display/tc6393xb.c +++ b/hw/display/tc6393xb.c @@ -171,7 +171,7 @@ static void tc6393xb_gpio_handler_update(TC6393xbState *s) level = s->gpio_level & s->gpio_dir; for (diff = s->prev_level ^ level; diff; diff ^= 1 << bit) { - bit = ffs(diff) - 1; + bit = ctz32(diff); qemu_set_irq(s->handler[bit], (level >> bit) & 1); } diff --git a/hw/dma/pl080.c b/hw/dma/pl080.c index 741dd20d31..b89b4744f7 100644 --- a/hw/dma/pl080.c +++ b/hw/dma/pl080.c @@ -205,10 +205,22 @@ again: if (size == 0) { /* Transfer complete. 
*/ if (ch->lli) { - ch->src = ldl_le_phys(&address_space_memory, ch->lli); - ch->dest = ldl_le_phys(&address_space_memory, ch->lli + 4); - ch->ctrl = ldl_le_phys(&address_space_memory, ch->lli + 12); - ch->lli = ldl_le_phys(&address_space_memory, ch->lli + 8); + ch->src = address_space_ldl_le(&address_space_memory, + ch->lli, + MEMTXATTRS_UNSPECIFIED, + NULL); + ch->dest = address_space_ldl_le(&address_space_memory, + ch->lli + 4, + MEMTXATTRS_UNSPECIFIED, + NULL); + ch->ctrl = address_space_ldl_le(&address_space_memory, + ch->lli + 12, + MEMTXATTRS_UNSPECIFIED, + NULL); + ch->lli = address_space_ldl_le(&address_space_memory, + ch->lli + 8, + MEMTXATTRS_UNSPECIFIED, + NULL); } else { ch->conf &= ~PL080_CCONF_E; } diff --git a/hw/dma/sun4m_iommu.c b/hw/dma/sun4m_iommu.c index ec7c2efcd9..9a488bc9b7 100644 --- a/hw/dma/sun4m_iommu.c +++ b/hw/dma/sun4m_iommu.c @@ -263,7 +263,8 @@ static uint32_t iommu_page_get_flags(IOMMUState *s, hwaddr addr) iopte = s->regs[IOMMU_BASE] << 4; addr &= ~s->iostart; iopte += (addr >> (IOMMU_PAGE_SHIFT - 2)) & ~3; - ret = ldl_be_phys(&address_space_memory, iopte); + ret = address_space_ldl_be(&address_space_memory, iopte, + MEMTXATTRS_UNSPECIFIED, NULL); trace_sun4m_iommu_page_get_flags(pa, iopte, ret); return ret; } diff --git a/hw/gpio/max7310.c b/hw/gpio/max7310.c index 7fbf313ce8..2f59b134ee 100644 --- a/hw/gpio/max7310.c +++ b/hw/gpio/max7310.c @@ -96,7 +96,7 @@ static int max7310_tx(I2CSlave *i2c, uint8_t data) case 0x01: /* Output port */ for (diff = (data ^ s->level) & ~s->direction; diff; diff &= ~(1 << line)) { - line = ffs(diff) - 1; + line = ctz32(diff); if (s->handler[line]) qemu_set_irq(s->handler[line], (data >> line) & 1); } diff --git a/hw/gpio/omap_gpio.c b/hw/gpio/omap_gpio.c index 9a43486890..d92f8cfbae 100644 --- a/hw/gpio/omap_gpio.c +++ b/hw/gpio/omap_gpio.c @@ -125,8 +125,7 @@ static void omap_gpio_write(void *opaque, hwaddr addr, case 0x04: /* DATA_OUTPUT */ diff = (s->outputs ^ value) & ~s->dir; s->outputs = value; - while ((ln = ffs(diff))) { - ln --; + while ((ln = ctz32(diff)) != 32) { if (s->handler[ln]) qemu_set_irq(s->handler[ln], (value >> ln) & 1); diff &= ~(1 << ln); @@ -138,8 +137,7 @@ static void omap_gpio_write(void *opaque, hwaddr addr, s->dir = value; value = s->outputs & ~s->dir; - while ((ln = ffs(diff))) { - ln --; + while ((ln = ctz32(diff)) != 32) { if (s->handler[ln]) qemu_set_irq(s->handler[ln], (value >> ln) & 1); diff &= ~(1 << ln); @@ -253,8 +251,7 @@ static inline void omap2_gpio_module_out_update(struct omap2_gpio_s *s, s->outputs ^= diff; diff &= ~s->dir; - while ((ln = ffs(diff))) { - ln --; + while ((ln = ctz32(diff)) != 32) { qemu_set_irq(s->handler[ln], (s->outputs >> ln) & 1); diff &= ~(1 << ln); } @@ -442,8 +439,8 @@ static void omap2_gpio_module_write(void *opaque, hwaddr addr, s->dir = value; value = s->outputs & ~s->dir; - while ((ln = ffs(diff))) { - diff &= ~(1 <<-- ln); + while ((ln = ctz32(diff)) != 32) { + diff &= ~(1 << ln); qemu_set_irq(s->handler[ln], (value >> ln) & 1); } diff --git a/hw/gpio/zaurus.c b/hw/gpio/zaurus.c index 94083424f8..24a77272d7 100644 --- a/hw/gpio/zaurus.c +++ b/hw/gpio/zaurus.c @@ -65,7 +65,7 @@ static inline void scoop_gpio_handler_update(ScoopInfo *s) { level = s->gpio_level & s->gpio_dir; for (diff = s->prev_level ^ level; diff; diff ^= 1 << bit) { - bit = ffs(diff) - 1; + bit = ctz32(diff); qemu_set_irq(s->handler[bit], (level >> bit) & 1); } diff --git a/hw/i2c/omap_i2c.c b/hw/i2c/omap_i2c.c index d63278dbde..b6f544a221 100644 --- a/hw/i2c/omap_i2c.c +++ 
b/hw/i2c/omap_i2c.c @@ -171,9 +171,13 @@ static uint32_t omap_i2c_read(void *opaque, hwaddr addr) case 0x0c: /* I2C_IV */ if (s->revision >= OMAP2_INTR_REV) break; - ret = ffs(s->stat & s->mask); - if (ret) - s->stat ^= 1 << (ret - 1); + ret = ctz32(s->stat & s->mask); + if (ret != 32) { + s->stat ^= 1 << ret; + ret++; + } else { + ret = 0; + } omap_i2c_interrupts_update(s); return ret; diff --git a/hw/i386/acpi-build.c b/hw/i386/acpi-build.c index 02b2e0cece..73259e729b 100644 --- a/hw/i386/acpi-build.c +++ b/hw/i386/acpi-build.c @@ -26,7 +26,6 @@ #include "qemu-common.h" #include "qemu/bitmap.h" #include "qemu/osdep.h" -#include "qemu/range.h" #include "qemu/error-report.h" #include "hw/pci/pci.h" #include "qom/cpu.h" @@ -58,7 +57,6 @@ #include "qapi/qmp/qint.h" #include "qom/qom-qobject.h" -#include "exec/ram_addr.h" /* These are used to size the ACPI tables for -M pc-i440fx-1.7 and * -M pc-i440fx-2.0. Even if the actual amount of AML generated grows @@ -1261,13 +1259,13 @@ build_rsdp(GArray *rsdp_table, GArray *linker, unsigned rsdt) typedef struct AcpiBuildState { /* Copy of table in RAM (for patching). */ - ram_addr_t table_ram; + MemoryRegion *table_mr; /* Is table patched? */ uint8_t patched; PcGuestInfo *guest_info; void *rsdp; - ram_addr_t rsdp_ram; - ram_addr_t linker_ram; + MemoryRegion *rsdp_mr; + MemoryRegion *linker_mr; } AcpiBuildState; static bool acpi_get_mcfg(AcpiMcfgInfo *mcfg) @@ -1451,15 +1449,15 @@ void acpi_build(PcGuestInfo *guest_info, AcpiBuildTables *tables) g_array_free(table_offsets, true); } -static void acpi_ram_update(ram_addr_t ram, GArray *data) +static void acpi_ram_update(MemoryRegion *mr, GArray *data) { uint32_t size = acpi_data_len(data); /* Make sure RAM size is correct - in case it got changed e.g. by migration */ - qemu_ram_resize(ram, size, &error_abort); + memory_region_ram_resize(mr, size, &error_abort); - memcpy(qemu_get_ram_ptr(ram), data->data, size); - cpu_physical_memory_set_dirty_range_nocode(ram, size); + memcpy(memory_region_get_ram_ptr(mr), data->data, size); + memory_region_set_dirty(mr, 0, size); } static void acpi_build_update(void *build_opaque, uint32_t offset) @@ -1477,15 +1475,15 @@ static void acpi_build_update(void *build_opaque, uint32_t offset) acpi_build(build_state->guest_info, &tables); - acpi_ram_update(build_state->table_ram, tables.table_data); + acpi_ram_update(build_state->table_mr, tables.table_data); if (build_state->rsdp) { memcpy(build_state->rsdp, tables.rsdp->data, acpi_data_len(tables.rsdp)); } else { - acpi_ram_update(build_state->rsdp_ram, tables.rsdp); + acpi_ram_update(build_state->rsdp_mr, tables.rsdp); } - acpi_ram_update(build_state->linker_ram, tables.linker); + acpi_ram_update(build_state->linker_mr, tables.linker); acpi_build_tables_cleanup(&tables, true); } @@ -1495,8 +1493,9 @@ static void acpi_build_reset(void *build_opaque) build_state->patched = 0; } -static ram_addr_t acpi_add_rom_blob(AcpiBuildState *build_state, GArray *blob, - const char *name, uint64_t max_size) +static MemoryRegion *acpi_add_rom_blob(AcpiBuildState *build_state, + GArray *blob, const char *name, + uint64_t max_size) { return rom_add_blob(name, blob->data, acpi_data_len(blob), max_size, -1, name, acpi_build_update, build_state); @@ -1542,12 +1541,12 @@ void acpi_setup(PcGuestInfo *guest_info) acpi_build(build_state->guest_info, &tables); /* Now expose it all to Guest */ - build_state->table_ram = acpi_add_rom_blob(build_state, tables.table_data, + build_state->table_mr = acpi_add_rom_blob(build_state, tables.table_data, 
ACPI_BUILD_TABLE_FILE, ACPI_BUILD_TABLE_MAX_SIZE); - assert(build_state->table_ram != RAM_ADDR_MAX); + assert(build_state->table_mr != NULL); - build_state->linker_ram = + build_state->linker_mr = acpi_add_rom_blob(build_state, tables.linker, "etc/table-loader", 0); fw_cfg_add_file(guest_info->fw_cfg, ACPI_BUILD_TPMLOG_FILE, @@ -1565,10 +1564,10 @@ void acpi_setup(PcGuestInfo *guest_info) fw_cfg_add_file_callback(guest_info->fw_cfg, ACPI_BUILD_RSDP_FILE, acpi_build_update, build_state, build_state->rsdp, rsdp_size); - build_state->rsdp_ram = (ram_addr_t)-1; + build_state->rsdp_mr = NULL; } else { build_state->rsdp = NULL; - build_state->rsdp_ram = acpi_add_rom_blob(build_state, tables.rsdp, + build_state->rsdp_mr = acpi_add_rom_blob(build_state, tables.rsdp, ACPI_BUILD_RSDP_FILE, 0); } diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c index 7da70ff349..08055a8d8a 100644 --- a/hw/i386/intel_iommu.c +++ b/hw/i386/intel_iommu.c @@ -246,7 +246,8 @@ static void vtd_generate_interrupt(IntelIOMMUState *s, hwaddr mesg_addr_reg, data = vtd_get_long_raw(s, mesg_data_reg); VTD_DPRINTF(FLOG, "msi: addr 0x%"PRIx64 " data 0x%"PRIx32, addr, data); - stl_le_phys(&address_space_memory, addr, data); + address_space_stl_le(&address_space_memory, addr, data, + MEMTXATTRS_UNSPECIFIED, NULL); } /* Generate a fault event to software via MSI if conditions are met. diff --git a/hw/ide/core.c b/hw/ide/core.c index a895fd86f6..fcb908061c 100644 --- a/hw/ide/core.c +++ b/hw/ide/core.c @@ -2436,8 +2436,8 @@ void ide_init2(IDEBus *bus, qemu_irq irq) static const MemoryRegionPortio ide_portio_list[] = { { 0, 8, 1, .read = ide_ioport_read, .write = ide_ioport_write }, - { 0, 2, 2, .read = ide_data_readw, .write = ide_data_writew }, - { 0, 4, 4, .read = ide_data_readl, .write = ide_data_writel }, + { 0, 1, 2, .read = ide_data_readw, .write = ide_data_writew }, + { 0, 1, 4, .read = ide_data_readl, .write = ide_data_writel }, PORTIO_END_OF_LIST(), }; diff --git a/hw/intc/allwinner-a10-pic.c b/hw/intc/allwinner-a10-pic.c index de820b9723..eed7621f13 100644 --- a/hw/intc/allwinner-a10-pic.c +++ b/hw/intc/allwinner-a10-pic.c @@ -23,7 +23,7 @@ static void aw_a10_pic_update(AwA10PICState *s) { uint8_t i; - int irq = 0, fiq = 0, pending; + int irq = 0, fiq = 0, zeroes; s->vector = 0; @@ -32,9 +32,9 @@ static void aw_a10_pic_update(AwA10PICState *s) fiq |= s->select[i] & s->irq_pending[i] & ~s->mask[i]; if (!s->vector) { - pending = ffs(s->irq_pending[i] & ~s->mask[i]); - if (pending) { - s->vector = (i * 32 + pending - 1) * 4; + zeroes = ctz32(s->irq_pending[i] & ~s->mask[i]); + if (zeroes != 32) { + s->vector = (i * 32 + zeroes) * 4; } } } diff --git a/hw/intc/apic_common.c b/hw/intc/apic_common.c index 042e960f42..d595d63a51 100644 --- a/hw/intc/apic_common.c +++ b/hw/intc/apic_common.c @@ -233,25 +233,15 @@ static void apic_reset_common(DeviceState *dev) { APICCommonState *s = APIC_COMMON(dev); APICCommonClass *info = APIC_COMMON_GET_CLASS(s); - bool bsp; + uint32_t bsp; - bsp = cpu_is_bsp(s->cpu); - s->apicbase = APIC_DEFAULT_ADDRESS | - (bsp ? MSR_IA32_APICBASE_BSP : 0) | MSR_IA32_APICBASE_ENABLE; + bsp = s->apicbase & MSR_IA32_APICBASE_BSP; + s->apicbase = APIC_DEFAULT_ADDRESS | bsp | MSR_IA32_APICBASE_ENABLE; s->vapic_paddr = 0; info->vapic_base_update(s); apic_init_reset(dev); - - if (bsp) { - /* - * LINT0 delivery mode on CPU #0 is set to ExtInt at initialization - * time typically by BIOS, so PIC interrupt can be delivered to the - * processor when local APIC is enabled. 
- */ - s->lvt[APIC_LVT_LINT0] = 0x700; - } } /* This function is only used for old state version 1 and 2 */ diff --git a/hw/intc/omap_intc.c b/hw/intc/omap_intc.c index ad3931c112..e9b38a3c63 100644 --- a/hw/intc/omap_intc.c +++ b/hw/intc/omap_intc.c @@ -60,7 +60,7 @@ struct omap_intr_handler_s { static void omap_inth_sir_update(struct omap_intr_handler_s *s, int is_fiq) { - int i, j, sir_intr, p_intr, p, f; + int i, j, sir_intr, p_intr, p; uint32_t level; sir_intr = 0; p_intr = 255; @@ -72,14 +72,15 @@ static void omap_inth_sir_update(struct omap_intr_handler_s *s, int is_fiq) for (j = 0; j < s->nbanks; ++j) { level = s->bank[j].irqs & ~s->bank[j].mask & (is_fiq ? s->bank[j].fiq : ~s->bank[j].fiq); - for (f = ffs(level), i = f - 1, level >>= f - 1; f; i += f, - level >>= f) { + + while (level != 0) { + i = ctz32(level); p = s->bank[j].priority[i]; if (p <= p_intr) { p_intr = p; sir_intr = 32 * j + i; } - f = ffs(level >> 1); + level &= level - 1; } } s->sir_intr[is_fiq] = sir_intr; diff --git a/hw/microblaze/boot.c b/hw/microblaze/boot.c index 38c59dbe9d..4c44317b65 100644 --- a/hw/microblaze/boot.c +++ b/hw/microblaze/boot.c @@ -113,15 +113,15 @@ void microblaze_load_kernel(MicroBlazeCPU *cpu, hwaddr ddr_base, const char *kernel_filename; const char *kernel_cmdline; const char *dtb_arg; + char *filename = NULL; machine_opts = qemu_get_machine_opts(); kernel_filename = qemu_opt_get(machine_opts, "kernel"); kernel_cmdline = qemu_opt_get(machine_opts, "append"); dtb_arg = qemu_opt_get(machine_opts, "dtb"); - if (dtb_arg) { /* Preference a -dtb argument */ - dtb_filename = dtb_arg; - } else { /* default to pcbios dtb as passed by machine_init */ - dtb_filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, dtb_filename); + /* default to pcbios dtb as passed by machine_init */ + if (!dtb_arg) { + filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, dtb_filename); } boot_info.machine_cpu_reset = machine_cpu_reset; @@ -203,7 +203,8 @@ void microblaze_load_kernel(MicroBlazeCPU *cpu, hwaddr ddr_base, boot_info.initrd_start, boot_info.initrd_end, kernel_cmdline, - dtb_filename); + /* Preference a -dtb argument */ + dtb_arg ? 
dtb_arg : filename); } - + g_free(filename); } diff --git a/hw/mips/mips_fulong2e.c b/hw/mips/mips_fulong2e.c index 4aae64a9ec..dea941ad88 100644 --- a/hw/mips/mips_fulong2e.c +++ b/hw/mips/mips_fulong2e.c @@ -168,6 +168,7 @@ static int64_t load_kernel (CPUMIPSState *env) rom_add_blob_fixed("prom", prom_buf, prom_size, cpu_mips_kseg0_to_phys(NULL, ENVP_ADDR)); + g_free(prom_buf); return kernel_entry; } diff --git a/hw/mips/mips_jazz.c b/hw/mips/mips_jazz.c index 07f3c270d4..2c153e092f 100644 --- a/hw/mips/mips_jazz.c +++ b/hw/mips/mips_jazz.c @@ -61,7 +61,8 @@ static void main_cpu_reset(void *opaque) static uint64_t rtc_read(void *opaque, hwaddr addr, unsigned size) { uint8_t val; - address_space_read(&address_space_memory, 0x90000071, &val, 1); + address_space_read(&address_space_memory, 0x90000071, + MEMTXATTRS_UNSPECIFIED, &val, 1); return val; } @@ -69,7 +70,8 @@ static void rtc_write(void *opaque, hwaddr addr, uint64_t val, unsigned size) { uint8_t buf = val & 0xff; - address_space_write(&address_space_memory, 0x90000071, &buf, 1); + address_space_write(&address_space_memory, 0x90000071, + MEMTXATTRS_UNSPECIFIED, &buf, 1); } static const MemoryRegionOps rtc_ops = { diff --git a/hw/mips/mips_malta.c b/hw/mips/mips_malta.c index b0fa71a514..482250d85d 100644 --- a/hw/mips/mips_malta.c +++ b/hw/mips/mips_malta.c @@ -861,6 +861,7 @@ static int64_t load_kernel (void) rom_add_blob_fixed("prom", prom_buf, prom_size, cpu_mips_kseg0_to_phys(NULL, ENVP_ADDR)); + g_free(prom_buf); return kernel_entry; } diff --git a/hw/mips/mips_r4k.c b/hw/mips/mips_r4k.c index 66e2a58e86..f4dcacd862 100644 --- a/hw/mips/mips_r4k.c +++ b/hw/mips/mips_r4k.c @@ -139,6 +139,7 @@ static int64_t load_kernel(void) rom_add_blob_fixed("params", params_buf, params_size, (16 << 20) - 264); + g_free(params_buf); return entry; } diff --git a/hw/misc/edu.c b/hw/misc/edu.c index f601069e82..fe50b42af7 100644 --- a/hw/misc/edu.c +++ b/hw/misc/edu.c @@ -279,7 +279,7 @@ static const MemoryRegionOps edu_mmio_ops = { }; /* - * We purposedly use a thread, so that users are forced to wait for the status + * We purposely use a thread, so that users are forced to wait for the status * register. 
*/ static void *edu_fact_thread(void *opaque) diff --git a/hw/misc/slavio_misc.c b/hw/misc/slavio_misc.c index 50985958a4..ec50f10757 100644 --- a/hw/misc/slavio_misc.c +++ b/hw/misc/slavio_misc.c @@ -68,6 +68,7 @@ typedef struct APCState { } APCState; #define MISC_SIZE 1 +#define LED_SIZE 2 #define SYSCTRL_SIZE 4 #define AUX1_TC 0x02 @@ -452,13 +453,13 @@ static int slavio_misc_init1(SysBusDevice *sbd) /* 16 bit registers */ /* ss600mp diag LEDs */ memory_region_init_io(&s->led_iomem, OBJECT(s), &slavio_led_mem_ops, s, - "leds", MISC_SIZE); + "leds", LED_SIZE); sysbus_init_mmio(sbd, &s->led_iomem); /* 32 bit registers */ /* System control */ memory_region_init_io(&s->sysctrl_iomem, OBJECT(s), &slavio_sysctrl_mem_ops, s, - "system-control", MISC_SIZE); + "system-control", SYSCTRL_SIZE); sysbus_init_mmio(sbd, &s->sysctrl_iomem); /* AUX 1 (Misc System Functions) */ diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c index c23284f8b0..cc252edeff 100644 --- a/hw/net/virtio-net.c +++ b/hw/net/virtio-net.c @@ -1594,7 +1594,7 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp) n->max_queues = MAX(n->nic_conf.peers.queues, 1); if (n->max_queues * 2 + 1 > VIRTIO_PCI_QUEUE_MAX) { error_setg(errp, "Invalid number of queues (= %" PRIu32 "), " - "must be a postive integer less than %d.", + "must be a positive integer less than %d.", n->max_queues, (VIRTIO_PCI_QUEUE_MAX - 1) / 2); virtio_cleanup(vdev); return; diff --git a/hw/net/vmxnet_rx_pkt.c b/hw/net/vmxnet_rx_pkt.c index a40e346293..acbca6a3db 100644 --- a/hw/net/vmxnet_rx_pkt.c +++ b/hw/net/vmxnet_rx_pkt.c @@ -172,13 +172,6 @@ bool vmxnet_rx_pkt_has_virt_hdr(struct VmxnetRxPkt *pkt) return pkt->has_virt_hdr; } -uint16_t vmxnet_rx_pkt_get_num_frags(struct VmxnetRxPkt *pkt) -{ - assert(pkt); - - return pkt->vec_len; -} - uint16_t vmxnet_rx_pkt_get_vlan_tag(struct VmxnetRxPkt *pkt) { assert(pkt); diff --git a/hw/net/vmxnet_rx_pkt.h b/hw/net/vmxnet_rx_pkt.h index 6b2c60ef10..5f8352a468 100644 --- a/hw/net/vmxnet_rx_pkt.h +++ b/hw/net/vmxnet_rx_pkt.h @@ -114,15 +114,6 @@ bool vmxnet_rx_pkt_is_vlan_stripped(struct VmxnetRxPkt *pkt); bool vmxnet_rx_pkt_has_virt_hdr(struct VmxnetRxPkt *pkt); /** - * returns number of frags attached to the packet - * - * @pkt: packet - * @ret: number of frags - * - */ -uint16_t vmxnet_rx_pkt_get_num_frags(struct VmxnetRxPkt *pkt); - -/** * attach data to rx packet * * @pkt: packet diff --git a/hw/pci-bridge/i82801b11.c b/hw/pci-bridge/i82801b11.c index 14cd7fd405..7e79bc01ef 100644 --- a/hw/pci-bridge/i82801b11.c +++ b/hw/pci-bridge/i82801b11.c @@ -101,27 +101,6 @@ static const TypeInfo i82801b11_bridge_info = { .class_init = i82801b11_bridge_class_init, }; -PCIBus *ich9_d2pbr_init(PCIBus *bus, int devfn, int sec_bus) -{ - PCIDevice *d; - PCIBridge *br; - char buf[16]; - DeviceState *qdev; - - d = pci_create_multifunction(bus, devfn, true, "i82801b11-bridge"); - if (!d) { - return NULL; - } - br = PCI_BRIDGE(d); - qdev = DEVICE(d); - - snprintf(buf, sizeof(buf), "pci.%d", sec_bus); - pci_bridge_map_irq(br, buf, pci_swizzle_map_irq_fn); - qdev_init_nofail(qdev); - - return pci_bridge_get_sec_bus(br); -} - static void d2pbr_register(void) { type_register_static(&i82801b11_bridge_info); diff --git a/hw/pci-host/apb.c b/hw/pci-host/apb.c index 312fa703c6..599768e2d9 100644 --- a/hw/pci-host/apb.c +++ b/hw/pci-host/apb.c @@ -289,7 +289,8 @@ static IOMMUTLBEntry pbm_translate_iommu(MemoryRegion *iommu, hwaddr addr, } } - tte = ldq_be_phys(&address_space_memory, baseaddr + offset); + tte = 
address_space_ldq_be(&address_space_memory, baseaddr + offset, + MEMTXATTRS_UNSPECIFIED, NULL); if (!(tte & IOMMU_TTE_DATA_V)) { /* Invalid mapping */ diff --git a/hw/pci-host/bonito.c b/hw/pci-host/bonito.c index 8134d0bcd0..3a731fe18d 100644 --- a/hw/pci-host/bonito.c +++ b/hw/pci-host/bonito.c @@ -427,7 +427,7 @@ static uint32_t bonito_sbridge_pciaddr(void *opaque, hwaddr addr) cfgaddr |= (s->regs[BONITO_PCIMAP_CFG] & 0xffff) << 16; idsel = (cfgaddr & BONITO_PCICONF_IDSEL_MASK) >> BONITO_PCICONF_IDSEL_OFFSET; - devno = ffs(idsel) - 1; + devno = ctz32(idsel); funno = (cfgaddr & BONITO_PCICONF_FUN_MASK) >> BONITO_PCICONF_FUN_OFFSET; regno = (cfgaddr & BONITO_PCICONF_REG_MASK) >> BONITO_PCICONF_REG_OFFSET; diff --git a/hw/pci-host/prep.c b/hw/pci-host/prep.c index 6cea6ffebb..c63f45d217 100644 --- a/hw/pci-host/prep.c +++ b/hw/pci-host/prep.c @@ -140,7 +140,8 @@ static uint64_t raven_io_read(void *opaque, hwaddr addr, uint8_t buf[4]; addr = raven_io_address(s, addr); - address_space_read(&s->pci_io_as, addr + 0x80000000, buf, size); + address_space_read(&s->pci_io_as, addr + 0x80000000, + MEMTXATTRS_UNSPECIFIED, buf, size); if (size == 1) { return buf[0]; @@ -171,7 +172,8 @@ static void raven_io_write(void *opaque, hwaddr addr, g_assert_not_reached(); } - address_space_write(&s->pci_io_as, addr + 0x80000000, buf, size); + address_space_write(&s->pci_io_as, addr + 0x80000000, + MEMTXATTRS_UNSPECIFIED, buf, size); } static const MemoryRegionOps raven_io_ops = { diff --git a/hw/pci-host/uninorth.c b/hw/pci-host/uninorth.c index 53f2b59ae8..f0144eb7b0 100644 --- a/hw/pci-host/uninorth.c +++ b/hw/pci-host/uninorth.c @@ -92,7 +92,10 @@ static uint32_t unin_get_config_reg(uint32_t reg, uint32_t addr) uint32_t slot, func; /* Grab CFA0 style values */ - slot = ffs(reg & 0xfffff800) - 1; + slot = ctz32(reg & 0xfffff800); + if (slot == 32) { + slot = -1; /* XXX: should this be 0? */ + } func = (reg >> 8) & 7; /* ... and then convert them to x86 format */ diff --git a/hw/pci/msi.c b/hw/pci/msi.c index 52d23130d9..2949938223 100644 --- a/hw/pci/msi.c +++ b/hw/pci/msi.c @@ -72,7 +72,7 @@ static inline uint8_t msi_cap_sizeof(uint16_t flags) static inline unsigned int msi_nr_vectors(uint16_t flags) { return 1U << - ((flags & PCI_MSI_FLAGS_QSIZE) >> (ffs(PCI_MSI_FLAGS_QSIZE) - 1)); + ((flags & PCI_MSI_FLAGS_QSIZE) >> ctz32(PCI_MSI_FLAGS_QSIZE)); } static inline uint8_t msi_flags_off(const PCIDevice* dev) @@ -175,9 +175,9 @@ int msi_init(struct PCIDevice *dev, uint8_t offset, assert(nr_vectors > 0); assert(nr_vectors <= PCI_MSI_VECTORS_MAX); /* the nr of MSI vectors is up to 32 */ - vectors_order = ffs(nr_vectors) - 1; + vectors_order = ctz32(nr_vectors); - flags = vectors_order << (ffs(PCI_MSI_FLAGS_QMASK) - 1); + flags = vectors_order << ctz32(PCI_MSI_FLAGS_QMASK); if (msi64bit) { flags |= PCI_MSI_FLAGS_64BIT; } @@ -291,7 +291,8 @@ void msi_notify(PCIDevice *dev, unsigned int vector) "notify vector 0x%x" " address: 0x%"PRIx64" data: 0x%"PRIx32"\n", vector, msg.address, msg.data); - stl_le_phys(&dev->bus_master_as, msg.address, msg.data); + address_space_stl_le(&dev->bus_master_as, msg.address, msg.data, + MEMTXATTRS_UNSPECIFIED, NULL); } /* Normally called by pci_default_write_config(). 
*/ @@ -354,12 +355,12 @@ void msi_write_config(PCIDevice *dev, uint32_t addr, uint32_t val, int len) * just don't crash the host */ log_num_vecs = - (flags & PCI_MSI_FLAGS_QSIZE) >> (ffs(PCI_MSI_FLAGS_QSIZE) - 1); + (flags & PCI_MSI_FLAGS_QSIZE) >> ctz32(PCI_MSI_FLAGS_QSIZE); log_max_vecs = - (flags & PCI_MSI_FLAGS_QMASK) >> (ffs(PCI_MSI_FLAGS_QMASK) - 1); + (flags & PCI_MSI_FLAGS_QMASK) >> ctz32(PCI_MSI_FLAGS_QMASK); if (log_num_vecs > log_max_vecs) { flags &= ~PCI_MSI_FLAGS_QSIZE; - flags |= log_max_vecs << (ffs(PCI_MSI_FLAGS_QSIZE) - 1); + flags |= log_max_vecs << ctz32(PCI_MSI_FLAGS_QSIZE); pci_set_word(dev->config + msi_flags_off(dev), flags); } diff --git a/hw/pci/msix.c b/hw/pci/msix.c index f8748cfe1d..9935f98ae5 100644 --- a/hw/pci/msix.c +++ b/hw/pci/msix.c @@ -443,7 +443,8 @@ void msix_notify(PCIDevice *dev, unsigned vector) msg = msix_get_message(dev, vector); - stl_le_phys(&dev->bus_master_as, msg.address, msg.data); + address_space_stl_le(&dev->bus_master_as, msg.address, msg.data, + MEMTXATTRS_UNSPECIFIED, NULL); } void msix_reset(PCIDevice *dev) diff --git a/hw/pci/pci.c b/hw/pci/pci.c index 56947aebc7..48f19a306d 100644 --- a/hw/pci/pci.c +++ b/hw/pci/pci.c @@ -1456,24 +1456,26 @@ static PciBridgeInfo *qmp_query_pci_bridge(PCIDevice *dev, PCIBus *bus, int bus_num) { PciBridgeInfo *info; + PciMemoryRange *range; - info = g_malloc0(sizeof(*info)); + info = g_new0(PciBridgeInfo, 1); - info->bus.number = dev->config[PCI_PRIMARY_BUS]; - info->bus.secondary = dev->config[PCI_SECONDARY_BUS]; - info->bus.subordinate = dev->config[PCI_SUBORDINATE_BUS]; + info->bus = g_new0(PciBusInfo, 1); + info->bus->number = dev->config[PCI_PRIMARY_BUS]; + info->bus->secondary = dev->config[PCI_SECONDARY_BUS]; + info->bus->subordinate = dev->config[PCI_SUBORDINATE_BUS]; - info->bus.io_range = g_malloc0(sizeof(*info->bus.io_range)); - info->bus.io_range->base = pci_bridge_get_base(dev, PCI_BASE_ADDRESS_SPACE_IO); - info->bus.io_range->limit = pci_bridge_get_limit(dev, PCI_BASE_ADDRESS_SPACE_IO); + range = info->bus->io_range = g_new0(PciMemoryRange, 1); + range->base = pci_bridge_get_base(dev, PCI_BASE_ADDRESS_SPACE_IO); + range->limit = pci_bridge_get_limit(dev, PCI_BASE_ADDRESS_SPACE_IO); - info->bus.memory_range = g_malloc0(sizeof(*info->bus.memory_range)); - info->bus.memory_range->base = pci_bridge_get_base(dev, PCI_BASE_ADDRESS_SPACE_MEMORY); - info->bus.memory_range->limit = pci_bridge_get_limit(dev, PCI_BASE_ADDRESS_SPACE_MEMORY); + range = info->bus->memory_range = g_new0(PciMemoryRange, 1); + range->base = pci_bridge_get_base(dev, PCI_BASE_ADDRESS_SPACE_MEMORY); + range->limit = pci_bridge_get_limit(dev, PCI_BASE_ADDRESS_SPACE_MEMORY); - info->bus.prefetchable_range = g_malloc0(sizeof(*info->bus.prefetchable_range)); - info->bus.prefetchable_range->base = pci_bridge_get_base(dev, PCI_BASE_ADDRESS_MEM_PREFETCH); - info->bus.prefetchable_range->limit = pci_bridge_get_limit(dev, PCI_BASE_ADDRESS_MEM_PREFETCH); + range = info->bus->prefetchable_range = g_new0(PciMemoryRange, 1); + range->base = pci_bridge_get_base(dev, PCI_BASE_ADDRESS_MEM_PREFETCH); + range->limit = pci_bridge_get_limit(dev, PCI_BASE_ADDRESS_MEM_PREFETCH); if (dev->config[PCI_SECONDARY_BUS] != 0) { PCIBus *child_bus = pci_find_bus_nr(bus, dev->config[PCI_SECONDARY_BUS]); @@ -1494,21 +1496,23 @@ static PciDeviceInfo *qmp_query_pci_device(PCIDevice *dev, PCIBus *bus, uint8_t type; int class; - info = g_malloc0(sizeof(*info)); + info = g_new0(PciDeviceInfo, 1); info->bus = bus_num; info->slot = PCI_SLOT(dev->devfn); 
info->function = PCI_FUNC(dev->devfn); + info->class_info = g_new0(PciDeviceClass, 1); class = pci_get_word(dev->config + PCI_CLASS_DEVICE); - info->class_info.q_class = class; + info->class_info->q_class = class; desc = get_class_desc(class); if (desc->desc) { - info->class_info.has_desc = true; - info->class_info.desc = g_strdup(desc->desc); + info->class_info->has_desc = true; + info->class_info->desc = g_strdup(desc->desc); } - info->id.vendor = pci_get_word(dev->config + PCI_VENDOR_ID); - info->id.device = pci_get_word(dev->config + PCI_DEVICE_ID); + info->id = g_new0(PciDeviceId, 1); + info->id->vendor = pci_get_word(dev->config + PCI_VENDOR_ID); + info->id->device = pci_get_word(dev->config + PCI_DEVICE_ID); info->regions = qmp_query_pci_regions(dev); info->qdev_id = g_strdup(dev->qdev.id ? dev->qdev.id : ""); diff --git a/hw/pci/pcie_aer.c b/hw/pci/pcie_aer.c index eaa3e6ea94..b48c09cd11 100644 --- a/hw/pci/pcie_aer.c +++ b/hw/pci/pcie_aer.c @@ -410,7 +410,7 @@ static void pcie_aer_msg(PCIDevice *dev, const PCIEAERMsg *msg) static void pcie_aer_update_log(PCIDevice *dev, const PCIEAERErr *err) { uint8_t *aer_cap = dev->config + dev->exp.aer_cap; - uint8_t first_bit = ffs(err->status) - 1; + uint8_t first_bit = ctz32(err->status); uint32_t errcap = pci_get_long(aer_cap + PCI_ERR_CAP); int i; diff --git a/hw/pci/shpc.c b/hw/pci/shpc.c index 759910f79a..a706486394 100644 --- a/hw/pci/shpc.c +++ b/hw/pci/shpc.c @@ -61,7 +61,7 @@ /* Same slot state masks are used for command and status registers */ #define SHPC_SLOT_STATE_MASK 0x03 #define SHPC_SLOT_STATE_SHIFT \ - (ffs(SHPC_SLOT_STATE_MASK) - 1) + ctz32(SHPC_SLOT_STATE_MASK) #define SHPC_STATE_NO 0x0 #define SHPC_STATE_PWRONLY 0x1 @@ -70,10 +70,10 @@ #define SHPC_SLOT_PWR_LED_MASK 0xC #define SHPC_SLOT_PWR_LED_SHIFT \ - (ffs(SHPC_SLOT_PWR_LED_MASK) - 1) + ctz32(SHPC_SLOT_PWR_LED_MASK) #define SHPC_SLOT_ATTN_LED_MASK 0x30 #define SHPC_SLOT_ATTN_LED_SHIFT \ - (ffs(SHPC_SLOT_ATTN_LED_MASK) - 1) + ctz32(SHPC_SLOT_ATTN_LED_MASK) #define SHPC_LED_NO 0x0 #define SHPC_LED_ON 0x1 @@ -136,7 +136,7 @@ static int roundup_pow_of_two(int x) static uint16_t shpc_get_status(SHPCDevice *shpc, int slot, uint16_t msk) { uint8_t *status = shpc->config + SHPC_SLOT_STATUS(slot); - return (pci_get_word(status) & msk) >> (ffs(msk) - 1); + return (pci_get_word(status) & msk) >> ctz32(msk); } static void shpc_set_status(SHPCDevice *shpc, @@ -144,7 +144,7 @@ static void shpc_set_status(SHPCDevice *shpc, { uint8_t *status = shpc->config + SHPC_SLOT_STATUS(slot); pci_word_test_and_clear_mask(status, msk); - pci_word_test_and_set_mask(status, value << (ffs(msk) - 1)); + pci_word_test_and_set_mask(status, value << ctz32(msk)); } static void shpc_interrupt_update(PCIDevice *d) diff --git a/hw/pci/slotid_cap.c b/hw/pci/slotid_cap.c index 62f7bae2f1..1c01d346c9 100644 --- a/hw/pci/slotid_cap.c +++ b/hw/pci/slotid_cap.c @@ -3,7 +3,7 @@ #include "qemu/error-report.h" #define SLOTID_CAP_LENGTH 4 -#define SLOTID_NSLOTS_SHIFT (ffs(PCI_SID_ESR_NSLOTS) - 1) +#define SLOTID_NSLOTS_SHIFT ctz32(PCI_SID_ESR_NSLOTS) int slotid_cap_init(PCIDevice *d, int nslots, uint8_t chassis, diff --git a/hw/ppc/ppce500_spin.c b/hw/ppc/ppce500_spin.c index d49f2b8803..a99f7b0397 100644 --- a/hw/ppc/ppce500_spin.c +++ b/hw/ppc/ppce500_spin.c @@ -74,7 +74,7 @@ static void spin_reset(void *opaque) /* Create -kernel TLB entries for BookE, linearly spanning 256MB. 
*/ static inline hwaddr booke206_page_size_to_tlb(uint64_t size) { - return (ffs(size >> 10) - 1) >> 1; + return ctz32(size >> 10) >> 1; } static void mmubooke_create_initial_mapping(CPUPPCState *env, diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c index 8e43aa21f2..ac261ef050 100644 --- a/hw/ppc/spapr.c +++ b/hw/ppc/spapr.c @@ -1029,7 +1029,7 @@ static int spapr_post_load(void *opaque, int version_id) sPAPREnvironment *spapr = (sPAPREnvironment *)opaque; int err = 0; - /* In earlier versions, there was no seperate qdev for the PAPR + /* In earlier versions, there was no separate qdev for the PAPR * RTC, so the RTC offset was stored directly in sPAPREnvironment. * So when migrating from those versions, poke the incoming offset * value into the RTC device */ diff --git a/hw/s390x/css.c b/hw/s390x/css.c index 9a13b006dd..5561d807dc 100644 --- a/hw/s390x/css.c +++ b/hw/s390x/css.c @@ -745,20 +745,27 @@ static void css_update_chnmon(SubchDev *sch) /* Format 1, per-subchannel area. */ uint32_t count; - count = ldl_phys(&address_space_memory, sch->curr_status.mba); + count = address_space_ldl(&address_space_memory, + sch->curr_status.mba, + MEMTXATTRS_UNSPECIFIED, + NULL); count++; - stl_phys(&address_space_memory, sch->curr_status.mba, count); + address_space_stl(&address_space_memory, sch->curr_status.mba, count, + MEMTXATTRS_UNSPECIFIED, NULL); } else { /* Format 0, global area. */ uint32_t offset; uint16_t count; offset = sch->curr_status.pmcw.mbi << 5; - count = lduw_phys(&address_space_memory, - channel_subsys->chnmon_area + offset); + count = address_space_lduw(&address_space_memory, + channel_subsys->chnmon_area + offset, + MEMTXATTRS_UNSPECIFIED, + NULL); count++; - stw_phys(&address_space_memory, - channel_subsys->chnmon_area + offset, count); + address_space_stw(&address_space_memory, + channel_subsys->chnmon_area + offset, count, + MEMTXATTRS_UNSPECIFIED, NULL); } } diff --git a/hw/s390x/event-facility.c b/hw/s390x/event-facility.c index 78da718362..1cb116a297 100644 --- a/hw/s390x/event-facility.c +++ b/hw/s390x/event-facility.c @@ -362,6 +362,7 @@ static void init_event_facility_class(ObjectClass *klass, void *data) dc->reset = reset_event_facility; dc->vmsd = &vmstate_event_facility; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); k->init = init_event_facility; k->command_handler = command_handler; k->event_pending = event_pending; diff --git a/hw/s390x/ipl.c b/hw/s390x/ipl.c index 2e26d2aa2c..132004ae4f 100644 --- a/hw/s390x/ipl.c +++ b/hw/s390x/ipl.c @@ -315,6 +315,7 @@ static void s390_ipl_class_init(ObjectClass *klass, void *data) dc->props = s390_ipl_properties; dc->reset = s390_ipl_reset; dc->vmsd = &vmstate_ipl; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); } static const TypeInfo s390_ipl_info = { diff --git a/hw/s390x/s390-pci-bus.c b/hw/s390x/s390-pci-bus.c index 3c086f6155..560b66a501 100644 --- a/hw/s390x/s390-pci-bus.c +++ b/hw/s390x/s390-pci-bus.c @@ -278,7 +278,8 @@ static uint64_t s390_guest_io_table_walk(uint64_t guest_iota, px = calc_px(guest_dma_address); sto_a = guest_iota + rtx * sizeof(uint64_t); - sto = ldq_phys(&address_space_memory, sto_a); + sto = address_space_ldq(&address_space_memory, sto_a, + MEMTXATTRS_UNSPECIFIED, NULL); sto = get_rt_sto(sto); if (!sto) { pte = 0; @@ -286,7 +287,8 @@ static uint64_t s390_guest_io_table_walk(uint64_t guest_iota, } pto_a = sto + sx * sizeof(uint64_t); - pto = ldq_phys(&address_space_memory, pto_a); + pto = address_space_ldq(&address_space_memory, pto_a, + MEMTXATTRS_UNSPECIFIED, NULL); pto = get_st_pto(pto); if 
(!pto) { pte = 0; @@ -294,7 +296,8 @@ static uint64_t s390_guest_io_table_walk(uint64_t guest_iota, } px_a = pto + px * sizeof(uint64_t); - pte = ldq_phys(&address_space_memory, px_a); + pte = address_space_ldq(&address_space_memory, px_a, + MEMTXATTRS_UNSPECIFIED, NULL); out: return pte; diff --git a/hw/s390x/s390-pci-inst.c b/hw/s390x/s390-pci-inst.c index 08d8aa6b4b..f9151a9afb 100644 --- a/hw/s390x/s390-pci-inst.c +++ b/hw/s390x/s390-pci-inst.c @@ -155,7 +155,7 @@ int clp_service_call(S390CPU *cpu, uint8_t r2) return 0; } - if (s390_cpu_virt_mem_read(cpu, env->regs[r2], buffer, sizeof(*reqh))) { + if (s390_cpu_virt_mem_read(cpu, env->regs[r2], r2, buffer, sizeof(*reqh))) { return 0; } reqh = (ClpReqHdr *)buffer; @@ -165,7 +165,7 @@ int clp_service_call(S390CPU *cpu, uint8_t r2) return 0; } - if (s390_cpu_virt_mem_read(cpu, env->regs[r2], buffer, + if (s390_cpu_virt_mem_read(cpu, env->regs[r2], r2, buffer, req_len + sizeof(*resh))) { return 0; } @@ -180,7 +180,7 @@ int clp_service_call(S390CPU *cpu, uint8_t r2) return 0; } - if (s390_cpu_virt_mem_read(cpu, env->regs[r2], buffer, + if (s390_cpu_virt_mem_read(cpu, env->regs[r2], r2, buffer, req_len + res_len)) { return 0; } @@ -277,7 +277,7 @@ int clp_service_call(S390CPU *cpu, uint8_t r2) } out: - if (s390_cpu_virt_mem_write(cpu, env->regs[r2], buffer, + if (s390_cpu_virt_mem_write(cpu, env->regs[r2], r2, buffer, req_len + res_len)) { return 0; } @@ -331,7 +331,8 @@ int pcilg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2) return 0; } MemoryRegion *mr = pbdev->pdev->io_regions[pcias].memory; - io_mem_read(mr, offset, &data, len); + memory_region_dispatch_read(mr, offset, &data, len, + MEMTXATTRS_UNSPECIFIED); } else if (pcias == 15) { if ((4 - (offset & 0x3)) < len) { program_interrupt(env, PGM_OPERAND, 4); @@ -456,7 +457,8 @@ int pcistg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2) mr = pbdev->pdev->io_regions[pcias].memory; } - io_mem_write(mr, offset, data, len); + memory_region_dispatch_write(mr, offset, data, len, + MEMTXATTRS_UNSPECIFIED); } else if (pcias == 15) { if ((4 - (offset & 0x3)) < len) { program_interrupt(env, PGM_OPERAND, 4); @@ -544,7 +546,8 @@ out: return 0; } -int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr) +int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr, + uint8_t ar) { CPUS390XState *env = &cpu->env; S390PCIBusDevice *pbdev; @@ -601,12 +604,14 @@ int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr) return 0; } - if (s390_cpu_virt_mem_read(cpu, gaddr, buffer, len)) { + if (s390_cpu_virt_mem_read(cpu, gaddr, ar, buffer, len)) { return 0; } for (i = 0; i < len / 8; i++) { - io_mem_write(mr, env->regs[r3] + i * 8, ldq_p(buffer + i * 8), 8); + memory_region_dispatch_write(mr, env->regs[r3] + i * 8, + ldq_p(buffer + i * 8), 8, + MEMTXATTRS_UNSPECIFIED); } setcc(cpu, ZPCI_PCI_LS_OK); @@ -694,7 +699,7 @@ static void dereg_ioat(S390PCIBusDevice *pbdev) pbdev->g_iota = 0; } -int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba) +int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar) { CPUS390XState *env = &cpu->env; uint8_t oc; @@ -723,7 +728,7 @@ int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba) return 0; } - if (s390_cpu_virt_mem_read(cpu, fiba, (uint8_t *)&fib, sizeof(fib))) { + if (s390_cpu_virt_mem_read(cpu, fiba, ar, (uint8_t *)&fib, sizeof(fib))) { return 0; } @@ -769,7 +774,7 @@ int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba) return 0; } -int 
stpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba) +int stpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar) { CPUS390XState *env = &cpu->env; uint32_t fh; @@ -825,7 +830,7 @@ int stpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba) fib.fc |= 0x10; } - if (s390_cpu_virt_mem_write(cpu, fiba, (uint8_t *)&fib, sizeof(fib))) { + if (s390_cpu_virt_mem_write(cpu, fiba, ar, (uint8_t *)&fib, sizeof(fib))) { return 0; } diff --git a/hw/s390x/s390-pci-inst.h b/hw/s390x/s390-pci-inst.h index 7e6c804737..70fa71395f 100644 --- a/hw/s390x/s390-pci-inst.h +++ b/hw/s390x/s390-pci-inst.h @@ -281,8 +281,9 @@ int clp_service_call(S390CPU *cpu, uint8_t r2); int pcilg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2); int pcistg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2); int rpcit_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2); -int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr); -int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba); -int stpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba); +int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr, + uint8_t ar); +int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar); +int stpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar); #endif diff --git a/hw/s390x/s390-virtio-bus.c b/hw/s390x/s390-virtio-bus.c index c8a78ba577..0e35ac970a 100644 --- a/hw/s390x/s390-virtio-bus.c +++ b/hw/s390x/s390-virtio-bus.c @@ -75,10 +75,20 @@ void s390_virtio_reset_idx(VirtIOS390Device *dev) for (i = 0; i < num_vq; i++) { idx_addr = virtio_queue_get_avail_addr(dev->vdev, i) + VIRTIO_VRING_AVAIL_IDX_OFFS; - stw_phys(&address_space_memory, idx_addr, 0); + address_space_stw(&address_space_memory, idx_addr, 0, + MEMTXATTRS_UNSPECIFIED, NULL); + idx_addr = virtio_queue_get_avail_addr(dev->vdev, i) + + virtio_queue_get_avail_size(dev->vdev, i); + address_space_stw(&address_space_memory, idx_addr, 0, + MEMTXATTRS_UNSPECIFIED, NULL); idx_addr = virtio_queue_get_used_addr(dev->vdev, i) + VIRTIO_VRING_USED_IDX_OFFS; - stw_phys(&address_space_memory, idx_addr, 0); + address_space_stw(&address_space_memory, idx_addr, 0, + MEMTXATTRS_UNSPECIFIED, NULL); + idx_addr = virtio_queue_get_used_addr(dev->vdev, i) + + virtio_queue_get_used_size(dev->vdev, i); + address_space_stw(&address_space_memory, idx_addr, 0, + MEMTXATTRS_UNSPECIFIED, NULL); } } @@ -335,7 +345,8 @@ static uint64_t s390_virtio_device_vq_token(VirtIOS390Device *dev, int vq) (vq * VIRTIO_VQCONFIG_LEN) + VIRTIO_VQCONFIG_OFFS_TOKEN; - return ldq_be_phys(&address_space_memory, token_off); + return address_space_ldq_be(&address_space_memory, token_off, + MEMTXATTRS_UNSPECIFIED, NULL); } static ram_addr_t s390_virtio_device_num_vq(VirtIOS390Device *dev) @@ -370,21 +381,33 @@ void s390_virtio_device_sync(VirtIOS390Device *dev) virtio_reset(dev->vdev); /* Sync dev space */ - stb_phys(&address_space_memory, - dev->dev_offs + VIRTIO_DEV_OFFS_TYPE, dev->vdev->device_id); - - stb_phys(&address_space_memory, - dev->dev_offs + VIRTIO_DEV_OFFS_NUM_VQ, - s390_virtio_device_num_vq(dev)); - stb_phys(&address_space_memory, - dev->dev_offs + VIRTIO_DEV_OFFS_FEATURE_LEN, dev->feat_len); - - stb_phys(&address_space_memory, - dev->dev_offs + VIRTIO_DEV_OFFS_CONFIG_LEN, dev->vdev->config_len); + address_space_stb(&address_space_memory, + dev->dev_offs + VIRTIO_DEV_OFFS_TYPE, + dev->vdev->device_id, + MEMTXATTRS_UNSPECIFIED, + NULL); + + address_space_stb(&address_space_memory, + dev->dev_offs 
+ VIRTIO_DEV_OFFS_NUM_VQ, + s390_virtio_device_num_vq(dev), + MEMTXATTRS_UNSPECIFIED, + NULL); + address_space_stb(&address_space_memory, + dev->dev_offs + VIRTIO_DEV_OFFS_FEATURE_LEN, + dev->feat_len, + MEMTXATTRS_UNSPECIFIED, + NULL); + + address_space_stb(&address_space_memory, + dev->dev_offs + VIRTIO_DEV_OFFS_CONFIG_LEN, + dev->vdev->config_len, + MEMTXATTRS_UNSPECIFIED, + NULL); num_vq = s390_virtio_device_num_vq(dev); - stb_phys(&address_space_memory, - dev->dev_offs + VIRTIO_DEV_OFFS_NUM_VQ, num_vq); + address_space_stb(&address_space_memory, + dev->dev_offs + VIRTIO_DEV_OFFS_NUM_VQ, num_vq, + MEMTXATTRS_UNSPECIFIED, NULL); /* Sync virtqueues */ for (i = 0; i < num_vq; i++) { @@ -395,11 +418,14 @@ void s390_virtio_device_sync(VirtIOS390Device *dev) vring = s390_virtio_next_ring(bus); virtio_queue_set_addr(dev->vdev, i, vring); virtio_queue_set_vector(dev->vdev, i, i); - stq_be_phys(&address_space_memory, - vq + VIRTIO_VQCONFIG_OFFS_ADDRESS, vring); - stw_be_phys(&address_space_memory, - vq + VIRTIO_VQCONFIG_OFFS_NUM, - virtio_queue_get_num(dev->vdev, i)); + address_space_stq_be(&address_space_memory, + vq + VIRTIO_VQCONFIG_OFFS_ADDRESS, vring, + MEMTXATTRS_UNSPECIFIED, NULL); + address_space_stw_be(&address_space_memory, + vq + VIRTIO_VQCONFIG_OFFS_NUM, + virtio_queue_get_num(dev->vdev, i), + MEMTXATTRS_UNSPECIFIED, + NULL); } cur_offs = dev->dev_offs; @@ -407,7 +433,8 @@ void s390_virtio_device_sync(VirtIOS390Device *dev) cur_offs += num_vq * VIRTIO_VQCONFIG_LEN; /* Sync feature bitmap */ - stl_le_phys(&address_space_memory, cur_offs, dev->host_features); + address_space_stl_le(&address_space_memory, cur_offs, dev->host_features, + MEMTXATTRS_UNSPECIFIED, NULL); dev->feat_offs = cur_offs + dev->feat_len; cur_offs += dev->feat_len * 2; @@ -425,12 +452,16 @@ void s390_virtio_device_update_status(VirtIOS390Device *dev) VirtIODevice *vdev = dev->vdev; uint32_t features; - virtio_set_status(vdev, ldub_phys(&address_space_memory, - dev->dev_offs + VIRTIO_DEV_OFFS_STATUS)); + virtio_set_status(vdev, + address_space_ldub(&address_space_memory, + dev->dev_offs + VIRTIO_DEV_OFFS_STATUS, + MEMTXATTRS_UNSPECIFIED, NULL)); /* Update guest supported feature bitmap */ - features = bswap32(ldl_be_phys(&address_space_memory, dev->feat_offs)); + features = bswap32(address_space_ldl_be(&address_space_memory, + dev->feat_offs, + MEMTXATTRS_UNSPECIFIED, NULL)); virtio_set_features(vdev, features); } @@ -505,18 +536,13 @@ static unsigned virtio_s390_get_features(DeviceState *d) /**************** S390 Virtio Bus Device Descriptions *******************/ -static Property s390_virtio_net_properties[] = { - DEFINE_VIRTIO_COMMON_FEATURES(VirtIOS390Device, host_features), - DEFINE_PROP_END_OF_LIST(), -}; - static void s390_virtio_net_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtIOS390DeviceClass *k = VIRTIO_S390_DEVICE_CLASS(klass); k->realize = s390_virtio_net_realize; - dc->props = s390_virtio_net_properties; + set_bit(DEVICE_CATEGORY_NETWORK, dc->categories); } static const TypeInfo s390_virtio_net = { @@ -530,8 +556,10 @@ static const TypeInfo s390_virtio_net = { static void s390_virtio_blk_class_init(ObjectClass *klass, void *data) { VirtIOS390DeviceClass *k = VIRTIO_S390_DEVICE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); k->realize = s390_virtio_blk_realize; + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); } static const TypeInfo s390_virtio_blk = { @@ -553,6 +581,7 @@ static void s390_virtio_serial_class_init(ObjectClass *klass, void *data) 
k->realize = s390_virtio_serial_realize; dc->props = s390_virtio_serial_properties; + set_bit(DEVICE_CATEGORY_INPUT, dc->categories); } static const TypeInfo s390_virtio_serial = { @@ -563,18 +592,13 @@ static const TypeInfo s390_virtio_serial = { .class_init = s390_virtio_serial_class_init, }; -static Property s390_virtio_rng_properties[] = { - DEFINE_VIRTIO_COMMON_FEATURES(VirtIOS390Device, host_features), - DEFINE_PROP_END_OF_LIST(), -}; - static void s390_virtio_rng_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtIOS390DeviceClass *k = VIRTIO_S390_DEVICE_CLASS(klass); k->realize = s390_virtio_rng_realize; - dc->props = s390_virtio_rng_properties; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); } static const TypeInfo s390_virtio_rng = { @@ -602,10 +626,16 @@ static void s390_virtio_busdev_reset(DeviceState *dev) virtio_reset(_dev->vdev); } +static Property virtio_s390_properties[] = { + DEFINE_VIRTIO_COMMON_FEATURES(VirtIOS390Device, host_features), + DEFINE_PROP_END_OF_LIST(), +}; + static void virtio_s390_device_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); + dc->props = virtio_s390_properties; dc->realize = s390_virtio_busdev_realize; dc->bus_type = TYPE_S390_VIRTIO_BUS; dc->reset = s390_virtio_busdev_reset; @@ -620,18 +650,13 @@ static const TypeInfo virtio_s390_device_info = { .abstract = true, }; -static Property s390_virtio_scsi_properties[] = { - DEFINE_VIRTIO_COMMON_FEATURES(VirtIOS390Device, host_features), - DEFINE_PROP_END_OF_LIST(), -}; - static void s390_virtio_scsi_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtIOS390DeviceClass *k = VIRTIO_S390_DEVICE_CLASS(klass); k->realize = s390_virtio_scsi_realize; - dc->props = s390_virtio_scsi_properties; + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); } static const TypeInfo s390_virtio_scsi = { @@ -643,18 +668,13 @@ static const TypeInfo s390_virtio_scsi = { }; #ifdef CONFIG_VHOST_SCSI -static Property s390_vhost_scsi_properties[] = { - DEFINE_VIRTIO_COMMON_FEATURES(VirtIOS390Device, host_features), - DEFINE_PROP_END_OF_LIST(), -}; - static void s390_vhost_scsi_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtIOS390DeviceClass *k = VIRTIO_S390_DEVICE_CLASS(klass); k->realize = s390_vhost_scsi_realize; - dc->props = s390_vhost_scsi_properties; + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); } static const TypeInfo s390_vhost_scsi = { @@ -678,8 +698,10 @@ static int s390_virtio_bridge_init(SysBusDevice *dev) static void s390_virtio_bridge_class_init(ObjectClass *klass, void *data) { SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); k->init = s390_virtio_bridge_init; + set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); } static const TypeInfo s390_virtio_bridge_info = { diff --git a/hw/s390x/s390-virtio.c b/hw/s390x/s390-virtio.c index bdb538859f..59750dbfcd 100644 --- a/hw/s390x/s390-virtio.c +++ b/hw/s390x/s390-virtio.c @@ -77,6 +77,16 @@ static int s390_virtio_hcall_notify(const uint64_t *args) if (mem > ram_size) { VirtIOS390Device *dev = s390_virtio_bus_find_vring(s390_bus, mem, &i); if (dev) { + /* + * Older kernels will use the virtqueue before setting DRIVER_OK. + * In this case the feature bits are not yet up to date, meaning + * that several funny things can happen, e.g. the guest thinks + * EVENT_IDX is on and QEMU thinks it is off. Let's force a feature + * and status sync. 
+ */ + if (!(dev->vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) { + s390_virtio_device_update_status(dev); + } virtio_queue_notify(dev->vdev, i); } else { r = -EINVAL; @@ -97,7 +107,9 @@ static int s390_virtio_hcall_reset(const uint64_t *args) return -EINVAL; } virtio_reset(dev->vdev); - stb_phys(&address_space_memory, dev->dev_offs + VIRTIO_DEV_OFFS_STATUS, 0); + address_space_stb(&address_space_memory, + dev->dev_offs + VIRTIO_DEV_OFFS_STATUS, 0, + MEMTXATTRS_UNSPECIFIED, NULL); s390_virtio_device_sync(dev); s390_virtio_reset_idx(dev); diff --git a/hw/s390x/sclp.c b/hw/s390x/sclp.c index a969975a78..b3a6c5e5a4 100644 --- a/hw/s390x/sclp.c +++ b/hw/s390x/sclp.c @@ -457,10 +457,19 @@ sclpMemoryHotplugDev *get_sclp_memory_hotplug_dev(void) TYPE_SCLP_MEMORY_HOTPLUG_DEV, NULL)); } +static void sclp_memory_hotplug_dev_class_init(ObjectClass *klass, + void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + set_bit(DEVICE_CATEGORY_MISC, dc->categories); +} + static TypeInfo sclp_memory_hotplug_dev_info = { .name = TYPE_SCLP_MEMORY_HOTPLUG_DEV, .parent = TYPE_SYS_BUS_DEVICE, .instance_size = sizeof(sclpMemoryHotplugDev), + .class_init = sclp_memory_hotplug_dev_class_init, }; static void register_types(void) diff --git a/hw/s390x/sclpcpu.c b/hw/s390x/sclpcpu.c index 3600fe231d..2fe8b5aa40 100644 --- a/hw/s390x/sclpcpu.c +++ b/hw/s390x/sclpcpu.c @@ -88,12 +88,14 @@ static int irq_cpu_hotplug_init(SCLPEvent *event) static void cpu_class_init(ObjectClass *oc, void *data) { SCLPEventClass *k = SCLP_EVENT_CLASS(oc); + DeviceClass *dc = DEVICE_CLASS(oc); k->init = irq_cpu_hotplug_init; k->get_send_mask = send_mask; k->get_receive_mask = receive_mask; k->read_event_data = read_event_data; k->write_event_data = NULL; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); } static const TypeInfo sclp_cpu_info = { diff --git a/hw/s390x/sclpquiesce.c b/hw/s390x/sclpquiesce.c index 1a399bd1f0..ffa5553135 100644 --- a/hw/s390x/sclpquiesce.c +++ b/hw/s390x/sclpquiesce.c @@ -116,6 +116,7 @@ static void quiesce_class_init(ObjectClass *klass, void *data) dc->reset = quiesce_reset; dc->vmsd = &vmstate_sclpquiesce; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); k->init = quiesce_init; k->get_send_mask = send_mask; diff --git a/hw/s390x/virtio-ccw.c b/hw/s390x/virtio-ccw.c index db798a80b5..c96101aa7c 100644 --- a/hw/s390x/virtio-ccw.c +++ b/hw/s390x/virtio-ccw.c @@ -335,16 +335,23 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw) if (!ccw.cda) { ret = -EFAULT; } else { - info.queue = ldq_phys(&address_space_memory, ccw.cda); - info.align = ldl_phys(&address_space_memory, - ccw.cda + sizeof(info.queue)); - info.index = lduw_phys(&address_space_memory, - ccw.cda + sizeof(info.queue) - + sizeof(info.align)); - info.num = lduw_phys(&address_space_memory, - ccw.cda + sizeof(info.queue) - + sizeof(info.align) - + sizeof(info.index)); + info.queue = address_space_ldq(&address_space_memory, ccw.cda, + MEMTXATTRS_UNSPECIFIED, NULL); + info.align = address_space_ldl(&address_space_memory, + ccw.cda + sizeof(info.queue), + MEMTXATTRS_UNSPECIFIED, + NULL); + info.index = address_space_lduw(&address_space_memory, + ccw.cda + sizeof(info.queue) + + sizeof(info.align), + MEMTXATTRS_UNSPECIFIED, + NULL); + info.num = address_space_lduw(&address_space_memory, + ccw.cda + sizeof(info.queue) + + sizeof(info.align) + + sizeof(info.index), + MEMTXATTRS_UNSPECIFIED, + NULL); ret = virtio_ccw_set_vqs(sch, info.queue, info.align, info.index, info.num); sch->curr_status.scsw.count = 0; @@ -369,15 +376,20 @@ static int virtio_ccw_cb(SubchDev 
*sch, CCW1 ccw) if (!ccw.cda) { ret = -EFAULT; } else { - features.index = ldub_phys(&address_space_memory, - ccw.cda + sizeof(features.features)); + features.index = address_space_ldub(&address_space_memory, + ccw.cda + + sizeof(features.features), + MEMTXATTRS_UNSPECIFIED, + NULL); if (features.index < ARRAY_SIZE(dev->host_features)) { features.features = dev->host_features[features.index]; } else { /* Return zeroes if the guest supports more feature bits. */ features.features = 0; } - stl_le_phys(&address_space_memory, ccw.cda, features.features); + address_space_stl_le(&address_space_memory, ccw.cda, + features.features, MEMTXATTRS_UNSPECIFIED, + NULL); sch->curr_status.scsw.count = ccw.count - sizeof(features); ret = 0; } @@ -396,9 +408,15 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw) if (!ccw.cda) { ret = -EFAULT; } else { - features.index = ldub_phys(&address_space_memory, - ccw.cda + sizeof(features.features)); - features.features = ldl_le_phys(&address_space_memory, ccw.cda); + features.index = address_space_ldub(&address_space_memory, + ccw.cda + + sizeof(features.features), + MEMTXATTRS_UNSPECIFIED, + NULL); + features.features = address_space_ldl_le(&address_space_memory, + ccw.cda, + MEMTXATTRS_UNSPECIFIED, + NULL); if (features.index < ARRAY_SIZE(dev->host_features)) { virtio_set_features(vdev, features.features); } else { @@ -474,7 +492,8 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw) if (!ccw.cda) { ret = -EFAULT; } else { - status = ldub_phys(&address_space_memory, ccw.cda); + status = address_space_ldub(&address_space_memory, ccw.cda, + MEMTXATTRS_UNSPECIFIED, NULL); if (!(status & VIRTIO_CONFIG_S_DRIVER_OK)) { virtio_ccw_stop_ioeventfd(dev); } @@ -508,7 +527,8 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw) if (!ccw.cda) { ret = -EFAULT; } else { - indicators = ldq_be_phys(&address_space_memory, ccw.cda); + indicators = address_space_ldq_be(&address_space_memory, ccw.cda, + MEMTXATTRS_UNSPECIFIED, NULL); dev->indicators = get_indicator(indicators, sizeof(uint64_t)); sch->curr_status.scsw.count = ccw.count - sizeof(indicators); ret = 0; @@ -528,7 +548,8 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw) if (!ccw.cda) { ret = -EFAULT; } else { - indicators = ldq_be_phys(&address_space_memory, ccw.cda); + indicators = address_space_ldq_be(&address_space_memory, ccw.cda, + MEMTXATTRS_UNSPECIFIED, NULL); dev->indicators2 = get_indicator(indicators, sizeof(uint64_t)); sch->curr_status.scsw.count = ccw.count - sizeof(indicators); ret = 0; @@ -548,15 +569,21 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw) if (!ccw.cda) { ret = -EFAULT; } else { - vq_config.index = lduw_be_phys(&address_space_memory, ccw.cda); + vq_config.index = address_space_lduw_be(&address_space_memory, + ccw.cda, + MEMTXATTRS_UNSPECIFIED, + NULL); if (vq_config.index >= VIRTIO_PCI_QUEUE_MAX) { ret = -EINVAL; break; } vq_config.num_max = virtio_queue_get_num(vdev, vq_config.index); - stw_be_phys(&address_space_memory, - ccw.cda + sizeof(vq_config.index), vq_config.num_max); + address_space_stw_be(&address_space_memory, + ccw.cda + sizeof(vq_config.index), + vq_config.num_max, + MEMTXATTRS_UNSPECIFIED, + NULL); sch->curr_status.scsw.count = ccw.count - sizeof(vq_config); ret = 0; } @@ -615,8 +642,7 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw) return ret; } -static void virtio_ccw_device_realize(VirtioCcwDevice *dev, - VirtIODevice *vdev, Error **errp) +static void virtio_ccw_device_realize(VirtioCcwDevice *dev, Error **errp) { unsigned int cssid = 0; unsigned int ssid = 0; @@ 
-626,7 +652,8 @@ static void virtio_ccw_device_realize(VirtioCcwDevice *dev, bool found = false; SubchDev *sch; int num; - DeviceState *parent = DEVICE(dev); + Error *err = NULL; + VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_GET_CLASS(dev); sch = g_malloc0(sizeof(SubchDev)); @@ -739,17 +766,16 @@ static void virtio_ccw_device_realize(VirtioCcwDevice *dev, memset(&sch->id, 0, sizeof(SenseId)); sch->id.reserved = 0xff; sch->id.cu_type = VIRTIO_CCW_CU_TYPE; - sch->id.cu_model = vdev->device_id; - - /* Only the first 32 feature bits are used. */ - dev->host_features[0] = virtio_bus_get_vdev_features(&dev->bus, - dev->host_features[0]); - virtio_add_feature(&dev->host_features[0], VIRTIO_F_NOTIFY_ON_EMPTY); - virtio_add_feature(&dev->host_features[0], VIRTIO_F_BAD_FEATURE); + if (k->realize) { + k->realize(dev, &err); + } + if (err) { + error_propagate(errp, err); + css_subch_assign(cssid, ssid, schid, devno, NULL); + goto out_err; + } - css_generate_sch_crws(sch->cssid, sch->ssid, sch->schid, - parent->hotplugged, 1); return; out_err: @@ -785,10 +811,7 @@ static void virtio_ccw_net_realize(VirtioCcwDevice *ccw_dev, Error **errp) object_property_set_bool(OBJECT(vdev), true, "realized", &err); if (err) { error_propagate(errp, err); - return; } - - virtio_ccw_device_realize(ccw_dev, VIRTIO_DEVICE(vdev), errp); } static void virtio_ccw_net_instance_init(Object *obj) @@ -811,10 +834,7 @@ static void virtio_ccw_blk_realize(VirtioCcwDevice *ccw_dev, Error **errp) object_property_set_bool(OBJECT(vdev), true, "realized", &err); if (err) { error_propagate(errp, err); - return; } - - virtio_ccw_device_realize(ccw_dev, VIRTIO_DEVICE(vdev), errp); } static void virtio_ccw_blk_instance_init(Object *obj) @@ -851,10 +871,7 @@ static void virtio_ccw_serial_realize(VirtioCcwDevice *ccw_dev, Error **errp) object_property_set_bool(OBJECT(vdev), true, "realized", &err); if (err) { error_propagate(errp, err); - return; } - - virtio_ccw_device_realize(ccw_dev, VIRTIO_DEVICE(vdev), errp); } @@ -876,10 +893,7 @@ static void virtio_ccw_balloon_realize(VirtioCcwDevice *ccw_dev, Error **errp) object_property_set_bool(OBJECT(vdev), true, "realized", &err); if (err) { error_propagate(errp, err); - return; } - - virtio_ccw_device_realize(ccw_dev, VIRTIO_DEVICE(vdev), errp); } static void balloon_ccw_stats_get_all(Object *obj, struct Visitor *v, @@ -944,10 +958,7 @@ static void virtio_ccw_scsi_realize(VirtioCcwDevice *ccw_dev, Error **errp) object_property_set_bool(OBJECT(vdev), true, "realized", &err); if (err) { error_propagate(errp, err); - return; } - - virtio_ccw_device_realize(ccw_dev, VIRTIO_DEVICE(vdev), errp); } static void virtio_ccw_scsi_instance_init(Object *obj) @@ -971,10 +982,7 @@ static void vhost_ccw_scsi_realize(VirtioCcwDevice *ccw_dev, Error **errp) object_property_set_bool(OBJECT(vdev), true, "realized", &err); if (err) { error_propagate(errp, err); - return; } - - virtio_ccw_device_realize(ccw_dev, VIRTIO_DEVICE(vdev), errp); } static void vhost_ccw_scsi_instance_init(Object *obj) @@ -1002,8 +1010,6 @@ static void virtio_ccw_rng_realize(VirtioCcwDevice *ccw_dev, Error **errp) object_property_set_link(OBJECT(dev), OBJECT(dev->vdev.conf.rng), "rng", NULL); - - virtio_ccw_device_realize(ccw_dev, VIRTIO_DEVICE(vdev), errp); } /* DeviceState to VirtioCcwDevice. 
Note: used on datapath, @@ -1067,9 +1073,13 @@ static void virtio_ccw_notify(DeviceState *d, uint16_t vector) css_adapter_interrupt(dev->thinint_isc); } } else { - indicators = ldq_phys(&address_space_memory, dev->indicators->addr); + indicators = address_space_ldq(&address_space_memory, + dev->indicators->addr, + MEMTXATTRS_UNSPECIFIED, + NULL); indicators |= 1ULL << vector; - stq_phys(&address_space_memory, dev->indicators->addr, indicators); + address_space_stq(&address_space_memory, dev->indicators->addr, + indicators, MEMTXATTRS_UNSPECIFIED, NULL); css_conditional_io_interrupt(sch); } } else { @@ -1077,9 +1087,13 @@ static void virtio_ccw_notify(DeviceState *d, uint16_t vector) return; } vector = 0; - indicators = ldq_phys(&address_space_memory, dev->indicators2->addr); + indicators = address_space_ldq(&address_space_memory, + dev->indicators2->addr, + MEMTXATTRS_UNSPECIFIED, + NULL); indicators |= 1ULL << vector; - stq_phys(&address_space_memory, dev->indicators2->addr, indicators); + address_space_stq(&address_space_memory, dev->indicators2->addr, + indicators, MEMTXATTRS_UNSPECIFIED, NULL); css_conditional_io_interrupt(sch); } } @@ -1398,6 +1412,30 @@ static int virtio_ccw_load_config(DeviceState *d, QEMUFile *f) return 0; } +/* This is called by virtio-bus just after the device is plugged. */ +static void virtio_ccw_device_plugged(DeviceState *d) +{ + VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d); + SubchDev *sch = dev->sch; + + sch->id.cu_model = virtio_bus_get_vdev_id(&dev->bus); + + /* Only the first 32 feature bits are used. */ + virtio_add_feature(&dev->host_features[0], VIRTIO_F_NOTIFY_ON_EMPTY); + virtio_add_feature(&dev->host_features[0], VIRTIO_F_BAD_FEATURE); + dev->host_features[0] = virtio_bus_get_vdev_features(&dev->bus, + dev->host_features[0]); + + css_generate_sch_crws(sch->cssid, sch->ssid, sch->schid, + d->hotplugged, 1); +} + +static void virtio_ccw_device_unplugged(DeviceState *d) +{ + VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d); + + virtio_ccw_stop_ioeventfd(dev); +} /**************** Virtio-ccw Bus Device Descriptions *******************/ static Property virtio_ccw_net_properties[] = { @@ -1416,6 +1454,7 @@ static void virtio_ccw_net_class_init(ObjectClass *klass, void *data) k->exit = virtio_ccw_exit; dc->reset = virtio_ccw_reset; dc->props = virtio_ccw_net_properties; + set_bit(DEVICE_CATEGORY_NETWORK, dc->categories); } static const TypeInfo virtio_ccw_net = { @@ -1442,6 +1481,7 @@ static void virtio_ccw_blk_class_init(ObjectClass *klass, void *data) k->exit = virtio_ccw_exit; dc->reset = virtio_ccw_reset; dc->props = virtio_ccw_blk_properties; + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); } static const TypeInfo virtio_ccw_blk = { @@ -1468,6 +1508,7 @@ static void virtio_ccw_serial_class_init(ObjectClass *klass, void *data) k->exit = virtio_ccw_exit; dc->reset = virtio_ccw_reset; dc->props = virtio_ccw_serial_properties; + set_bit(DEVICE_CATEGORY_INPUT, dc->categories); } static const TypeInfo virtio_ccw_serial = { @@ -1494,6 +1535,7 @@ static void virtio_ccw_balloon_class_init(ObjectClass *klass, void *data) k->exit = virtio_ccw_exit; dc->reset = virtio_ccw_reset; dc->props = virtio_ccw_balloon_properties; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); } static const TypeInfo virtio_ccw_balloon = { @@ -1520,6 +1562,7 @@ static void virtio_ccw_scsi_class_init(ObjectClass *klass, void *data) k->exit = virtio_ccw_exit; dc->reset = virtio_ccw_reset; dc->props = virtio_ccw_scsi_properties; + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); } static 
const TypeInfo virtio_ccw_scsi = { @@ -1545,6 +1588,7 @@ static void vhost_ccw_scsi_class_init(ObjectClass *klass, void *data) k->exit = virtio_ccw_exit; dc->reset = virtio_ccw_reset; dc->props = vhost_ccw_scsi_properties; + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); } static const TypeInfo vhost_ccw_scsi = { @@ -1582,6 +1626,7 @@ static void virtio_ccw_rng_class_init(ObjectClass *klass, void *data) k->exit = virtio_ccw_exit; dc->reset = virtio_ccw_reset; dc->props = virtio_ccw_rng_properties; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); } static const TypeInfo virtio_ccw_rng = { @@ -1595,10 +1640,9 @@ static const TypeInfo virtio_ccw_rng = { static void virtio_ccw_busdev_realize(DeviceState *dev, Error **errp) { VirtioCcwDevice *_dev = (VirtioCcwDevice *)dev; - VirtIOCCWDeviceClass *_info = VIRTIO_CCW_DEVICE_GET_CLASS(dev); virtio_ccw_bus_new(&_dev->bus, sizeof(_dev->bus), _dev); - _info->realize(_dev, errp); + virtio_ccw_device_realize(_dev, errp); } static int virtio_ccw_busdev_exit(DeviceState *dev) @@ -1668,9 +1712,11 @@ static void virtual_css_bridge_class_init(ObjectClass *klass, void *data) { SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass); HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); k->init = virtual_css_bridge_init; hc->unplug = virtio_ccw_busdev_unplug; + set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); } static const TypeInfo virtual_css_bridge_info = { @@ -1712,6 +1758,8 @@ static void virtio_ccw_bus_class_init(ObjectClass *klass, void *data) k->load_queue = virtio_ccw_load_queue; k->save_config = virtio_ccw_save_config; k->load_config = virtio_ccw_load_config; + k->device_plugged = virtio_ccw_device_plugged; + k->device_unplugged = virtio_ccw_device_unplugged; } static const TypeInfo virtio_ccw_bus_info = { diff --git a/hw/scsi/megasas.c b/hw/scsi/megasas.c index ad7317bfe9..91a5d97c73 100644 --- a/hw/scsi/megasas.c +++ b/hw/scsi/megasas.c @@ -804,7 +804,7 @@ static int megasas_ctrl_get_info(MegasasState *s, MegasasCmd *cmd) MFI_INFO_LDOPS_READ_POLICY); info.max_strips_per_io = cpu_to_le16(s->fw_sge); info.stripe_sz_ops.min = 3; - info.stripe_sz_ops.max = ffs(MEGASAS_MAX_SECTORS + 1) - 1; + info.stripe_sz_ops.max = ctz32(MEGASAS_MAX_SECTORS + 1); info.properties.pred_fail_poll_interval = cpu_to_le16(300); info.properties.intr_throttle_cnt = cpu_to_le16(16); info.properties.intr_throttle_timeout = cpu_to_le16(50); diff --git a/hw/sd/sd.c b/hw/sd/sd.c index f955265f74..8abf0c9e31 100644 --- a/hw/sd/sd.c +++ b/hw/sd/sd.c @@ -796,8 +796,9 @@ static sd_rsp_type_t sd_normal_command(SDState *sd, sd->vhs = 0; /* No response if not exactly one VHS bit is set. */ - if (!(req.arg >> 8) || (req.arg >> ffs(req.arg & ~0xff))) + if (!(req.arg >> 8) || (req.arg >> (ctz32(req.arg & ~0xff) + 1))) { return sd->spi ? sd_r7 : sd_r0; + } /* Accept. 
*/ sd->vhs = req.arg; diff --git a/hw/sh4/r2d.c b/hw/sh4/r2d.c index d1d0847ba2..4221060308 100644 --- a/hw/sh4/r2d.c +++ b/hw/sh4/r2d.c @@ -318,8 +318,10 @@ static void r2d_init(MachineState *machine) } /* initialization which should be done by firmware */ - stl_phys(&address_space_memory, SH7750_BCR1, 1<<3); /* cs3 SDRAM */ - stw_phys(&address_space_memory, SH7750_BCR2, 3<<(3*2)); /* cs3 32bit */ + address_space_stl(&address_space_memory, SH7750_BCR1, 1 << 3, + MEMTXATTRS_UNSPECIFIED, NULL); /* cs3 SDRAM */ + address_space_stw(&address_space_memory, SH7750_BCR2, 3 << (3 * 2), + MEMTXATTRS_UNSPECIFIED, NULL); /* cs3 32bit */ reset_info->vector = (SDRAM_BASE + LINUX_LOAD_OFFSET) | 0xa0000000; /* Start from P2 area */ } diff --git a/hw/timer/hpet.c b/hw/timer/hpet.c index 78d86be91c..b6b8a2063d 100644 --- a/hw/timer/hpet.c +++ b/hw/timer/hpet.c @@ -206,8 +206,9 @@ static void update_irq(struct HPETTimer *timer, int set) } } } else if (timer_fsb_route(timer)) { - stl_le_phys(&address_space_memory, - timer->fsb >> 32, timer->fsb & 0xffffffff); + address_space_stl_le(&address_space_memory, timer->fsb >> 32, + timer->fsb & 0xffffffff, MEMTXATTRS_UNSPECIFIED, + NULL); } else if (timer->config & HPET_TN_TYPE_LEVEL) { s->isr |= mask; /* fold the ICH PIRQ# pin's internal inversion logic into hpet */ diff --git a/hw/tpm/tpm_passthrough.c b/hw/tpm/tpm_passthrough.c index 2a45071e36..73ca906282 100644 --- a/hw/tpm/tpm_passthrough.c +++ b/hw/tpm/tpm_passthrough.c @@ -34,15 +34,13 @@ #include "sysemu/tpm_backend_int.h" #include "tpm_tis.h" -/* #define DEBUG_TPM */ - -#ifdef DEBUG_TPM -#define DPRINTF(fmt, ...) \ - do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0) -#else -#define DPRINTF(fmt, ...) \ - do { } while (0) -#endif +#define DEBUG_TPM 0 + +#define DPRINTF(fmt, ...) do { \ + if (DEBUG_TPM) { \ + fprintf(stderr, fmt, ## __VA_ARGS__); \ + } \ +} while (0); #define TYPE_TPM_PASSTHROUGH "tpm-passthrough" #define TPM_PASSTHROUGH(obj) \ diff --git a/hw/tpm/tpm_tis.c b/hw/tpm/tpm_tis.c index 815c8eace1..b8235d5c9f 100644 --- a/hw/tpm/tpm_tis.c +++ b/hw/tpm/tpm_tis.c @@ -30,15 +30,13 @@ #include "qemu-common.h" #include "qemu/main-loop.h" -/*#define DEBUG_TIS */ +#define DEBUG_TIS 0 -#ifdef DEBUG_TIS -#define DPRINTF(fmt, ...) \ - do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0) -#else -#define DPRINTF(fmt, ...) \ - do { } while (0) -#endif +#define DPRINTF(fmt, ...) do { \ + if (DEBUG_TIS) { \ + printf(fmt, ## __VA_ARGS__); \ + } \ +} while (0); /* whether the STS interrupt is supported */ #define RAISE_STS_IRQ @@ -421,7 +419,7 @@ static void tpm_tis_dump_state(void *opaque, hwaddr addr) for (idx = 0; regs[idx] != 0xfff; idx++) { DPRINTF("tpm_tis: 0x%04x : 0x%08x\n", regs[idx], - (uint32_t)tpm_tis_mmio_read(opaque, base + regs[idx], 4)); + (int)tpm_tis_mmio_read(opaque, base + regs[idx], 4)); } DPRINTF("tpm_tis: read offset : %d\n" @@ -555,7 +553,7 @@ static uint64_t tpm_tis_mmio_read(void *opaque, hwaddr addr, val >>= shift; } - DPRINTF("tpm_tis: read.%u(%08x) = %08x\n", size, (int)addr, (uint32_t)val); + DPRINTF("tpm_tis: read.%u(%08x) = %08x\n", size, (int)addr, (int)val); return val; } @@ -578,7 +576,7 @@ static void tpm_tis_mmio_write_intern(void *opaque, hwaddr addr, uint16_t len; uint32_t mask = (size == 1) ? 0xff : ((size == 2) ? 
0xffff : ~0); - DPRINTF("tpm_tis: write.%u(%08x) = %08x\n", size, (int)addr, (uint32_t)val); + DPRINTF("tpm_tis: write.%u(%08x) = %08x\n", size, (int)addr, (int)val); if (locty == 4 && !hw_access) { DPRINTF("tpm_tis: Access to locality 4 only allowed from hardware\n"); @@ -815,7 +813,7 @@ static void tpm_tis_mmio_write_intern(void *opaque, hwaddr addr, /* drop the byte */ } else { DPRINTF("tpm_tis: Data to send to TPM: %08x (size=%d)\n", - val, size); + (int)val, size); if (tis->loc[locty].state == TPM_TIS_STATE_READY) { tis->loc[locty].state = TPM_TIS_STATE_RECEPTION; tpm_tis_sts_set(&tis->loc[locty], @@ -844,7 +842,7 @@ static void tpm_tis_mmio_write_intern(void *opaque, hwaddr addr, (tis->loc[locty].sts & TPM_TIS_STS_EXPECT)) { /* we have a packet length - see if we have all of it */ #ifdef RAISE_STS_IRQ - bool needIrq = !(tis->loc[locty].sts & TPM_TIS_STS_VALID); + bool need_irq = !(tis->loc[locty].sts & TPM_TIS_STS_VALID); #endif len = tpm_tis_get_size_from_buffer(&tis->loc[locty].w_buffer); if (len > tis->loc[locty].w_offset) { @@ -855,7 +853,7 @@ static void tpm_tis_mmio_write_intern(void *opaque, hwaddr addr, tpm_tis_sts_set(&tis->loc[locty], TPM_TIS_STS_VALID); } #ifdef RAISE_STS_IRQ - if (needIrq) { + if (need_irq) { tpm_tis_raise_irq(s, locty, TPM_TIS_INT_STS_VALID); } #endif diff --git a/hw/usb/core.c b/hw/usb/core.c index cf34755bba..d0025db60d 100644 --- a/hw/usb/core.c +++ b/hw/usb/core.c @@ -331,23 +331,6 @@ void usb_generic_async_ctrl_complete(USBDevice *s, USBPacket *p) usb_packet_complete(s, p); } -/* XXX: fix overflow */ -int set_usb_string(uint8_t *buf, const char *str) -{ - int len, i; - uint8_t *q; - - q = buf; - len = strlen(str); - *q++ = 2 * len + 2; - *q++ = 3; - for(i = 0; i < len; i++) { - *q++ = str[i]; - *q++ = 0; - } - return q - buf; -} - USBDevice *usb_find_device(USBPort *port, uint8_t addr) { USBDevice *dev = port->dev; @@ -749,12 +732,6 @@ void usb_ep_set_type(USBDevice *dev, int pid, int ep, uint8_t type) uep->type = type; } -uint8_t usb_ep_get_ifnum(USBDevice *dev, int pid, int ep) -{ - struct USBEndpoint *uep = usb_ep_get(dev, pid, ep); - return uep->ifnum; -} - void usb_ep_set_ifnum(USBDevice *dev, int pid, int ep, uint8_t ifnum) { struct USBEndpoint *uep = usb_ep_get(dev, pid, ep); @@ -782,12 +759,6 @@ void usb_ep_set_max_packet_size(USBDevice *dev, int pid, int ep, uep->max_packet_size = size * microframes; } -int usb_ep_get_max_packet_size(USBDevice *dev, int pid, int ep) -{ - struct USBEndpoint *uep = usb_ep_get(dev, pid, ep); - return uep->max_packet_size; -} - void usb_ep_set_max_streams(USBDevice *dev, int pid, int ep, uint8_t raw) { struct USBEndpoint *uep = usb_ep_get(dev, pid, ep); @@ -801,18 +772,6 @@ void usb_ep_set_max_streams(USBDevice *dev, int pid, int ep, uint8_t raw) } } -int usb_ep_get_max_streams(USBDevice *dev, int pid, int ep) -{ - struct USBEndpoint *uep = usb_ep_get(dev, pid, ep); - return uep->max_streams; -} - -void usb_ep_set_pipeline(USBDevice *dev, int pid, int ep, bool enabled) -{ - struct USBEndpoint *uep = usb_ep_get(dev, pid, ep); - uep->pipeline = enabled; -} - void usb_ep_set_halted(USBDevice *dev, int pid, int ep, bool halted) { struct USBEndpoint *uep = usb_ep_get(dev, pid, ep); diff --git a/hw/usb/dev-audio.c b/hw/usb/dev-audio.c index 67deffebcf..f092bb8496 100644 --- a/hw/usb/dev-audio.c +++ b/hw/usb/dev-audio.c @@ -361,6 +361,9 @@ typedef struct USBAudioState { uint32_t buffer; } USBAudioState; +#define TYPE_USB_AUDIO "usb-audio" +#define USB_AUDIO(obj) OBJECT_CHECK(USBAudioState, (obj), TYPE_USB_AUDIO) + 
static void output_callback(void *opaque, int avail) { USBAudioState *s = opaque; @@ -506,7 +509,7 @@ static void usb_audio_handle_control(USBDevice *dev, USBPacket *p, int request, int value, int index, int length, uint8_t *data) { - USBAudioState *s = DO_UPCAST(USBAudioState, dev, dev); + USBAudioState *s = USB_AUDIO(dev); int ret = 0; if (s->debug) { @@ -565,7 +568,7 @@ fail: static void usb_audio_set_interface(USBDevice *dev, int iface, int old, int value) { - USBAudioState *s = DO_UPCAST(USBAudioState, dev, dev); + USBAudioState *s = USB_AUDIO(dev); if (iface == 1) { usb_audio_set_output_altset(s, value); @@ -574,7 +577,7 @@ static void usb_audio_set_interface(USBDevice *dev, int iface, static void usb_audio_handle_reset(USBDevice *dev) { - USBAudioState *s = DO_UPCAST(USBAudioState, dev, dev); + USBAudioState *s = USB_AUDIO(dev); if (s->debug) { fprintf(stderr, "usb-audio: reset\n"); @@ -615,7 +618,7 @@ static void usb_audio_handle_data(USBDevice *dev, USBPacket *p) static void usb_audio_handle_destroy(USBDevice *dev) { - USBAudioState *s = DO_UPCAST(USBAudioState, dev, dev); + USBAudioState *s = USB_AUDIO(dev); if (s->debug) { fprintf(stderr, "usb-audio: destroy\n"); @@ -630,12 +633,12 @@ static void usb_audio_handle_destroy(USBDevice *dev) static void usb_audio_realize(USBDevice *dev, Error **errp) { - USBAudioState *s = DO_UPCAST(USBAudioState, dev, dev); + USBAudioState *s = USB_AUDIO(dev); usb_desc_create_serial(dev); usb_desc_init(dev); s->dev.opaque = s; - AUD_register_card("usb-audio", &s->card); + AUD_register_card(TYPE_USB_AUDIO, &s->card); s->out.altset = ALTSET_OFF; s->out.mute = false; @@ -647,14 +650,14 @@ static void usb_audio_realize(USBDevice *dev, Error **errp) s->out.as.endianness = 0; streambuf_init(&s->out.buf, s->buffer); - s->out.voice = AUD_open_out(&s->card, s->out.voice, "usb-audio", + s->out.voice = AUD_open_out(&s->card, s->out.voice, TYPE_USB_AUDIO, s, output_callback, &s->out.as); AUD_set_volume_out(s->out.voice, s->out.mute, s->out.vol[0], s->out.vol[1]); AUD_set_active_out(s->out.voice, 0); } static const VMStateDescription vmstate_usb_audio = { - .name = "usb-audio", + .name = TYPE_USB_AUDIO, .unmigratable = 1, }; @@ -684,7 +687,7 @@ static void usb_audio_class_init(ObjectClass *klass, void *data) } static const TypeInfo usb_audio_info = { - .name = "usb-audio", + .name = TYPE_USB_AUDIO, .parent = TYPE_USB_DEVICE, .instance_size = sizeof(USBAudioState), .class_init = usb_audio_class_init, @@ -693,7 +696,7 @@ static const TypeInfo usb_audio_info = { static void usb_audio_register_types(void) { type_register_static(&usb_audio_info); - usb_legacy_register("usb-audio", "audio", NULL); + usb_legacy_register(TYPE_USB_AUDIO, "audio", NULL); } type_init(usb_audio_register_types) diff --git a/hw/usb/dev-bluetooth.c b/hw/usb/dev-bluetooth.c index 9bf673057a..b19ec76b00 100644 --- a/hw/usb/dev-bluetooth.c +++ b/hw/usb/dev-bluetooth.c @@ -49,6 +49,9 @@ struct USBBtState { } outcmd, outacl, outsco; }; +#define TYPE_USB_BT "usb-bt-dongle" +#define USB_BT(obj) OBJECT_CHECK(struct USBBtState, (obj), TYPE_USB_BT) + #define USB_EVT_EP 1 #define USB_ACL_EP 2 #define USB_SCO_EP 3 @@ -503,7 +506,7 @@ static void usb_bt_handle_destroy(USBDevice *dev) static void usb_bt_realize(USBDevice *dev, Error **errp) { - struct USBBtState *s = DO_UPCAST(struct USBBtState, dev, dev); + struct USBBtState *s = USB_BT(dev); usb_desc_create_serial(dev); usb_desc_init(dev); @@ -523,7 +526,7 @@ static USBDevice *usb_bt_init(USBBus *bus, const char *cmdline) USBDevice *dev; struct USBBtState 
*s; HCIInfo *hci; - const char *name = "usb-bt-dongle"; + const char *name = TYPE_USB_BT; if (*cmdline) { hci = hci_init(cmdline); @@ -534,7 +537,7 @@ static USBDevice *usb_bt_init(USBBus *bus, const char *cmdline) return NULL; dev = usb_create(bus, name); - s = DO_UPCAST(struct USBBtState, dev, dev); + s = USB_BT(dev); s->hci = hci; return dev; } @@ -561,7 +564,7 @@ static void usb_bt_class_initfn(ObjectClass *klass, void *data) } static const TypeInfo bt_info = { - .name = "usb-bt-dongle", + .name = TYPE_USB_BT, .parent = TYPE_USB_DEVICE, .instance_size = sizeof(struct USBBtState), .class_init = usb_bt_class_initfn, @@ -570,7 +573,7 @@ static const TypeInfo bt_info = { static void usb_bt_register_types(void) { type_register_static(&bt_info); - usb_legacy_register("usb-bt-dongle", "bt", usb_bt_init); + usb_legacy_register(TYPE_USB_BT, "bt", usb_bt_init); } type_init(usb_bt_register_types) diff --git a/hw/usb/dev-hid.c b/hw/usb/dev-hid.c index 507c9663c5..2e7dcd96cb 100644 --- a/hw/usb/dev-hid.c +++ b/hw/usb/dev-hid.c @@ -51,6 +51,9 @@ typedef struct USBHIDState { uint32_t head; } USBHIDState; +#define TYPE_USB_HID "usb-hid" +#define USB_HID(obj) OBJECT_CHECK(USBHIDState, (obj), TYPE_USB_HID) + enum { STR_MANUFACTURER = 1, STR_PRODUCT_MOUSE, @@ -564,7 +567,7 @@ static void usb_hid_changed(HIDState *hs) static void usb_hid_handle_reset(USBDevice *dev) { - USBHIDState *us = DO_UPCAST(USBHIDState, dev, dev); + USBHIDState *us = USB_HID(dev); hid_reset(&us->hid); } @@ -572,7 +575,7 @@ static void usb_hid_handle_reset(USBDevice *dev) static void usb_hid_handle_control(USBDevice *dev, USBPacket *p, int request, int value, int index, int length, uint8_t *data) { - USBHIDState *us = DO_UPCAST(USBHIDState, dev, dev); + USBHIDState *us = USB_HID(dev); HIDState *hs = &us->hid; int ret; @@ -651,7 +654,7 @@ static void usb_hid_handle_control(USBDevice *dev, USBPacket *p, static void usb_hid_handle_data(USBDevice *dev, USBPacket *p) { - USBHIDState *us = DO_UPCAST(USBHIDState, dev, dev); + USBHIDState *us = USB_HID(dev); HIDState *hs = &us->hid; uint8_t buf[p->iov.size]; int len = 0; @@ -687,7 +690,7 @@ static void usb_hid_handle_data(USBDevice *dev, USBPacket *p) static void usb_hid_handle_destroy(USBDevice *dev) { - USBHIDState *us = DO_UPCAST(USBHIDState, dev, dev); + USBHIDState *us = USB_HID(dev); hid_free(&us->hid); } @@ -696,7 +699,7 @@ static void usb_hid_initfn(USBDevice *dev, int kind, const USBDesc *usb1, const USBDesc *usb2, Error **errp) { - USBHIDState *us = DO_UPCAST(USBHIDState, dev, dev); + USBHIDState *us = USB_HID(dev); switch (us->usb_version) { case 1: dev->usb_desc = usb1; @@ -784,6 +787,14 @@ static void usb_hid_class_initfn(ObjectClass *klass, void *data) uc->handle_attach = usb_desc_attach; } +static const TypeInfo usb_hid_type_info = { + .name = TYPE_USB_HID, + .parent = TYPE_USB_DEVICE, + .instance_size = sizeof(USBHIDState), + .abstract = true, + .class_init = usb_hid_class_initfn, +}; + static Property usb_tablet_properties[] = { DEFINE_PROP_UINT32("usb_version", USBHIDState, usb_version, 2), DEFINE_PROP_STRING("display", USBHIDState, display), @@ -796,7 +807,6 @@ static void usb_tablet_class_initfn(ObjectClass *klass, void *data) DeviceClass *dc = DEVICE_CLASS(klass); USBDeviceClass *uc = USB_DEVICE_CLASS(klass); - usb_hid_class_initfn(klass, data); uc->realize = usb_tablet_realize; uc->product_desc = "QEMU USB Tablet"; dc->vmsd = &vmstate_usb_ptr; @@ -806,8 +816,7 @@ static void usb_tablet_class_initfn(ObjectClass *klass, void *data) static const TypeInfo usb_tablet_info = 
{ .name = "usb-tablet", - .parent = TYPE_USB_DEVICE, - .instance_size = sizeof(USBHIDState), + .parent = TYPE_USB_HID, .class_init = usb_tablet_class_initfn, }; @@ -821,7 +830,6 @@ static void usb_mouse_class_initfn(ObjectClass *klass, void *data) DeviceClass *dc = DEVICE_CLASS(klass); USBDeviceClass *uc = USB_DEVICE_CLASS(klass); - usb_hid_class_initfn(klass, data); uc->realize = usb_mouse_realize; uc->product_desc = "QEMU USB Mouse"; dc->vmsd = &vmstate_usb_ptr; @@ -831,8 +839,7 @@ static void usb_mouse_class_initfn(ObjectClass *klass, void *data) static const TypeInfo usb_mouse_info = { .name = "usb-mouse", - .parent = TYPE_USB_DEVICE, - .instance_size = sizeof(USBHIDState), + .parent = TYPE_USB_HID, .class_init = usb_mouse_class_initfn, }; @@ -847,7 +854,6 @@ static void usb_keyboard_class_initfn(ObjectClass *klass, void *data) DeviceClass *dc = DEVICE_CLASS(klass); USBDeviceClass *uc = USB_DEVICE_CLASS(klass); - usb_hid_class_initfn(klass, data); uc->realize = usb_keyboard_realize; uc->product_desc = "QEMU USB Keyboard"; dc->vmsd = &vmstate_usb_kbd; @@ -857,13 +863,13 @@ static void usb_keyboard_class_initfn(ObjectClass *klass, void *data) static const TypeInfo usb_keyboard_info = { .name = "usb-kbd", - .parent = TYPE_USB_DEVICE, - .instance_size = sizeof(USBHIDState), + .parent = TYPE_USB_HID, .class_init = usb_keyboard_class_initfn, }; static void usb_hid_register_types(void) { + type_register_static(&usb_hid_type_info); type_register_static(&usb_tablet_info); usb_legacy_register("usb-tablet", "tablet", NULL); type_register_static(&usb_mouse_info); diff --git a/hw/usb/dev-hub.c b/hw/usb/dev-hub.c index 0482f58719..c8c6855505 100644 --- a/hw/usb/dev-hub.c +++ b/hw/usb/dev-hub.c @@ -41,6 +41,9 @@ typedef struct USBHubState { USBHubPort ports[NUM_PORTS]; } USBHubState; +#define TYPE_USB_HUB "usb-hub" +#define USB_HUB(obj) OBJECT_CHECK(USBHubState, (obj), TYPE_USB_HUB) + #define ClearHubFeature (0x2000 | USB_REQ_CLEAR_FEATURE) #define ClearPortFeature (0x2300 | USB_REQ_CLEAR_FEATURE) #define GetHubDescriptor (0xa000 | USB_REQ_GET_DESCRIPTOR) @@ -227,7 +230,7 @@ static void usb_hub_complete(USBPort *port, USBPacket *packet) static USBDevice *usb_hub_find_device(USBDevice *dev, uint8_t addr) { - USBHubState *s = DO_UPCAST(USBHubState, dev, dev); + USBHubState *s = USB_HUB(dev); USBHubPort *port; USBDevice *downstream; int i; @@ -247,7 +250,7 @@ static USBDevice *usb_hub_find_device(USBDevice *dev, uint8_t addr) static void usb_hub_handle_reset(USBDevice *dev) { - USBHubState *s = DO_UPCAST(USBHubState, dev, dev); + USBHubState *s = USB_HUB(dev); USBHubPort *port; int i; @@ -513,7 +516,7 @@ static USBPortOps usb_hub_port_ops = { static void usb_hub_realize(USBDevice *dev, Error **errp) { - USBHubState *s = DO_UPCAST(USBHubState, dev, dev); + USBHubState *s = USB_HUB(dev); USBHubPort *port; int i; @@ -577,7 +580,7 @@ static void usb_hub_class_initfn(ObjectClass *klass, void *data) } static const TypeInfo hub_info = { - .name = "usb-hub", + .name = TYPE_USB_HUB, .parent = TYPE_USB_DEVICE, .instance_size = sizeof(USBHubState), .class_init = usb_hub_class_initfn, diff --git a/hw/usb/dev-mtp.c b/hw/usb/dev-mtp.c index 108ece8190..809b1cb118 100644 --- a/hw/usb/dev-mtp.c +++ b/hw/usb/dev-mtp.c @@ -130,6 +130,9 @@ struct MTPState { QTAILQ_HEAD(, MTPObject) objects; }; +#define TYPE_USB_MTP "usb-mtp" +#define USB_MTP(obj) OBJECT_CHECK(MTPState, (obj), TYPE_USB_MTP) + #define QEMU_STORAGE_ID 0x00010001 #define MTP_FLAG_WRITABLE 0 @@ -878,7 +881,7 @@ static void usb_mtp_command(MTPState *s, 
MTPControl *c) static void usb_mtp_handle_reset(USBDevice *dev) { - MTPState *s = DO_UPCAST(MTPState, dev, dev); + MTPState *s = USB_MTP(dev); trace_usb_mtp_reset(s->dev.addr); @@ -914,7 +917,7 @@ static void usb_mtp_cancel_packet(USBDevice *dev, USBPacket *p) static void usb_mtp_handle_data(USBDevice *dev, USBPacket *p) { - MTPState *s = DO_UPCAST(MTPState, dev, dev); + MTPState *s = USB_MTP(dev); MTPControl cmd; mtp_container container; uint32_t params[5]; @@ -1062,12 +1065,16 @@ static void usb_mtp_handle_data(USBDevice *dev, USBPacket *p) static void usb_mtp_realize(USBDevice *dev, Error **errp) { - MTPState *s = DO_UPCAST(MTPState, dev, dev); + MTPState *s = USB_MTP(dev); usb_desc_create_serial(dev); usb_desc_init(dev); QTAILQ_INIT(&s->objects); if (s->desc == NULL) { + if (s->root == NULL) { + error_setg(errp, "usb-mtp: x-root property must be configured"); + return; + } s->desc = strrchr(s->root, '/'); if (s->desc && s->desc[0]) { s->desc = g_strdup(s->desc + 1); @@ -1113,7 +1120,7 @@ static void usb_mtp_class_initfn(ObjectClass *klass, void *data) } static TypeInfo mtp_info = { - .name = "usb-mtp", + .name = TYPE_USB_MTP, .parent = TYPE_USB_DEVICE, .instance_size = sizeof(MTPState), .class_init = usb_mtp_class_initfn, diff --git a/hw/usb/dev-network.c b/hw/usb/dev-network.c index 18669917f5..743c231d6b 100644 --- a/hw/usb/dev-network.c +++ b/hw/usb/dev-network.c @@ -648,6 +648,9 @@ typedef struct USBNetState { QTAILQ_HEAD(rndis_resp_head, rndis_response) rndis_resp; } USBNetState; +#define TYPE_USB_NET "usb-net" +#define USB_NET(obj) OBJECT_CHECK(USBNetState, (obj), TYPE_USB_NET) + static int is_rndis(USBNetState *s) { return s->dev.config->bConfigurationValue == DEV_RNDIS_CONFIG_VALUE; @@ -1310,6 +1313,10 @@ static int usbnet_can_receive(NetClientState *nc) { USBNetState *s = qemu_get_nic_opaque(nc); + if (!s->dev.config) { + return 0; + } + if (is_rndis(s) && s->rndis_state != RNDIS_DATA_INITIALIZED) { return 1; } @@ -1343,7 +1350,7 @@ static NetClientInfo net_usbnet_info = { static void usb_net_realize(USBDevice *dev, Error **errrp) { - USBNetState *s = DO_UPCAST(USBNetState, dev, dev); + USBNetState *s = USB_NET(dev); usb_desc_create_serial(dev); usb_desc_init(dev); @@ -1376,7 +1383,7 @@ static void usb_net_realize(USBDevice *dev, Error **errrp) static void usb_net_instance_init(Object *obj) { USBDevice *dev = USB_DEVICE(obj); - USBNetState *s = DO_UPCAST(USBNetState, dev, dev); + USBNetState *s = USB_NET(dev); device_add_bootindex_property(obj, &s->conf.bootindex, "bootindex", "/ethernet-phy@0", @@ -1437,7 +1444,7 @@ static void usb_net_class_initfn(ObjectClass *klass, void *data) } static const TypeInfo net_info = { - .name = "usb-net", + .name = TYPE_USB_NET, .parent = TYPE_USB_DEVICE, .instance_size = sizeof(USBNetState), .class_init = usb_net_class_initfn, @@ -1447,7 +1454,7 @@ static const TypeInfo net_info = { static void usb_net_register_types(void) { type_register_static(&net_info); - usb_legacy_register("usb-net", "net", usb_net_init); + usb_legacy_register(TYPE_USB_NET, "net", usb_net_init); } type_init(usb_net_register_types) diff --git a/hw/usb/dev-serial.c b/hw/usb/dev-serial.c index 67c2072ce7..6ca3da9727 100644 --- a/hw/usb/dev-serial.c +++ b/hw/usb/dev-serial.c @@ -103,6 +103,9 @@ typedef struct { CharDriverState *cs; } USBSerialState; +#define TYPE_USB_SERIAL "usb-serial-dev" +#define USB_SERIAL_DEV(obj) OBJECT_CHECK(USBSerialState, (obj), TYPE_USB_SERIAL) + enum { STR_MANUFACTURER = 1, STR_PRODUCT_SERIAL, @@ -473,7 +476,7 @@ static void usb_serial_event(void 
*opaque, int event) static void usb_serial_realize(USBDevice *dev, Error **errp) { - USBSerialState *s = DO_UPCAST(USBSerialState, dev, dev); + USBSerialState *s = USB_SERIAL_DEV(dev); Error *local_err = NULL; usb_desc_create_serial(dev); @@ -576,26 +579,40 @@ static Property serial_properties[] = { DEFINE_PROP_END_OF_LIST(), }; -static void usb_serial_class_initfn(ObjectClass *klass, void *data) +static void usb_serial_dev_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); USBDeviceClass *uc = USB_DEVICE_CLASS(klass); - uc->realize = usb_serial_realize; - uc->product_desc = "QEMU USB Serial"; - uc->usb_desc = &desc_serial; + uc->realize = usb_serial_realize; uc->handle_reset = usb_serial_handle_reset; uc->handle_control = usb_serial_handle_control; uc->handle_data = usb_serial_handle_data; dc->vmsd = &vmstate_usb_serial; - dc->props = serial_properties; set_bit(DEVICE_CATEGORY_INPUT, dc->categories); } +static const TypeInfo usb_serial_dev_type_info = { + .name = TYPE_USB_SERIAL, + .parent = TYPE_USB_DEVICE, + .instance_size = sizeof(USBSerialState), + .abstract = true, + .class_init = usb_serial_dev_class_init, +}; + +static void usb_serial_class_initfn(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + USBDeviceClass *uc = USB_DEVICE_CLASS(klass); + + uc->product_desc = "QEMU USB Serial"; + uc->usb_desc = &desc_serial; + dc->props = serial_properties; +} + static const TypeInfo serial_info = { .name = "usb-serial", - .parent = TYPE_USB_DEVICE, - .instance_size = sizeof(USBSerialState), + .parent = TYPE_USB_SERIAL, .class_init = usb_serial_class_initfn, }; @@ -609,26 +626,20 @@ static void usb_braille_class_initfn(ObjectClass *klass, void *data) DeviceClass *dc = DEVICE_CLASS(klass); USBDeviceClass *uc = USB_DEVICE_CLASS(klass); - uc->realize = usb_serial_realize; uc->product_desc = "QEMU USB Braille"; uc->usb_desc = &desc_braille; - uc->handle_reset = usb_serial_handle_reset; - uc->handle_control = usb_serial_handle_control; - uc->handle_data = usb_serial_handle_data; - dc->vmsd = &vmstate_usb_serial; dc->props = braille_properties; - set_bit(DEVICE_CATEGORY_INPUT, dc->categories); } static const TypeInfo braille_info = { .name = "usb-braille", - .parent = TYPE_USB_DEVICE, - .instance_size = sizeof(USBSerialState), + .parent = TYPE_USB_SERIAL, .class_init = usb_braille_class_initfn, }; static void usb_serial_register_types(void) { + type_register_static(&usb_serial_dev_type_info); type_register_static(&serial_info); usb_legacy_register("usb-serial", "serial", usb_serial_init); type_register_static(&braille_info); diff --git a/hw/usb/dev-smartcard-reader.c b/hw/usb/dev-smartcard-reader.c index 78ce681671..2d29367ae7 100644 --- a/hw/usb/dev-smartcard-reader.c +++ b/hw/usb/dev-smartcard-reader.c @@ -55,7 +55,7 @@ do { \ #define D_VERBOSE 4 #define CCID_DEV_NAME "usb-ccid" - +#define USB_CCID_DEV(obj) OBJECT_CHECK(USBCCIDState, (obj), CCID_DEV_NAME) /* * The two options for variable sized buffers: * make them constant size, for large enough constant, @@ -649,7 +649,7 @@ static void ccid_detach(USBCCIDState *s) static void ccid_handle_reset(USBDevice *dev) { - USBCCIDState *s = DO_UPCAST(USBCCIDState, dev, dev); + USBCCIDState *s = USB_CCID_DEV(dev); DPRINTF(s, 1, "Reset\n"); @@ -692,7 +692,7 @@ static const char *ccid_control_to_str(USBCCIDState *s, int request) static void ccid_handle_control(USBDevice *dev, USBPacket *p, int request, int value, int index, int length, uint8_t *data) { - USBCCIDState *s = DO_UPCAST(USBCCIDState, 
dev, dev); + USBCCIDState *s = USB_CCID_DEV(dev); int ret; DPRINTF(s, 1, "%s: got control %s (%x), value %x\n", __func__, @@ -1104,7 +1104,7 @@ static void ccid_bulk_in_copy_to_guest(USBCCIDState *s, USBPacket *p) static void ccid_handle_data(USBDevice *dev, USBPacket *p) { - USBCCIDState *s = DO_UPCAST(USBCCIDState, dev, dev); + USBCCIDState *s = USB_CCID_DEV(dev); uint8_t buf[2]; switch (p->pid) { @@ -1148,7 +1148,7 @@ static void ccid_handle_data(USBDevice *dev, USBPacket *p) static void ccid_handle_destroy(USBDevice *dev) { - USBCCIDState *s = DO_UPCAST(USBCCIDState, dev, dev); + USBCCIDState *s = USB_CCID_DEV(dev); ccid_bulk_in_clear(s); } @@ -1184,8 +1184,9 @@ static const TypeInfo ccid_bus_info = { void ccid_card_send_apdu_to_guest(CCIDCardState *card, uint8_t *apdu, uint32_t len) { - USBCCIDState *s = DO_UPCAST(USBCCIDState, dev.qdev, - card->qdev.parent_bus->parent); + DeviceState *qdev = DEVICE(card); + USBDevice *dev = USB_DEVICE(qdev); + USBCCIDState *s = USB_CCID_DEV(dev); Answer *answer; if (!ccid_has_pending_answers(s)) { @@ -1206,8 +1207,9 @@ void ccid_card_send_apdu_to_guest(CCIDCardState *card, void ccid_card_card_removed(CCIDCardState *card) { - USBCCIDState *s = - DO_UPCAST(USBCCIDState, dev.qdev, card->qdev.parent_bus->parent); + DeviceState *qdev = DEVICE(card); + USBDevice *dev = USB_DEVICE(qdev); + USBCCIDState *s = USB_CCID_DEV(dev); ccid_on_slot_change(s, false); ccid_flush_pending_answers(s); @@ -1216,8 +1218,9 @@ void ccid_card_card_removed(CCIDCardState *card) int ccid_card_ccid_attach(CCIDCardState *card) { - USBCCIDState *s = - DO_UPCAST(USBCCIDState, dev.qdev, card->qdev.parent_bus->parent); + DeviceState *qdev = DEVICE(card); + USBDevice *dev = USB_DEVICE(qdev); + USBCCIDState *s = USB_CCID_DEV(dev); DPRINTF(s, 1, "CCID Attach\n"); if (s->migration_state == MIGRATION_MIGRATED) { @@ -1228,8 +1231,9 @@ int ccid_card_ccid_attach(CCIDCardState *card) void ccid_card_ccid_detach(CCIDCardState *card) { - USBCCIDState *s = - DO_UPCAST(USBCCIDState, dev.qdev, card->qdev.parent_bus->parent); + DeviceState *qdev = DEVICE(card); + USBDevice *dev = USB_DEVICE(qdev); + USBCCIDState *s = USB_CCID_DEV(dev); DPRINTF(s, 1, "CCID Detach\n"); if (ccid_card_inserted(s)) { @@ -1240,8 +1244,9 @@ void ccid_card_ccid_detach(CCIDCardState *card) void ccid_card_card_error(CCIDCardState *card, uint64_t error) { - USBCCIDState *s = - DO_UPCAST(USBCCIDState, dev.qdev, card->qdev.parent_bus->parent); + DeviceState *qdev = DEVICE(card); + USBDevice *dev = USB_DEVICE(qdev); + USBCCIDState *s = USB_CCID_DEV(dev); s->bmCommandStatus = COMMAND_STATUS_FAILED; s->last_answer_error = error; @@ -1258,8 +1263,9 @@ void ccid_card_card_error(CCIDCardState *card, uint64_t error) void ccid_card_card_inserted(CCIDCardState *card) { - USBCCIDState *s = - DO_UPCAST(USBCCIDState, dev.qdev, card->qdev.parent_bus->parent); + DeviceState *qdev = DEVICE(card); + USBDevice *dev = USB_DEVICE(qdev); + USBCCIDState *s = USB_CCID_DEV(dev); s->bmCommandStatus = COMMAND_STATUS_NO_ERROR; ccid_flush_pending_answers(s); @@ -1270,8 +1276,8 @@ static int ccid_card_exit(DeviceState *qdev) { int ret = 0; CCIDCardState *card = CCID_CARD(qdev); - USBCCIDState *s = - DO_UPCAST(USBCCIDState, dev.qdev, card->qdev.parent_bus->parent); + USBDevice *dev = USB_DEVICE(qdev); + USBCCIDState *s = USB_CCID_DEV(dev); if (ccid_card_inserted(s)) { ccid_card_card_removed(card); @@ -1284,8 +1290,8 @@ static int ccid_card_exit(DeviceState *qdev) static int ccid_card_init(DeviceState *qdev) { CCIDCardState *card = CCID_CARD(qdev); - 
USBCCIDState *s = - DO_UPCAST(USBCCIDState, dev.qdev, card->qdev.parent_bus->parent); + USBDevice *dev = USB_DEVICE(qdev); + USBCCIDState *s = USB_CCID_DEV(dev); int ret = 0; if (card->slot != 0) { @@ -1306,7 +1312,7 @@ static int ccid_card_init(DeviceState *qdev) static void ccid_realize(USBDevice *dev, Error **errp) { - USBCCIDState *s = DO_UPCAST(USBCCIDState, dev, dev); + USBCCIDState *s = USB_CCID_DEV(dev); usb_desc_create_serial(dev); usb_desc_init(dev); diff --git a/hw/usb/dev-storage.c b/hw/usb/dev-storage.c index ae8d40dc77..abe0e1d6a1 100644 --- a/hw/usb/dev-storage.c +++ b/hw/usb/dev-storage.c @@ -64,6 +64,9 @@ typedef struct { SCSIDevice *scsi_dev; } MSDState; +#define TYPE_USB_STORAGE "usb-storage-dev" +#define USB_STORAGE_DEV(obj) OBJECT_CHECK(MSDState, (obj), TYPE_USB_STORAGE) + struct usb_msd_cbw { uint32_t sig; uint32_t tag; @@ -385,7 +388,7 @@ static void usb_msd_handle_control(USBDevice *dev, USBPacket *p, static void usb_msd_cancel_io(USBDevice *dev, USBPacket *p) { - MSDState *s = DO_UPCAST(MSDState, dev, dev); + MSDState *s = USB_STORAGE_DEV(dev); assert(s->packet == p); s->packet = NULL; @@ -599,7 +602,7 @@ static const struct SCSIBusInfo usb_msd_scsi_info_bot = { static void usb_msd_realize_storage(USBDevice *dev, Error **errp) { - MSDState *s = DO_UPCAST(MSDState, dev, dev); + MSDState *s = USB_STORAGE_DEV(dev); BlockBackend *blk = s->conf.blk; SCSIDevice *scsi_dev; Error *err = NULL; @@ -658,7 +661,7 @@ static void usb_msd_realize_storage(USBDevice *dev, Error **errp) static void usb_msd_realize_bot(USBDevice *dev, Error **errp) { - MSDState *s = DO_UPCAST(MSDState, dev, dev); + MSDState *s = USB_STORAGE_DEV(dev); usb_desc_create_serial(dev); usb_desc_init(dev); @@ -748,7 +751,7 @@ static Property msd_properties[] = { DEFINE_PROP_END_OF_LIST(), }; -static void usb_msd_class_initfn_common(ObjectClass *klass) +static void usb_msd_class_initfn_common(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); USBDeviceClass *uc = USB_DEVICE_CLASS(klass); @@ -772,14 +775,13 @@ static void usb_msd_class_initfn_storage(ObjectClass *klass, void *data) uc->realize = usb_msd_realize_storage; dc->props = msd_properties; - usb_msd_class_initfn_common(klass); } static void usb_msd_get_bootindex(Object *obj, Visitor *v, void *opaque, const char *name, Error **errp) { USBDevice *dev = USB_DEVICE(obj); - MSDState *s = DO_UPCAST(MSDState, dev, dev); + MSDState *s = USB_STORAGE_DEV(dev); visit_type_int32(v, &s->conf.bootindex, name, errp); } @@ -788,7 +790,7 @@ static void usb_msd_set_bootindex(Object *obj, Visitor *v, void *opaque, const char *name, Error **errp) { USBDevice *dev = USB_DEVICE(obj); - MSDState *s = DO_UPCAST(MSDState, dev, dev); + MSDState *s = USB_STORAGE_DEV(dev); int32_t boot_index; Error *local_err = NULL; @@ -815,6 +817,14 @@ out: } } +static const TypeInfo usb_storage_dev_type_info = { + .name = TYPE_USB_STORAGE, + .parent = TYPE_USB_DEVICE, + .instance_size = sizeof(MSDState), + .abstract = true, + .class_init = usb_msd_class_initfn_common, +}; + static void usb_msd_instance_init(Object *obj) { object_property_add(obj, "bootindex", "int32", @@ -829,27 +839,25 @@ static void usb_msd_class_initfn_bot(ObjectClass *klass, void *data) DeviceClass *dc = DEVICE_CLASS(klass); uc->realize = usb_msd_realize_bot; - usb_msd_class_initfn_common(klass); dc->hotpluggable = false; } static const TypeInfo msd_info = { .name = "usb-storage", - .parent = TYPE_USB_DEVICE, - .instance_size = sizeof(MSDState), + .parent = TYPE_USB_STORAGE, .class_init = 
usb_msd_class_initfn_storage, .instance_init = usb_msd_instance_init, }; static const TypeInfo bot_info = { .name = "usb-bot", - .parent = TYPE_USB_DEVICE, - .instance_size = sizeof(MSDState), + .parent = TYPE_USB_STORAGE, .class_init = usb_msd_class_initfn_bot, }; static void usb_msd_register_types(void) { + type_register_static(&usb_storage_dev_type_info); type_register_static(&msd_info); type_register_static(&bot_info); usb_legacy_register("usb-storage", "disk", usb_msd_init); diff --git a/hw/usb/dev-uas.c b/hw/usb/dev-uas.c index 04fc515dbe..38b26c586d 100644 --- a/hw/usb/dev-uas.c +++ b/hw/usb/dev-uas.c @@ -127,6 +127,9 @@ struct UASDevice { USBPacket *status3[UAS_MAX_STREAMS + 1]; }; +#define TYPE_USB_UAS "usb-uas" +#define USB_UAS(obj) OBJECT_CHECK(UASDevice, (obj), TYPE_USB_UAS) + struct UASRequest { uint16_t tag; uint64_t lun; @@ -626,7 +629,7 @@ static const struct SCSIBusInfo usb_uas_scsi_info = { static void usb_uas_handle_reset(USBDevice *dev) { - UASDevice *uas = DO_UPCAST(UASDevice, dev, dev); + UASDevice *uas = USB_UAS(dev); UASRequest *req, *nreq; UASStatus *st, *nst; @@ -655,7 +658,7 @@ static void usb_uas_handle_control(USBDevice *dev, USBPacket *p, static void usb_uas_cancel_io(USBDevice *dev, USBPacket *p) { - UASDevice *uas = DO_UPCAST(UASDevice, dev, dev); + UASDevice *uas = USB_UAS(dev); UASRequest *req, *nreq; int i; @@ -797,7 +800,7 @@ incorrect_lun: static void usb_uas_handle_data(USBDevice *dev, USBPacket *p) { - UASDevice *uas = DO_UPCAST(UASDevice, dev, dev); + UASDevice *uas = USB_UAS(dev); uas_iu iu; UASStatus *st; UASRequest *req; @@ -888,14 +891,14 @@ static void usb_uas_handle_data(USBDevice *dev, USBPacket *p) static void usb_uas_handle_destroy(USBDevice *dev) { - UASDevice *uas = DO_UPCAST(UASDevice, dev, dev); + UASDevice *uas = USB_UAS(dev); qemu_bh_delete(uas->status_bh); } static void usb_uas_realize(USBDevice *dev, Error **errp) { - UASDevice *uas = DO_UPCAST(UASDevice, dev, dev); + UASDevice *uas = USB_UAS(dev); usb_desc_create_serial(dev); usb_desc_init(dev); @@ -943,7 +946,7 @@ static void usb_uas_class_initfn(ObjectClass *klass, void *data) } static const TypeInfo uas_info = { - .name = "usb-uas", + .name = TYPE_USB_UAS, .parent = TYPE_USB_DEVICE, .instance_size = sizeof(UASDevice), .class_init = usb_uas_class_initfn, diff --git a/hw/usb/dev-wacom.c b/hw/usb/dev-wacom.c index 844eafadf7..c2450e7297 100644 --- a/hw/usb/dev-wacom.c +++ b/hw/usb/dev-wacom.c @@ -56,6 +56,9 @@ typedef struct USBWacomState { int changed; } USBWacomState; +#define TYPE_USB_WACOM "usb-wacom-tablet" +#define USB_WACOM(obj) OBJECT_CHECK(USBWacomState, (obj), TYPE_USB_WACOM) + enum { STR_MANUFACTURER = 1, STR_PRODUCT, @@ -337,7 +340,7 @@ static void usb_wacom_handle_destroy(USBDevice *dev) static void usb_wacom_realize(USBDevice *dev, Error **errp) { - USBWacomState *s = DO_UPCAST(USBWacomState, dev, dev); + USBWacomState *s = USB_WACOM(dev); usb_desc_create_serial(dev); usb_desc_init(dev); s->intr = usb_ep_get(dev, USB_TOKEN_IN, 1); @@ -367,7 +370,7 @@ static void usb_wacom_class_init(ObjectClass *klass, void *data) } static const TypeInfo wacom_info = { - .name = "usb-wacom-tablet", + .name = TYPE_USB_WACOM, .parent = TYPE_USB_DEVICE, .instance_size = sizeof(USBWacomState), .class_init = usb_wacom_class_init, @@ -376,7 +379,7 @@ static const TypeInfo wacom_info = { static void usb_wacom_register_types(void) { type_register_static(&wacom_info); - usb_legacy_register("usb-wacom-tablet", "wacom-tablet", NULL); + usb_legacy_register(TYPE_USB_WACOM, "wacom-tablet", NULL); } 
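The dev-hid.c, dev-serial.c and dev-storage.c hunks above all apply the same refactoring: shared state and class_init code moves into an abstract parent type, and each concrete device only sets .parent plus a small class_init of its own. A minimal sketch of that shape, reusing the hypothetical usb-foo names from the earlier sketch and assuming QEMU's QOM registration API:

static void usb_foo_base_class_init(ObjectClass *klass, void *data)
{
    USBDeviceClass *uc = USB_DEVICE_CLASS(klass);

    /* Callbacks shared by every concrete variant live in the base class. */
    uc->handle_reset = usb_foo_handle_reset;
}

static void usb_foo_class_init(ObjectClass *klass, void *data)
{
    USBDeviceClass *uc = USB_DEVICE_CLASS(klass);

    /* Each concrete type only fills in what differs. */
    uc->product_desc = "Hypothetical USB Foo";
}

/* Abstract parent: carries the instance size and the shared callbacks. */
static const TypeInfo usb_foo_base_info = {
    .name          = "usb-foo-base",
    .parent        = TYPE_USB_DEVICE,
    .instance_size = sizeof(FooState),
    .abstract      = true,             /* cannot be instantiated directly */
    .class_init    = usb_foo_base_class_init,
};

/* Concrete child: inherits instance_size and callbacks from the parent. */
static const TypeInfo usb_foo_info = {
    .name       = "usb-foo",
    .parent     = "usb-foo-base",
    .class_init = usb_foo_class_init,
};

static void usb_foo_register_types(void)
{
    /* Register the abstract parent alongside its children, as the patch does. */
    type_register_static(&usb_foo_base_info);
    type_register_static(&usb_foo_info);
}
type_init(usb_foo_register_types)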
type_init(usb_wacom_register_types) diff --git a/hw/usb/hcd-uhci.c b/hw/usb/hcd-uhci.c index 327f26da70..3f0ed62689 100644 --- a/hw/usb/hcd-uhci.c +++ b/hw/usb/hcd-uhci.c @@ -154,6 +154,9 @@ static void uhci_async_cancel(UHCIAsync *async); static void uhci_queue_fill(UHCIQueue *q, UHCI_TD *td); static void uhci_resume(void *opaque); +#define TYPE_UHCI "pci-uhci-usb" +#define UHCI(obj) OBJECT_CHECK(UHCIState, (obj), TYPE_UHCI) + static inline int32_t uhci_queue_token(UHCI_TD *td) { if ((td->token & (0xf << 15)) == 0) { @@ -351,7 +354,7 @@ static void uhci_update_irq(UHCIState *s) static void uhci_reset(DeviceState *dev) { PCIDevice *d = PCI_DEVICE(dev); - UHCIState *s = DO_UPCAST(UHCIState, dev, d); + UHCIState *s = UHCI(d); uint8_t *pci_conf; int i; UHCIPort *port; @@ -363,7 +366,7 @@ static void uhci_reset(DeviceState *dev) pci_conf[0x6a] = 0x01; /* usb clock */ pci_conf[0x6b] = 0x00; s->cmd = 0; - s->status = 0; + s->status = UHCI_STS_HCHALTED; s->status2 = 0; s->intr = 0; s->fl_base_addr = 0; @@ -1196,7 +1199,7 @@ static void usb_uhci_common_realize(PCIDevice *dev, Error **errp) Error *err = NULL; PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(dev); UHCIPCIDeviceClass *u = container_of(pc, UHCIPCIDeviceClass, parent_class); - UHCIState *s = DO_UPCAST(UHCIState, dev, dev); + UHCIState *s = UHCI(dev); uint8_t *pci_conf = s->dev.config; int i; @@ -1241,7 +1244,7 @@ static void usb_uhci_common_realize(PCIDevice *dev, Error **errp) static void usb_uhci_vt82c686b_realize(PCIDevice *dev, Error **errp) { - UHCIState *s = DO_UPCAST(UHCIState, dev, dev); + UHCIState *s = UHCI(dev); uint8_t *pci_conf = s->dev.config; /* USB misc control 1/2 */ @@ -1256,7 +1259,7 @@ static void usb_uhci_vt82c686b_realize(PCIDevice *dev, Error **errp) static void usb_uhci_exit(PCIDevice *dev) { - UHCIState *s = DO_UPCAST(UHCIState, dev, dev); + UHCIState *s = UHCI(dev); trace_usb_uhci_exit(); @@ -1294,6 +1297,26 @@ static void uhci_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->class_id = PCI_CLASS_SERIAL_USB; + dc->vmsd = &vmstate_uhci; + dc->reset = uhci_reset; + set_bit(DEVICE_CATEGORY_USB, dc->categories); +} + +static const TypeInfo uhci_pci_type_info = { + .name = TYPE_UHCI, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(UHCIState), + .class_size = sizeof(UHCIPCIDeviceClass), + .abstract = true, + .class_init = uhci_class_init, +}; + +static void uhci_data_class_init(ObjectClass *klass, void *data) +{ + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); UHCIPCIDeviceClass *u = container_of(k, UHCIPCIDeviceClass, parent_class); UHCIInfo *info = data; @@ -1302,9 +1325,6 @@ static void uhci_class_init(ObjectClass *klass, void *data) k->vendor_id = info->vendor_id; k->device_id = info->device_id; k->revision = info->revision; - k->class_id = PCI_CLASS_SERIAL_USB; - dc->vmsd = &vmstate_uhci; - dc->reset = uhci_reset; if (!info->unplug) { /* uhci controllers in companion setups can't be hotplugged */ dc->hotpluggable = false; @@ -1312,7 +1332,6 @@ static void uhci_class_init(ObjectClass *klass, void *data) } else { dc->props = uhci_properties_standalone; } - set_bit(DEVICE_CATEGORY_USB, dc->categories); u->info = *info; } @@ -1387,13 +1406,13 @@ static UHCIInfo uhci_info[] = { static void uhci_register_types(void) { TypeInfo uhci_type_info = { - .parent = TYPE_PCI_DEVICE, - .instance_size = sizeof(UHCIState), - .class_size = sizeof(UHCIPCIDeviceClass), - .class_init = uhci_class_init, + 
.parent = TYPE_UHCI, + .class_init = uhci_data_class_init, }; int i; + type_register_static(&uhci_pci_type_info); + for (i = 0; i < ARRAY_SIZE(uhci_info); i++) { uhci_type_info.name = uhci_info[i].name; uhci_type_info.class_data = uhci_info + i; diff --git a/hw/usb/hcd-xhci.c b/hw/usb/hcd-xhci.c index ba15ae0019..90a5fbff29 100644 --- a/hw/usb/hcd-xhci.c +++ b/hw/usb/hcd-xhci.c @@ -1767,18 +1767,9 @@ static void xhci_xfer_report(XHCITransfer *xfer) break; } - /* - * XHCI 1.1, 4.11.3.1 Transfer Event TRB -- "each Transfer TRB - * encountered with its IOC flag set to '1' shall generate a Transfer - * Event." - * - * Otherwise, longer transfers can have multiple data TRBs (for scatter - * gather). Short transfers and errors should be reported once per - * transfer only. - */ - if ((trb->control & TRB_TR_IOC) || - (!reported && ((shortpkt && (trb->control & TRB_TR_ISP)) || - (xfer->status != CC_SUCCESS && left == 0)))) { + if (!reported && ((trb->control & TRB_TR_IOC) || + (shortpkt && (trb->control & TRB_TR_ISP)) || + (xfer->status != CC_SUCCESS && left == 0))) { event.slotid = xfer->slotid; event.epid = xfer->epid; event.length = (trb->status & 0x1ffff) - chunk; @@ -1802,6 +1793,14 @@ static void xhci_xfer_report(XHCITransfer *xfer) return; } } + + switch (TRB_TYPE(*trb)) { + case TR_SETUP: + reported = 0; + shortpkt = 0; + break; + } + } } @@ -2204,7 +2203,6 @@ static void xhci_kick_ep(XHCIState *xhci, unsigned int slotid, if (epid == 1) { if (xhci_fire_ctl_transfer(xhci, xfer) >= 0) { epctx->next_xfer = (epctx->next_xfer + 1) % TD_QUEUE; - ep = xfer->packet.ep; } else { DPRINTF("xhci: error firing CTL transfer\n"); } @@ -2224,6 +2222,8 @@ static void xhci_kick_ep(XHCIState *xhci, unsigned int slotid, if (xfer->running_retry) { DPRINTF("xhci: xfer nacked, stopping schedule\n"); epctx->retry = xfer; + timer_mod(epctx->kick_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + + epctx->interval * 125000); break; } } diff --git a/hw/usb/redirect.c b/hw/usb/redirect.c index 2416de83e9..242a654583 100644 --- a/hw/usb/redirect.c +++ b/hw/usb/redirect.c @@ -130,6 +130,9 @@ struct USBRedirDevice { int compatible_speedmask; }; +#define TYPE_USB_REDIR "usb-redir" +#define USB_REDIRECT(obj) OBJECT_CHECK(USBRedirDevice, (obj), TYPE_USB_REDIR) + static void usbredir_hello(void *priv, struct usb_redir_hello_header *h); static void usbredir_device_connect(void *priv, struct usb_redir_device_connect_header *device_connect); @@ -360,7 +363,7 @@ static void packet_id_queue_empty(struct PacketIdQueue *q) static void usbredir_cancel_packet(USBDevice *udev, USBPacket *p) { - USBRedirDevice *dev = DO_UPCAST(USBRedirDevice, dev, udev); + USBRedirDevice *dev = USB_REDIRECT(udev); int i = USBEP2I(p->ep); if (p->combined) { @@ -500,7 +503,7 @@ static void usbredir_free_bufpq(USBRedirDevice *dev, uint8_t ep) static void usbredir_handle_reset(USBDevice *udev) { - USBRedirDevice *dev = DO_UPCAST(USBRedirDevice, dev, udev); + USBRedirDevice *dev = USB_REDIRECT(udev); DPRINTF("reset device\n"); usbredirparser_send_reset(dev->parser); @@ -907,7 +910,7 @@ static void usbredir_stop_interrupt_receiving(USBRedirDevice *dev, static void usbredir_handle_data(USBDevice *udev, USBPacket *p) { - USBRedirDevice *dev = DO_UPCAST(USBRedirDevice, dev, udev); + USBRedirDevice *dev = USB_REDIRECT(udev); uint8_t ep; ep = p->ep->nr; @@ -976,7 +979,7 @@ static void usbredir_stop_ep(USBRedirDevice *dev, int i) static void usbredir_ep_stopped(USBDevice *udev, USBEndpoint *uep) { - USBRedirDevice *dev = DO_UPCAST(USBRedirDevice, dev, udev); + 
USBRedirDevice *dev = USB_REDIRECT(udev); usbredir_stop_ep(dev, USBEP2I(uep)); usbredirparser_do_write(dev->parser); @@ -1046,7 +1049,7 @@ static void usbredir_get_interface(USBRedirDevice *dev, USBPacket *p, static void usbredir_handle_control(USBDevice *udev, USBPacket *p, int request, int value, int index, int length, uint8_t *data) { - USBRedirDevice *dev = DO_UPCAST(USBRedirDevice, dev, udev); + USBRedirDevice *dev = USB_REDIRECT(udev); struct usb_redir_control_packet_header control_packet; if (usbredir_already_in_flight(dev, p->id)) { @@ -1101,7 +1104,7 @@ static void usbredir_handle_control(USBDevice *udev, USBPacket *p, static int usbredir_alloc_streams(USBDevice *udev, USBEndpoint **eps, int nr_eps, int streams) { - USBRedirDevice *dev = DO_UPCAST(USBRedirDevice, dev, udev); + USBRedirDevice *dev = USB_REDIRECT(udev); #if USBREDIR_VERSION >= 0x000700 struct usb_redir_alloc_bulk_streams_header alloc_streams; int i; @@ -1140,7 +1143,7 @@ static void usbredir_free_streams(USBDevice *udev, USBEndpoint **eps, int nr_eps) { #if USBREDIR_VERSION >= 0x000700 - USBRedirDevice *dev = DO_UPCAST(USBRedirDevice, dev, udev); + USBRedirDevice *dev = USB_REDIRECT(udev); struct usb_redir_free_bulk_streams_header free_streams; int i; @@ -1362,7 +1365,7 @@ static void usbredir_init_endpoints(USBRedirDevice *dev) static void usbredir_realize(USBDevice *udev, Error **errp) { - USBRedirDevice *dev = DO_UPCAST(USBRedirDevice, dev, udev); + USBRedirDevice *dev = USB_REDIRECT(udev); int i; if (dev->cs == NULL) { @@ -1415,7 +1418,7 @@ static void usbredir_cleanup_device_queues(USBRedirDevice *dev) static void usbredir_handle_destroy(USBDevice *udev) { - USBRedirDevice *dev = DO_UPCAST(USBRedirDevice, dev, udev); + USBRedirDevice *dev = USB_REDIRECT(udev); qemu_chr_delete(dev->cs); dev->cs = NULL; @@ -2496,7 +2499,7 @@ static void usbredir_class_initfn(ObjectClass *klass, void *data) static void usbredir_instance_init(Object *obj) { USBDevice *udev = USB_DEVICE(obj); - USBRedirDevice *dev = DO_UPCAST(USBRedirDevice, dev, udev); + USBRedirDevice *dev = USB_REDIRECT(udev); device_add_bootindex_property(obj, &dev->bootindex, "bootindex", NULL, @@ -2504,7 +2507,7 @@ static void usbredir_instance_init(Object *obj) } static const TypeInfo usbredir_dev_info = { - .name = "usb-redir", + .name = TYPE_USB_REDIR, .parent = TYPE_USB_DEVICE, .instance_size = sizeof(USBRedirDevice), .class_init = usbredir_class_initfn, diff --git a/hw/vfio/common.c b/hw/vfio/common.c index b01262063d..b1045da857 100644 --- a/hw/vfio/common.c +++ b/hw/vfio/common.c @@ -270,13 +270,14 @@ static void vfio_iommu_map_notify(Notifier *n, void *data) * this IOMMU to its immediate target. We need to translate * it the rest of the way through to memory. 
*/ + rcu_read_lock(); mr = address_space_translate(&address_space_memory, iotlb->translated_addr, &xlat, &len, iotlb->perm & IOMMU_WO); if (!memory_region_is_ram(mr)) { error_report("iommu map to non memory area %"HWADDR_PRIx"", xlat); - return; + goto out; } /* * Translation truncates length to the IOMMU page size, @@ -284,7 +285,7 @@ static void vfio_iommu_map_notify(Notifier *n, void *data) */ if (len & iotlb->addr_mask) { error_report("iommu has granularity incompatible with target AS"); - return; + goto out; } if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) { @@ -307,6 +308,8 @@ static void vfio_iommu_map_notify(Notifier *n, void *data) iotlb->addr_mask + 1, ret); } } +out: + rcu_read_unlock(); } static void vfio_listener_region_add(MemoryListener *listener, diff --git a/hw/vfio/pci.c b/hw/vfio/pci.c index 6b80539c1f..e0e339a534 100644 --- a/hw/vfio/pci.c +++ b/hw/vfio/pci.c @@ -154,6 +154,7 @@ typedef struct VFIOPCIDevice { PCIHostDeviceAddress host; EventNotifier err_notifier; EventNotifier req_notifier; + int (*resetfn)(struct VFIOPCIDevice *); uint32_t features; #define VFIO_FEATURE_ENABLE_VGA_BIT 0 #define VFIO_FEATURE_ENABLE_VGA (1 << VFIO_FEATURE_ENABLE_VGA_BIT) @@ -1531,9 +1532,12 @@ static uint64_t vfio_rtl8168_window_quirk_read(void *opaque, return 0; } - io_mem_read(&vdev->pdev.msix_table_mmio, - (hwaddr)(quirk->data.address_match & 0xfff), - &val, size); + memory_region_dispatch_read(&vdev->pdev.msix_table_mmio, + (hwaddr)(quirk->data.address_match + & 0xfff), + &val, + size, + MEMTXATTRS_UNSPECIFIED); return val; } } @@ -1561,9 +1565,12 @@ static void vfio_rtl8168_window_quirk_write(void *opaque, hwaddr addr, memory_region_name(&quirk->mem), vdev->vbasedev.name); - io_mem_write(&vdev->pdev.msix_table_mmio, - (hwaddr)(quirk->data.address_match & 0xfff), - data, size); + memory_region_dispatch_write(&vdev->pdev.msix_table_mmio, + (hwaddr)(quirk->data.address_match + & 0xfff), + data, + size, + MEMTXATTRS_UNSPECIFIED); } quirk->data.flags = 1; @@ -2394,7 +2401,7 @@ static void vfio_map_bar(VFIOPCIDevice *vdev, int nr) if (vdev->msix && vdev->msix->table_bar == nr) { uint64_t start; - start = HOST_PAGE_ALIGN(vdev->msix->table_offset + + start = HOST_PAGE_ALIGN((uint64_t)vdev->msix->table_offset + (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE)); size = start < bar->region.size ? bar->region.size - start : 0; @@ -3319,6 +3326,162 @@ static void vfio_unregister_req_notifier(VFIOPCIDevice *vdev) vdev->req_enabled = false; } +/* + * AMD Radeon PCI config reset, based on Linux: + * drivers/gpu/drm/radeon/ci_smc.c:ci_is_smc_running() + * drivers/gpu/drm/radeon/radeon_device.c:radeon_pci_config_reset + * drivers/gpu/drm/radeon/ci_smc.c:ci_reset_smc() + * drivers/gpu/drm/radeon/ci_smc.c:ci_stop_smc_clock() + * IDs: include/drm/drm_pciids.h + * Registers: http://cgit.freedesktop.org/~agd5f/linux/commit/?id=4e2aa447f6f0 + * + * Bonaire and Hawaii GPUs do not respond to a bus reset. This is a bug in the + * hardware that should be fixed on future ASICs. The symptom of this is that + * once the accerlated driver loads, Windows guests will bsod on subsequent + * attmpts to load the driver, such as after VM reset or shutdown/restart. To + * work around this, we do an AMD specific PCI config reset, followed by an SMC + * reset. The PCI config reset only works if SMC firmware is running, so we + * have a dependency on the state of the device as to whether this reset will + * be effective. 
There are still cases where we won't be able to kick the + * device into working, but this greatly improves the usability overall. The + * config reset magic is relatively common on AMD GPUs, but the setup and SMC + * poking is largely ASIC specific. + */ +static bool vfio_radeon_smc_is_running(VFIOPCIDevice *vdev) +{ + uint32_t clk, pc_c; + + /* + * Registers 200h and 204h are index and data registers for acessing + * indirect configuration registers within the device. + */ + vfio_region_write(&vdev->bars[5].region, 0x200, 0x80000004, 4); + clk = vfio_region_read(&vdev->bars[5].region, 0x204, 4); + vfio_region_write(&vdev->bars[5].region, 0x200, 0x80000370, 4); + pc_c = vfio_region_read(&vdev->bars[5].region, 0x204, 4); + + return (!(clk & 1) && (0x20100 <= pc_c)); +} + +/* + * The scope of a config reset is controlled by a mode bit in the misc register + * and a fuse, exposed as a bit in another register. The fuse is the default + * (0 = GFX, 1 = whole GPU), the misc bit is a toggle, with the forumula + * scope = !(misc ^ fuse), where the resulting scope is defined the same as + * the fuse. A truth table therefore tells us that if misc == fuse, we need + * to flip the value of the bit in the misc register. + */ +static void vfio_radeon_set_gfx_only_reset(VFIOPCIDevice *vdev) +{ + uint32_t misc, fuse; + bool a, b; + + vfio_region_write(&vdev->bars[5].region, 0x200, 0xc00c0000, 4); + fuse = vfio_region_read(&vdev->bars[5].region, 0x204, 4); + b = fuse & 64; + + vfio_region_write(&vdev->bars[5].region, 0x200, 0xc0000010, 4); + misc = vfio_region_read(&vdev->bars[5].region, 0x204, 4); + a = misc & 2; + + if (a == b) { + vfio_region_write(&vdev->bars[5].region, 0x204, misc ^ 2, 4); + vfio_region_read(&vdev->bars[5].region, 0x204, 4); /* flush */ + } +} + +static int vfio_radeon_reset(VFIOPCIDevice *vdev) +{ + PCIDevice *pdev = &vdev->pdev; + int i, ret = 0; + uint32_t data; + + /* Defer to a kernel implemented reset */ + if (vdev->vbasedev.reset_works) { + return -ENODEV; + } + + /* Enable only memory BAR access */ + vfio_pci_write_config(pdev, PCI_COMMAND, PCI_COMMAND_MEMORY, 2); + + /* Reset only works if SMC firmware is loaded and running */ + if (!vfio_radeon_smc_is_running(vdev)) { + ret = -EINVAL; + goto out; + } + + /* Make sure only the GFX function is reset */ + vfio_radeon_set_gfx_only_reset(vdev); + + /* AMD PCI config reset */ + vfio_pci_write_config(pdev, 0x7c, 0x39d5e86b, 4); + usleep(100); + + /* Read back the memory size to make sure we're out of reset */ + for (i = 0; i < 100000; i++) { + if (vfio_region_read(&vdev->bars[5].region, 0x5428, 4) != 0xffffffff) { + break; + } + usleep(1); + } + + /* Reset SMC */ + vfio_region_write(&vdev->bars[5].region, 0x200, 0x80000000, 4); + data = vfio_region_read(&vdev->bars[5].region, 0x204, 4); + data |= 1; + vfio_region_write(&vdev->bars[5].region, 0x204, data, 4); + + /* Disable SMC clock */ + vfio_region_write(&vdev->bars[5].region, 0x200, 0x80000004, 4); + data = vfio_region_read(&vdev->bars[5].region, 0x204, 4); + data |= 1; + vfio_region_write(&vdev->bars[5].region, 0x204, data, 4); + +out: + /* Restore PCI command register */ + vfio_pci_write_config(pdev, PCI_COMMAND, 0, 2); + + return ret; +} + +static void vfio_setup_resetfn(VFIOPCIDevice *vdev) +{ + PCIDevice *pdev = &vdev->pdev; + uint16_t vendor, device; + + vendor = pci_get_word(pdev->config + PCI_VENDOR_ID); + device = pci_get_word(pdev->config + PCI_DEVICE_ID); + + switch (vendor) { + case 0x1002: + switch (device) { + /* Bonaire */ + case 0x6649: /* Bonaire [FirePro W5100] 
*/ + case 0x6650: + case 0x6651: + case 0x6658: /* Bonaire XTX [Radeon R7 260X] */ + case 0x665c: /* Bonaire XT [Radeon HD 7790/8770 / R9 260 OEM] */ + case 0x665d: /* Bonaire [Radeon R7 200 Series] */ + /* Hawaii */ + case 0x67A0: /* Hawaii XT GL [FirePro W9100] */ + case 0x67A1: /* Hawaii PRO GL [FirePro W8100] */ + case 0x67A2: + case 0x67A8: + case 0x67A9: + case 0x67AA: + case 0x67B0: /* Hawaii XT [Radeon R9 290X] */ + case 0x67B1: /* Hawaii PRO [Radeon R9 290] */ + case 0x67B8: + case 0x67B9: + case 0x67BA: + case 0x67BE: + vdev->resetfn = vfio_radeon_reset; + break; + } + break; + } +} + static int vfio_initfn(PCIDevice *pdev) { VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev); @@ -3352,7 +3515,7 @@ static int vfio_initfn(PCIDevice *pdev) len = readlink(path, iommu_group_path, sizeof(path)); if (len <= 0 || len >= sizeof(path)) { error_report("vfio: error no iommu_group for device"); - return len < 0 ? -errno : ENAMETOOLONG; + return len < 0 ? -errno : -ENAMETOOLONG; } iommu_group_path[len] = 0; @@ -3467,6 +3630,7 @@ static int vfio_initfn(PCIDevice *pdev) vfio_register_err_notifier(vdev); vfio_register_req_notifier(vdev); + vfio_setup_resetfn(vdev); return 0; @@ -3514,6 +3678,10 @@ static void vfio_pci_reset(DeviceState *dev) vfio_pci_pre_reset(vdev); + if (vdev->resetfn && !vdev->resetfn(vdev)) { + goto post_reset; + } + if (vdev->vbasedev.reset_works && (vdev->has_flr || !vdev->has_pm_reset) && !ioctl(vdev->vbasedev.fd, VFIO_DEVICE_RESET)) { diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c index aefe0bbaaf..e7ab8293d1 100644 --- a/hw/virtio/vhost-user.c +++ b/hw/virtio/vhost-user.c @@ -128,7 +128,7 @@ static int vhost_user_read(struct vhost_dev *dev, VhostUserMsg *msg) r = qemu_chr_fe_read_all(chr, p, size); if (r != size) { - error_report("Failed to read msg header. Read %d instead of %d.\n", r, + error_report("Failed to read msg header. Read %d instead of %d.", r, size); goto fail; } @@ -136,7 +136,7 @@ static int vhost_user_read(struct vhost_dev *dev, VhostUserMsg *msg) /* validate received flags */ if (msg->flags != (VHOST_USER_REPLY_MASK | VHOST_USER_VERSION)) { error_report("Failed to read msg header." - " Flags 0x%x instead of 0x%x.\n", msg->flags, + " Flags 0x%x instead of 0x%x.", msg->flags, VHOST_USER_REPLY_MASK | VHOST_USER_VERSION); goto fail; } @@ -144,7 +144,7 @@ static int vhost_user_read(struct vhost_dev *dev, VhostUserMsg *msg) /* validate message size is sane */ if (msg->size > VHOST_USER_PAYLOAD_SIZE) { error_report("Failed to read msg header." - " Size %d exceeds the maximum %zu.\n", msg->size, + " Size %d exceeds the maximum %zu.", msg->size, VHOST_USER_PAYLOAD_SIZE); goto fail; } @@ -155,7 +155,7 @@ static int vhost_user_read(struct vhost_dev *dev, VhostUserMsg *msg) r = qemu_chr_fe_read_all(chr, p, size); if (r != size) { error_report("Failed to read msg payload." 
- " Read %d instead of %d.\n", r, msg->size); + " Read %d instead of %d.", r, msg->size); goto fail; } } @@ -235,8 +235,8 @@ static int vhost_user_call(struct vhost_dev *dev, unsigned long int request, msg.memory.nregions = fd_num; if (!fd_num) { - error_report("Failed initializing vhost-user memory map\n" - "consider using -object memory-backend-file share=on\n"); + error_report("Failed initializing vhost-user memory map, " + "consider using -object memory-backend-file share=on"); return -1; } @@ -280,7 +280,7 @@ static int vhost_user_call(struct vhost_dev *dev, unsigned long int request, } break; default: - error_report("vhost-user trying to send unhandled ioctl\n"); + error_report("vhost-user trying to send unhandled ioctl"); return -1; break; } @@ -296,27 +296,27 @@ static int vhost_user_call(struct vhost_dev *dev, unsigned long int request, if (msg_request != msg.request) { error_report("Received unexpected msg type." - " Expected %d received %d\n", msg_request, msg.request); + " Expected %d received %d", msg_request, msg.request); return -1; } switch (msg_request) { case VHOST_USER_GET_FEATURES: if (msg.size != sizeof(m.u64)) { - error_report("Received bad msg size.\n"); + error_report("Received bad msg size."); return -1; } *((__u64 *) arg) = msg.u64; break; case VHOST_USER_GET_VRING_BASE: if (msg.size != sizeof(m.state)) { - error_report("Received bad msg size.\n"); + error_report("Received bad msg size."); return -1; } memcpy(arg, &msg.state, sizeof(struct vhost_vring_state)); break; default: - error_report("Received unexpected msg type.\n"); + error_report("Received unexpected msg type."); return -1; break; } diff --git a/include/block/aio.h b/include/block/aio.h index 7d1e26b33b..d2bb423de1 100644 --- a/include/block/aio.h +++ b/include/block/aio.h @@ -82,9 +82,6 @@ struct AioContext { /* Used for aio_notify. */ EventNotifier notifier; - /* GPollFDs for aio_poll() */ - GArray *pollfds; - /* Thread pool for performing work and receiving completion callbacks */ struct ThreadPool *thread_pool; @@ -121,13 +118,14 @@ void aio_context_ref(AioContext *ctx); void aio_context_unref(AioContext *ctx); /* Take ownership of the AioContext. If the AioContext will be shared between - * threads, a thread must have ownership when calling aio_poll(). + * threads, and a thread does not want to be interrupted, it will have to + * take ownership around calls to aio_poll(). Otherwise, aio_poll() + * automatically takes care of calling aio_context_acquire and + * aio_context_release. * - * Note that multiple threads calling aio_poll() means timers, BHs, and - * callbacks may be invoked from a different thread than they were registered - * from. Therefore, code must use AioContext acquire/release or use - * fine-grained synchronization to protect shared state if other threads will - * be accessing it simultaneously. + * Access to timers and BHs from a thread that has not acquired AioContext + * is possible. Access to callbacks for now must be done while the AioContext + * is owned by the thread (FIXME). 
*/ void aio_context_acquire(AioContext *ctx); diff --git a/include/block/block.h b/include/block/block.h index 4c57d63fe2..7d1a7174f6 100644 --- a/include/block/block.h +++ b/include/block/block.h @@ -382,7 +382,7 @@ void bdrv_lock_medium(BlockDriverState *bs, bool locked); void bdrv_eject(BlockDriverState *bs, bool eject_flag); const char *bdrv_get_format_name(BlockDriverState *bs); BlockDriverState *bdrv_find_node(const char *node_name); -BlockDeviceInfoList *bdrv_named_nodes_list(void); +BlockDeviceInfoList *bdrv_named_nodes_list(Error **errp); BlockDriverState *bdrv_lookup_bs(const char *device, const char *node_name, Error **errp); @@ -398,6 +398,7 @@ void bdrv_iterate_format(void (*it)(void *opaque, const char *name), void *opaque); const char *bdrv_get_node_name(const BlockDriverState *bs); const char *bdrv_get_device_name(const BlockDriverState *bs); +const char *bdrv_get_device_or_node_name(const BlockDriverState *bs); int bdrv_get_flags(BlockDriverState *bs); int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num, const uint8_t *buf, int nb_sectors); @@ -449,18 +450,39 @@ bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov); struct HBitmapIter; typedef struct BdrvDirtyBitmap BdrvDirtyBitmap; -BdrvDirtyBitmap *bdrv_create_dirty_bitmap(BlockDriverState *bs, int granularity, +BdrvDirtyBitmap *bdrv_create_dirty_bitmap(BlockDriverState *bs, + uint32_t granularity, + const char *name, Error **errp); +int bdrv_dirty_bitmap_create_successor(BlockDriverState *bs, + BdrvDirtyBitmap *bitmap, + Error **errp); +BdrvDirtyBitmap *bdrv_dirty_bitmap_abdicate(BlockDriverState *bs, + BdrvDirtyBitmap *bitmap, + Error **errp); +BdrvDirtyBitmap *bdrv_reclaim_dirty_bitmap(BlockDriverState *bs, + BdrvDirtyBitmap *bitmap, + Error **errp); +BdrvDirtyBitmap *bdrv_find_dirty_bitmap(BlockDriverState *bs, + const char *name); +void bdrv_dirty_bitmap_make_anon(BdrvDirtyBitmap *bitmap); void bdrv_release_dirty_bitmap(BlockDriverState *bs, BdrvDirtyBitmap *bitmap); +void bdrv_disable_dirty_bitmap(BdrvDirtyBitmap *bitmap); +void bdrv_enable_dirty_bitmap(BdrvDirtyBitmap *bitmap); BlockDirtyInfoList *bdrv_query_dirty_bitmaps(BlockDriverState *bs); +uint32_t bdrv_get_default_bitmap_granularity(BlockDriverState *bs); +uint32_t bdrv_dirty_bitmap_granularity(BdrvDirtyBitmap *bitmap); +bool bdrv_dirty_bitmap_enabled(BdrvDirtyBitmap *bitmap); +bool bdrv_dirty_bitmap_frozen(BdrvDirtyBitmap *bitmap); int bdrv_get_dirty(BlockDriverState *bs, BdrvDirtyBitmap *bitmap, int64_t sector); -void bdrv_set_dirty_bitmap(BlockDriverState *bs, BdrvDirtyBitmap *bitmap, +void bdrv_set_dirty_bitmap(BdrvDirtyBitmap *bitmap, int64_t cur_sector, int nr_sectors); -void bdrv_reset_dirty_bitmap(BlockDriverState *bs, BdrvDirtyBitmap *bitmap, +void bdrv_reset_dirty_bitmap(BdrvDirtyBitmap *bitmap, int64_t cur_sector, int nr_sectors); -void bdrv_dirty_iter_init(BlockDriverState *bs, - BdrvDirtyBitmap *bitmap, struct HBitmapIter *hbi); -int64_t bdrv_get_dirty_count(BlockDriverState *bs, BdrvDirtyBitmap *bitmap); +void bdrv_clear_dirty_bitmap(BdrvDirtyBitmap *bitmap); +void bdrv_dirty_iter_init(BdrvDirtyBitmap *bitmap, struct HBitmapIter *hbi); +void bdrv_set_dirty_iter(struct HBitmapIter *hbi, int64_t offset); +int64_t bdrv_get_dirty_count(BdrvDirtyBitmap *bitmap); void bdrv_enable_copy_on_read(BlockDriverState *bs); void bdrv_disable_copy_on_read(BlockDriverState *bs); diff --git a/include/block/block_int.h b/include/block/block_int.h index dccb092df7..db29b7424e 100644 --- a/include/block/block_int.h +++ 
b/include/block/block_int.h @@ -439,6 +439,14 @@ extern BlockDriver bdrv_file; extern BlockDriver bdrv_raw; extern BlockDriver bdrv_qcow2; +/** + * bdrv_setup_io_funcs: + * + * Prepare a #BlockDriver for I/O request processing by populating + * unimplemented coroutine and AIO interfaces with generic wrapper functions + * that fall back to implemented interfaces. + */ +void bdrv_setup_io_funcs(BlockDriver *bdrv); int get_tmp_filename(char *filename, int size); BlockDriver *bdrv_probe_all(const uint8_t *buf, int buf_size, @@ -590,7 +598,7 @@ void commit_active_start(BlockDriverState *bs, BlockDriverState *base, */ void mirror_start(BlockDriverState *bs, BlockDriverState *target, const char *replaces, - int64_t speed, int64_t granularity, int64_t buf_size, + int64_t speed, uint32_t granularity, int64_t buf_size, MirrorSyncMode mode, BlockdevOnError on_source_error, BlockdevOnError on_target_error, BlockCompletionFunc *cb, @@ -602,6 +610,7 @@ void mirror_start(BlockDriverState *bs, BlockDriverState *target, * @target: Block device to write to. * @speed: The maximum speed, in bytes per second, or 0 for unlimited. * @sync_mode: What parts of the disk image should be copied to the destination. + * @sync_bitmap: The dirty bitmap if sync_mode is MIRROR_SYNC_MODE_DIRTY_BITMAP. * @on_source_error: The action to take upon error reading from the source. * @on_target_error: The action to take upon error writing to the target. * @cb: Completion function for the job. @@ -612,6 +621,7 @@ void mirror_start(BlockDriverState *bs, BlockDriverState *target, */ void backup_start(BlockDriverState *bs, BlockDriverState *target, int64_t speed, MirrorSyncMode sync_mode, + BdrvDirtyBitmap *sync_bitmap, BlockdevOnError on_source_error, BlockdevOnError on_target_error, BlockCompletionFunc *cb, void *opaque, @@ -624,4 +634,8 @@ bool blk_dev_is_tray_open(BlockBackend *blk); bool blk_dev_is_medium_locked(BlockBackend *blk); void blk_dev_resize_cb(BlockBackend *blk); +void bdrv_set_dirty(BlockDriverState *bs, int64_t cur_sector, int nr_sectors); +void bdrv_reset_dirty(BlockDriverState *bs, int64_t cur_sector, + int nr_sectors); + #endif /* BLOCK_INT_H */ diff --git a/include/block/blockjob.h b/include/block/blockjob.h index b6d4ebbe03..57d8ef13e2 100644 --- a/include/block/blockjob.h +++ b/include/block/blockjob.h @@ -79,10 +79,16 @@ struct BlockJob { bool cancelled; /** - * Set to true if the job is either paused, or will pause itself - * as soon as possible (if busy == true). + * Counter for pause request. If non-zero, the block job is either paused, + * or if busy == true will pause itself as soon as possible. */ - bool paused; + int pause_count; + + /** + * Set to true if the job is paused by user. Can be unpaused with the + * block-job-resume QMP command. + */ + bool user_paused; /** * Set to false by the job while it is in a quiescent state, where @@ -225,11 +231,19 @@ void block_job_pause(BlockJob *job); * block_job_resume: * @job: The job to be resumed. * - * Resume the specified job. + * Resume the specified job. Must be paired with a preceding block_job_pause. */ void block_job_resume(BlockJob *job); /** + * block_job_enter: + * @job: The job to enter. + * + * Continue the specified job by entering the coroutine. + */ +void block_job_enter(BlockJob *job); + +/** * block_job_event_cancelled: * @job: The job whose information is requested. 
* diff --git a/include/block/qapi.h b/include/block/qapi.h index 168d788521..327549d917 100644 --- a/include/block/qapi.h +++ b/include/block/qapi.h @@ -29,7 +29,7 @@ #include "block/block.h" #include "block/snapshot.h" -BlockDeviceInfo *bdrv_block_device_info(BlockDriverState *bs); +BlockDeviceInfo *bdrv_block_device_info(BlockDriverState *bs, Error **errp); int bdrv_query_snapshot_info_list(BlockDriverState *bs, SnapshotInfoList **p_list, Error **errp); diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h index fcc316271e..43428bd030 100644 --- a/include/exec/cpu-common.h +++ b/include/exec/cpu-common.h @@ -82,7 +82,8 @@ void *cpu_physical_memory_map(hwaddr addr, int is_write); void cpu_physical_memory_unmap(void *buffer, hwaddr len, int is_write, hwaddr access_len); -void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque)); +void cpu_register_map_client(QEMUBH *bh); +void cpu_unregister_map_client(QEMUBH *bh); bool cpu_physical_memory_is_io(hwaddr phys_addr); diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h index 0ca6f0b953..3f56546066 100644 --- a/include/exec/cpu-defs.h +++ b/include/exec/cpu-defs.h @@ -30,6 +30,7 @@ #ifndef CONFIG_USER_ONLY #include "exec/hwaddr.h" #endif +#include "exec/memattrs.h" #ifndef TARGET_LONG_BITS #error TARGET_LONG_BITS must be defined before including this header @@ -102,12 +103,22 @@ typedef struct CPUTLBEntry { QEMU_BUILD_BUG_ON(sizeof(CPUTLBEntry) != (1 << CPU_TLB_ENTRY_BITS)); +/* The IOTLB is not accessed directly inline by generated TCG code, + * so the CPUIOTLBEntry layout is not as critical as that of the + * CPUTLBEntry. (This is also why we don't want to combine the two + * structs into one.) + */ +typedef struct CPUIOTLBEntry { + hwaddr addr; + MemTxAttrs attrs; +} CPUIOTLBEntry; + #define CPU_COMMON_TLB \ /* The meaning of the MMU modes is defined in the target code. 
*/ \ CPUTLBEntry tlb_table[NB_MMU_MODES][CPU_TLB_SIZE]; \ CPUTLBEntry tlb_v_table[NB_MMU_MODES][CPU_VTLB_SIZE]; \ - hwaddr iotlb[NB_MMU_MODES][CPU_TLB_SIZE]; \ - hwaddr iotlb_v[NB_MMU_MODES][CPU_VTLB_SIZE]; \ + CPUIOTLBEntry iotlb[NB_MMU_MODES][CPU_TLB_SIZE]; \ + CPUIOTLBEntry iotlb_v[NB_MMU_MODES][CPU_VTLB_SIZE]; \ target_ulong tlb_flush_addr; \ target_ulong tlb_flush_mask; \ target_ulong vtlb_index; \ diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h index 8eb0db3910..b58cd47ced 100644 --- a/include/exec/exec-all.h +++ b/include/exec/exec-all.h @@ -105,6 +105,9 @@ void tlb_flush(CPUState *cpu, int flush_global); void tlb_set_page(CPUState *cpu, target_ulong vaddr, hwaddr paddr, int prot, int mmu_idx, target_ulong size); +void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, + hwaddr paddr, MemTxAttrs attrs, + int prot, int mmu_idx, target_ulong size); void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr); #else static inline void tlb_flush_page(CPUState *cpu, target_ulong addr) @@ -341,10 +344,6 @@ void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align)); struct MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index); -bool io_mem_read(struct MemoryRegion *mr, hwaddr addr, - uint64_t *pvalue, unsigned size); -bool io_mem_write(struct MemoryRegion *mr, hwaddr addr, - uint64_t value, unsigned size); void tlb_fill(CPUState *cpu, target_ulong addr, int is_write, int mmu_idx, uintptr_t retaddr); diff --git a/include/exec/memattrs.h b/include/exec/memattrs.h new file mode 100644 index 0000000000..1389b4b01d --- /dev/null +++ b/include/exec/memattrs.h @@ -0,0 +1,45 @@ +/* + * Memory transaction attributes + * + * Copyright (c) 2015 Linaro Limited. + * + * Authors: + * Peter Maydell <peter.maydell@linaro.org> + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#ifndef MEMATTRS_H +#define MEMATTRS_H + +/* Every memory transaction has associated with it a set of + * attributes. Some of these are generic (such as the ID of + * the bus master); some are specific to a particular kind of + * bus (such as the ARM Secure/NonSecure bit). We define them + * all as non-overlapping bitfields in a single struct to avoid + * confusion if different parts of QEMU used the same bit for + * different semantics. + */ +typedef struct MemTxAttrs { + /* Bus masters which don't specify any attributes will get this + * (via the MEMTXATTRS_UNSPECIFIED constant), so that we can + * distinguish "all attributes deliberately clear" from + * "didn't specify" if necessary. + */ + unsigned int unspecified:1; + /* ARM/AMBA TrustZone Secure access */ + unsigned int secure:1; + /* Memory access is usermode (unprivileged) */ + unsigned int user:1; +} MemTxAttrs; + +/* Bus masters which don't specify any attributes will get this, + * which has all attribute bits clear except the topmost one + * (so that we can distinguish "all attributes deliberately clear" + * from "didn't specify" if necessary). 
+ */ +#define MEMTXATTRS_UNSPECIFIED ((MemTxAttrs) { .unspecified = 1 }) + +#endif diff --git a/include/exec/memory.h b/include/exec/memory.h index 06ffa1d185..b61c84f62a 100644 --- a/include/exec/memory.h +++ b/include/exec/memory.h @@ -28,6 +28,7 @@ #ifndef CONFIG_USER_ONLY #include "exec/hwaddr.h" #endif +#include "exec/memattrs.h" #include "qemu/queue.h" #include "qemu/int128.h" #include "qemu/notify.h" @@ -68,6 +69,16 @@ struct IOMMUTLBEntry { IOMMUAccessFlags perm; }; +/* New-style MMIO accessors can indicate that the transaction failed. + * A zero (MEMTX_OK) response means success; anything else is a failure + * of some kind. The memory subsystem will bitwise-OR together results + * if it is synthesizing an operation from multiple smaller accesses. + */ +#define MEMTX_OK 0 +#define MEMTX_ERROR (1U << 0) /* device returned an error */ +#define MEMTX_DECODE_ERROR (1U << 1) /* nothing at that address */ +typedef uint32_t MemTxResult; + /* * Memory region callbacks */ @@ -84,6 +95,17 @@ struct MemoryRegionOps { uint64_t data, unsigned size); + MemTxResult (*read_with_attrs)(void *opaque, + hwaddr addr, + uint64_t *data, + unsigned size, + MemTxAttrs attrs); + MemTxResult (*write_with_attrs)(void *opaque, + hwaddr addr, + uint64_t data, + unsigned size, + MemTxAttrs attrs); + enum device_endian endianness; /* Guest-visible constraints: */ struct { @@ -605,6 +627,18 @@ int memory_region_get_fd(MemoryRegion *mr); */ void *memory_region_get_ram_ptr(MemoryRegion *mr); +/* memory_region_ram_resize: Resize a RAM region. + * + * Only legal before guest might have detected the memory size: e.g. on + * incoming migration, or right after reset. + * + * @mr: a memory region created with @memory_region_init_resizeable_ram. + * @newsize: the new size the region + * @errp: pointer to Error*, to store an error if it happens. + */ +void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, + Error **errp); + /** * memory_region_set_log: Turn dirty logging on or off for a region. * @@ -1031,6 +1065,37 @@ void memory_global_dirty_log_stop(void); void mtree_info(fprintf_function mon_printf, void *f); /** + * memory_region_dispatch_read: perform a read directly to the specified + * MemoryRegion. + * + * @mr: #MemoryRegion to access + * @addr: address within that region + * @pval: pointer to uint64_t which the data is written to + * @size: size of the access in bytes + * @attrs: memory transaction attributes to use for the access + */ +MemTxResult memory_region_dispatch_read(MemoryRegion *mr, + hwaddr addr, + uint64_t *pval, + unsigned size, + MemTxAttrs attrs); +/** + * memory_region_dispatch_write: perform a write directly to the specified + * MemoryRegion. + * + * @mr: #MemoryRegion to access + * @addr: address within that region + * @data: data to write + * @size: size of the access in bytes + * @attrs: memory transaction attributes to use for the access + */ +MemTxResult memory_region_dispatch_write(MemoryRegion *mr, + hwaddr addr, + uint64_t data, + unsigned size, + MemTxAttrs attrs); + +/** * address_space_init: initializes an address space * * @as: an uninitialized #AddressSpace @@ -1055,44 +1120,122 @@ void address_space_destroy(AddressSpace *as); /** * address_space_rw: read from or write to an address space. * - * Return true if the operation hit any unassigned memory or encountered an - * IOMMU fault. + * Return a MemTxResult indicating whether the operation succeeded + * or failed (eg unassigned memory, device rejected the transaction, + * IOMMU fault). 
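For device models, the new read_with_attrs/write_with_attrs callbacks sit alongside the plain read/write hooks and let a device both inspect transaction attributes and report failure through MemTxResult. A hedged sketch of such a device follows; the device, its register layout, and all names are invented, and DEVICE_NATIVE_ENDIAN is assumed from the existing MemoryRegionOps conventions.

/* Hypothetical device: one 32-bit register that only secure (TrustZone)
 * transactions may touch.  Not a real QEMU device model. */
typedef struct DemoState {
    uint64_t reg;
} DemoState;

static MemTxResult demo_read(void *opaque, hwaddr addr, uint64_t *data,
                             unsigned size, MemTxAttrs attrs)
{
    DemoState *s = opaque;

    if (!attrs.secure) {
        return MEMTX_ERROR;         /* device rejected the transaction */
    }
    if (addr != 0 || size != 4) {
        return MEMTX_DECODE_ERROR;  /* nothing at that address */
    }
    *data = s->reg;
    return MEMTX_OK;
}

static MemTxResult demo_write(void *opaque, hwaddr addr, uint64_t data,
                              unsigned size, MemTxAttrs attrs)
{
    DemoState *s = opaque;

    if (!attrs.secure) {
        return MEMTX_ERROR;
    }
    if (addr != 0 || size != 4) {
        return MEMTX_DECODE_ERROR;
    }
    s->reg = data;
    return MEMTX_OK;
}

static const MemoryRegionOps demo_ops = {
    .read_with_attrs  = demo_read,
    .write_with_attrs = demo_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};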
* * @as: #AddressSpace to be accessed * @addr: address within that address space + * @attrs: memory transaction attributes * @buf: buffer with the data transferred * @is_write: indicates the transfer direction */ -bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf, - int len, bool is_write); +MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, + MemTxAttrs attrs, uint8_t *buf, + int len, bool is_write); /** * address_space_write: write to address space. * - * Return true if the operation hit any unassigned memory or encountered an - * IOMMU fault. + * Return a MemTxResult indicating whether the operation succeeded + * or failed (eg unassigned memory, device rejected the transaction, + * IOMMU fault). * * @as: #AddressSpace to be accessed * @addr: address within that address space + * @attrs: memory transaction attributes * @buf: buffer with the data transferred */ -bool address_space_write(AddressSpace *as, hwaddr addr, - const uint8_t *buf, int len); +MemTxResult address_space_write(AddressSpace *as, hwaddr addr, + MemTxAttrs attrs, + const uint8_t *buf, int len); /** * address_space_read: read from an address space. * - * Return true if the operation hit any unassigned memory or encountered an - * IOMMU fault. + * Return a MemTxResult indicating whether the operation succeeded + * or failed (eg unassigned memory, device rejected the transaction, + * IOMMU fault). * * @as: #AddressSpace to be accessed * @addr: address within that address space + * @attrs: memory transaction attributes * @buf: buffer with the data transferred */ -bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len); +MemTxResult address_space_read(AddressSpace *as, hwaddr addr, MemTxAttrs attrs, + uint8_t *buf, int len); + +/** + * address_space_ld*: load from an address space + * address_space_st*: store to an address space + * + * These functions perform a load or store of the byte, word, + * longword or quad to the specified address within the AddressSpace. + * The _le suffixed functions treat the data as little endian; + * _be indicates big endian; no suffix indicates "same endianness + * as guest CPU". + * + * The "guest CPU endianness" accessors are deprecated for use outside + * target-* code; devices should be CPU-agnostic and use either the LE + * or the BE accessors. 
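On the caller side, the boolean "did it fault?" return becomes a MemTxResult, so checks of the form "if (address_space_rw(...)) { error }" turn into comparisons against MEMTX_OK. A small sketch with a hypothetical helper name:

static bool demo_dma_write_ok(AddressSpace *as, hwaddr addr,
                              const uint8_t *buf, int len)
{
    MemTxResult res = address_space_write(as, addr, MEMTXATTRS_UNSPECIFIED,
                                          buf, len);
    /* MEMTX_OK is 0; any set bit indicates a decode or device error */
    return res == MEMTX_OK;
}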
+ * + * @as #AddressSpace to be accessed + * @addr: address within that address space + * @val: data value, for stores + * @attrs: memory transaction attributes + * @result: location to write the success/failure of the transaction; + * if NULL, this information is discarded + */ +uint32_t address_space_ldub(AddressSpace *as, hwaddr addr, + MemTxAttrs attrs, MemTxResult *result); +uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr, + MemTxAttrs attrs, MemTxResult *result); +uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr, + MemTxAttrs attrs, MemTxResult *result); +uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr, + MemTxAttrs attrs, MemTxResult *result); +uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr, + MemTxAttrs attrs, MemTxResult *result); +uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr, + MemTxAttrs attrs, MemTxResult *result); +uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr, + MemTxAttrs attrs, MemTxResult *result); +void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val, + MemTxAttrs attrs, MemTxResult *result); +void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val, + MemTxAttrs attrs, MemTxResult *result); +void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val, + MemTxAttrs attrs, MemTxResult *result); +void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val, + MemTxAttrs attrs, MemTxResult *result); +void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val, + MemTxAttrs attrs, MemTxResult *result); +void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val, + MemTxAttrs attrs, MemTxResult *result); +void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val, + MemTxAttrs attrs, MemTxResult *result); + +#ifdef NEED_CPU_H +uint32_t address_space_lduw(AddressSpace *as, hwaddr addr, + MemTxAttrs attrs, MemTxResult *result); +uint32_t address_space_ldl(AddressSpace *as, hwaddr addr, + MemTxAttrs attrs, MemTxResult *result); +uint64_t address_space_ldq(AddressSpace *as, hwaddr addr, + MemTxAttrs attrs, MemTxResult *result); +void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val, + MemTxAttrs attrs, MemTxResult *result); +void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val, + MemTxAttrs attrs, MemTxResult *result); +void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val, + MemTxAttrs attrs, MemTxResult *result); +void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val, + MemTxAttrs attrs, MemTxResult *result); +#endif /* address_space_translate: translate an address range into an address space - * into a MemoryRegion and an address range into that section + * into a MemoryRegion and an address range into that section. Should be + * called from an RCU critical section, to avoid that the last reference + * to the returned region disappears after address_space_translate returns. 
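The RCU requirement added to address_space_translate's comment pairs naturally with the new memory_region_dispatch_read/write entry points: the returned MemoryRegion is only guaranteed to stay alive while the RCU read lock is held. A sketch under that assumption follows; address_space_translate's signature is taken from the surrounding QEMU tree rather than this hunk, the helper name is made up, and the address is assumed to resolve to an MMIO region (real code would special-case RAM-backed regions).

#include "qemu/rcu.h"

/* Read a 32-bit value by translating the address ourselves and then
 * dispatching directly to the resulting MemoryRegion. */
static uint64_t demo_peek_long(AddressSpace *as, hwaddr addr)
{
    hwaddr xlat, len = 4;
    MemoryRegion *mr;
    uint64_t val = 0;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &xlat, &len, false);
    if (len >= 4) {
        memory_region_dispatch_read(mr, xlat, &val, 4,
                                    MEMTXATTRS_UNSPECIFIED);
    }
    rcu_read_unlock();          /* mr must not be dereferenced after this */
    return val;
}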
* * @as: #AddressSpace to be accessed * @addr: address within that address space diff --git a/include/glib-compat.h b/include/glib-compat.h index 011352b2b6..28d9f15bd3 100644 --- a/include/glib-compat.h +++ b/include/glib-compat.h @@ -115,7 +115,7 @@ static inline void g_mutex_init(CompatGMutex *mutex) static inline void g_mutex_clear(CompatGMutex *mutex) { - assert(mutex->once.status != G_ONCE_STATUS_PROGRESS); + g_assert(mutex->once.status != G_ONCE_STATUS_PROGRESS); if (mutex->once.retval) { g_mutex_free((GMutex *) mutex->once.retval); } @@ -155,7 +155,7 @@ static inline void g_cond_init(CompatGCond *cond) static inline void g_cond_clear(CompatGCond *cond) { - assert(cond->once.status != G_ONCE_STATUS_PROGRESS); + g_assert(cond->once.status != G_ONCE_STATUS_PROGRESS); if (cond->once.retval) { g_cond_free((GCond *) cond->once.retval); } @@ -164,7 +164,7 @@ static inline void g_cond_clear(CompatGCond *cond) static inline void (g_cond_wait)(CompatGCond *cond, CompatGMutex *mutex) { - assert(mutex->once.status != G_ONCE_STATUS_PROGRESS); + g_assert(mutex->once.status != G_ONCE_STATUS_PROGRESS); g_once(&cond->once, do_g_cond_new, NULL); g_cond_wait((GCond *) cond->once.retval, (GMutex *) mutex->once.retval); } diff --git a/include/hw/i386/ich9.h b/include/hw/i386/ich9.h index 59ea25b49a..f4e522cc1f 100644 --- a/include/hw/i386/ich9.h +++ b/include/hw/i386/ich9.h @@ -2,7 +2,6 @@ #define HW_ICH9_H #include "hw/hw.h" -#include "qemu/range.h" #include "hw/isa/isa.h" #include "hw/sysbus.h" #include "hw/i386/pc.h" @@ -19,7 +18,6 @@ void ich9_lpc_set_irq(void *opaque, int irq_num, int level); int ich9_lpc_map_irq(PCIDevice *pci_dev, int intx); PCIINTxRoute ich9_route_intx_pin_to_irq(void *opaque, int pirq_pin); void ich9_lpc_pm_init(PCIDevice *pci_lpc); -PCIBus *ich9_d2pbr_init(PCIBus *bus, int devfn, int sec_bus); I2CBus *ich9_smb_init(PCIBus *bus, int devfn, uint32_t smb_io_base); #define ICH9_CC_SIZE (16 * 1024) /* 16KB */ diff --git a/include/hw/loader.h b/include/hw/loader.h index 4f0681b0c8..485ff8f2f1 100644 --- a/include/hw/loader.h +++ b/include/hw/loader.h @@ -68,9 +68,11 @@ extern bool rom_file_has_mr; int rom_add_file(const char *file, const char *fw_dir, hwaddr addr, int32_t bootindex, bool option_rom); -ram_addr_t rom_add_blob(const char *name, const void *blob, size_t len, - size_t max_len, hwaddr addr, const char *fw_file_name, - FWCfgReadCallback fw_callback, void *callback_opaque); +MemoryRegion *rom_add_blob(const char *name, const void *blob, size_t len, + size_t max_len, hwaddr addr, + const char *fw_file_name, + FWCfgReadCallback fw_callback, + void *callback_opaque); int rom_add_elf_program(const char *name, void *data, size_t datasize, size_t romsize, hwaddr addr); int rom_load_all(void); diff --git a/include/hw/pci-host/q35.h b/include/hw/pci-host/q35.h index 025d6e69af..96d4cdc713 100644 --- a/include/hw/pci-host/q35.h +++ b/include/hw/pci-host/q35.h @@ -23,7 +23,6 @@ #define HW_Q35_H #include "hw/hw.h" -#include "qemu/range.h" #include "hw/isa/isa.h" #include "hw/sysbus.h" #include "hw/i386/pc.h" diff --git a/include/hw/pci/pci.h b/include/hw/pci/pci.h index b97c2956ec..d4ffead48a 100644 --- a/include/hw/pci/pci.h +++ b/include/hw/pci/pci.h @@ -568,7 +568,7 @@ static inline void pci_set_byte_by_mask(uint8_t *config, uint8_t mask, uint8_t reg) { uint8_t val = pci_get_byte(config); - uint8_t rval = reg << (ffs(mask) - 1); + uint8_t rval = reg << ctz32(mask); pci_set_byte(config, (~mask & val) | (mask & rval)); } @@ -576,14 +576,14 @@ static inline uint8_t 
pci_get_byte_by_mask(uint8_t *config, uint8_t mask) { uint8_t val = pci_get_byte(config); - return (val & mask) >> (ffs(mask) - 1); + return (val & mask) >> ctz32(mask); } static inline void pci_set_word_by_mask(uint8_t *config, uint16_t mask, uint16_t reg) { uint16_t val = pci_get_word(config); - uint16_t rval = reg << (ffs(mask) - 1); + uint16_t rval = reg << ctz32(mask); pci_set_word(config, (~mask & val) | (mask & rval)); } @@ -591,14 +591,14 @@ static inline uint16_t pci_get_word_by_mask(uint8_t *config, uint16_t mask) { uint16_t val = pci_get_word(config); - return (val & mask) >> (ffs(mask) - 1); + return (val & mask) >> ctz32(mask); } static inline void pci_set_long_by_mask(uint8_t *config, uint32_t mask, uint32_t reg) { uint32_t val = pci_get_long(config); - uint32_t rval = reg << (ffs(mask) - 1); + uint32_t rval = reg << ctz32(mask); pci_set_long(config, (~mask & val) | (mask & rval)); } @@ -606,14 +606,14 @@ static inline uint32_t pci_get_long_by_mask(uint8_t *config, uint32_t mask) { uint32_t val = pci_get_long(config); - return (val & mask) >> (ffs(mask) - 1); + return (val & mask) >> ctz32(mask); } static inline void pci_set_quad_by_mask(uint8_t *config, uint64_t mask, uint64_t reg) { uint64_t val = pci_get_quad(config); - uint64_t rval = reg << (ffs(mask) - 1); + uint64_t rval = reg << ctz32(mask); pci_set_quad(config, (~mask & val) | (mask & rval)); } @@ -621,7 +621,7 @@ static inline uint64_t pci_get_quad_by_mask(uint8_t *config, uint64_t mask) { uint64_t val = pci_get_quad(config); - return (val & mask) >> (ffs(mask) - 1); + return (val & mask) >> ctz32(mask); } PCIDevice *pci_create_multifunction(PCIBus *bus, int devfn, bool multifunction, diff --git a/include/hw/pci/pcie_regs.h b/include/hw/pci/pcie_regs.h index 848ab1c206..6a28b33e69 100644 --- a/include/hw/pci/pcie_regs.h +++ b/include/hw/pci/pcie_regs.h @@ -27,34 +27,34 @@ /* PCI_EXP_FLAGS */ #define PCI_EXP_FLAGS_VER2 2 /* for now, supports only ver. 
2 */ -#define PCI_EXP_FLAGS_IRQ_SHIFT (ffs(PCI_EXP_FLAGS_IRQ) - 1) -#define PCI_EXP_FLAGS_TYPE_SHIFT (ffs(PCI_EXP_FLAGS_TYPE) - 1) +#define PCI_EXP_FLAGS_IRQ_SHIFT ctz32(PCI_EXP_FLAGS_IRQ) +#define PCI_EXP_FLAGS_TYPE_SHIFT ctz32(PCI_EXP_FLAGS_TYPE) /* PCI_EXP_LINK{CAP, STA} */ /* link speed */ #define PCI_EXP_LNK_LS_25 1 -#define PCI_EXP_LNK_MLW_SHIFT (ffs(PCI_EXP_LNKCAP_MLW) - 1) +#define PCI_EXP_LNK_MLW_SHIFT ctz32(PCI_EXP_LNKCAP_MLW) #define PCI_EXP_LNK_MLW_1 (1 << PCI_EXP_LNK_MLW_SHIFT) /* PCI_EXP_LINKCAP */ -#define PCI_EXP_LNKCAP_ASPMS_SHIFT (ffs(PCI_EXP_LNKCAP_ASPMS) - 1) +#define PCI_EXP_LNKCAP_ASPMS_SHIFT ctz32(PCI_EXP_LNKCAP_ASPMS) #define PCI_EXP_LNKCAP_ASPMS_0S (1 << PCI_EXP_LNKCAP_ASPMS_SHIFT) -#define PCI_EXP_LNKCAP_PN_SHIFT (ffs(PCI_EXP_LNKCAP_PN) - 1) +#define PCI_EXP_LNKCAP_PN_SHIFT ctz32(PCI_EXP_LNKCAP_PN) -#define PCI_EXP_SLTCAP_PSN_SHIFT (ffs(PCI_EXP_SLTCAP_PSN) - 1) +#define PCI_EXP_SLTCAP_PSN_SHIFT ctz32(PCI_EXP_SLTCAP_PSN) #define PCI_EXP_SLTCTL_IND_RESERVED 0x0 #define PCI_EXP_SLTCTL_IND_ON 0x1 #define PCI_EXP_SLTCTL_IND_BLINK 0x2 #define PCI_EXP_SLTCTL_IND_OFF 0x3 -#define PCI_EXP_SLTCTL_AIC_SHIFT (ffs(PCI_EXP_SLTCTL_AIC) - 1) +#define PCI_EXP_SLTCTL_AIC_SHIFT ctz32(PCI_EXP_SLTCTL_AIC) #define PCI_EXP_SLTCTL_AIC_OFF \ (PCI_EXP_SLTCTL_IND_OFF << PCI_EXP_SLTCTL_AIC_SHIFT) -#define PCI_EXP_SLTCTL_PIC_SHIFT (ffs(PCI_EXP_SLTCTL_PIC) - 1) +#define PCI_EXP_SLTCTL_PIC_SHIFT ctz32(PCI_EXP_SLTCTL_PIC) #define PCI_EXP_SLTCTL_PIC_OFF \ (PCI_EXP_SLTCTL_IND_OFF << PCI_EXP_SLTCTL_PIC_SHIFT) #define PCI_EXP_SLTCTL_PIC_ON \ @@ -109,7 +109,7 @@ #define PCI_ERR_ROOT_IRQ_MAX 32 #define PCI_ERR_ROOT_IRQ 0xf8000000 -#define PCI_ERR_ROOT_IRQ_SHIFT (ffs(PCI_ERR_ROOT_IRQ) - 1) +#define PCI_ERR_ROOT_IRQ_SHIFT ctz32(PCI_ERR_ROOT_IRQ) #define PCI_ERR_ROOT_STATUS_REPORT_MASK (PCI_ERR_ROOT_COR_RCV | \ PCI_ERR_ROOT_MULTI_COR_RCV | \ PCI_ERR_ROOT_UNCOR_RCV | \ diff --git a/include/hw/usb.h b/include/hw/usb.h index 5be29375a2..c8b6e7b571 100644 --- a/include/hw/usb.h +++ b/include/hw/usb.h @@ -445,15 +445,11 @@ void usb_ep_reset(USBDevice *dev); void usb_ep_dump(USBDevice *dev); struct USBEndpoint *usb_ep_get(USBDevice *dev, int pid, int ep); uint8_t usb_ep_get_type(USBDevice *dev, int pid, int ep); -uint8_t usb_ep_get_ifnum(USBDevice *dev, int pid, int ep); void usb_ep_set_type(USBDevice *dev, int pid, int ep, uint8_t type); void usb_ep_set_ifnum(USBDevice *dev, int pid, int ep, uint8_t ifnum); void usb_ep_set_max_packet_size(USBDevice *dev, int pid, int ep, uint16_t raw); -int usb_ep_get_max_packet_size(USBDevice *dev, int pid, int ep); void usb_ep_set_max_streams(USBDevice *dev, int pid, int ep, uint8_t raw); -int usb_ep_get_max_streams(USBDevice *dev, int pid, int ep); -void usb_ep_set_pipeline(USBDevice *dev, int pid, int ep, bool enabled); void usb_ep_set_halted(USBDevice *dev, int pid, int ep, bool halted); USBPacket *usb_ep_find_packet_by_id(USBDevice *dev, int pid, int ep, uint64_t id); @@ -469,7 +465,6 @@ void usb_port_reset(USBPort *port); void usb_device_reset(USBDevice *dev); void usb_wakeup(USBEndpoint *ep, unsigned int stream); void usb_generic_async_ctrl_complete(USBDevice *s, USBPacket *p); -int set_usb_string(uint8_t *buf, const char *str); /* usb-linux.c */ USBDevice *usb_host_device_open(USBBus *bus, const char *devname); diff --git a/include/migration/migration.h b/include/migration/migration.h index bf09968d76..a6e025a248 100644 --- a/include/migration/migration.h +++ b/include/migration/migration.h @@ -50,6 +50,7 @@ struct MigrationState QemuThread thread; QEMUBH 
*cleanup_bh; QEMUFile *file; + int parameters[MIGRATION_PARAMETER_MAX]; int state; MigrationParams params; @@ -104,6 +105,10 @@ bool migration_has_finished(MigrationState *); bool migration_has_failed(MigrationState *); MigrationState *migrate_get_current(void); +void migrate_compress_threads_create(void); +void migrate_compress_threads_join(void); +void migrate_decompress_threads_create(void); +void migrate_decompress_threads_join(void); uint64_t ram_bytes_remaining(void); uint64_t ram_bytes_transferred(void); uint64_t ram_bytes_total(void); @@ -152,6 +157,11 @@ int64_t migrate_xbzrle_cache_size(void); int64_t xbzrle_cache_resize(int64_t new_size); +bool migrate_use_compression(void); +int migrate_compress_level(void); +int migrate_compress_threads(void); +int migrate_decompress_threads(void); + void ram_control_before_iterate(QEMUFile *f, uint64_t flags); void ram_control_after_iterate(QEMUFile *f, uint64_t flags); void ram_control_load_hook(QEMUFile *f, uint64_t flags); diff --git a/include/migration/qemu-file.h b/include/migration/qemu-file.h index 745a850e51..a01c5b817e 100644 --- a/include/migration/qemu-file.h +++ b/include/migration/qemu-file.h @@ -159,6 +159,9 @@ void qemu_put_be32(QEMUFile *f, unsigned int v); void qemu_put_be64(QEMUFile *f, uint64_t v); int qemu_peek_buffer(QEMUFile *f, uint8_t *buf, int size, size_t offset); int qemu_get_buffer(QEMUFile *f, uint8_t *buf, int size); +ssize_t qemu_put_compression_data(QEMUFile *f, const uint8_t *p, size_t size, + int level); +int qemu_put_qemu_file(QEMUFile *f_des, QEMUFile *f_src); /* * Note that you can only peek continuous bytes from where the current pointer * is; you aren't guaranteed to be able to peak to +n bytes unless you've diff --git a/include/monitor/monitor.h b/include/monitor/monitor.h index 1c06bed39d..df67d56ec0 100644 --- a/include/monitor/monitor.h +++ b/include/monitor/monitor.h @@ -47,7 +47,6 @@ typedef void (MonitorCompletion)(void *opaque, QObject *ret_data); void monitor_set_error(Monitor *mon, QError *qerror); void monitor_read_command(Monitor *mon, int show_prompt); -ReadLineState *monitor_get_rs(Monitor *mon); int monitor_read_password(Monitor *mon, ReadLineFunc *readline_func, void *opaque); diff --git a/include/qapi/qmp/qerror.h b/include/qapi/qmp/qerror.h index 57a62d4b76..e5673394d3 100644 --- a/include/qapi/qmp/qerror.h +++ b/include/qapi/qmp/qerror.h @@ -37,9 +37,6 @@ void qerror_report_err(Error *err); #define QERR_BASE_NOT_FOUND \ ERROR_CLASS_GENERIC_ERROR, "Base '%s' not found" -#define QERR_BLOCK_FORMAT_FEATURE_NOT_SUPPORTED \ - ERROR_CLASS_GENERIC_ERROR, "Block format '%s' used by device '%s' does not support feature '%s'" - #define QERR_BLOCK_JOB_NOT_READY \ ERROR_CLASS_GENERIC_ERROR, "The active block job for device '%s' cannot be completed" @@ -58,9 +55,6 @@ void qerror_report_err(Error *err); #define QERR_DEVICE_IN_USE \ ERROR_CLASS_GENERIC_ERROR, "Device '%s' is in use" -#define QERR_DEVICE_IS_READ_ONLY \ - ERROR_CLASS_GENERIC_ERROR, "Device '%s' is read only" - #define QERR_DEVICE_NO_HOTPLUG \ ERROR_CLASS_GENERIC_ERROR, "Device '%s' does not support hotplugging" diff --git a/include/qemu-common.h b/include/qemu-common.h index 1b5cffb403..6b373ff7e3 100644 --- a/include/qemu-common.h +++ b/include/qemu-common.h @@ -217,10 +217,6 @@ void *qemu_oom_check(void *ptr); ssize_t qemu_write_full(int fd, const void *buf, size_t count) QEMU_WARN_UNUSED_RESULT; -ssize_t qemu_send_full(int fd, const void *buf, size_t count, int flags) - QEMU_WARN_UNUSED_RESULT; -ssize_t qemu_recv_full(int fd, void 
*buf, size_t count, int flags) - QEMU_WARN_UNUSED_RESULT; #ifndef _WIN32 int qemu_pipe(int pipefd[2]); diff --git a/include/qemu/bitops.h b/include/qemu/bitops.h index 90ca8df4e2..8abdcf9077 100644 --- a/include/qemu/bitops.h +++ b/include/qemu/bitops.h @@ -20,10 +20,10 @@ #define BITS_PER_BYTE CHAR_BIT #define BITS_PER_LONG (sizeof (unsigned long) * BITS_PER_BYTE) -#define BIT(nr) (1UL << (nr)) -#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG)) -#define BIT_WORD(nr) ((nr) / BITS_PER_LONG) -#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long)) +#define BIT(nr) (1UL << (nr)) +#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG)) +#define BIT_WORD(nr) ((nr) / BITS_PER_LONG) +#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long)) /** * set_bit - Set a bit in memory @@ -32,10 +32,10 @@ */ static inline void set_bit(long nr, unsigned long *addr) { - unsigned long mask = BIT_MASK(nr); - unsigned long *p = addr + BIT_WORD(nr); + unsigned long mask = BIT_MASK(nr); + unsigned long *p = addr + BIT_WORD(nr); - *p |= mask; + *p |= mask; } /** @@ -45,10 +45,10 @@ static inline void set_bit(long nr, unsigned long *addr) */ static inline void clear_bit(long nr, unsigned long *addr) { - unsigned long mask = BIT_MASK(nr); - unsigned long *p = addr + BIT_WORD(nr); + unsigned long mask = BIT_MASK(nr); + unsigned long *p = addr + BIT_WORD(nr); - *p &= ~mask; + *p &= ~mask; } /** @@ -58,10 +58,10 @@ static inline void clear_bit(long nr, unsigned long *addr) */ static inline void change_bit(long nr, unsigned long *addr) { - unsigned long mask = BIT_MASK(nr); - unsigned long *p = addr + BIT_WORD(nr); + unsigned long mask = BIT_MASK(nr); + unsigned long *p = addr + BIT_WORD(nr); - *p ^= mask; + *p ^= mask; } /** @@ -71,12 +71,12 @@ static inline void change_bit(long nr, unsigned long *addr) */ static inline int test_and_set_bit(long nr, unsigned long *addr) { - unsigned long mask = BIT_MASK(nr); - unsigned long *p = addr + BIT_WORD(nr); - unsigned long old = *p; + unsigned long mask = BIT_MASK(nr); + unsigned long *p = addr + BIT_WORD(nr); + unsigned long old = *p; - *p = old | mask; - return (old & mask) != 0; + *p = old | mask; + return (old & mask) != 0; } /** @@ -86,12 +86,12 @@ static inline int test_and_set_bit(long nr, unsigned long *addr) */ static inline int test_and_clear_bit(long nr, unsigned long *addr) { - unsigned long mask = BIT_MASK(nr); - unsigned long *p = addr + BIT_WORD(nr); - unsigned long old = *p; + unsigned long mask = BIT_MASK(nr); + unsigned long *p = addr + BIT_WORD(nr); + unsigned long old = *p; - *p = old & ~mask; - return (old & mask) != 0; + *p = old & ~mask; + return (old & mask) != 0; } /** @@ -101,12 +101,12 @@ static inline int test_and_clear_bit(long nr, unsigned long *addr) */ static inline int test_and_change_bit(long nr, unsigned long *addr) { - unsigned long mask = BIT_MASK(nr); - unsigned long *p = addr + BIT_WORD(nr); - unsigned long old = *p; + unsigned long mask = BIT_MASK(nr); + unsigned long *p = addr + BIT_WORD(nr); + unsigned long old = *p; - *p = old ^ mask; - return (old & mask) != 0; + *p = old ^ mask; + return (old & mask) != 0; } /** @@ -116,7 +116,7 @@ static inline int test_and_change_bit(long nr, unsigned long *addr) */ static inline int test_bit(long nr, const unsigned long *addr) { - return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1))); + return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1))); } /** @@ -136,7 +136,8 @@ unsigned long find_last_bit(const unsigned long *addr, * @size: The bitmap size in 
bits */ unsigned long find_next_bit(const unsigned long *addr, - unsigned long size, unsigned long offset); + unsigned long size, + unsigned long offset); /** * find_next_zero_bit - find the next cleared bit in a memory region diff --git a/include/qemu/compatfd.h b/include/qemu/compatfd.h index 6b04877b97..fc3791520f 100644 --- a/include/qemu/compatfd.h +++ b/include/qemu/compatfd.h @@ -39,6 +39,5 @@ struct qemu_signalfd_siginfo { }; int qemu_signalfd(const sigset_t *mask); -bool qemu_signalfd_available(void); #endif diff --git a/include/qemu/hbitmap.h b/include/qemu/hbitmap.h index 550d7ce2c3..f0a85f8649 100644 --- a/include/qemu/hbitmap.h +++ b/include/qemu/hbitmap.h @@ -65,6 +65,29 @@ struct HBitmapIter { HBitmap *hbitmap_alloc(uint64_t size, int granularity); /** + * hbitmap_truncate: + * @hb: The bitmap to change the size of. + * @size: The number of elements to change the bitmap to accommodate. + * + * truncate or grow an existing bitmap to accommodate a new number of elements. + * This may invalidate existing HBitmapIterators. + */ +void hbitmap_truncate(HBitmap *hb, uint64_t size); + +/** + * hbitmap_merge: + * @a: The bitmap to store the result in. + * @b: The bitmap to merge into @a. + * @return true if the merge was successful, + * false if it was not attempted. + * + * Merge two bitmaps together. + * A := A (BITOR) B. + * B is left unmodified. + */ +bool hbitmap_merge(HBitmap *a, const HBitmap *b); + +/** * hbitmap_empty: * @hb: HBitmap to operate on. * diff --git a/include/qom/cpu.h b/include/qom/cpu.h index 9dafb4817e..39f0f19fb0 100644 --- a/include/qom/cpu.h +++ b/include/qom/cpu.h @@ -24,6 +24,7 @@ #include <setjmp.h> #include "hw/qdev-core.h" #include "exec/hwaddr.h" +#include "exec/memattrs.h" #include "qemu/queue.h" #include "qemu/thread.h" #include "qemu/tls.h" @@ -195,6 +196,7 @@ typedef struct CPUWatchpoint { vaddr vaddr; vaddr len; vaddr hitaddr; + MemTxAttrs hitattrs; int flags; /* BP_* */ QTAILQ_ENTRY(CPUWatchpoint) entry; } CPUWatchpoint; diff --git a/include/standard-headers/linux/virtio_balloon.h b/include/standard-headers/linux/virtio_balloon.h index 799376d414..88ada1d048 100644 --- a/include/standard-headers/linux/virtio_balloon.h +++ b/include/standard-headers/linux/virtio_balloon.h @@ -25,6 +25,7 @@ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ +#include "standard-headers/linux/types.h" #include "standard-headers/linux/virtio_ids.h" #include "standard-headers/linux/virtio_config.h" @@ -51,9 +52,32 @@ struct virtio_balloon_config { #define VIRTIO_BALLOON_S_MEMTOT 5 /* Total amount of memory */ #define VIRTIO_BALLOON_S_NR 6 +/* + * Memory statistics structure. + * Driver fills an array of these structures and passes to device. + * + * NOTE: fields are laid out in a way that would make compiler add padding + * between and after fields, so we have to use compiler-specific attributes to + * pack it, to disable this padding. This also often causes compiler to + * generate suboptimal code. + * + * We maintain this statistics structure format for backwards compatibility, + * but don't follow this example. 
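The recommendation this comment goes on to spell out, explicit reserved bytes instead of a packed attribute, is easy to verify in isolation. A plain-C sketch, with stdint types standing in for the __virtio16/__virtio64 typedefs and compile-time checks standing in for an ABI test:

#include <stdint.h>
#include <stddef.h>

/* Explicit reserved bytes give natural alignment with no packed attribute */
struct stat_padded {
    uint16_t tag;
    uint8_t  reserved[6];   /* pads tag out to val's 8-byte boundary */
    uint64_t val;
};

/* Compile-time checks: 16 bytes total, val naturally aligned at offset 8 */
typedef char check_size[sizeof(struct stat_padded) == 16 ? 1 : -1];
typedef char check_off [offsetof(struct stat_padded, val) == 8 ? 1 : -1];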
+ * + * If implementing a similar structure, do something like the below instead: + * struct virtio_balloon_stat { + * __virtio16 tag; + * uint8_t reserved[6]; + * __virtio64 val; + * }; + * + * In other words, add explicit reserved fields to align field and + * structure boundaries at field size, avoiding compiler padding + * without the packed attribute. + */ struct virtio_balloon_stat { - uint16_t tag; - uint64_t val; + __virtio16 tag; + __virtio64 val; } QEMU_PACKED; #endif /* _LINUX_VIRTIO_BALLOON_H */ diff --git a/include/standard-headers/linux/virtio_blk.h b/include/standard-headers/linux/virtio_blk.h index 12016b47f3..cd601f4069 100644 --- a/include/standard-headers/linux/virtio_blk.h +++ b/include/standard-headers/linux/virtio_blk.h @@ -58,7 +58,7 @@ struct virtio_blk_config { uint32_t size_max; /* The maximum number of segments (if VIRTIO_BLK_F_SEG_MAX) */ uint32_t seg_max; - /* geometry the device (if VIRTIO_BLK_F_GEOMETRY) */ + /* geometry of the device (if VIRTIO_BLK_F_GEOMETRY) */ struct virtio_blk_geometry { uint16_t cylinders; uint8_t heads; @@ -117,7 +117,11 @@ struct virtio_blk_config { #define VIRTIO_BLK_T_BARRIER 0x80000000 #endif /* !VIRTIO_BLK_NO_LEGACY */ -/* This is the first element of the read scatter-gather list. */ +/* + * This comes first in the read scatter-gather list. + * For legacy virtio, if VIRTIO_F_ANY_LAYOUT is not negotiated, + * this is the first element of the read scatter-gather list. + */ struct virtio_blk_outhdr { /* VIRTIO_BLK_T* */ __virtio32 type; diff --git a/include/standard-headers/linux/virtio_ids.h b/include/standard-headers/linux/virtio_ids.h index 284fc3a05f..5f60aa4be5 100644 --- a/include/standard-headers/linux/virtio_ids.h +++ b/include/standard-headers/linux/virtio_ids.h @@ -39,5 +39,6 @@ #define VIRTIO_ID_9P 9 /* 9p virtio console */ #define VIRTIO_ID_RPROC_SERIAL 11 /* virtio remoteproc serial link */ #define VIRTIO_ID_CAIF 12 /* Virtio caif */ +#define VIRTIO_ID_INPUT 18 /* virtio input */ #endif /* _LINUX_VIRTIO_IDS_H */ diff --git a/include/standard-headers/linux/virtio_input.h b/include/standard-headers/linux/virtio_input.h new file mode 100644 index 0000000000..a98a7974c2 --- /dev/null +++ b/include/standard-headers/linux/virtio_input.h @@ -0,0 +1,76 @@ +#ifndef _LINUX_VIRTIO_INPUT_H +#define _LINUX_VIRTIO_INPUT_H +/* This header is BSD licensed so anyone can use the definitions to implement + * compatible drivers/servers. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of IBM nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL IBM OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT + * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. */ + +#include "standard-headers/linux/types.h" + +enum virtio_input_config_select { + VIRTIO_INPUT_CFG_UNSET = 0x00, + VIRTIO_INPUT_CFG_ID_NAME = 0x01, + VIRTIO_INPUT_CFG_ID_SERIAL = 0x02, + VIRTIO_INPUT_CFG_ID_DEVIDS = 0x03, + VIRTIO_INPUT_CFG_PROP_BITS = 0x10, + VIRTIO_INPUT_CFG_EV_BITS = 0x11, + VIRTIO_INPUT_CFG_ABS_INFO = 0x12, +}; + +struct virtio_input_absinfo { + uint32_t min; + uint32_t max; + uint32_t fuzz; + uint32_t flat; + uint32_t res; +}; + +struct virtio_input_devids { + uint16_t bustype; + uint16_t vendor; + uint16_t product; + uint16_t version; +}; + +struct virtio_input_config { + uint8_t select; + uint8_t subsel; + uint8_t size; + uint8_t reserved[5]; + union { + char string[128]; + uint8_t bitmap[128]; + struct virtio_input_absinfo abs; + struct virtio_input_devids ids; + } u; +}; + +struct virtio_input_event { + uint16_t type; + uint16_t code; + uint32_t value; +}; + +#endif /* _LINUX_VIRTIO_INPUT_H */ diff --git a/include/sysemu/block-backend.h b/include/sysemu/block-backend.h index 77e9b9c370..b4a4d5e0b9 100644 --- a/include/sysemu/block-backend.h +++ b/include/sysemu/block-backend.h @@ -87,6 +87,8 @@ int blk_read_unthrottled(BlockBackend *blk, int64_t sector_num, uint8_t *buf, int nb_sectors); int blk_write(BlockBackend *blk, int64_t sector_num, const uint8_t *buf, int nb_sectors); +int blk_write_zeroes(BlockBackend *blk, int64_t sector_num, + int nb_sectors, BdrvRequestFlags flags); BlockAIOCB *blk_aio_write_zeroes(BlockBackend *blk, int64_t sector_num, int nb_sectors, BdrvRequestFlags flags, BlockCompletionFunc *cb, void *opaque); diff --git a/include/sysemu/dma.h b/include/sysemu/dma.h index 3f2f4c89e3..efa8b9993a 100644 --- a/include/sysemu/dma.h +++ b/include/sysemu/dma.h @@ -88,7 +88,8 @@ static inline int dma_memory_rw_relaxed(AddressSpace *as, dma_addr_t addr, void *buf, dma_addr_t len, DMADirection dir) { - return address_space_rw(as, addr, buf, len, dir == DMA_DIRECTION_FROM_DEVICE); + return (bool)address_space_rw(as, addr, MEMTXATTRS_UNSPECIFIED, + buf, len, dir == DMA_DIRECTION_FROM_DEVICE); } static inline int dma_memory_read_relaxed(AddressSpace *as, dma_addr_t addr, diff --git a/include/sysemu/iothread.h b/include/sysemu/iothread.h index 7c01a61d5e..2eefea1cc2 100644 --- a/include/sysemu/iothread.h +++ b/include/sysemu/iothread.h @@ -33,7 +33,6 @@ typedef struct { #define IOTHREAD(obj) \ OBJECT_CHECK(IOThread, obj, TYPE_IOTHREAD) -IOThread *iothread_find(const char *id); char *iothread_get_id(IOThread *iothread); AioContext *iothread_get_aio_context(IOThread *iothread); diff --git a/include/sysemu/kvm.h b/include/sysemu/kvm.h index 197e6c0214..4878959404 100644 --- a/include/sysemu/kvm.h +++ b/include/sysemu/kvm.h @@ -18,6 +18,7 @@ #include "config-host.h" #include "qemu/queue.h" #include "qom/cpu.h" +#include "exec/memattrs.h" #ifdef CONFIG_KVM #include <linux/kvm.h> @@ -254,7 +255,7 @@ int kvm_create_device(KVMState *s, uint64_t type, bool test); extern const KVMCapabilityInfo kvm_arch_required_capabilities[]; void 
kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run); -void kvm_arch_post_run(CPUState *cpu, struct kvm_run *run); +MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run); int kvm_arch_handle_exit(CPUState *cpu, struct kvm_run *run); diff --git a/include/sysemu/os-win32.h b/include/sysemu/os-win32.h index 9cc9e08139..4035c4fe54 100644 --- a/include/sysemu/os-win32.h +++ b/include/sysemu/os-win32.h @@ -72,9 +72,6 @@ #define sigsetjmp(env, savemask) setjmp(env) #define siglongjmp(env, val) longjmp(env, val) -/* Declaration of ffs() is missing in MinGW's strings.h. */ -int ffs(int i); - /* Missing POSIX functions. Don't use MinGW-w64 macros. */ #undef gmtime_r struct tm *gmtime_r(const time_t *timep, struct tm *result); diff --git a/include/sysemu/tpm_backend_int.h b/include/sysemu/tpm_backend_int.h index 05d94d0f5b..40f693a0cc 100644 --- a/include/sysemu/tpm_backend_int.h +++ b/include/sysemu/tpm_backend_int.h @@ -32,8 +32,6 @@ void tpm_backend_thread_deliver_request(TPMBackendThread *tbt); void tpm_backend_thread_create(TPMBackendThread *tbt, GFunc func, gpointer user_data); void tpm_backend_thread_end(TPMBackendThread *tbt); -void tpm_backend_thread_tpm_reset(TPMBackendThread *tbt, - GFunc func, gpointer user_data); typedef enum TPMBackendCmd { TPM_BACKEND_CMD_INIT = 1, diff --git a/include/ui/console.h b/include/ui/console.h index 2f5b9f0634..e8b3a9ea8d 100644 --- a/include/ui/console.h +++ b/include/ui/console.h @@ -9,6 +9,11 @@ #include "qapi-types.h" #include "qapi/error.h" +#ifdef CONFIG_OPENGL +# include <GLES2/gl2.h> +# include <GLES2/gl2ext.h> +#endif + /* keyboard/mouse support */ #define MOUSE_EVENT_LBUTTON 0x01 @@ -117,6 +122,11 @@ struct DisplaySurface { pixman_format_code_t format; pixman_image_t *image; uint8_t flags; +#ifdef CONFIG_OPENGL + GLenum glformat; + GLenum gltype; + GLuint texture; +#endif }; typedef struct QemuUIInfo { @@ -218,6 +228,7 @@ void update_displaychangelistener(DisplayChangeListener *dcl, uint64_t interval); void unregister_displaychangelistener(DisplayChangeListener *dcl); +bool dpy_ui_info_supported(QemuConsole *con); int dpy_set_ui_info(QemuConsole *con, QemuUIInfo *info); void dpy_gfx_update(QemuConsole *con, int x, int y, int w, int h); @@ -270,6 +281,11 @@ static inline int surface_bytes_per_pixel(DisplaySurface *s) return (bits + 7) / 8; } +static inline pixman_format_code_t surface_format(DisplaySurface *s) +{ + return s->format; +} + #ifdef CONFIG_CURSES #include <curses.h> typedef chtype console_ch_t; @@ -307,6 +323,7 @@ QemuConsole *qemu_console_lookup_by_device(DeviceState *dev, uint32_t head); bool qemu_console_is_visible(QemuConsole *con); bool qemu_console_is_graphic(QemuConsole *con); bool qemu_console_is_fixedsize(QemuConsole *con); +char *qemu_console_get_label(QemuConsole *con); int qemu_console_get_index(QemuConsole *con); uint32_t qemu_console_get_head(QemuConsole *con); QemuUIInfo *qemu_console_get_ui_info(QemuConsole *con); @@ -321,7 +338,29 @@ void qemu_console_copy(QemuConsole *con, int src_x, int src_y, int dst_x, int dst_y, int w, int h); DisplaySurface *qemu_console_surface(QemuConsole *con); +/* console-gl.c */ +typedef struct ConsoleGLState ConsoleGLState; +#ifdef CONFIG_OPENGL +ConsoleGLState *console_gl_init_context(void); +void console_gl_fini_context(ConsoleGLState *gls); +bool console_gl_check_format(DisplayChangeListener *dcl, + pixman_format_code_t format); +void surface_gl_create_texture(ConsoleGLState *gls, + DisplaySurface *surface); +void surface_gl_update_texture(ConsoleGLState *gls, + DisplaySurface 
*surface, + int x, int y, int w, int h); +void surface_gl_render_texture(ConsoleGLState *gls, + DisplaySurface *surface); +void surface_gl_destroy_texture(ConsoleGLState *gls, + DisplaySurface *surface); +void surface_gl_setup_viewport(ConsoleGLState *gls, + DisplaySurface *surface, + int ww, int wh); +#endif + /* sdl.c */ +void sdl_display_early_init(int opengl); void sdl_display_init(DisplayState *ds, int full_screen, int no_frame); /* cocoa.m */ diff --git a/include/ui/gtk.h b/include/ui/gtk.h new file mode 100644 index 0000000000..b750845ab5 --- /dev/null +++ b/include/ui/gtk.h @@ -0,0 +1,76 @@ +#ifndef UI_GTK_H +#define UI_GTK_H + +#ifdef _WIN32 +# define _WIN32_WINNT 0x0601 /* needed to get definition of MAPVK_VK_TO_VSC */ +#endif + +#ifdef CONFIG_PRAGMA_DIAGNOSTIC_AVAILABLE +/* Work around an -Wstrict-prototypes warning in GTK headers */ +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wstrict-prototypes" +#endif +#include <gtk/gtk.h> +#ifdef CONFIG_PRAGMA_DIAGNOSTIC_AVAILABLE +#pragma GCC diagnostic pop +#endif + +#include <gdk/gdkkeysyms.h> + +#ifdef GDK_WINDOWING_X11 +#include <gdk/gdkx.h> +#include <X11/XKBlib.h> +#endif + +/* Compatibility define to let us build on both Gtk2 and Gtk3 */ +#if GTK_CHECK_VERSION(3, 0, 0) +static inline void gdk_drawable_get_size(GdkWindow *w, gint *ww, gint *wh) +{ + *ww = gdk_window_get_width(w); + *wh = gdk_window_get_height(w); +} +#endif + +typedef struct GtkDisplayState GtkDisplayState; + +typedef struct VirtualGfxConsole { + GtkWidget *drawing_area; + DisplayChangeListener dcl; + DisplaySurface *ds; + pixman_image_t *convert; + cairo_surface_t *surface; + double scale_x; + double scale_y; +} VirtualGfxConsole; + +#if defined(CONFIG_VTE) +typedef struct VirtualVteConsole { + GtkWidget *box; + GtkWidget *scrollbar; + GtkWidget *terminal; + CharDriverState *chr; +} VirtualVteConsole; +#endif + +typedef enum VirtualConsoleType { + GD_VC_GFX, + GD_VC_VTE, +} VirtualConsoleType; + +typedef struct VirtualConsole { + GtkDisplayState *s; + char *label; + GtkWidget *window; + GtkWidget *menu_item; + GtkWidget *tab_item; + GtkWidget *focus; + VirtualConsoleType type; + union { + VirtualGfxConsole gfx; +#if defined(CONFIG_VTE) + VirtualVteConsole vte; +#endif + }; +} VirtualConsole; + +#endif /* UI_GTK_H */ diff --git a/include/ui/qemu-pixman.h b/include/ui/qemu-pixman.h index 5d7a9ac6f2..e34c4effcb 100644 --- a/include/ui/qemu-pixman.h +++ b/include/ui/qemu-pixman.h @@ -35,6 +35,7 @@ # define PIXMAN_BE_r8g8b8a8 PIXMAN_r8g8b8a8 # define PIXMAN_BE_x8b8g8r8 PIXMAN_x8b8g8r8 # define PIXMAN_BE_a8b8g8r8 PIXMAN_a8b8g8r8 +# define PIXMAN_LE_x8r8g8b8 PIXMAN_b8g8r8x8 #else # define PIXMAN_BE_r8g8b8 PIXMAN_b8g8r8 # define PIXMAN_BE_x8r8g8b8 PIXMAN_b8g8r8x8 @@ -45,6 +46,7 @@ # define PIXMAN_BE_r8g8b8a8 PIXMAN_a8b8g8r8 # define PIXMAN_BE_x8b8g8r8 PIXMAN_r8g8b8x8 # define PIXMAN_BE_a8b8g8r8 PIXMAN_r8g8b8a8 +# define PIXMAN_LE_x8r8g8b8 PIXMAN_x8r8g8b8 #endif /* -------------------------------------------------------------------- */ diff --git a/include/ui/qemu-spice.h b/include/ui/qemu-spice.h index 25b94c7530..42db3c1645 100644 --- a/include/ui/qemu-spice.h +++ b/include/ui/qemu-spice.h @@ -42,8 +42,7 @@ int qemu_spice_set_passwd(const char *passwd, bool fail_if_connected, bool disconnect_if_connected); int qemu_spice_set_pw_expire(time_t expires); int qemu_spice_migrate_info(const char *hostname, int port, int tls_port, - const char *subject, - MonitorCompletion cb, void *opaque); + const char *subject); CharDriverState *qemu_chr_open_spice_vmc(const 
char *type); #if SPICE_SERVER_VERSION >= 0x000c02 @@ -70,10 +69,8 @@ static inline int qemu_spice_set_pw_expire(time_t expires) return -1; } static inline int qemu_spice_migrate_info(const char *h, int p, int t, - const char *s, - MonitorCompletion cb, void *opaque) + const char *s) { - cb(opaque, NULL); return -1; } diff --git a/include/ui/sdl2.h b/include/ui/sdl2.h index 51fff2e9b8..2fdad8f300 100644 --- a/include/ui/sdl2.h +++ b/include/ui/sdl2.h @@ -1,6 +1,12 @@ #ifndef SDL2_H #define SDL2_H +/* Avoid compiler warning because macro is redefined in SDL_syswm.h. */ +#undef WIN32_LEAN_AND_MEAN + +#include <SDL.h> +#include <SDL_syswm.h> + struct sdl2_console { DisplayChangeListener dcl; DisplaySurface *surface; @@ -11,6 +17,10 @@ struct sdl2_console { int last_vm_running; /* per console for caption reasons */ int x, y; int hidden; + int opengl; + int updates; + SDL_GLContext winctx; + ConsoleGLState *gls; }; void sdl2_window_create(struct sdl2_console *scon); @@ -31,4 +41,11 @@ void sdl2_2d_redraw(struct sdl2_console *scon); bool sdl2_2d_check_format(DisplayChangeListener *dcl, pixman_format_code_t format); +void sdl2_gl_update(DisplayChangeListener *dcl, + int x, int y, int w, int h); +void sdl2_gl_switch(DisplayChangeListener *dcl, + DisplaySurface *new_surface); +void sdl2_gl_refresh(DisplayChangeListener *dcl); +void sdl2_gl_redraw(struct sdl2_console *scon); + #endif /* SDL2_H */ diff --git a/include/ui/shader.h b/include/ui/shader.h new file mode 100644 index 0000000000..1ff926c9e1 --- /dev/null +++ b/include/ui/shader.h @@ -0,0 +1,11 @@ +#ifdef CONFIG_OPENGL +# include <GLES2/gl2.h> +# include <GLES2/gl2ext.h> +#endif + +void qemu_gl_run_texture_blit(GLint texture_blit_prog); + +GLuint qemu_gl_create_compile_shader(GLenum type, const GLchar *src); +GLuint qemu_gl_create_link_program(GLuint vert, GLuint frag); +GLuint qemu_gl_create_compile_link_program(const GLchar *vert_src, + const GLchar *frag_src); diff --git a/include/ui/spice-display.h b/include/ui/spice-display.h index 53883a17fc..b25328a6ba 100644 --- a/include/ui/spice-display.h +++ b/include/ui/spice-display.h @@ -97,7 +97,8 @@ struct SimpleSpiceDisplay { /* cursor (without qxl): displaychangelistener -> spice server */ SimpleSpiceCursor *ptr_define; SimpleSpiceCursor *ptr_move; - uint16_t ptr_x, ptr_y; + int16_t ptr_x, ptr_y; + int16_t hot_x, hot_y; /* cursor (with qxl): qxl local renderer -> displaychangelistener */ QEMUCursor *cursor; @@ -64,7 +64,8 @@ void cpu_outb(pio_addr_t addr, uint8_t val) { LOG_IOPORT("outb: %04"FMT_pioaddr" %02"PRIx8"\n", addr, val); trace_cpu_out(addr, val); - address_space_write(&address_space_io, addr, &val, 1); + address_space_write(&address_space_io, addr, MEMTXATTRS_UNSPECIFIED, + &val, 1); } void cpu_outw(pio_addr_t addr, uint16_t val) @@ -74,7 +75,8 @@ void cpu_outw(pio_addr_t addr, uint16_t val) LOG_IOPORT("outw: %04"FMT_pioaddr" %04"PRIx16"\n", addr, val); trace_cpu_out(addr, val); stw_p(buf, val); - address_space_write(&address_space_io, addr, buf, 2); + address_space_write(&address_space_io, addr, MEMTXATTRS_UNSPECIFIED, + buf, 2); } void cpu_outl(pio_addr_t addr, uint32_t val) @@ -84,14 +86,16 @@ void cpu_outl(pio_addr_t addr, uint32_t val) LOG_IOPORT("outl: %04"FMT_pioaddr" %08"PRIx32"\n", addr, val); trace_cpu_out(addr, val); stl_p(buf, val); - address_space_write(&address_space_io, addr, buf, 4); + address_space_write(&address_space_io, addr, MEMTXATTRS_UNSPECIFIED, + buf, 4); } uint8_t cpu_inb(pio_addr_t addr) { uint8_t val; - address_space_read(&address_space_io, addr, &val, 
1); + address_space_read(&address_space_io, addr, MEMTXATTRS_UNSPECIFIED, + &val, 1); trace_cpu_in(addr, val); LOG_IOPORT("inb : %04"FMT_pioaddr" %02"PRIx8"\n", addr, val); return val; @@ -102,7 +106,7 @@ uint16_t cpu_inw(pio_addr_t addr) uint8_t buf[2]; uint16_t val; - address_space_read(&address_space_io, addr, buf, 2); + address_space_read(&address_space_io, addr, MEMTXATTRS_UNSPECIFIED, buf, 2); val = lduw_p(buf); trace_cpu_in(addr, val); LOG_IOPORT("inw : %04"FMT_pioaddr" %04"PRIx16"\n", addr, val); @@ -114,7 +118,7 @@ uint32_t cpu_inl(pio_addr_t addr) uint8_t buf[4]; uint32_t val; - address_space_read(&address_space_io, addr, buf, 4); + address_space_read(&address_space_io, addr, MEMTXATTRS_UNSPECIFIED, buf, 4); val = ldl_p(buf); trace_cpu_in(addr, val); LOG_IOPORT("inl : %04"FMT_pioaddr" %08"PRIx32"\n", addr, val); @@ -187,9 +191,14 @@ static uint64_t portio_read(void *opaque, hwaddr addr, unsigned size) data = mrp->read(mrpio->portio_opaque, mrp->base + addr); } else if (size == 2) { mrp = find_portio(mrpio, addr, 1, false); - assert(mrp); - data = mrp->read(mrpio->portio_opaque, mrp->base + addr) | - (mrp->read(mrpio->portio_opaque, mrp->base + addr + 1) << 8); + if (mrp) { + data = mrp->read(mrpio->portio_opaque, mrp->base + addr); + if (addr + 1 < mrp->offset + mrp->len) { + data |= mrp->read(mrpio->portio_opaque, mrp->base + addr + 1) << 8; + } else { + data |= 0xff00; + } + } } return data; } @@ -204,9 +213,12 @@ static void portio_write(void *opaque, hwaddr addr, uint64_t data, mrp->write(mrpio->portio_opaque, mrp->base + addr, data); } else if (size == 2) { mrp = find_portio(mrpio, addr, 1, true); - assert(mrp); - mrp->write(mrpio->portio_opaque, mrp->base + addr, data & 0xff); - mrp->write(mrpio->portio_opaque, mrp->base + addr + 1, data >> 8); + if (mrp) { + mrp->write(mrpio->portio_opaque, mrp->base + addr, data & 0xff); + if (addr + 1 < mrp->offset + mrp->len) { + mrp->write(mrpio->portio_opaque, mrp->base + addr + 1, data >> 8); + } + } } } @@ -239,10 +251,6 @@ static void portio_list_add_1(PortioList *piolist, mrpio->ports[i].base = start + off_low; } - /* - * Use an alias so that the callback is called with an absolute address, - * rather than an offset relative to to start + off_low. - */ memory_region_init_io(&mrpio->mr, piolist->owner, &portio_ops, mrpio, piolist->name, off_high - off_low); if (piolist->flush_coalesced_mmio) { @@ -265,7 +273,7 @@ void portio_list_add(PortioList *piolist, /* Handle the first entry specially. */ off_last = off_low = pio_start->offset; - off_high = off_low + pio_start->len; + off_high = off_low + pio_start->len + pio_start->size - 1; count = 1; for (pio = pio_start + 1; pio->size != 0; pio++, count++) { @@ -280,10 +288,10 @@ void portio_list_add(PortioList *piolist, /* ... and start collecting anew. 
*/ pio_start = pio; off_low = off_last; - off_high = off_low + pio->len; + off_high = off_low + pio->len + pio_start->size - 1; count = 0; } else if (off_last + pio->len > off_high) { - off_high = off_last + pio->len; + off_high = off_last + pio->len + pio_start->size - 1; } } diff --git a/iothread.c b/iothread.c index 342a23fcb0..0416fc4268 100644 --- a/iothread.c +++ b/iothread.c @@ -31,21 +31,14 @@ typedef ObjectClass IOThreadClass; static void *iothread_run(void *opaque) { IOThread *iothread = opaque; - bool blocking; qemu_mutex_lock(&iothread->init_done_lock); iothread->thread_id = qemu_get_thread_id(); qemu_cond_signal(&iothread->init_done_cond); qemu_mutex_unlock(&iothread->init_done_lock); - while (!iothread->stopping) { - aio_context_acquire(iothread->ctx); - blocking = true; - while (!iothread->stopping && aio_poll(iothread->ctx, blocking)) { - /* Progress was made, keep going */ - blocking = false; - } - aio_context_release(iothread->ctx); + while (!atomic_read(&iothread->stopping)) { + aio_poll(iothread->ctx, true); } return NULL; } @@ -121,18 +114,6 @@ static void iothread_register_types(void) type_init(iothread_register_types) -IOThread *iothread_find(const char *id) -{ - Object *container = container_get(object_get_root(), IOTHREADS_PATH); - Object *child; - - child = object_property_get_link(container, id, NULL); - if (!child) { - return NULL; - } - return (IOThread *)object_dynamic_cast(child, TYPE_IOTHREAD); -} - char *iothread_get_id(IOThread *iothread) { return object_get_canonical_path_component(OBJECT(iothread)); @@ -552,13 +552,13 @@ static int kvm_set_ioeventfd_mmio(int fd, hwaddr addr, uint32_t val, bool assign, uint32_t size, bool datamatch) { int ret; - struct kvm_ioeventfd iofd; - - iofd.datamatch = datamatch ? adjust_ioeventfd_endianness(val, size) : 0; - iofd.addr = addr; - iofd.len = size; - iofd.flags = 0; - iofd.fd = fd; + struct kvm_ioeventfd iofd = { + .datamatch = datamatch ? adjust_ioeventfd_endianness(val, size) : 0, + .addr = addr, + .len = size, + .flags = 0, + .fd = fd, + }; if (!kvm_enabled()) { return -ENOSYS; @@ -1141,18 +1141,18 @@ static int kvm_irqchip_get_virq(KVMState *s) { uint32_t *word = s->used_gsi_bitmap; int max_words = ALIGN(s->gsi_count, 32) / 32; - int i, bit; + int i, zeroes; bool retry = true; again: /* Return the lowest unused GSI in the bitmap */ for (i = 0; i < max_words; i++) { - bit = ffs(~word[i]); - if (!bit) { + zeroes = ctz32(~word[i]); + if (zeroes == 32) { continue; } - return bit - 1 + i * 32; + return zeroes + i * 32; } if (!s->direct_msi && retry) { retry = false; @@ -1544,8 +1544,17 @@ static int kvm_init(MachineState *ms) strerror(-ret)); #ifdef TARGET_S390X - fprintf(stderr, "Please add the 'switch_amode' kernel parameter to " - "your host kernel command line\n"); + if (ret == -EINVAL) { + fprintf(stderr, + "Host kernel setup problem detected. 
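The ffs()-to-ctz32() conversions here and in the earlier PCI hunks rest on two facts: for a non-zero word, ctz32(x) == ffs(x) - 1, and QEMU's ctz32(0) is defined as 32, which is exactly what the "zeroes == 32" test above relies on when a GSI bitmap word has no free bit. A self-contained check, using compiler builtins as a stand-in for qemu/host-utils.h:

#include <assert.h>
#include <stdint.h>
#include <strings.h>            /* ffs() */

static inline int my_ctz32(uint32_t v)
{
    return v ? __builtin_ctz(v) : 32;   /* mirrors QEMU's ctz32() contract */
}

static void demo_ctz_vs_ffs(void)
{
    uint32_t word = 0xffffffffu;            /* fully used GSI word */

    assert(my_ctz32(~word) == 32);          /* old code: ffs(~word) == 0 */
    assert(my_ctz32(0x00f0u) == ffs(0x00f0) - 1);   /* both give 4 */
}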
Please verify:\n"); + fprintf(stderr, "- for kernels supporting the switch_amode or" + " user_mode parameters, whether\n"); + fprintf(stderr, + " user space is running in primary address space\n"); + fprintf(stderr, + "- for kernels supporting the vm.allocate_pgste sysctl, " + "whether it is enabled\n"); + } #endif goto err; } @@ -1660,14 +1669,15 @@ void kvm_set_sigmask_len(KVMState *s, unsigned int sigmask_len) s->sigmask_len = sigmask_len; } -static void kvm_handle_io(uint16_t port, void *data, int direction, int size, - uint32_t count) +static void kvm_handle_io(uint16_t port, MemTxAttrs attrs, void *data, int direction, + int size, uint32_t count) { int i; uint8_t *ptr = data; for (i = 0; i < count; i++) { - address_space_rw(&address_space_io, port, ptr, size, + address_space_rw(&address_space_io, port, attrs, + ptr, size, direction == KVM_EXIT_IO_OUT); ptr += size; } @@ -1786,6 +1796,8 @@ int kvm_cpu_exec(CPUState *cpu) } do { + MemTxAttrs attrs; + if (cpu->kvm_vcpu_dirty) { kvm_arch_put_registers(cpu, KVM_PUT_RUNTIME_STATE); cpu->kvm_vcpu_dirty = false; @@ -1806,7 +1818,7 @@ int kvm_cpu_exec(CPUState *cpu) run_ret = kvm_vcpu_ioctl(cpu, KVM_RUN, 0); qemu_mutex_lock_iothread(); - kvm_arch_post_run(cpu, run); + attrs = kvm_arch_post_run(cpu, run); if (run_ret < 0) { if (run_ret == -EINTR || run_ret == -EAGAIN) { @@ -1824,7 +1836,7 @@ int kvm_cpu_exec(CPUState *cpu) switch (run->exit_reason) { case KVM_EXIT_IO: DPRINTF("handle_io\n"); - kvm_handle_io(run->io.port, + kvm_handle_io(run->io.port, attrs, (uint8_t *)run + run->io.data_offset, run->io.direction, run->io.size, @@ -1833,10 +1845,11 @@ int kvm_cpu_exec(CPUState *cpu) break; case KVM_EXIT_MMIO: DPRINTF("handle_mmio\n"); - cpu_physical_memory_rw(run->mmio.phys_addr, - run->mmio.data, - run->mmio.len, - run->mmio.is_write); + address_space_rw(&address_space_memory, + run->mmio.phys_addr, attrs, + run->mmio.data, + run->mmio.len, + run->mmio.is_write); ret = 0; break; case KVM_EXIT_IRQ_WINDOW_OPEN: diff --git a/libcacard/cac.c b/libcacard/cac.c index f38fdceddd..bc84534f9c 100644 --- a/libcacard/cac.c +++ b/libcacard/cac.c @@ -5,7 +5,10 @@ * See the COPYING.LIB file in the top-level directory. */ -#include "qemu-common.h" +#include "glib-compat.h" + +#include <string.h> +#include <stdbool.h> #include "cac.h" #include "vcard.h" diff --git a/libcacard/card_7816.c b/libcacard/card_7816.c index 814fa1662f..22fd334d15 100644 --- a/libcacard/card_7816.c +++ b/libcacard/card_7816.c @@ -5,7 +5,9 @@ * See the COPYING.LIB file in the top-level directory. */ -#include "qemu-common.h" +#include "glib-compat.h" + +#include <string.h> #include "vcard.h" #include "vcard_emul.h" diff --git a/libcacard/event.c b/libcacard/event.c index 4c551e4e38..63f4057fe5 100644 --- a/libcacard/event.c +++ b/libcacard/event.c @@ -5,7 +5,7 @@ * See the COPYING.LIB file in the top-level directory. */ -#include "qemu-common.h" +#include "glib-compat.h" #include "vcard.h" #include "vreader.h" diff --git a/libcacard/vcard.c b/libcacard/vcard.c index d140a8ed1a..1a87208f3d 100644 --- a/libcacard/vcard.c +++ b/libcacard/vcard.c @@ -5,7 +5,9 @@ * See the COPYING.LIB file in the top-level directory. 
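With kvm_arch_post_run() now returning MemTxAttrs, each architecture must produce attributes for the exit it just handled; a target with nothing to report can simply return the unspecified value, which the generic loop above then threads into kvm_handle_io() and address_space_rw(). A hypothetical minimal implementation, not taken from any real target file:

MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
{
    /* No per-transaction attribute information on this target */
    return MEMTXATTRS_UNSPECIFIED;
}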
*/ -#include "qemu-common.h" +#include "glib-compat.h" + +#include <string.h> #include "vcard.h" #include "vcard_emul.h" diff --git a/libcacard/vcard_emul_nss.c b/libcacard/vcard_emul_nss.c index 950edee069..d9761eedc2 100644 --- a/libcacard/vcard_emul_nss.c +++ b/libcacard/vcard_emul_nss.c @@ -25,7 +25,7 @@ #include <prthread.h> #include <secerr.h> -#include "qemu-common.h" +#include "glib-compat.h" #include "vcard.h" #include "card_7816t.h" @@ -33,7 +33,7 @@ #include "vreader.h" #include "vevent.h" -#include "libcacard/vcardt_internal.h" +#include "vcardt_internal.h" typedef enum { diff --git a/libcacard/vcardt.c b/libcacard/vcardt.c index 9ce4648f8c..c67de2f3c1 100644 --- a/libcacard/vcardt.c +++ b/libcacard/vcardt.c @@ -2,9 +2,9 @@ #include <string.h> #include <glib.h> -#include "libcacard/vcardt.h" +#include "vcardt.h" -#include "libcacard/vcardt_internal.h" +#include "vcardt_internal.h" /* create an ATR with appropriate historical bytes */ #define ATR_TS_DIRECT_CONVENTION 0x3b diff --git a/libcacard/vreader.c b/libcacard/vreader.c index 0315dd8920..9725f46a74 100644 --- a/libcacard/vreader.c +++ b/libcacard/vreader.c @@ -10,7 +10,9 @@ #endif #define G_LOG_DOMAIN "libcacard" -#include "qemu-common.h" +#include "glib-compat.h" + +#include <string.h> #include "vcard.h" #include "vcard_emul.h" diff --git a/libcacard/vscclient.c b/libcacard/vscclient.c index fa6041de99..0652684437 100644 --- a/libcacard/vscclient.c +++ b/libcacard/vscclient.c @@ -10,14 +10,20 @@ * See the COPYING.LIB file in the top-level directory. */ +#include <stdio.h> +#include <stdlib.h> +#include <string.h> #ifndef _WIN32 #include <sys/socket.h> #include <netinet/in.h> #include <netdb.h> +#include <unistd.h> #define closesocket(x) close(x) +#else +#include <getopt.h> #endif -#include "qemu-common.h" +#include "glib-compat.h" #include "vscard_common.h" diff --git a/linux-headers/asm-arm/kvm.h b/linux-headers/asm-arm/kvm.h index 0db25bc328..c98e4dc460 100644 --- a/linux-headers/asm-arm/kvm.h +++ b/linux-headers/asm-arm/kvm.h @@ -195,9 +195,16 @@ struct kvm_arch_memory_slot { #define KVM_ARM_IRQ_CPU_IRQ 0 #define KVM_ARM_IRQ_CPU_FIQ 1 -/* Highest supported SPI, from VGIC_NR_IRQS */ +/* + * This used to hold the highest supported SPI, but it is now obsolete + * and only here to provide source code level compatibility with older + * userland. The highest SPI number can be set via KVM_DEV_ARM_VGIC_GRP_NR_IRQS. + */ #define KVM_ARM_IRQ_GIC_MAX 127 +/* One single KVM irqchip, ie. the VGIC */ +#define KVM_NR_IRQCHIPS 1 + /* PSCI interface */ #define KVM_PSCI_FN_BASE 0x95c1ba5e #define KVM_PSCI_FN(n) (KVM_PSCI_FN_BASE + (n)) diff --git a/linux-headers/asm-arm64/kvm.h b/linux-headers/asm-arm64/kvm.h index 3ef77a4660..c8abf257c1 100644 --- a/linux-headers/asm-arm64/kvm.h +++ b/linux-headers/asm-arm64/kvm.h @@ -188,9 +188,16 @@ struct kvm_arch_memory_slot { #define KVM_ARM_IRQ_CPU_IRQ 0 #define KVM_ARM_IRQ_CPU_FIQ 1 -/* Highest supported SPI, from VGIC_NR_IRQS */ +/* + * This used to hold the highest supported SPI, but it is now obsolete + * and only here to provide source code level compatibility with older + * userland. The highest SPI number can be set via KVM_DEV_ARM_VGIC_GRP_NR_IRQS. + */ #define KVM_ARM_IRQ_GIC_MAX 127 +/* One single KVM irqchip, ie. 
the VGIC */ +#define KVM_NR_IRQCHIPS 1 + /* PSCI interface */ #define KVM_PSCI_FN_BASE 0x95c1ba5e #define KVM_PSCI_FN(n) (KVM_PSCI_FN_BASE + (n)) diff --git a/linux-headers/asm-mips/kvm.h b/linux-headers/asm-mips/kvm.h index 2c04b6d9ff..6985eb59b0 100644 --- a/linux-headers/asm-mips/kvm.h +++ b/linux-headers/asm-mips/kvm.h @@ -36,77 +36,85 @@ struct kvm_regs { /* * for KVM_GET_FPU and KVM_SET_FPU - * - * If Status[FR] is zero (32-bit FPU), the upper 32-bits of the FPRs - * are zero filled. */ struct kvm_fpu { - __u64 fpr[32]; - __u32 fir; - __u32 fccr; - __u32 fexr; - __u32 fenr; - __u32 fcsr; - __u32 pad; }; /* - * For MIPS, we use KVM_SET_ONE_REG and KVM_GET_ONE_REG to access CP0 + * For MIPS, we use KVM_SET_ONE_REG and KVM_GET_ONE_REG to access various * registers. The id field is broken down as follows: * - * bits[2..0] - Register 'sel' index. - * bits[7..3] - Register 'rd' index. - * bits[15..8] - Must be zero. - * bits[31..16] - 1 -> CP0 registers. - * bits[51..32] - Must be zero. * bits[63..52] - As per linux/kvm.h + * bits[51..32] - Must be zero. + * bits[31..16] - Register set. + * + * Register set = 0: GP registers from kvm_regs (see definitions below). + * + * Register set = 1: CP0 registers. + * bits[15..8] - Must be zero. + * bits[7..3] - Register 'rd' index. + * bits[2..0] - Register 'sel' index. + * + * Register set = 2: KVM specific registers (see definitions below). + * + * Register set = 3: FPU / MSA registers (see definitions below). * * Other sets registers may be added in the future. Each set would * have its own identifier in bits[31..16]. - * - * The registers defined in struct kvm_regs are also accessible, the - * id values for these are below. */ -#define KVM_REG_MIPS_R0 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 0) -#define KVM_REG_MIPS_R1 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 1) -#define KVM_REG_MIPS_R2 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 2) -#define KVM_REG_MIPS_R3 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 3) -#define KVM_REG_MIPS_R4 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 4) -#define KVM_REG_MIPS_R5 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 5) -#define KVM_REG_MIPS_R6 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 6) -#define KVM_REG_MIPS_R7 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 7) -#define KVM_REG_MIPS_R8 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 8) -#define KVM_REG_MIPS_R9 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 9) -#define KVM_REG_MIPS_R10 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 10) -#define KVM_REG_MIPS_R11 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 11) -#define KVM_REG_MIPS_R12 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 12) -#define KVM_REG_MIPS_R13 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 13) -#define KVM_REG_MIPS_R14 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 14) -#define KVM_REG_MIPS_R15 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 15) -#define KVM_REG_MIPS_R16 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 16) -#define KVM_REG_MIPS_R17 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 17) -#define KVM_REG_MIPS_R18 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 18) -#define KVM_REG_MIPS_R19 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 19) -#define KVM_REG_MIPS_R20 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 20) -#define KVM_REG_MIPS_R21 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 21) -#define KVM_REG_MIPS_R22 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 22) -#define KVM_REG_MIPS_R23 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 23) -#define KVM_REG_MIPS_R24 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 24) -#define KVM_REG_MIPS_R25 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 25) -#define KVM_REG_MIPS_R26 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 26) -#define KVM_REG_MIPS_R27 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 27) -#define KVM_REG_MIPS_R28 (KVM_REG_MIPS | 
KVM_REG_SIZE_U64 | 28) -#define KVM_REG_MIPS_R29 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 29) -#define KVM_REG_MIPS_R30 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 30) -#define KVM_REG_MIPS_R31 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 31) - -#define KVM_REG_MIPS_HI (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 32) -#define KVM_REG_MIPS_LO (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 33) -#define KVM_REG_MIPS_PC (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 34) - -/* KVM specific control registers */ +#define KVM_REG_MIPS_GP (KVM_REG_MIPS | 0x0000000000000000ULL) +#define KVM_REG_MIPS_CP0 (KVM_REG_MIPS | 0x0000000000010000ULL) +#define KVM_REG_MIPS_KVM (KVM_REG_MIPS | 0x0000000000020000ULL) +#define KVM_REG_MIPS_FPU (KVM_REG_MIPS | 0x0000000000030000ULL) + + +/* + * KVM_REG_MIPS_GP - General purpose registers from kvm_regs. + */ + +#define KVM_REG_MIPS_R0 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 0) +#define KVM_REG_MIPS_R1 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 1) +#define KVM_REG_MIPS_R2 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 2) +#define KVM_REG_MIPS_R3 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 3) +#define KVM_REG_MIPS_R4 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 4) +#define KVM_REG_MIPS_R5 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 5) +#define KVM_REG_MIPS_R6 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 6) +#define KVM_REG_MIPS_R7 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 7) +#define KVM_REG_MIPS_R8 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 8) +#define KVM_REG_MIPS_R9 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 9) +#define KVM_REG_MIPS_R10 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 10) +#define KVM_REG_MIPS_R11 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 11) +#define KVM_REG_MIPS_R12 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 12) +#define KVM_REG_MIPS_R13 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 13) +#define KVM_REG_MIPS_R14 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 14) +#define KVM_REG_MIPS_R15 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 15) +#define KVM_REG_MIPS_R16 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 16) +#define KVM_REG_MIPS_R17 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 17) +#define KVM_REG_MIPS_R18 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 18) +#define KVM_REG_MIPS_R19 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 19) +#define KVM_REG_MIPS_R20 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 20) +#define KVM_REG_MIPS_R21 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 21) +#define KVM_REG_MIPS_R22 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 22) +#define KVM_REG_MIPS_R23 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 23) +#define KVM_REG_MIPS_R24 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 24) +#define KVM_REG_MIPS_R25 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 25) +#define KVM_REG_MIPS_R26 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 26) +#define KVM_REG_MIPS_R27 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 27) +#define KVM_REG_MIPS_R28 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 28) +#define KVM_REG_MIPS_R29 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 29) +#define KVM_REG_MIPS_R30 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 30) +#define KVM_REG_MIPS_R31 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 31) + +#define KVM_REG_MIPS_HI (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 32) +#define KVM_REG_MIPS_LO (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 33) +#define KVM_REG_MIPS_PC (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 34) + + +/* + * KVM_REG_MIPS_KVM - KVM specific control registers. + */ /* * CP0_Count control @@ -118,8 +126,7 @@ struct kvm_fpu { * safely without losing time or guest timer interrupts. * Other: Reserved, do not change. 
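/* [Editor's illustration, not part of the patch] Under the register-set layout documented above (bits[31..16] = register set, and for CP0 bits[7..3] = rd, bits[2..0] = sel), a ONE_REG id can be composed roughly like this; the helper name is hypothetical and the size flag should follow the register width: #define MIPS_CP0_ONE_REG(rd, sel) (KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | ((rd) << 3) | (sel)) -- e.g. MIPS_CP0_ONE_REG(9, 0) would name CP0_Count for KVM_GET_ONE_REG/KVM_SET_ONE_REG. */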
*/ -#define KVM_REG_MIPS_COUNT_CTL (KVM_REG_MIPS | KVM_REG_SIZE_U64 | \ - 0x20000 | 0) +#define KVM_REG_MIPS_COUNT_CTL (KVM_REG_MIPS_KVM | KVM_REG_SIZE_U64 | 0) #define KVM_REG_MIPS_COUNT_CTL_DC 0x00000001 /* @@ -131,15 +138,46 @@ struct kvm_fpu { * emulated. * Modifications to times in the future are rejected. */ -#define KVM_REG_MIPS_COUNT_RESUME (KVM_REG_MIPS | KVM_REG_SIZE_U64 | \ - 0x20000 | 1) +#define KVM_REG_MIPS_COUNT_RESUME (KVM_REG_MIPS_KVM | KVM_REG_SIZE_U64 | 1) /* * CP0_Count rate in Hz * Specifies the rate of the CP0_Count timer in Hz. Modifications occur without * discontinuities in CP0_Count. */ -#define KVM_REG_MIPS_COUNT_HZ (KVM_REG_MIPS | KVM_REG_SIZE_U64 | \ - 0x20000 | 2) +#define KVM_REG_MIPS_COUNT_HZ (KVM_REG_MIPS_KVM | KVM_REG_SIZE_U64 | 2) + + +/* + * KVM_REG_MIPS_FPU - Floating Point and MIPS SIMD Architecture (MSA) registers. + * + * bits[15..8] - Register subset (see definitions below). + * bits[7..5] - Must be zero. + * bits[4..0] - Register number within register subset. + */ + +#define KVM_REG_MIPS_FPR (KVM_REG_MIPS_FPU | 0x0000000000000000ULL) +#define KVM_REG_MIPS_FCR (KVM_REG_MIPS_FPU | 0x0000000000000100ULL) +#define KVM_REG_MIPS_MSACR (KVM_REG_MIPS_FPU | 0x0000000000000200ULL) + +/* + * KVM_REG_MIPS_FPR - Floating point / Vector registers. + */ +#define KVM_REG_MIPS_FPR_32(n) (KVM_REG_MIPS_FPR | KVM_REG_SIZE_U32 | (n)) +#define KVM_REG_MIPS_FPR_64(n) (KVM_REG_MIPS_FPR | KVM_REG_SIZE_U64 | (n)) +#define KVM_REG_MIPS_VEC_128(n) (KVM_REG_MIPS_FPR | KVM_REG_SIZE_U128 | (n)) + +/* + * KVM_REG_MIPS_FCR - Floating point control registers. + */ +#define KVM_REG_MIPS_FCR_IR (KVM_REG_MIPS_FCR | KVM_REG_SIZE_U32 | 0) +#define KVM_REG_MIPS_FCR_CSR (KVM_REG_MIPS_FCR | KVM_REG_SIZE_U32 | 31) + +/* + * KVM_REG_MIPS_MSACR - MIPS SIMD Architecture (MSA) control registers. 
+ */ +#define KVM_REG_MIPS_MSA_IR (KVM_REG_MIPS_MSACR | KVM_REG_SIZE_U32 | 0) +#define KVM_REG_MIPS_MSA_CSR (KVM_REG_MIPS_MSACR | KVM_REG_SIZE_U32 | 1) + /* * KVM MIPS specific structures and definitions diff --git a/linux-headers/asm-s390/kvm.h b/linux-headers/asm-s390/kvm.h index c5a93eb0bc..512d8f1d4f 100644 --- a/linux-headers/asm-s390/kvm.h +++ b/linux-headers/asm-s390/kvm.h @@ -150,6 +150,7 @@ struct kvm_guest_debug_arch { #define KVM_SYNC_CRS (1UL << 3) #define KVM_SYNC_ARCH0 (1UL << 4) #define KVM_SYNC_PFAULT (1UL << 5) +#define KVM_SYNC_VRS (1UL << 6) /* definition of registers in kvm_run */ struct kvm_sync_regs { __u64 prefix; /* prefix register */ @@ -164,6 +165,9 @@ struct kvm_sync_regs { __u64 pft; /* pfault token [PFAULT] */ __u64 pfs; /* pfault select [PFAULT] */ __u64 pfc; /* pfault compare [PFAULT] */ + __u64 vrs[32][2]; /* vector registers */ + __u8 reserved[512]; /* for future vector expansion */ + __u32 fpc; /* only valid with vector registers */ }; #define KVM_REG_S390_TODPR (KVM_REG_S390 | KVM_REG_SIZE_U32 | 0x1) diff --git a/linux-headers/asm-x86/hyperv.h b/linux-headers/asm-x86/hyperv.h index 90c458e66e..ce6068dbcf 100644 --- a/linux-headers/asm-x86/hyperv.h +++ b/linux-headers/asm-x86/hyperv.h @@ -225,6 +225,8 @@ #define HV_STATUS_INVALID_HYPERCALL_CODE 2 #define HV_STATUS_INVALID_HYPERCALL_INPUT 3 #define HV_STATUS_INVALID_ALIGNMENT 4 +#define HV_STATUS_INSUFFICIENT_MEMORY 11 +#define HV_STATUS_INVALID_CONNECTION_ID 18 #define HV_STATUS_INSUFFICIENT_BUFFERS 19 typedef struct _HV_REFERENCE_TSC_PAGE { diff --git a/linux-headers/linux/kvm.h b/linux-headers/linux/kvm.h index 60a54c82a3..b96d9787dd 100644 --- a/linux-headers/linux/kvm.h +++ b/linux-headers/linux/kvm.h @@ -147,6 +147,16 @@ struct kvm_pit_config { #define KVM_PIT_SPEAKER_DUMMY 1 +struct kvm_s390_skeys { + __u64 start_gfn; + __u64 count; + __u64 skeydata_addr; + __u32 flags; + __u32 reserved[9]; +}; +#define KVM_S390_GET_SKEYS_NONE 1 +#define KVM_S390_SKEYS_MAX 1048576 + #define KVM_EXIT_UNKNOWN 0 #define KVM_EXIT_EXCEPTION 1 #define KVM_EXIT_IO 2 @@ -172,6 +182,7 @@ struct kvm_pit_config { #define KVM_EXIT_S390_TSCH 22 #define KVM_EXIT_EPR 23 #define KVM_EXIT_SYSTEM_EVENT 24 +#define KVM_EXIT_S390_STSI 25 /* For KVM_EXIT_INTERNAL_ERROR */ /* Emulate instruction failed. */ @@ -309,6 +320,15 @@ struct kvm_run { __u32 type; __u64 flags; } system_event; + /* KVM_EXIT_S390_STSI */ + struct { + __u64 addr; + __u8 ar; + __u8 reserved; + __u8 fc; + __u8 sel1; + __u16 sel2; + } s390_stsi; /* Fix the size of the union. 
*/ char padding[256]; }; @@ -324,7 +344,7 @@ struct kvm_run { __u64 kvm_dirty_regs; union { struct kvm_sync_regs regs; - char padding[1024]; + char padding[2048]; } s; }; @@ -365,6 +385,24 @@ struct kvm_translation { __u8 pad[5]; }; +/* for KVM_S390_MEM_OP */ +struct kvm_s390_mem_op { + /* in */ + __u64 gaddr; /* the guest address */ + __u64 flags; /* flags */ + __u32 size; /* amount of bytes */ + __u32 op; /* type of operation */ + __u64 buf; /* buffer in userspace */ + __u8 ar; /* the access register number */ + __u8 reserved[31]; /* should be set to 0 */ +}; +/* types for kvm_s390_mem_op->op */ +#define KVM_S390_MEMOP_LOGICAL_READ 0 +#define KVM_S390_MEMOP_LOGICAL_WRITE 1 +/* flags for kvm_s390_mem_op->flags */ +#define KVM_S390_MEMOP_F_CHECK_ONLY (1ULL << 0) +#define KVM_S390_MEMOP_F_INJECT_EXCEPTION (1ULL << 1) + /* for KVM_INTERRUPT */ struct kvm_interrupt { /* in */ @@ -520,6 +558,13 @@ struct kvm_s390_irq { } u; }; +struct kvm_s390_irq_state { + __u64 buf; + __u32 flags; + __u32 len; + __u32 reserved[4]; +}; + /* for KVM_SET_GUEST_DEBUG */ #define KVM_GUESTDBG_ENABLE 0x00000001 @@ -760,6 +805,15 @@ struct kvm_ppc_smmu_info { #define KVM_CAP_PPC_ENABLE_HCALL 104 #define KVM_CAP_CHECK_EXTENSION_VM 105 #define KVM_CAP_S390_USER_SIGP 106 +#define KVM_CAP_S390_VECTOR_REGISTERS 107 +#define KVM_CAP_S390_MEM_OP 108 +#define KVM_CAP_S390_USER_STSI 109 +#define KVM_CAP_S390_SKEYS 110 +#define KVM_CAP_MIPS_FPU 111 +#define KVM_CAP_MIPS_MSA 112 +#define KVM_CAP_S390_INJECT_IRQ 113 +#define KVM_CAP_S390_IRQ_STATE 114 +#define KVM_CAP_PPC_HWRNG 115 #ifdef KVM_CAP_IRQ_ROUTING @@ -1135,6 +1189,16 @@ struct kvm_s390_ucas_mapping { #define KVM_ARM_VCPU_INIT _IOW(KVMIO, 0xae, struct kvm_vcpu_init) #define KVM_ARM_PREFERRED_TARGET _IOR(KVMIO, 0xaf, struct kvm_vcpu_init) #define KVM_GET_REG_LIST _IOWR(KVMIO, 0xb0, struct kvm_reg_list) +/* Available with KVM_CAP_S390_MEM_OP */ +#define KVM_S390_MEM_OP _IOW(KVMIO, 0xb1, struct kvm_s390_mem_op) +/* Available with KVM_CAP_S390_SKEYS */ +#define KVM_S390_GET_SKEYS _IOW(KVMIO, 0xb2, struct kvm_s390_skeys) +#define KVM_S390_SET_SKEYS _IOW(KVMIO, 0xb3, struct kvm_s390_skeys) +/* Available with KVM_CAP_S390_INJECT_IRQ */ +#define KVM_S390_IRQ _IOW(KVMIO, 0xb4, struct kvm_s390_irq) +/* Available with KVM_CAP_S390_IRQ_STATE */ +#define KVM_S390_SET_IRQ_STATE _IOW(KVMIO, 0xb5, struct kvm_s390_irq_state) +#define KVM_S390_GET_IRQ_STATE _IOW(KVMIO, 0xb6, struct kvm_s390_irq_state) #define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0) #define KVM_DEV_ASSIGN_PCI_2_3 (1 << 1) diff --git a/linux-headers/linux/vfio.h b/linux-headers/linux/vfio.h index 95ba870302..0508d0b5d2 100644 --- a/linux-headers/linux/vfio.h +++ b/linux-headers/linux/vfio.h @@ -160,6 +160,8 @@ struct vfio_device_info { __u32 flags; #define VFIO_DEVICE_FLAGS_RESET (1 << 0) /* Device supports reset */ #define VFIO_DEVICE_FLAGS_PCI (1 << 1) /* vfio-pci device */ +#define VFIO_DEVICE_FLAGS_PLATFORM (1 << 2) /* vfio-platform device */ +#define VFIO_DEVICE_FLAGS_AMBA (1 << 3) /* vfio-amba device */ __u32 num_regions; /* Max region index + 1 */ __u32 num_irqs; /* Max IRQ index + 1 */ }; diff --git a/linux-user/elfload.c b/linux-user/elfload.c index 399c021337..0ba97062b7 100644 --- a/linux-user/elfload.c +++ b/linux-user/elfload.c @@ -2887,8 +2887,7 @@ static int write_note_info(struct elf_note_info *info, int fd) return (error); /* write prstatus for each thread */ - for (ets = info->thread_list.tqh_first; ets != NULL; - ets = ets->ets_link.tqe_next) { + QTAILQ_FOREACH(ets, &info->thread_list, ets_link) { if 
((error = write_note(&ets->notes[0], fd)) != 0) return (error); } diff --git a/linux-user/main.c b/linux-user/main.c index a8adb0404b..3f32db0afd 100644 --- a/linux-user/main.c +++ b/linux-user/main.c @@ -3934,7 +3934,6 @@ int main(int argc, char **argv, char **envp) #endif } tcg_exec_init(0); - cpu_exec_init_all(); /* NOTE: we need to init the CPU at this stage to get qemu_host_page_size */ cpu = cpu_init(cpu_model); @@ -368,57 +368,84 @@ static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size) } } -static void memory_region_oldmmio_read_accessor(MemoryRegion *mr, +static MemTxResult memory_region_oldmmio_read_accessor(MemoryRegion *mr, + hwaddr addr, + uint64_t *value, + unsigned size, + unsigned shift, + uint64_t mask, + MemTxAttrs attrs) +{ + uint64_t tmp; + + tmp = mr->ops->old_mmio.read[ctz32(size)](mr->opaque, addr); + trace_memory_region_ops_read(mr, addr, tmp, size); + *value |= (tmp & mask) << shift; + return MEMTX_OK; +} + +static MemTxResult memory_region_read_accessor(MemoryRegion *mr, hwaddr addr, uint64_t *value, unsigned size, unsigned shift, - uint64_t mask) + uint64_t mask, + MemTxAttrs attrs) { uint64_t tmp; - tmp = mr->ops->old_mmio.read[ctz32(size)](mr->opaque, addr); + if (mr->flush_coalesced_mmio) { + qemu_flush_coalesced_mmio_buffer(); + } + tmp = mr->ops->read(mr->opaque, addr, size); trace_memory_region_ops_read(mr, addr, tmp, size); *value |= (tmp & mask) << shift; + return MEMTX_OK; } -static void memory_region_read_accessor(MemoryRegion *mr, - hwaddr addr, - uint64_t *value, - unsigned size, - unsigned shift, - uint64_t mask) +static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr, + hwaddr addr, + uint64_t *value, + unsigned size, + unsigned shift, + uint64_t mask, + MemTxAttrs attrs) { - uint64_t tmp; + uint64_t tmp = 0; + MemTxResult r; if (mr->flush_coalesced_mmio) { qemu_flush_coalesced_mmio_buffer(); } - tmp = mr->ops->read(mr->opaque, addr, size); + r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs); trace_memory_region_ops_read(mr, addr, tmp, size); *value |= (tmp & mask) << shift; + return r; } -static void memory_region_oldmmio_write_accessor(MemoryRegion *mr, - hwaddr addr, - uint64_t *value, - unsigned size, - unsigned shift, - uint64_t mask) +static MemTxResult memory_region_oldmmio_write_accessor(MemoryRegion *mr, + hwaddr addr, + uint64_t *value, + unsigned size, + unsigned shift, + uint64_t mask, + MemTxAttrs attrs) { uint64_t tmp; tmp = (*value >> shift) & mask; trace_memory_region_ops_write(mr, addr, tmp, size); mr->ops->old_mmio.write[ctz32(size)](mr->opaque, addr, tmp); + return MEMTX_OK; } -static void memory_region_write_accessor(MemoryRegion *mr, - hwaddr addr, - uint64_t *value, - unsigned size, - unsigned shift, - uint64_t mask) +static MemTxResult memory_region_write_accessor(MemoryRegion *mr, + hwaddr addr, + uint64_t *value, + unsigned size, + unsigned shift, + uint64_t mask, + MemTxAttrs attrs) { uint64_t tmp; @@ -428,24 +455,46 @@ static void memory_region_write_accessor(MemoryRegion *mr, tmp = (*value >> shift) & mask; trace_memory_region_ops_write(mr, addr, tmp, size); mr->ops->write(mr->opaque, addr, tmp, size); + return MEMTX_OK; +} + +static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr, + hwaddr addr, + uint64_t *value, + unsigned size, + unsigned shift, + uint64_t mask, + MemTxAttrs attrs) +{ + uint64_t tmp; + + if (mr->flush_coalesced_mmio) { + qemu_flush_coalesced_mmio_buffer(); + } + tmp = (*value >> shift) & mask; + 
trace_memory_region_ops_write(mr, addr, tmp, size); + return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs); } -static void access_with_adjusted_size(hwaddr addr, +static MemTxResult access_with_adjusted_size(hwaddr addr, uint64_t *value, unsigned size, unsigned access_size_min, unsigned access_size_max, - void (*access)(MemoryRegion *mr, - hwaddr addr, - uint64_t *value, - unsigned size, - unsigned shift, - uint64_t mask), - MemoryRegion *mr) + MemTxResult (*access)(MemoryRegion *mr, + hwaddr addr, + uint64_t *value, + unsigned size, + unsigned shift, + uint64_t mask, + MemTxAttrs attrs), + MemoryRegion *mr, + MemTxAttrs attrs) { uint64_t access_mask; unsigned access_size; unsigned i; + MemTxResult r = MEMTX_OK; if (!access_size_min) { access_size_min = 1; @@ -459,14 +508,16 @@ static void access_with_adjusted_size(hwaddr addr, access_mask = -1ULL >> (64 - access_size * 8); if (memory_region_big_endian(mr)) { for (i = 0; i < size; i += access_size) { - access(mr, addr + i, value, access_size, - (size - access_size - i) * 8, access_mask); + r |= access(mr, addr + i, value, access_size, + (size - access_size - i) * 8, access_mask, attrs); } } else { for (i = 0; i < size; i += access_size) { - access(mr, addr + i, value, access_size, i * 8, access_mask); + r |= access(mr, addr + i, value, access_size, i * 8, + access_mask, attrs); } } + return r; } static AddressSpace *memory_region_to_address_space(MemoryRegion *mr) @@ -1053,62 +1104,82 @@ bool memory_region_access_valid(MemoryRegion *mr, return true; } -static uint64_t memory_region_dispatch_read1(MemoryRegion *mr, - hwaddr addr, - unsigned size) +static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr, + hwaddr addr, + uint64_t *pval, + unsigned size, + MemTxAttrs attrs) { - uint64_t data = 0; + *pval = 0; if (mr->ops->read) { - access_with_adjusted_size(addr, &data, size, - mr->ops->impl.min_access_size, - mr->ops->impl.max_access_size, - memory_region_read_accessor, mr); + return access_with_adjusted_size(addr, pval, size, + mr->ops->impl.min_access_size, + mr->ops->impl.max_access_size, + memory_region_read_accessor, + mr, attrs); + } else if (mr->ops->read_with_attrs) { + return access_with_adjusted_size(addr, pval, size, + mr->ops->impl.min_access_size, + mr->ops->impl.max_access_size, + memory_region_read_with_attrs_accessor, + mr, attrs); } else { - access_with_adjusted_size(addr, &data, size, 1, 4, - memory_region_oldmmio_read_accessor, mr); + return access_with_adjusted_size(addr, pval, size, 1, 4, + memory_region_oldmmio_read_accessor, + mr, attrs); } - - return data; } -static bool memory_region_dispatch_read(MemoryRegion *mr, +MemTxResult memory_region_dispatch_read(MemoryRegion *mr, hwaddr addr, uint64_t *pval, - unsigned size) + unsigned size, + MemTxAttrs attrs) { + MemTxResult r; + if (!memory_region_access_valid(mr, addr, size, false)) { *pval = unassigned_mem_read(mr, addr, size); - return true; + return MEMTX_DECODE_ERROR; } - *pval = memory_region_dispatch_read1(mr, addr, size); + r = memory_region_dispatch_read1(mr, addr, pval, size, attrs); adjust_endianness(mr, pval, size); - return false; + return r; } -static bool memory_region_dispatch_write(MemoryRegion *mr, +MemTxResult memory_region_dispatch_write(MemoryRegion *mr, hwaddr addr, uint64_t data, - unsigned size) + unsigned size, + MemTxAttrs attrs) { if (!memory_region_access_valid(mr, addr, size, true)) { unassigned_mem_write(mr, addr, data, size); - return true; + return MEMTX_DECODE_ERROR; } adjust_endianness(mr, &data, size); if 
(mr->ops->write) { - access_with_adjusted_size(addr, &data, size, - mr->ops->impl.min_access_size, - mr->ops->impl.max_access_size, - memory_region_write_accessor, mr); + return access_with_adjusted_size(addr, &data, size, + mr->ops->impl.min_access_size, + mr->ops->impl.max_access_size, + memory_region_write_accessor, mr, + attrs); + } else if (mr->ops->write_with_attrs) { + return + access_with_adjusted_size(addr, &data, size, + mr->ops->impl.min_access_size, + mr->ops->impl.max_access_size, + memory_region_write_with_attrs_accessor, + mr, attrs); } else { - access_with_adjusted_size(addr, &data, size, 1, 4, - memory_region_oldmmio_write_accessor, mr); + return access_with_adjusted_size(addr, &data, size, 1, 4, + memory_region_oldmmio_write_accessor, + mr, attrs); } - return false; } void memory_region_init_io(MemoryRegion *mr, @@ -1452,6 +1523,13 @@ void *memory_region_get_ram_ptr(MemoryRegion *mr) return qemu_get_ram_ptr(mr->ram_addr & TARGET_PAGE_MASK); } +void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp) +{ + assert(mr->terminates); + + qemu_ram_resize(mr->ram_addr, newsize, errp); +} + static void memory_region_update_coalesced_range_as(MemoryRegion *mr, AddressSpace *as) { FlatView *view; @@ -1992,17 +2070,6 @@ void address_space_destroy(AddressSpace *as) call_rcu(as, do_address_space_destroy, rcu); } -bool io_mem_read(MemoryRegion *mr, hwaddr addr, uint64_t *pval, unsigned size) -{ - return memory_region_dispatch_read(mr, addr, pval, size); -} - -bool io_mem_write(MemoryRegion *mr, hwaddr addr, - uint64_t val, unsigned size) -{ - return memory_region_dispatch_write(mr, addr, val, size); -} - typedef struct MemoryRegionList MemoryRegionList; struct MemoryRegionList { @@ -2022,7 +2089,7 @@ static void mtree_print_mr(fprintf_function mon_printf, void *f, const MemoryRegion *submr; unsigned int i; - if (!mr || !mr->enabled) { + if (!mr) { return; } @@ -2048,7 +2115,7 @@ static void mtree_print_mr(fprintf_function mon_printf, void *f, } mon_printf(f, TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d, %c%c): alias %s @%s " TARGET_FMT_plx - "-" TARGET_FMT_plx "\n", + "-" TARGET_FMT_plx "%s\n", base + mr->addr, base + mr->addr + (int128_nz(mr->size) ? @@ -2064,10 +2131,11 @@ static void mtree_print_mr(fprintf_function mon_printf, void *f, mr->alias_offset + (int128_nz(mr->size) ? (hwaddr)int128_get64(int128_sub(mr->size, - int128_one())) : 0)); + int128_one())) : 0), + mr->enabled ? "" : " [disabled]"); } else { mon_printf(f, - TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d, %c%c): %s\n", + TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d, %c%c): %s%s\n", base + mr->addr, base + mr->addr + (int128_nz(mr->size) ? @@ -2077,7 +2145,8 @@ static void mtree_print_mr(fprintf_function mon_printf, void *f, mr->romd_mode ? 'R' : '-', !mr->readonly && !(mr->rom_device && mr->romd_mode) ? 'W' : '-', - memory_region_name(mr)); + memory_region_name(mr), + mr->enabled ? 
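/* [Editor's sketch, not part of the patch] With the MemTxAttrs plumbing introduced above, a device model can implement the new read_with_attrs/write_with_attrs callbacks instead of read/write, inspect the transaction attributes and report failures as a MemTxResult; "MyDevState" and its "regs" array are hypothetical: */ static MemTxResult mydev_read(void *opaque, hwaddr addr, uint64_t *data, unsigned size, MemTxAttrs attrs) { MyDevState *s = opaque; if (!attrs.secure) { return MEMTX_ERROR; /* e.g. refuse non-secure accesses */ } *data = s->regs[addr >> 2]; /* word-indexed 32-bit register file */ return MEMTX_OK; } static const MemoryRegionOps mydev_ops = { .read_with_attrs = mydev_read, .endianness = DEVICE_NATIVE_ENDIAN, };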
"" : " [disabled]"); } QTAILQ_INIT(&submr_print_queue); @@ -2118,15 +2187,16 @@ void mtree_info(fprintf_function mon_printf, void *f) QTAILQ_INIT(&ml_head); QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) { - mon_printf(f, "%s\n", as->name); - mtree_print_mr(mon_printf, f, as->root, 0, 0, &ml_head); + mon_printf(f, "address-space: %s\n", as->name); + mtree_print_mr(mon_printf, f, as->root, 1, 0, &ml_head); + mon_printf(f, "\n"); } - mon_printf(f, "aliases\n"); /* print aliased regions */ QTAILQ_FOREACH(ml, &ml_head, queue) { - mon_printf(f, "%s\n", memory_region_name(ml->mr)); - mtree_print_mr(mon_printf, f, ml->mr, 0, 0, &ml_head); + mon_printf(f, "memory-region: %s\n", memory_region_name(ml->mr)); + mtree_print_mr(mon_printf, f, ml->mr, 1, 0, &ml_head); + mon_printf(f, "\n"); } QTAILQ_FOREACH_SAFE(ml, &ml_head, queue, ml2) { diff --git a/migration/block.c b/migration/block.c index 085c0fae05..ddb59ccf87 100644 --- a/migration/block.c +++ b/migration/block.c @@ -304,7 +304,7 @@ static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds) blk->aiocb = bdrv_aio_readv(bs, cur_sector, &blk->qiov, nr_sectors, blk_mig_read_cb, blk); - bdrv_reset_dirty_bitmap(bs, bmds->dirty_bitmap, cur_sector, nr_sectors); + bdrv_reset_dirty_bitmap(bmds->dirty_bitmap, cur_sector, nr_sectors); qemu_mutex_unlock_iothread(); bmds->cur_sector = cur_sector + nr_sectors; @@ -320,7 +320,7 @@ static int set_dirty_tracking(void) QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) { bmds->dirty_bitmap = bdrv_create_dirty_bitmap(bmds->bs, BLOCK_SIZE, - NULL); + NULL, NULL); if (!bmds->dirty_bitmap) { ret = -errno; goto fail; @@ -497,8 +497,7 @@ static int mig_save_device_dirty(QEMUFile *f, BlkMigDevState *bmds, g_free(blk); } - bdrv_reset_dirty_bitmap(bmds->bs, bmds->dirty_bitmap, sector, - nr_sectors); + bdrv_reset_dirty_bitmap(bmds->dirty_bitmap, sector, nr_sectors); break; } sector += BDRV_SECTORS_PER_DIRTY_CHUNK; @@ -584,7 +583,7 @@ static int64_t get_remaining_dirty(void) int64_t dirty = 0; QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) { - dirty += bdrv_get_dirty_count(bmds->bs, bmds->dirty_bitmap); + dirty += bdrv_get_dirty_count(bmds->dirty_bitmap); } return dirty << BDRV_SECTOR_BITS; diff --git a/migration/migration.c b/migration/migration.c index bc424907f3..732d229708 100644 --- a/migration/migration.c +++ b/migration/migration.c @@ -33,6 +33,14 @@ #define BUFFER_DELAY 100 #define XFER_LIMIT_RATIO (1000 / BUFFER_DELAY) +/* Default compression thread count */ +#define DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT 8 +/* Default decompression thread count, usually decompression is at + * least 4 times as fast as compression.*/ +#define DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT 2 +/*0: means nocompress, 1: best speed, ... 
9: best compress ratio */ +#define DEFAULT_MIGRATE_COMPRESS_LEVEL 1 + /* Migration XBZRLE default cache size */ #define DEFAULT_MIGRATE_CACHE_SIZE (64 * 1024 * 1024) @@ -52,6 +60,12 @@ MigrationState *migrate_get_current(void) .bandwidth_limit = MAX_THROTTLE, .xbzrle_cache_size = DEFAULT_MIGRATE_CACHE_SIZE, .mbps = -1, + .parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL] = + DEFAULT_MIGRATE_COMPRESS_LEVEL, + .parameters[MIGRATION_PARAMETER_COMPRESS_THREADS] = + DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT, + .parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS] = + DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT, }; return &current_migration; @@ -106,6 +120,7 @@ static void process_incoming_migration_co(void *opaque) free_xbzrle_decoded_buf(); if (ret < 0) { error_report("load of migration failed: %s", strerror(-ret)); + migrate_decompress_threads_join(); exit(EXIT_FAILURE); } qemu_announce_self(); @@ -114,6 +129,7 @@ static void process_incoming_migration_co(void *opaque) bdrv_invalidate_cache_all(&local_err); if (local_err) { error_report_err(local_err); + migrate_decompress_threads_join(); exit(EXIT_FAILURE); } @@ -122,6 +138,7 @@ static void process_incoming_migration_co(void *opaque) } else { runstate_set(RUN_STATE_PAUSED); } + migrate_decompress_threads_join(); } void process_incoming_migration(QEMUFile *f) @@ -130,6 +147,7 @@ void process_incoming_migration(QEMUFile *f) int fd = qemu_get_fd(f); assert(fd != -1); + migrate_decompress_threads_create(); qemu_set_nonblock(fd); qemu_coroutine_enter(co, f); } @@ -170,6 +188,21 @@ MigrationCapabilityStatusList *qmp_query_migrate_capabilities(Error **errp) return head; } +MigrationParameters *qmp_query_migrate_parameters(Error **errp) +{ + MigrationParameters *params; + MigrationState *s = migrate_get_current(); + + params = g_malloc0(sizeof(*params)); + params->compress_level = s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL]; + params->compress_threads = + s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS]; + params->decompress_threads = + s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS]; + + return params; +} + static void get_xbzrle_cache_stats(MigrationInfo *info) { if (migrate_use_xbzrle()) { @@ -283,6 +316,47 @@ void qmp_migrate_set_capabilities(MigrationCapabilityStatusList *params, } } +void qmp_migrate_set_parameters(bool has_compress_level, + int64_t compress_level, + bool has_compress_threads, + int64_t compress_threads, + bool has_decompress_threads, + int64_t decompress_threads, Error **errp) +{ + MigrationState *s = migrate_get_current(); + + if (has_compress_level && (compress_level < 0 || compress_level > 9)) { + error_set(errp, QERR_INVALID_PARAMETER_VALUE, "compress_level", + "is invalid, it should be in the range of 0 to 9"); + return; + } + if (has_compress_threads && + (compress_threads < 1 || compress_threads > 255)) { + error_set(errp, QERR_INVALID_PARAMETER_VALUE, + "compress_threads", + "is invalid, it should be in the range of 1 to 255"); + return; + } + if (has_decompress_threads && + (decompress_threads < 1 || decompress_threads > 255)) { + error_set(errp, QERR_INVALID_PARAMETER_VALUE, + "decompress_threads", + "is invalid, it should be in the range of 1 to 255"); + return; + } + + if (has_compress_level) { + s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL] = compress_level; + } + if (has_compress_threads) { + s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS] = compress_threads; + } + if (has_decompress_threads) { + s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS] = + decompress_threads; + } +} + /* shared migration 
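/* [Editor's note, not part of the patch] With the schema additions later in this series, these knobs are driven over QMP before starting a migration; the values below are examples only: { "execute": "migrate-set-parameters", "arguments": { "compress-level": 1, "compress-threads": 8, "decompress-threads": 2 } } and { "execute": "query-migrate-parameters" }. Compression itself only takes effect once the "compress" migration capability has been enabled via migrate-set-capabilities, since the feature is disabled by default. */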
helpers */ static void migrate_set_state(MigrationState *s, int old_state, int new_state) @@ -305,6 +379,7 @@ static void migrate_fd_cleanup(void *opaque) qemu_thread_join(&s->thread); qemu_mutex_lock_iothread(); + migrate_compress_threads_join(); qemu_fclose(s->file); s->file = NULL; } @@ -390,6 +465,11 @@ static MigrationState *migrate_init(const MigrationParams *params) int64_t bandwidth_limit = s->bandwidth_limit; bool enabled_capabilities[MIGRATION_CAPABILITY_MAX]; int64_t xbzrle_cache_size = s->xbzrle_cache_size; + int compress_level = s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL]; + int compress_thread_count = + s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS]; + int decompress_thread_count = + s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS]; memcpy(enabled_capabilities, s->enabled_capabilities, sizeof(enabled_capabilities)); @@ -400,6 +480,11 @@ static MigrationState *migrate_init(const MigrationParams *params) sizeof(enabled_capabilities)); s->xbzrle_cache_size = xbzrle_cache_size; + s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL] = compress_level; + s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS] = + compress_thread_count; + s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS] = + decompress_thread_count; s->bandwidth_limit = bandwidth_limit; s->state = MIGRATION_STATUS_SETUP; trace_migrate_set_state(MIGRATION_STATUS_SETUP); @@ -587,6 +672,42 @@ bool migrate_zero_blocks(void) return s->enabled_capabilities[MIGRATION_CAPABILITY_ZERO_BLOCKS]; } +bool migrate_use_compression(void) +{ + MigrationState *s; + + s = migrate_get_current(); + + return s->enabled_capabilities[MIGRATION_CAPABILITY_COMPRESS]; +} + +int migrate_compress_level(void) +{ + MigrationState *s; + + s = migrate_get_current(); + + return s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL]; +} + +int migrate_compress_threads(void) +{ + MigrationState *s; + + s = migrate_get_current(); + + return s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS]; +} + +int migrate_decompress_threads(void) +{ + MigrationState *s; + + s = migrate_get_current(); + + return s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS]; +} + int migrate_use_xbzrle(void) { MigrationState *s; @@ -730,6 +851,7 @@ void migrate_fd_connect(MigrationState *s) /* Notify before starting migration thread */ notifier_list_notify(&migration_state_notifiers, s); + migrate_compress_threads_create(); qemu_thread_create(&s->thread, "migration", migration_thread, s, QEMU_THREAD_JOINABLE); } diff --git a/migration/qemu-file.c b/migration/qemu-file.c index 1a4f9868ed..2750365a7e 100644 --- a/migration/qemu-file.c +++ b/migration/qemu-file.c @@ -21,6 +21,7 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ +#include <zlib.h> #include "qemu-common.h" #include "qemu/iov.h" #include "qemu/sockets.h" @@ -546,3 +547,41 @@ uint64_t qemu_get_be64(QEMUFile *f) v |= qemu_get_be32(f); return v; } + +/* compress size bytes of data start at p with specific compression + * level and store the compressed data to the buffer of f. 
+ */ + +ssize_t qemu_put_compression_data(QEMUFile *f, const uint8_t *p, size_t size, + int level) +{ + ssize_t blen = IO_BUF_SIZE - f->buf_index - sizeof(int32_t); + + if (blen < compressBound(size)) { + return 0; + } + if (compress2(f->buf + f->buf_index + sizeof(int32_t), (uLongf *)&blen, + (Bytef *)p, size, level) != Z_OK) { + error_report("Compress Failed!"); + return 0; + } + qemu_put_be32(f, blen); + f->buf_index += blen; + return blen + sizeof(int32_t); +} + +/* Put the data in the buffer of f_src to the buffer of f_des, and + * then reset the buf_index of f_src to 0. + */ + +int qemu_put_qemu_file(QEMUFile *f_des, QEMUFile *f_src) +{ + int len = 0; + + if (f_src->buf_index > 0) { + len = f_src->buf_index; + qemu_put_buffer(f_des, f_src->buf, f_src->buf_index); + f_src->buf_index = 0; + } + return len; +} @@ -1086,7 +1086,7 @@ static void hmp_info_trace_events(Monitor *mon, const QDict *qdict) } static int client_migrate_info(Monitor *mon, const QDict *qdict, - MonitorCompletion cb, void *opaque) + QObject **ret_data) { const char *protocol = qdict_get_str(qdict, "protocol"); const char *hostname = qdict_get_str(qdict, "hostname"); @@ -1108,8 +1108,7 @@ static int client_migrate_info(Monitor *mon, const QDict *qdict, return -1; } - ret = qemu_spice_migrate_info(hostname, port, tls_port, subject, - cb, opaque); + ret = qemu_spice_migrate_info(hostname, port, tls_port, subject); if (ret != 0) { qerror_report(QERR_UNDEFINED_ERROR); return -1; @@ -1385,7 +1384,8 @@ static void hmp_sum(Monitor *mon, const QDict *qdict) sum = 0; for(addr = start; addr < (start + size); addr++) { - uint8_t val = ldub_phys(&address_space_memory, addr); + uint8_t val = address_space_ldub(&address_space_memory, addr, + MEMTXATTRS_UNSPECIFIED, NULL); /* BSD sum algorithm ('sum' Unix command) */ sum = (sum >> 1) | (sum << 15); sum += val; @@ -2859,6 +2859,13 @@ static mon_cmd_t info_cmds[] = { .mhandler.cmd = hmp_info_migrate_capabilities, }, { + .name = "migrate_parameters", + .args_type = "", + .params = "", + .help = "show current migration parameters", + .mhandler.cmd = hmp_info_migrate_parameters, + }, + { .name = "migrate_cache_size", .args_type = "", .params = "", @@ -4390,14 +4397,6 @@ static void ringbuf_completion(ReadLineState *rs, const char *str) qapi_free_ChardevInfoList(start); } -void ringbuf_read_completion(ReadLineState *rs, int nb_args, const char *str) -{ - if (nb_args != 2) { - return; - } - ringbuf_completion(rs, str); -} - void ringbuf_write_completion(ReadLineState *rs, int nb_args, const char *str) { if (nb_args != 2) { @@ -4549,6 +4548,24 @@ void migrate_set_capability_completion(ReadLineState *rs, int nb_args, } } +void migrate_set_parameter_completion(ReadLineState *rs, int nb_args, + const char *str) +{ + size_t len; + + len = strlen(str); + readline_set_completion_index(rs, len); + if (nb_args == 2) { + int i; + for (i = 0; i < MIGRATION_PARAMETER_MAX; i++) { + const char *name = MigrationParameter_lookup[i]; + if (!strncmp(str, name, len)) { + readline_add_completion(rs, name); + } + } + } +} + void host_net_add_completion(ReadLineState *rs, int nb_args, const char *str) { int i; @@ -5395,11 +5412,6 @@ static void bdrv_password_cb(void *opaque, const char *password, monitor_read_command(mon, 1); } -ReadLineState *monitor_get_rs(Monitor *mon) -{ - return mon->rs; -} - int monitor_read_bdrv_key_start(Monitor *mon, BlockDriverState *bs, BlockCompletionFunc *completion_cb, void *opaque) @@ -86,6 +86,59 @@ #define NBD_OPT_ABORT (2) #define NBD_OPT_LIST (3) +/* NBD errors are based on 
errno numbers, so there is a 1:1 mapping, + * but only a limited set of errno values is specified in the protocol. + * Everything else is squashed to EINVAL. + */ +#define NBD_SUCCESS 0 +#define NBD_EPERM 1 +#define NBD_EIO 5 +#define NBD_ENOMEM 12 +#define NBD_EINVAL 22 +#define NBD_ENOSPC 28 + +static int system_errno_to_nbd_errno(int err) +{ + switch (err) { + case 0: + return NBD_SUCCESS; + case EPERM: + return NBD_EPERM; + case EIO: + return NBD_EIO; + case ENOMEM: + return NBD_ENOMEM; +#ifdef EDQUOT + case EDQUOT: +#endif + case EFBIG: + case ENOSPC: + return NBD_ENOSPC; + case EINVAL: + default: + return NBD_EINVAL; + } +} + +static int nbd_errno_to_system_errno(int err) +{ + switch (err) { + case NBD_SUCCESS: + return 0; + case NBD_EPERM: + return EPERM; + case NBD_EIO: + return EIO; + case NBD_ENOMEM: + return ENOMEM; + case NBD_ENOSPC: + return ENOSPC; + case NBD_EINVAL: + default: + return EINVAL; + } +} + /* Definitions for opaque data types */ typedef struct NBDRequest NBDRequest; @@ -681,7 +734,7 @@ int nbd_init(int fd, int csock, uint32_t flags, off_t size) TRACE("Setting size to %zd block(s)", (size_t)(size / BDRV_SECTOR_SIZE)); - if (ioctl(fd, NBD_SET_SIZE_BLOCKS, size / (size_t)BDRV_SECTOR_SIZE) < 0) { + if (ioctl(fd, NBD_SET_SIZE_BLOCKS, (size_t)(size / BDRV_SECTOR_SIZE)) < 0) { int serrno = errno; LOG("Failed setting size (in blocks)"); return -serrno; @@ -856,6 +909,8 @@ ssize_t nbd_receive_reply(int csock, struct nbd_reply *reply) reply->error = be32_to_cpup((uint32_t*)(buf + 4)); reply->handle = be64_to_cpup((uint64_t*)(buf + 8)); + reply->error = nbd_errno_to_system_errno(reply->error); + TRACE("Got reply: " "{ magic = 0x%x, .error = %d, handle = %" PRIu64" }", magic, reply->error, reply->handle); @@ -872,6 +927,8 @@ static ssize_t nbd_send_reply(int csock, struct nbd_reply *reply) uint8_t buf[NBD_REPLY_SIZE]; ssize_t ret; + reply->error = system_errno_to_nbd_errno(reply->error); + /* Reply [ 0 .. 3] magic (NBD_REPLY_MAGIC) [ 4 .. 7] error (0 == no error) diff --git a/qapi-schema.json b/qapi-schema.json index ac9594d66d..9c92482898 100644 --- a/qapi-schema.json +++ b/qapi-schema.json @@ -71,7 +71,7 @@ # # Since 0.14.0 ## -{ 'type': 'NameInfo', 'data': {'*name': 'str'} } +{ 'struct': 'NameInfo', 'data': {'*name': 'str'} } ## # @query-name: @@ -95,7 +95,7 @@ # # Since: 0.14.0 ## -{ 'type': 'KvmInfo', 'data': {'enabled': 'bool', 'present': 'bool'} } +{ 'struct': 'KvmInfo', 'data': {'enabled': 'bool', 'present': 'bool'} } ## # @query-kvm: @@ -170,7 +170,7 @@ # # Notes: @singlestep is enabled through the GDB stub ## -{ 'type': 'StatusInfo', +{ 'struct': 'StatusInfo', 'data': {'running': 'bool', 'singlestep': 'bool', 'status': 'RunState'} } ## @@ -195,7 +195,7 @@ # # Notes: If no UUID was specified for the guest, a null UUID is returned. 
## -{ 'type': 'UuidInfo', 'data': {'UUID': 'str'} } +{ 'struct': 'UuidInfo', 'data': {'UUID': 'str'} } ## # @query-uuid: @@ -226,7 +226,7 @@ # # Since: 0.14.0 ## -{ 'type': 'ChardevInfo', 'data': {'label': 'str', +{ 'struct': 'ChardevInfo', 'data': {'label': 'str', 'filename': 'str', 'frontend-open': 'bool'} } @@ -250,7 +250,7 @@ # # Since: 2.0 ## -{ 'type': 'ChardevBackendInfo', 'data': {'name': 'str'} } +{ 'struct': 'ChardevBackendInfo', 'data': {'name': 'str'} } ## # @query-chardev-backends: @@ -339,7 +339,7 @@ # # Since: 1.2.0 ## -{ 'type': 'EventInfo', 'data': {'name': 'str'} } +{ 'struct': 'EventInfo', 'data': {'name': 'str'} } ## # @query-events: @@ -380,7 +380,7 @@ # # Since: 0.14.0 ## -{ 'type': 'MigrationStats', +{ 'struct': 'MigrationStats', 'data': {'transferred': 'int', 'remaining': 'int', 'total': 'int' , 'duplicate': 'int', 'skipped': 'int', 'normal': 'int', 'normal-bytes': 'int', 'dirty-pages-rate' : 'int', @@ -405,7 +405,7 @@ # # Since: 1.2 ## -{ 'type': 'XBZRLECacheStats', +{ 'struct': 'XBZRLECacheStats', 'data': {'cache-size': 'int', 'bytes': 'int', 'pages': 'int', 'cache-miss': 'int', 'cache-miss-rate': 'number', 'overflow': 'int' } } @@ -476,7 +476,7 @@ # # Since: 0.14.0 ## -{ 'type': 'MigrationInfo', +{ 'struct': 'MigrationInfo', 'data': {'*status': 'MigrationStatus', '*ram': 'MigrationStats', '*disk': 'MigrationStats', '*xbzrle-cache': 'XBZRLECacheStats', @@ -515,13 +515,22 @@ # to enable the capability on the source VM. The feature is disabled by # default. (since 1.6) # +# @compress: Use multiple compression threads to accelerate live migration. +# This feature can help to reduce the migration traffic, by sending +# compressed pages. Please note that if compress and xbzrle are both +# on, compress only takes effect in the ram bulk stage, after that, +# it will be disabled and only xbzrle takes effect, this can help to +# minimize migration traffic. The feature is disabled by default. +# (since 2.4 ) +# # @auto-converge: If enabled, QEMU will automatically throttle down the guest # to speed up convergence of RAM migration. (since 1.6) # # Since: 1.2 ## { 'enum': 'MigrationCapability', - 'data': ['xbzrle', 'rdma-pin-all', 'auto-converge', 'zero-blocks'] } + 'data': ['xbzrle', 'rdma-pin-all', 'auto-converge', 'zero-blocks', + 'compress'] } ## # @MigrationCapabilityStatus @@ -534,7 +543,7 @@ # # Since: 1.2 ## -{ 'type': 'MigrationCapabilityStatus', +{ 'struct': 'MigrationCapabilityStatus', 'data': { 'capability' : 'MigrationCapability', 'state' : 'bool' } } ## @@ -560,6 +569,74 @@ ## { 'command': 'query-migrate-capabilities', 'returns': ['MigrationCapabilityStatus']} +# @MigrationParameter +# +# Migration parameters enumeration +# +# @compress-level: Set the compression level to be used in live migration, +# the compression level is an integer between 0 and 9, where 0 means +# no compression, 1 means the best compression speed, and 9 means best +# compression ratio which will consume more CPU. +# +# @compress-threads: Set compression thread count to be used in live migration, +# the compression thread count is an integer between 1 and 255. +# +# @decompress-threads: Set decompression thread count to be used in live +# migration, the decompression thread count is an integer between 1 +# and 255. Usually, decompression is at least 4 times as fast as +# compression, so set the decompress-threads to the number about 1/4 +# of compress-threads is adequate. 
+# +# Since: 2.4 +## +{ 'enum': 'MigrationParameter', + 'data': ['compress-level', 'compress-threads', 'decompress-threads'] } + +# +# @migrate-set-parameters +# +# Set the following migration parameters +# +# @compress-level: compression level +# +# @compress-threads: compression thread count +# +# @decompress-threads: decompression thread count +# +# Since: 2.4 +## +{ 'command': 'migrate-set-parameters', + 'data': { '*compress-level': 'int', + '*compress-threads': 'int', + '*decompress-threads': 'int'} } + +# +# @MigrationParameters +# +# @compress-level: compression level +# +# @compress-threads: compression thread count +# +# @decompress-threads: decompression thread count +# +# Since: 2.4 +## +{ 'struct': 'MigrationParameters', + 'data': { 'compress-level': 'int', + 'compress-threads': 'int', + 'decompress-threads': 'int'} } +## +# @query-migrate-parameters +# +# Returns information about the current migration parameters +# +# Returns: @MigrationParameters +# +# Since: 2.4 +## +{ 'command': 'query-migrate-parameters', + 'returns': 'MigrationParameters' } + ## # @MouseInfo: # @@ -575,7 +652,7 @@ # # Since: 0.14.0 ## -{ 'type': 'MouseInfo', +{ 'struct': 'MouseInfo', 'data': {'name': 'str', 'index': 'int', 'current': 'bool', 'absolute': 'bool'} } @@ -621,7 +698,7 @@ # Notes: @halted is a transient state that changes frequently. By the time the # data is sent to the client, the guest may no longer be halted. ## -{ 'type': 'CpuInfo', +{ 'struct': 'CpuInfo', 'data': {'CPU': 'int', 'current': 'bool', 'halted': 'bool', '*pc': 'int', '*nip': 'int', '*npc': 'int', '*PC': 'int', 'thread_id': 'int'} } @@ -647,7 +724,7 @@ # # Since: 2.0 ## -{ 'type': 'IOThreadInfo', +{ 'struct': 'IOThreadInfo', 'data': {'id': 'str', 'thread-id': 'int'} } ## @@ -700,7 +777,7 @@ # # Since: 2.1 ## -{ 'type': 'VncBasicInfo', +{ 'struct': 'VncBasicInfo', 'data': { 'host': 'str', 'service': 'str', 'family': 'NetworkAddressFamily', @@ -715,7 +792,7 @@ # # Since: 2.1 ## -{ 'type': 'VncServerInfo', +{ 'struct': 'VncServerInfo', 'base': 'VncBasicInfo', 'data': { '*auth': 'str' } } @@ -732,7 +809,7 @@ # # Since: 0.14.0 ## -{ 'type': 'VncClientInfo', +{ 'struct': 'VncClientInfo', 'base': 'VncBasicInfo', 'data': { '*x509_dname': 'str', '*sasl_username': 'str' } } @@ -772,7 +849,7 @@ # # Since: 0.14.0 ## -{ 'type': 'VncInfo', +{ 'struct': 'VncInfo', 'data': {'enabled': 'bool', '*host': 'str', '*family': 'NetworkAddressFamily', '*service': 'str', '*auth': 'str', '*clients': ['VncClientInfo']} } @@ -826,7 +903,7 @@ # # Since: 2.3 ## -{ 'type': 'VncInfo2', +{ 'struct': 'VncInfo2', 'data': { 'id' : 'str', 'server' : ['VncBasicInfo'], 'clients' : ['VncClientInfo'], @@ -869,7 +946,7 @@ # # Since: 2.1 ## -{ 'type': 'SpiceBasicInfo', +{ 'struct': 'SpiceBasicInfo', 'data': { 'host': 'str', 'port': 'str', 'family': 'NetworkAddressFamily' } } @@ -883,7 +960,7 @@ # # Since: 2.1 ## -{ 'type': 'SpiceServerInfo', +{ 'struct': 'SpiceServerInfo', 'base': 'SpiceBasicInfo', 'data': { '*auth': 'str' } } @@ -907,7 +984,7 @@ # # Since: 0.14.0 ## -{ 'type': 'SpiceChannel', +{ 'struct': 'SpiceChannel', 'base': 'SpiceBasicInfo', 'data': {'connection-id': 'int', 'channel-type': 'int', 'channel-id': 'int', 'tls': 'bool'} } @@ -965,7 +1042,7 @@ # # Since: 0.14.0 ## -{ 'type': 'SpiceInfo', +{ 'struct': 'SpiceInfo', 'data': {'enabled': 'bool', 'migrated': 'bool', '*host': 'str', '*port': 'int', '*tls-port': 'int', '*auth': 'str', '*compiled-version': 'str', 'mouse-mode': 'SpiceQueryMouseMode', '*channels': ['SpiceChannel']} } @@ -991,7 +1068,7 @@ # Since: 
0.14.0 # ## -{ 'type': 'BalloonInfo', 'data': {'actual': 'int' } } +{ 'struct': 'BalloonInfo', 'data': {'actual': 'int' } } ## # @query-balloon: @@ -1018,7 +1095,7 @@ # # Since: 0.14.0 ## -{ 'type': 'PciMemoryRange', 'data': {'base': 'int', 'limit': 'int'} } +{ 'struct': 'PciMemoryRange', 'data': {'base': 'int', 'limit': 'int'} } ## # @PciMemoryRegion @@ -1036,41 +1113,80 @@ # # Since: 0.14.0 ## -{ 'type': 'PciMemoryRegion', +{ 'struct': 'PciMemoryRegion', 'data': {'bar': 'int', 'type': 'str', 'address': 'int', 'size': 'int', '*prefetch': 'bool', '*mem_type_64': 'bool' } } ## -# @PciBridgeInfo: +# @PciBusInfo: # -# Information about a PCI Bridge device +# Information about a bus of a PCI Bridge device +# +# @number: primary bus interface number. This should be the number of the +# bus the device resides on. # -# @bus.number: primary bus interface number. This should be the number of the -# bus the device resides on. +# @secondary: secondary bus interface number. This is the number of the +# main bus for the bridge # -# @bus.secondary: secondary bus interface number. This is the number of the -# main bus for the bridge +# @subordinate: This is the highest number bus that resides below the +# bridge. # -# @bus.subordinate: This is the highest number bus that resides below the -# bridge. +# @io_range: The PIO range for all devices on this bridge # -# @bus.io_range: The PIO range for all devices on this bridge +# @memory_range: The MMIO range for all devices on this bridge +# +# @prefetchable_range: The range of prefetchable MMIO for all devices on +# this bridge +# +# Since: 2.4 +## +{ 'struct': 'PciBusInfo', + 'data': {'number': 'int', 'secondary': 'int', 'subordinate': 'int', + 'io_range': 'PciMemoryRange', + 'memory_range': 'PciMemoryRange', + 'prefetchable_range': 'PciMemoryRange' } } + +## +# @PciBridgeInfo: # -# @bus.memory_range: The MMIO range for all devices on this bridge +# Information about a PCI Bridge device # -# @bus.prefetchable_range: The range of prefetchable MMIO for all devices on -# this bridge +# @bus: information about the bus the device resides on # # @devices: a list of @PciDeviceInfo for each device on this bridge # # Since: 0.14.0 ## -{ 'type': 'PciBridgeInfo', - 'data': {'bus': { 'number': 'int', 'secondary': 'int', 'subordinate': 'int', - 'io_range': 'PciMemoryRange', - 'memory_range': 'PciMemoryRange', - 'prefetchable_range': 'PciMemoryRange' }, - '*devices': ['PciDeviceInfo']} } +{ 'struct': 'PciBridgeInfo', + 'data': {'bus': 'PciBusInfo', '*devices': ['PciDeviceInfo']} } + +## +# @PciDeviceClass: +# +# Information about the Class of a PCI device +# +# @desc: #optional a string description of the device's class +# +# @class: the class code of the device +# +# Since: 2.4 +## +{ 'struct': 'PciDeviceClass', + 'data': {'*desc': 'str', 'class': 'int'} } + +## +# @PciDeviceId: +# +# Information about the Id of a PCI device +# +# @device: the PCI device id +# +# @vendor: the PCI vendor id +# +# Since: 2.4 +## +{ 'struct': 'PciDeviceId', + 'data': {'device': 'int', 'vendor': 'int'} } ## # @PciDeviceInfo: @@ -1083,13 +1199,9 @@ # # @function: the function of the slot used by the device # -# @class_info.desc: #optional a string description of the device's class -# -# @class_info.class: the class code of the device -# -# @id.device: the PCI device id +# @class_info: the class of the device # -# @id.vendor: the PCI vendor id +# @id: the PCI device id # # @irq: #optional if an IRQ is assigned to the device, the IRQ number # @@ -1104,10 +1216,9 @@ # # Since: 0.14.0 ## -{ 
'type': 'PciDeviceInfo', +{ 'struct': 'PciDeviceInfo', 'data': {'bus': 'int', 'slot': 'int', 'function': 'int', - 'class_info': {'*desc': 'str', 'class': 'int'}, - 'id': {'device': 'int', 'vendor': 'int'}, + 'class_info': 'PciDeviceClass', 'id': 'PciDeviceId', '*irq': 'int', 'qdev_id': 'str', '*pci_bridge': 'PciBridgeInfo', 'regions': ['PciMemoryRegion']} } @@ -1122,7 +1233,7 @@ # # Since: 0.14.0 ## -{ 'type': 'PciInfo', 'data': {'bus': 'int', 'devices': ['PciDeviceInfo']} } +{ 'struct': 'PciInfo', 'data': {'bus': 'int', 'devices': ['PciDeviceInfo']} } ## # @query-pci: @@ -1341,7 +1452,7 @@ # # Since: 1.6 ### -{ 'type': 'Abort', +{ 'struct': 'Abort', 'data': { } } ## @@ -1506,7 +1617,7 @@ # # Since: 1.2 ## -{ 'type': 'ObjectPropertyInfo', +{ 'struct': 'ObjectPropertyInfo', 'data': { 'name': 'str', 'type': 'str' } } ## @@ -1561,8 +1672,8 @@ ## { 'command': 'qom-get', 'data': { 'path': 'str', 'property': 'str' }, - 'returns': 'visitor', - 'gen': 'no' } + 'returns': '**', + 'gen': false } ## # @qom-set: @@ -1579,8 +1690,8 @@ # Since: 1.2 ## { 'command': 'qom-set', - 'data': { 'path': 'str', 'property': 'str', 'value': 'visitor' }, - 'gen': 'no' } + 'data': { 'path': 'str', 'property': 'str', 'value': '**' }, + 'gen': false } ## # @set_password: @@ -1691,7 +1802,7 @@ # # Notes: This command is experimental and may change syntax in future releases. ## -{ 'type': 'ObjectTypeInfo', +{ 'struct': 'ObjectTypeInfo', 'data': { 'name': 'str' } } ## @@ -1723,7 +1834,7 @@ # # Since: 1.2 ## -{ 'type': 'DevicePropertyInfo', +{ 'struct': 'DevicePropertyInfo', 'data': { 'name': 'str', 'type': 'str', '*description': 'str' } } ## @@ -1903,7 +2014,7 @@ # # Since: 2.0 ## -{ 'type': 'DumpGuestMemoryCapability', +{ 'struct': 'DumpGuestMemoryCapability', 'data': { 'formats': ['DumpGuestMemoryFormat'] } } @@ -1943,7 +2054,7 @@ ## { 'command': 'netdev_add', 'data': {'type': 'str', 'id': 'str', '*props': '**'}, - 'gen': 'no' } + 'gen': false } ## # @netdev_del: @@ -1976,8 +2087,8 @@ # Since: 2.0 ## { 'command': 'object-add', - 'data': {'qom-type': 'str', 'id': 'str', '*props': 'dict'}, - 'gen': 'no' } + 'data': {'qom-type': 'str', 'id': 'str', '*props': '**'}, + 'gen': false } ## # @object-del: @@ -2000,7 +2111,7 @@ # # Since 1.2 ## -{ 'type': 'NetdevNoneOptions', +{ 'struct': 'NetdevNoneOptions', 'data': { } } ## @@ -2020,7 +2131,7 @@ # # Since 1.2 ## -{ 'type': 'NetLegacyNicOptions', +{ 'struct': 'NetLegacyNicOptions', 'data': { '*netdev': 'str', '*macaddr': 'str', @@ -2035,7 +2146,7 @@ # # Since 1.2 ## -{ 'type': 'String', +{ 'struct': 'String', 'data': { 'str': 'str' } } @@ -2078,7 +2189,7 @@ # # Since 1.2 ## -{ 'type': 'NetdevUserOptions', +{ 'struct': 'NetdevUserOptions', 'data': { '*hostname': 'str', '*restrict': 'bool', @@ -2130,7 +2241,7 @@ # # Since 1.2 ## -{ 'type': 'NetdevTapOptions', +{ 'struct': 'NetdevTapOptions', 'data': { '*ifname': 'str', '*fd': 'str', @@ -2166,7 +2277,7 @@ # # Since 1.2 ## -{ 'type': 'NetdevSocketOptions', +{ 'struct': 'NetdevSocketOptions', 'data': { '*fd': 'str', '*listen': 'str', @@ -2214,7 +2325,7 @@ # # Since 2.1 ## -{ 'type': 'NetdevL2TPv3Options', +{ 'struct': 'NetdevL2TPv3Options', 'data': { 'src': 'str', 'dst': 'str', @@ -2246,7 +2357,7 @@ # # Since 1.2 ## -{ 'type': 'NetdevVdeOptions', +{ 'struct': 'NetdevVdeOptions', 'data': { '*sock': 'str', '*port': 'uint16', @@ -2265,7 +2376,7 @@ # # Since 1.2 ## -{ 'type': 'NetdevDumpOptions', +{ 'struct': 'NetdevDumpOptions', 'data': { '*len': 'size', '*file': 'str' } } @@ -2281,7 +2392,7 @@ # # Since 1.2 ## -{ 'type': 
'NetdevBridgeOptions', +{ 'struct': 'NetdevBridgeOptions', 'data': { '*br': 'str', '*helper': 'str' } } @@ -2295,7 +2406,7 @@ # # Since 1.2 ## -{ 'type': 'NetdevHubPortOptions', +{ 'struct': 'NetdevHubPortOptions', 'data': { 'hubid': 'int32' } } @@ -2315,7 +2426,7 @@ # # Since 2.0 ## -{ 'type': 'NetdevNetmapOptions', +{ 'struct': 'NetdevNetmapOptions', 'data': { 'ifname': 'str', '*devname': 'str' } } @@ -2331,7 +2442,7 @@ # # Since 2.1 ## -{ 'type': 'NetdevVhostUserOptions', +{ 'struct': 'NetdevVhostUserOptions', 'data': { 'chardev': 'str', '*vhostforce': 'bool' } } @@ -2376,7 +2487,7 @@ # # Since 1.2 ## -{ 'type': 'NetLegacy', +{ 'struct': 'NetLegacy', 'data': { '*vlan': 'int32', '*id': 'str', @@ -2394,7 +2505,7 @@ # # Since 1.2 ## -{ 'type': 'Netdev', +{ 'struct': 'Netdev', 'data': { 'id': 'str', 'opts': 'NetClientOptions' } } @@ -2418,7 +2529,7 @@ # # Since 1.3 ## -{ 'type': 'InetSocketAddress', +{ 'struct': 'InetSocketAddress', 'data': { 'host': 'str', 'port': 'str', @@ -2435,7 +2546,7 @@ # # Since 1.3 ## -{ 'type': 'UnixSocketAddress', +{ 'struct': 'UnixSocketAddress', 'data': { 'path': 'str' } } @@ -2500,7 +2611,7 @@ # # Since: 1.2.0 ## -{ 'type': 'MachineInfo', +{ 'struct': 'MachineInfo', 'data': { 'name': 'str', '*alias': 'str', '*is-default': 'bool', 'cpu-max': 'int' } } @@ -2524,7 +2635,7 @@ # # Since: 1.2.0 ## -{ 'type': 'CpuDefinitionInfo', +{ 'struct': 'CpuDefinitionInfo', 'data': { 'name': 'str' } } ## @@ -2549,7 +2660,7 @@ # # Since: 1.2.0 ## -{ 'type': 'AddfdInfo', 'data': {'fdset-id': 'int', 'fd': 'int'} } +{ 'struct': 'AddfdInfo', 'data': {'fdset-id': 'int', 'fd': 'int'} } ## # @add-fd: @@ -2605,7 +2716,7 @@ # # Since: 1.2.0 ## -{ 'type': 'FdsetFdInfo', +{ 'struct': 'FdsetFdInfo', 'data': {'fd': 'int', '*opaque': 'str'} } ## @@ -2619,7 +2730,7 @@ # # Since: 1.2.0 ## -{ 'type': 'FdsetInfo', +{ 'struct': 'FdsetInfo', 'data': {'fdset-id': 'int', 'fds': ['FdsetFdInfo']} } ## @@ -2645,7 +2756,7 @@ # # Since: 1.2.0 ## -{ 'type': 'TargetInfo', +{ 'struct': 'TargetInfo', 'data': { 'arch': 'str' } } ## @@ -2745,7 +2856,7 @@ # # Since: 1.4 ## -{ 'type': 'ChardevFile', 'data': { '*in' : 'str', +{ 'struct': 'ChardevFile', 'data': { '*in' : 'str', 'out' : 'str' } } ## @@ -2759,7 +2870,7 @@ # # Since: 1.4 ## -{ 'type': 'ChardevHostdev', 'data': { 'device' : 'str' } } +{ 'struct': 'ChardevHostdev', 'data': { 'device' : 'str' } } ## # @ChardevSocket: @@ -2781,7 +2892,7 @@ # # Since: 1.4 ## -{ 'type': 'ChardevSocket', 'data': { 'addr' : 'SocketAddress', +{ 'struct': 'ChardevSocket', 'data': { 'addr' : 'SocketAddress', '*server' : 'bool', '*wait' : 'bool', '*nodelay' : 'bool', @@ -2798,7 +2909,7 @@ # # Since: 1.5 ## -{ 'type': 'ChardevUdp', 'data': { 'remote' : 'SocketAddress', +{ 'struct': 'ChardevUdp', 'data': { 'remote' : 'SocketAddress', '*local' : 'SocketAddress' } } ## @@ -2810,7 +2921,7 @@ # # Since: 1.5 ## -{ 'type': 'ChardevMux', 'data': { 'chardev' : 'str' } } +{ 'struct': 'ChardevMux', 'data': { 'chardev' : 'str' } } ## # @ChardevStdio: @@ -2823,7 +2934,7 @@ # # Since: 1.5 ## -{ 'type': 'ChardevStdio', 'data': { '*signal' : 'bool' } } +{ 'struct': 'ChardevStdio', 'data': { '*signal' : 'bool' } } ## # @ChardevSpiceChannel: @@ -2834,7 +2945,7 @@ # # Since: 1.5 ## -{ 'type': 'ChardevSpiceChannel', 'data': { 'type' : 'str' } } +{ 'struct': 'ChardevSpiceChannel', 'data': { 'type' : 'str' } } ## # @ChardevSpicePort: @@ -2845,7 +2956,7 @@ # # Since: 1.5 ## -{ 'type': 'ChardevSpicePort', 'data': { 'fqdn' : 'str' } } +{ 'struct': 'ChardevSpicePort', 'data': { 'fqdn' : 'str' } } ## # 
@ChardevVC: @@ -2859,7 +2970,7 @@ # # Since: 1.5 ## -{ 'type': 'ChardevVC', 'data': { '*width' : 'int', +{ 'struct': 'ChardevVC', 'data': { '*width' : 'int', '*height' : 'int', '*cols' : 'int', '*rows' : 'int' } } @@ -2873,7 +2984,7 @@ # # Since: 1.5 ## -{ 'type': 'ChardevRingbuf', 'data': { '*size' : 'int' } } +{ 'struct': 'ChardevRingbuf', 'data': { '*size' : 'int' } } ## # @ChardevBackend: @@ -2882,7 +2993,7 @@ # # Since: 1.4 (testdev since 2.2) ## -{ 'type': 'ChardevDummy', 'data': { } } +{ 'struct': 'ChardevDummy', 'data': { } } { 'union': 'ChardevBackend', 'data': { 'file' : 'ChardevFile', 'serial' : 'ChardevHostdev', @@ -2915,7 +3026,7 @@ # # Since: 1.4 ## -{ 'type' : 'ChardevReturn', 'data': { '*pty' : 'str' } } +{ 'struct' : 'ChardevReturn', 'data': { '*pty' : 'str' } } ## # @chardev-add: @@ -3002,7 +3113,7 @@ # # Since: 1.5 ## -{ 'type': 'TPMPassthroughOptions', 'data': { '*path' : 'str', +{ 'struct': 'TPMPassthroughOptions', 'data': { '*path' : 'str', '*cancel-path' : 'str'} } ## @@ -3030,7 +3141,7 @@ # # Since: 1.5 ## -{ 'type': 'TPMInfo', +{ 'struct': 'TPMInfo', 'data': {'id': 'str', 'model': 'TpmModel', 'options': 'TpmTypeOptions' } } @@ -3092,7 +3203,7 @@ # # Since 1.5 ## -{ 'type': 'AcpiTableOptions', +{ 'struct': 'AcpiTableOptions', 'data': { '*sig': 'str', '*rev': 'uint8', @@ -3138,7 +3249,7 @@ # # Since 1.5 ## -{ 'type': 'CommandLineParameterInfo', +{ 'struct': 'CommandLineParameterInfo', 'data': { 'name': 'str', 'type': 'CommandLineParameterType', '*help': 'str', @@ -3155,7 +3266,7 @@ # # Since 1.5 ## -{ 'type': 'CommandLineOptionInfo', +{ 'struct': 'CommandLineOptionInfo', 'data': { 'option': 'str', 'parameters': ['CommandLineParameterInfo'] } } ## @@ -3199,7 +3310,7 @@ # # Since: 1.5 ## -{ 'type': 'X86CPUFeatureWordInfo', +{ 'struct': 'X86CPUFeatureWordInfo', 'data': { 'cpuid-input-eax': 'int', '*cpuid-input-ecx': 'int', 'cpuid-register': 'X86CPURegister32', @@ -3252,7 +3363,7 @@ # Since 1.6 ## -{ 'type': 'RxFilterInfo', +{ 'struct': 'RxFilterInfo', 'data': { 'name': 'str', 'promiscuous': 'bool', @@ -3314,7 +3425,7 @@ # # Since: 2.0 ## -{ 'type' : 'InputKeyEvent', +{ 'struct' : 'InputKeyEvent', 'data' : { 'key' : 'KeyValue', 'down' : 'bool' } } @@ -3328,7 +3439,7 @@ # # Since: 2.0 ## -{ 'type' : 'InputBtnEvent', +{ 'struct' : 'InputBtnEvent', 'data' : { 'button' : 'InputButton', 'down' : 'bool' } } @@ -3343,7 +3454,7 @@ # # Since: 2.0 ## -{ 'type' : 'InputMoveEvent', +{ 'struct' : 'InputMoveEvent', 'data' : { 'axis' : 'InputAxis', 'value' : 'int' } } @@ -3426,7 +3537,7 @@ # # Since: 2.1 ## -{ 'type': 'NumaNodeOptions', +{ 'struct': 'NumaNodeOptions', 'data': { '*nodeid': 'uint16', '*cpus': ['uint16'], @@ -3473,7 +3584,7 @@ # Since: 2.1 ## -{ 'type': 'Memdev', +{ 'struct': 'Memdev', 'data': { 'size': 'size', 'merge': 'bool', @@ -3516,7 +3627,7 @@ # # Since: 2.1 ## -{ 'type': 'PCDIMMDeviceInfo', +{ 'struct': 'PCDIMMDeviceInfo', 'data': { '*id': 'str', 'addr': 'int', 'size': 'int', @@ -3570,7 +3681,7 @@ # # Since: 2.1 ## -{ 'type': 'ACPIOSTInfo', +{ 'struct': 'ACPIOSTInfo', 'data' : { '*device': 'str', 'slot': 'str', 'slot-type': 'ACPISlotType', diff --git a/qapi/block-core.json b/qapi/block-core.json index 78730846c2..863ffea0c7 100644 --- a/qapi/block-core.json +++ b/qapi/block-core.json @@ -26,7 +26,7 @@ # ## -{ 'type': 'SnapshotInfo', +{ 'struct': 'SnapshotInfo', 'data': { 'id': 'str', 'name': 'str', 'vm-state-size': 'int', 'date-sec': 'int', 'date-nsec': 'int', 'vm-clock-sec': 'int', 'vm-clock-nsec': 'int' } } @@ -45,7 +45,7 @@ # # Since: 1.7 ## -{ 'type': 
'ImageInfoSpecificQCow2', +{ 'struct': 'ImageInfoSpecificQCow2', 'data': { 'compat': 'str', '*lazy-refcounts': 'bool', @@ -66,7 +66,7 @@ # # Since: 1.7 ## -{ 'type': 'ImageInfoSpecificVmdk', +{ 'struct': 'ImageInfoSpecificVmdk', 'data': { 'create-type': 'str', 'cid': 'int', @@ -126,7 +126,7 @@ # ## -{ 'type': 'ImageInfo', +{ 'struct': 'ImageInfo', 'data': {'filename': 'str', 'format': 'str', '*dirty-flag': 'bool', '*actual-size': 'int', 'virtual-size': 'int', '*cluster-size': 'int', '*encrypted': 'bool', '*compressed': 'bool', @@ -178,7 +178,7 @@ # ## -{ 'type': 'ImageCheck', +{ 'struct': 'ImageCheck', 'data': {'filename': 'str', 'format': 'str', 'check-errors': 'int', '*image-end-offset': 'int', '*corruptions': 'int', '*leaks': 'int', '*corruptions-fixed': 'int', '*leaks-fixed': 'int', @@ -196,7 +196,7 @@ # # Since: 2.3 ## -{ 'type': 'BlockdevCacheInfo', +{ 'struct': 'BlockdevCacheInfo', 'data': { 'writeback': 'bool', 'direct': 'bool', 'no-flush': 'bool' } } @@ -267,7 +267,7 @@ # Since: 0.14.0 # ## -{ 'type': 'BlockDeviceInfo', +{ 'struct': 'BlockDeviceInfo', 'data': { 'file': 'str', '*node-name': 'str', 'ro': 'bool', 'drv': 'str', '*backing_file': 'str', 'backing_file_depth': 'int', 'encrypted': 'bool', 'encryption_key_missing': 'bool', @@ -321,7 +321,7 @@ # # Since 1.7 ## -{ 'type': 'BlockDeviceMapEntry', +{ 'struct': 'BlockDeviceMapEntry', 'data': { 'start': 'int', 'length': 'int', 'depth': 'int', 'zero': 'bool', 'data': 'bool', '*offset': 'int' } } @@ -330,14 +330,19 @@ # # Block dirty bitmap information. # +# @name: #optional the name of the dirty bitmap (Since 2.4) +# # @count: number of dirty bytes according to the dirty bitmap # # @granularity: granularity of the dirty bitmap in bytes (since 1.4) # +# @frozen: whether the dirty bitmap is frozen (Since 2.4) +# # Since: 1.3 ## -{ 'type': 'BlockDirtyInfo', - 'data': {'count': 'int', 'granularity': 'int'} } +{ 'struct': 'BlockDirtyInfo', + 'data': {'*name': 'str', 'count': 'int', 'granularity': 'uint32', + 'frozen': 'bool'} } ## # @BlockInfo: @@ -370,7 +375,7 @@ # # Since: 0.14.0 ## -{ 'type': 'BlockInfo', +{ 'struct': 'BlockInfo', 'data': {'device': 'str', 'type': 'str', 'removable': 'bool', 'locked': 'bool', '*inserted': 'BlockDeviceInfo', '*tray_open': 'bool', '*io-status': 'BlockDeviceIoStatus', @@ -423,7 +428,7 @@ # # Since: 0.14.0 ## -{ 'type': 'BlockDeviceStats', +{ 'struct': 'BlockDeviceStats', 'data': {'rd_bytes': 'int', 'wr_bytes': 'int', 'rd_operations': 'int', 'wr_operations': 'int', 'flush_operations': 'int', 'flush_total_time_ns': 'int', 'wr_total_time_ns': 'int', @@ -449,7 +454,7 @@ # # Since: 0.14.0 ## -{ 'type': 'BlockStats', +{ 'struct': 'BlockStats', 'data': {'*device': 'str', '*node-name': 'str', 'stats': 'BlockDeviceStats', '*parent': 'BlockStats', @@ -510,10 +515,12 @@ # # @none: only copy data written from now on # +# @dirty-bitmap: only copy data described by the dirty bitmap. Since: 2.4 +# # Since: 1.3 ## { 'enum': 'MirrorSyncMode', - 'data': ['top', 'full', 'none'] } + 'data': ['top', 'full', 'none', 'dirty-bitmap'] } ## # @BlockJobType: @@ -560,7 +567,7 @@ # # Since: 1.1 ## -{ 'type': 'BlockJobInfo', +{ 'struct': 'BlockJobInfo', 'data': {'type': 'str', 'device': 'str', 'len': 'int', 'offset': 'int', 'busy': 'bool', 'paused': 'bool', 'speed': 'int', 'io-status': 'BlockDeviceIoStatus', 'ready': 'bool'} } @@ -670,7 +677,7 @@ # @mode: #optional whether and how QEMU should create a new image, default is # 'absolute-paths'. 
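
The reworked BlockDirtyInfo above now carries an optional @name and a @frozen flag next to @count and @granularity. As a rough sketch of what a client consuming such entries might do (the entries below are invented for illustration; the field set follows the schema hunk above, not any particular QEMU build):

    # The values are made up; @name is optional, so a consumer must cope
    # with entries that lack it.
    bitmaps = [
        {"name": "bitmap0", "count": 655360, "granularity": 65536,
         "frozen": False},
        {"count": 0, "granularity": 65536, "frozen": False},
    ]

    for bm in bitmaps:
        label = bm.get("name", "<anonymous>")
        print("%s: %d dirty bytes, granularity %d, frozen=%s"
              % (label, bm["count"], bm["granularity"], bm["frozen"]))

Entries without a name are bitmaps QEMU created internally; only named ones can be addressed by the block-dirty-bitmap-* commands introduced further down.
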
## -{ 'type': 'BlockdevSnapshot', +{ 'struct': 'BlockdevSnapshot', 'data': { '*device': 'str', '*node-name': 'str', 'snapshot-file': 'str', '*snapshot-node-name': 'str', '*format': 'str', '*mode': 'NewImageMode' } } @@ -688,14 +695,18 @@ # probe if @mode is 'existing', else the format of the source # # @sync: what parts of the disk image should be copied to the destination -# (all the disk, only the sectors allocated in the topmost image, or -# only new I/O). +# (all the disk, only the sectors allocated in the topmost image, from a +# dirty bitmap, or only new I/O). # # @mode: #optional whether and how QEMU should create a new image, default is # 'absolute-paths'. # # @speed: #optional the maximum speed, in bytes per second # +# @bitmap: #optional the name of dirty bitmap if sync is "dirty-bitmap". +# Must be present if sync is "dirty-bitmap", must NOT be present +# otherwise. (Since 2.4) +# # @on-source-error: #optional the action to take on an error on the source, # default 'report'. 'stop' and 'enospc' can only be used # if the block device supports io-status (see BlockInfo). @@ -710,10 +721,10 @@ # # Since: 1.6 ## -{ 'type': 'DriveBackup', +{ 'struct': 'DriveBackup', 'data': { 'device': 'str', 'target': 'str', '*format': 'str', 'sync': 'MirrorSyncMode', '*mode': 'NewImageMode', - '*speed': 'int', + '*speed': 'int', '*bitmap': 'str', '*on-source-error': 'BlockdevOnError', '*on-target-error': 'BlockdevOnError' } } @@ -745,7 +756,7 @@ # # Since: 2.3 ## -{ 'type': 'BlockdevBackup', +{ 'struct': 'BlockdevBackup', 'data': { 'device': 'str', 'target': 'str', 'sync': 'MirrorSyncMode', '*speed': 'int', @@ -958,6 +969,76 @@ '*on-target-error': 'BlockdevOnError' } } ## +# @BlockDirtyBitmap +# +# @node: name of device/node which the bitmap is tracking +# +# @name: name of the dirty bitmap +# +# Since 2.4 +## +{ 'struct': 'BlockDirtyBitmap', + 'data': { 'node': 'str', 'name': 'str' } } + +## +# @BlockDirtyBitmapAdd +# +# @node: name of device/node which the bitmap is tracking +# +# @name: name of the dirty bitmap +# +# @granularity: #optional the bitmap granularity, default is 64k for +# block-dirty-bitmap-add +# +# Since 2.4 +## +{ 'struct': 'BlockDirtyBitmapAdd', + 'data': { 'node': 'str', 'name': 'str', '*granularity': 'uint32' } } + +## +# @block-dirty-bitmap-add +# +# Create a dirty bitmap with a name on the node +# +# Returns: nothing on success +# If @node is not a valid block device or node, DeviceNotFound +# If @name is already taken, GenericError with an explanation +# +# Since 2.4 +## +{ 'command': 'block-dirty-bitmap-add', + 'data': 'BlockDirtyBitmapAdd' } + +## +# @block-dirty-bitmap-remove +# +# Remove a dirty bitmap on the node +# +# Returns: nothing on success +# If @node is not a valid block device or node, DeviceNotFound +# If @name is not found, GenericError with an explanation +# if @name is frozen by an operation, GenericError +# +# Since 2.4 +## +{ 'command': 'block-dirty-bitmap-remove', + 'data': 'BlockDirtyBitmap' } + +## +# @block-dirty-bitmap-clear +# +# Clear (reset) a dirty bitmap on the device +# +# Returns: nothing on success +# If @node is not a valid block device, DeviceNotFound +# If @name is not found, GenericError with an explanation +# +# Since 2.4 +## +{ 'command': 'block-dirty-bitmap-clear', + 'data': 'BlockDirtyBitmap' } + +## # @block_set_io_throttle: # # Change I/O throttle limits for a block drive. 
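
Taken together, block-dirty-bitmap-add and the new @bitmap argument of drive-backup are the building blocks of an incremental backup flow: create a named bitmap, take one full backup to anchor the chain, then periodically back up only what the bitmap recorded. The sketch below drives this over a QMP monitor socket; the socket path, device and bitmap names, and target file names are made up, and creating the incremental target with the proper backing file is left out:

    #!/usr/bin/env python
    # Illustrative only: drive0, bitmap0, the socket path and the target
    # files are hypothetical; error handling is omitted.
    import json
    import socket

    QMP_SOCK = "/tmp/qmp.sock"  # assumes -qmp unix:/tmp/qmp.sock,server,nowait

    def qmp_command(chan, execute, arguments=None):
        """Send one QMP command and return its reply, skipping any
        asynchronous events that arrive in between."""
        cmd = {"execute": execute}
        if arguments:
            cmd["arguments"] = arguments
        chan.write(json.dumps(cmd) + "\n")
        chan.flush()
        while True:
            reply = json.loads(chan.readline())
            if "return" in reply or "error" in reply:
                return reply

    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    sock.connect(QMP_SOCK)
    chan = sock.makefile("rw")        # QMP speaks line-oriented JSON

    json.loads(chan.readline())       # discard the QMP greeting
    qmp_command(chan, "qmp_capabilities")

    # 1. Start tracking writes on the node.
    print(qmp_command(chan, "block-dirty-bitmap-add",
                      {"node": "drive0", "name": "bitmap0"}))

    # 2. Take a full backup to anchor the chain; the bitmap keeps
    #    recording writes made from now on.
    print(qmp_command(chan, "drive-backup",
                      {"device": "drive0", "sync": "full",
                       "target": "/tmp/full.qcow2", "format": "qcow2"}))

    # 3. Later, copy only the clusters the bitmap marked dirty. The
    #    incremental target would normally be pre-created with the
    #    previous backup as its backing file, hence mode=existing.
    print(qmp_command(chan, "drive-backup",
                      {"device": "drive0", "sync": "dirty-bitmap",
                       "bitmap": "bitmap0", "target": "/tmp/inc0.qcow2",
                       "format": "qcow2", "mode": "existing"}))

When a chain is restarted from a fresh full backup, block-dirty-bitmap-clear can be used instead of removing and re-adding the bitmap.
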
@@ -1232,7 +1313,7 @@ # # Since: 1.7 ## -{ 'type': 'BlockdevCacheOptions', +{ 'struct': 'BlockdevCacheOptions', 'data': { '*writeback': 'bool', '*direct': 'bool', '*no-flush': 'bool' } } @@ -1279,7 +1360,7 @@ # # Since: 1.7 ## -{ 'type': 'BlockdevOptionsBase', +{ 'struct': 'BlockdevOptionsBase', 'data': { 'driver': 'BlockdevDriver', '*id': 'str', '*node-name': 'str', @@ -1301,7 +1382,7 @@ # # Since: 1.7 ## -{ 'type': 'BlockdevOptionsFile', +{ 'struct': 'BlockdevOptionsFile', 'data': { 'filename': 'str' } } ## @@ -1310,11 +1391,14 @@ # Driver specific block device options for the null backend. # # @size: #optional size of the device in bytes. +# @latency-ns: #optional emulated latency (in nanoseconds) in processing +# requests. Default to zero which completes requests immediately. +# (Since 2.4) # # Since: 2.2 ## -{ 'type': 'BlockdevOptionsNull', - 'data': { '*size': 'int' } } +{ 'struct': 'BlockdevOptionsNull', + 'data': { '*size': 'int', '*latency-ns': 'uint64' } } ## # @BlockdevOptionsVVFAT @@ -1329,7 +1413,7 @@ # # Since: 1.7 ## -{ 'type': 'BlockdevOptionsVVFAT', +{ 'struct': 'BlockdevOptionsVVFAT', 'data': { 'dir': 'str', '*fat-type': 'int', '*floppy': 'bool', '*rw': 'bool' } } @@ -1343,7 +1427,7 @@ # # Since: 1.7 ## -{ 'type': 'BlockdevOptionsGenericFormat', +{ 'struct': 'BlockdevOptionsGenericFormat', 'data': { 'file': 'BlockdevRef' } } ## @@ -1359,7 +1443,7 @@ # # Since: 1.7 ## -{ 'type': 'BlockdevOptionsGenericCOWFormat', +{ 'struct': 'BlockdevOptionsGenericCOWFormat', 'base': 'BlockdevOptionsGenericFormat', 'data': { '*backing': 'BlockdevRef' } } @@ -1395,7 +1479,7 @@ # # Since: 2.2 ## -{ 'type': 'Qcow2OverlapCheckFlags', +{ 'struct': 'Qcow2OverlapCheckFlags', 'data': { '*template': 'Qcow2OverlapCheckMode', '*main-header': 'bool', '*active-l1': 'bool', @@ -1419,8 +1503,7 @@ # # Since: 2.2 ## -{ 'union': 'Qcow2OverlapChecks', - 'discriminator': {}, +{ 'alternate': 'Qcow2OverlapChecks', 'data': { 'flags': 'Qcow2OverlapCheckFlags', 'mode': 'Qcow2OverlapCheckMode' } } @@ -1457,7 +1540,7 @@ # # Since: 1.7 ## -{ 'type': 'BlockdevOptionsQcow2', +{ 'struct': 'BlockdevOptionsQcow2', 'base': 'BlockdevOptionsGenericCOWFormat', 'data': { '*lazy-refcounts': 'bool', '*pass-discard-request': 'bool', @@ -1492,7 +1575,7 @@ # use the default value, 'archipelago'. 
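
The conversion of Qcow2OverlapChecks from an implicitly discriminated union to the new 'alternate' form changes only the schema notation, not the accepted input: the branch is still chosen from the JSON type of the value, a bare string selecting 'mode' and an object selecting 'flags'. A small dispatcher in the same spirit, purely for illustration (the concrete mode name and flag values below are examples, not an exhaustive list):

    # Mimic the QTYPE-based branch selection for an alternate type.
    def classify_overlap_check(value):
        if isinstance(value, str):
            return ("mode", value)    # e.g. overlap-check=all
        if isinstance(value, dict):
            return ("flags", value)   # per-metadata switches
        raise TypeError("Qcow2OverlapChecks accepts a string or an object")

    print(classify_overlap_check("all"))
    print(classify_overlap_check({"template": "all", "main-header": False}))
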
# Since: 2.2 ## -{ 'type': 'BlockdevOptionsArchipelago', +{ 'struct': 'BlockdevOptionsArchipelago', 'data': { 'volume': 'str', '*mport': 'int', '*vport': 'int', @@ -1544,7 +1627,7 @@ # # Since: 2.0 ## -{ 'type': 'BlkdebugInjectErrorOptions', +{ 'struct': 'BlkdebugInjectErrorOptions', 'data': { 'event': 'BlkdebugEvent', '*state': 'int', '*errno': 'int', @@ -1567,7 +1650,7 @@ # # Since: 2.0 ## -{ 'type': 'BlkdebugSetStateOptions', +{ 'struct': 'BlkdebugSetStateOptions', 'data': { 'event': 'BlkdebugEvent', '*state': 'int', 'new_state': 'int' } } @@ -1589,7 +1672,7 @@ # # Since: 2.0 ## -{ 'type': 'BlockdevOptionsBlkdebug', +{ 'struct': 'BlockdevOptionsBlkdebug', 'data': { 'image': 'BlockdevRef', '*config': 'str', '*align': 'int', @@ -1607,7 +1690,7 @@ # # Since: 2.0 ## -{ 'type': 'BlockdevOptionsBlkverify', +{ 'struct': 'BlockdevOptionsBlkverify', 'data': { 'test': 'BlockdevRef', 'raw': 'BlockdevRef' } } @@ -1644,7 +1727,7 @@ # # Since: 2.0 ## -{ 'type': 'BlockdevOptionsQuorum', +{ 'struct': 'BlockdevOptionsQuorum', 'data': { '*blkverify': 'bool', 'children': [ 'BlockdevRef' ], 'vote-threshold': 'int', @@ -1711,8 +1794,7 @@ # # Since: 1.7 ## -{ 'union': 'BlockdevRef', - 'discriminator': {}, +{ 'alternate': 'BlockdevRef', 'data': { 'definition': 'BlockdevOptions', 'reference': 'str' } } @@ -1754,14 +1836,18 @@ # # Emitted when a corruption has been detected in a disk image # -# @device: device name +# @device: device name. This is always present for compatibility +# reasons, but it can be empty ("") if the image does not +# have a device name associated. +# +# @node-name: #optional node name (Since: 2.4) # # @msg: informative message for human consumption, such as the kind of # corruption being detected. It should not be parsed by machine as it is # not guaranteed to be stable # # @offset: #optional, if the corruption resulted from an image access, this is -# the access offset into the image +# the host's access offset into the image # # @size: #optional, if the corruption resulted from an image access, this is # the access size @@ -1773,11 +1859,12 @@ # Since: 1.7 ## { 'event': 'BLOCK_IMAGE_CORRUPTED', - 'data': { 'device' : 'str', - 'msg' : 'str', - '*offset': 'int', - '*size' : 'int', - 'fatal' : 'bool' } } + 'data': { 'device' : 'str', + '*node-name' : 'str', + 'msg' : 'str', + '*offset' : 'int', + '*size' : 'int', + 'fatal' : 'bool' } } ## # @BLOCK_IO_ERROR diff --git a/qapi/block.json b/qapi/block.json index e3134657b6..aad645c4a6 100644 --- a/qapi/block.json +++ b/qapi/block.json @@ -52,7 +52,7 @@ # # Since: 1.7 ## -{ 'type': 'BlockdevSnapshotInternal', +{ 'struct': 'BlockdevSnapshotInternal', 'data': { 'device': 'str', 'name': 'str' } } ## diff --git a/qapi/common.json b/qapi/common.json index 63ef3b4724..bad56bf688 100644 --- a/qapi/common.json +++ b/qapi/common.json @@ -29,15 +29,28 @@ 'DeviceNotActive', 'DeviceNotFound', 'KVMMissingCap' ] } ## -# @VersionInfo: +# @VersionTriple # -# A description of QEMU's version. +# A three-part version number. +# +# @qemu.major: The major version number. # -# @qemu.major: The major version of QEMU +# @qemu.minor: The minor version number. # -# @qemu.minor: The minor version of QEMU +# @qemu.micro: The micro version number. +# +# Since: 2.4 +## +{ 'struct': 'VersionTriple', + 'data': {'major': 'int', 'minor': 'int', 'micro': 'int'} } + + +## +# @VersionInfo: +# +# A description of QEMU's version. # -# @qemu.micro: The micro version of QEMU. By current convention, a micro +# @qemu: The version of QEMU. 
By current convention, a micro # version of 50 signifies a development branch. A micro version # greater than or equal to 90 signifies a release candidate for # the next minor version. A micro version of less than 50 @@ -50,9 +63,8 @@ # # Since: 0.14.0 ## -{ 'type': 'VersionInfo', - 'data': {'qemu': {'major': 'int', 'minor': 'int', 'micro': 'int'}, - 'package': 'str'} } +{ 'struct': 'VersionInfo', + 'data': {'qemu': 'VersionTriple', 'package': 'str'} } ## # @query-version: @@ -74,7 +86,7 @@ # # Since: 0.14.0 ## -{ 'type': 'CommandInfo', 'data': {'name': 'str'} } +{ 'struct': 'CommandInfo', 'data': {'name': 'str'} } ## # @query-commands: diff --git a/qapi/trace.json b/qapi/trace.json index 06c613c213..01b0a52a7e 100644 --- a/qapi/trace.json +++ b/qapi/trace.json @@ -32,7 +32,7 @@ # # Since 2.2 ## -{ 'type': 'TraceEventInfo', +{ 'struct': 'TraceEventInfo', 'data': {'name': 'str', 'state': 'TraceEventState'} } ## diff --git a/qemu-char.c b/qemu-char.c index a405d76c31..d0c1564306 100644 --- a/qemu-char.c +++ b/qemu-char.c @@ -973,7 +973,6 @@ typedef struct FDCharDriver { CharDriverState *chr; GIOChannel *fd_in, *fd_out; int max_size; - QTAILQ_ENTRY(FDCharDriver) node; } FDCharDriver; /* Called with chr_write_lock held. */ diff --git a/qemu-coroutine-lock.c b/qemu-coroutine-lock.c index e4860ae42f..6b4903334b 100644 --- a/qemu-coroutine-lock.c +++ b/qemu-coroutine-lock.c @@ -108,7 +108,7 @@ bool qemu_co_enter_next(CoQueue *queue) bool qemu_co_queue_empty(CoQueue *queue) { - return (QTAILQ_FIRST(&queue->entries) == NULL); + return QTAILQ_FIRST(&queue->entries) == NULL; } void qemu_co_mutex_init(CoMutex *mutex) diff --git a/qemu-img.c b/qemu-img.c index 9dddfbefce..8d30e43b53 100644 --- a/qemu-img.c +++ b/qemu-img.c @@ -1305,20 +1305,312 @@ out3: return ret; } +enum ImgConvertBlockStatus { + BLK_DATA, + BLK_ZERO, + BLK_BACKING_FILE, +}; + +typedef struct ImgConvertState { + BlockBackend **src; + int64_t *src_sectors; + int src_cur, src_num; + int64_t src_cur_offset; + int64_t total_sectors; + int64_t allocated_sectors; + enum ImgConvertBlockStatus status; + int64_t sector_next_status; + BlockBackend *target; + bool has_zero_init; + bool compressed; + bool target_has_backing; + int min_sparse; + size_t cluster_sectors; + size_t buf_sectors; +} ImgConvertState; + +static void convert_select_part(ImgConvertState *s, int64_t sector_num) +{ + assert(sector_num >= s->src_cur_offset); + while (sector_num - s->src_cur_offset >= s->src_sectors[s->src_cur]) { + s->src_cur_offset += s->src_sectors[s->src_cur]; + s->src_cur++; + assert(s->src_cur < s->src_num); + } +} + +static int convert_iteration_sectors(ImgConvertState *s, int64_t sector_num) +{ + int64_t ret; + int n; + + convert_select_part(s, sector_num); + + assert(s->total_sectors > sector_num); + n = MIN(s->total_sectors - sector_num, BDRV_REQUEST_MAX_SECTORS); + + if (s->sector_next_status <= sector_num) { + ret = bdrv_get_block_status(blk_bs(s->src[s->src_cur]), + sector_num - s->src_cur_offset, + n, &n); + if (ret < 0) { + return ret; + } + + if (ret & BDRV_BLOCK_ZERO) { + s->status = BLK_ZERO; + } else if (ret & BDRV_BLOCK_DATA) { + s->status = BLK_DATA; + } else if (!s->target_has_backing) { + /* Without a target backing file we must copy over the contents of + * the backing file as well. 
*/ + /* TODO Check block status of the backing file chain to avoid + * needlessly reading zeroes and limiting the iteration to the + * buffer size */ + s->status = BLK_DATA; + } else { + s->status = BLK_BACKING_FILE; + } + + s->sector_next_status = sector_num + n; + } + + n = MIN(n, s->sector_next_status - sector_num); + if (s->status == BLK_DATA) { + n = MIN(n, s->buf_sectors); + } + + /* We need to write complete clusters for compressed images, so if an + * unallocated area is shorter than that, we must consider the whole + * cluster allocated. */ + if (s->compressed) { + if (n < s->cluster_sectors) { + n = MIN(s->cluster_sectors, s->total_sectors - sector_num); + s->status = BLK_DATA; + } else { + n = QEMU_ALIGN_DOWN(n, s->cluster_sectors); + } + } + + return n; +} + +static int convert_read(ImgConvertState *s, int64_t sector_num, int nb_sectors, + uint8_t *buf) +{ + int n; + int ret; + + if (s->status == BLK_ZERO || s->status == BLK_BACKING_FILE) { + return 0; + } + + assert(nb_sectors <= s->buf_sectors); + while (nb_sectors > 0) { + BlockBackend *blk; + int64_t bs_sectors; + + /* In the case of compression with multiple source files, we can get a + * nb_sectors that spreads into the next part. So we must be able to + * read across multiple BDSes for one convert_read() call. */ + convert_select_part(s, sector_num); + blk = s->src[s->src_cur]; + bs_sectors = s->src_sectors[s->src_cur]; + + n = MIN(nb_sectors, bs_sectors - (sector_num - s->src_cur_offset)); + ret = blk_read(blk, sector_num - s->src_cur_offset, buf, n); + if (ret < 0) { + return ret; + } + + sector_num += n; + nb_sectors -= n; + buf += n * BDRV_SECTOR_SIZE; + } + + return 0; +} + +static int convert_write(ImgConvertState *s, int64_t sector_num, int nb_sectors, + const uint8_t *buf) +{ + int ret; + + while (nb_sectors > 0) { + int n = nb_sectors; + + switch (s->status) { + case BLK_BACKING_FILE: + /* If we have a backing file, leave clusters unallocated that are + * unallocated in the source image, so that the backing file is + * visible at the respective offset. */ + assert(s->target_has_backing); + break; + + case BLK_DATA: + /* We must always write compressed clusters as a whole, so don't + * try to find zeroed parts in the buffer. We can only save the + * write if the buffer is completely zeroed and we're allowed to + * keep the target sparse. */ + if (s->compressed) { + if (s->has_zero_init && s->min_sparse && + buffer_is_zero(buf, n * BDRV_SECTOR_SIZE)) + { + assert(!s->target_has_backing); + break; + } + + ret = blk_write_compressed(s->target, sector_num, buf, n); + if (ret < 0) { + return ret; + } + break; + } + + /* If there is real non-zero data or we're told to keep the target + * fully allocated (-S 0), we must write it. Otherwise we can treat + * it as zero sectors. */ + if (!s->min_sparse || + is_allocated_sectors_min(buf, n, &n, s->min_sparse)) + { + ret = blk_write(s->target, sector_num, buf, n); + if (ret < 0) { + return ret; + } + break; + } + /* fall-through */ + + case BLK_ZERO: + if (s->has_zero_init) { + break; + } + ret = blk_write_zeroes(s->target, sector_num, n, 0); + if (ret < 0) { + return ret; + } + break; + } + + sector_num += n; + nb_sectors -= n; + buf += n * BDRV_SECTOR_SIZE; + } + + return 0; +} + +static int convert_do_copy(ImgConvertState *s) +{ + uint8_t *buf = NULL; + int64_t sector_num, allocated_done; + int ret; + int n; + + /* Check whether we have zero initialisation or can get it efficiently */ + s->has_zero_init = s->min_sparse && !s->target_has_backing + ? 
bdrv_has_zero_init(blk_bs(s->target)) + : false; + + if (!s->has_zero_init && !s->target_has_backing && + bdrv_can_write_zeroes_with_unmap(blk_bs(s->target))) + { + ret = bdrv_make_zero(blk_bs(s->target), BDRV_REQ_MAY_UNMAP); + if (ret == 0) { + s->has_zero_init = true; + } + } + + /* Allocate buffer for copied data. For compressed images, only one cluster + * can be copied at a time. */ + if (s->compressed) { + if (s->cluster_sectors <= 0 || s->cluster_sectors > s->buf_sectors) { + error_report("invalid cluster size"); + ret = -EINVAL; + goto fail; + } + s->buf_sectors = s->cluster_sectors; + } + buf = blk_blockalign(s->target, s->buf_sectors * BDRV_SECTOR_SIZE); + + /* Calculate allocated sectors for progress */ + s->allocated_sectors = 0; + sector_num = 0; + while (sector_num < s->total_sectors) { + n = convert_iteration_sectors(s, sector_num); + if (n < 0) { + ret = n; + goto fail; + } + if (s->status == BLK_DATA) { + s->allocated_sectors += n; + } + sector_num += n; + } + + /* Do the copy */ + s->src_cur = 0; + s->src_cur_offset = 0; + s->sector_next_status = 0; + + sector_num = 0; + allocated_done = 0; + + while (sector_num < s->total_sectors) { + n = convert_iteration_sectors(s, sector_num); + if (n < 0) { + ret = n; + goto fail; + } + if (s->status == BLK_DATA) { + allocated_done += n; + qemu_progress_print(100.0 * allocated_done / s->allocated_sectors, + 0); + } + + ret = convert_read(s, sector_num, n, buf); + if (ret < 0) { + error_report("error while reading sector %" PRId64 + ": %s", sector_num, strerror(-ret)); + goto fail; + } + + ret = convert_write(s, sector_num, n, buf); + if (ret < 0) { + error_report("error while writing sector %" PRId64 + ": %s", sector_num, strerror(-ret)); + goto fail; + } + + sector_num += n; + } + + if (s->compressed) { + /* signal EOF to align */ + ret = blk_write_compressed(s->target, 0, NULL, 0); + if (ret < 0) { + goto fail; + } + } + + ret = 0; +fail: + qemu_vfree(buf); + return ret; +} + static int img_convert(int argc, char **argv) { - int c, n, n1, bs_n, bs_i, compress, cluster_sectors, skip_create; + int c, bs_n, bs_i, compress, cluster_sectors, skip_create; int64_t ret = 0; int progress = 0, flags, src_flags; const char *fmt, *out_fmt, *cache, *src_cache, *out_baseimg, *out_filename; BlockDriver *drv, *proto_drv; BlockBackend **blk = NULL, *out_blk = NULL; BlockDriverState **bs = NULL, *out_bs = NULL; - int64_t total_sectors, nb_sectors, sector_num, bs_offset; + int64_t total_sectors; int64_t *bs_sectors = NULL; - uint8_t * buf = NULL; size_t bufsectors = IO_BUF_SIZE / BDRV_SECTOR_SIZE; - const uint8_t *buf1; BlockDriverInfo bdi; QemuOpts *opts = NULL; QemuOptsList *create_opts = NULL; @@ -1329,6 +1621,7 @@ static int img_convert(int argc, char **argv) bool quiet = false; Error *local_err = NULL; QemuOpts *sn_opts = NULL; + ImgConvertState state; fmt = NULL; out_fmt = "raw"; @@ -1627,9 +1920,6 @@ static int img_convert(int argc, char **argv) } out_bs = blk_bs(out_blk); - bs_i = 0; - bs_offset = 0; - /* increase bufsectors from the default 4096 (2M) if opt_transfer_length * or discard_alignment of the out_bs is greater. Limit to 32768 (16MB) * as maximum. 
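
The convert_select_part()/convert_iteration_sectors() helpers introduced above treat several input images as one contiguous run of sectors and translate a global sector number into an index and offset within the current source. A stripped-down model of that bookkeeping, with toy sector counts and none of the block-status or compression handling:

    # Three source images of 100, 50 and 200 sectors seen as one
    # 350-sector logical stream; sector numbers must be non-decreasing.
    class ConvertSources:
        def __init__(self, src_sectors):
            self.src_sectors = src_sectors
            self.src_cur = 0
            self.src_cur_offset = 0

        def select_part(self, sector_num):
            """Advance to the source containing sector_num and return
            (source index, sector offset within that source)."""
            assert sector_num >= self.src_cur_offset
            while (sector_num - self.src_cur_offset
                   >= self.src_sectors[self.src_cur]):
                self.src_cur_offset += self.src_sectors[self.src_cur]
                self.src_cur += 1
                assert self.src_cur < len(self.src_sectors)
            return self.src_cur, sector_num - self.src_cur_offset

    srcs = ConvertSources([100, 50, 200])
    for sector in (0, 99, 100, 149, 150, 349):
        print(sector, "->", srcs.select_part(sector))

The real code additionally consults bdrv_get_block_status() to classify each run as data, zeroes or backing-file contents before deciding whether it needs to be read and written at all.
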
*/ @@ -1638,8 +1928,6 @@ static int img_convert(int argc, char **argv) out_bs->bl.discard_alignment)) ); - buf = blk_blockalign(out_blk, bufsectors * BDRV_SECTOR_SIZE); - if (skip_create) { int64_t output_sectors = blk_nb_sectors(out_blk); if (output_sectors < 0) { @@ -1666,203 +1954,20 @@ static int img_convert(int argc, char **argv) cluster_sectors = bdi.cluster_size / BDRV_SECTOR_SIZE; } - if (compress) { - if (cluster_sectors <= 0 || cluster_sectors > bufsectors) { - error_report("invalid cluster size"); - ret = -1; - goto out; - } - sector_num = 0; - - nb_sectors = total_sectors; - - for(;;) { - int64_t bs_num; - int remainder; - uint8_t *buf2; - - nb_sectors = total_sectors - sector_num; - if (nb_sectors <= 0) - break; - if (nb_sectors >= cluster_sectors) - n = cluster_sectors; - else - n = nb_sectors; - - bs_num = sector_num - bs_offset; - assert (bs_num >= 0); - remainder = n; - buf2 = buf; - while (remainder > 0) { - int nlow; - while (bs_num == bs_sectors[bs_i]) { - bs_offset += bs_sectors[bs_i]; - bs_i++; - assert (bs_i < bs_n); - bs_num = 0; - /* printf("changing part: sector_num=%" PRId64 ", " - "bs_i=%d, bs_offset=%" PRId64 ", bs_sectors=%" PRId64 - "\n", sector_num, bs_i, bs_offset, bs_sectors[bs_i]); */ - } - assert (bs_num < bs_sectors[bs_i]); - - nlow = remainder > bs_sectors[bs_i] - bs_num - ? bs_sectors[bs_i] - bs_num : remainder; - - ret = blk_read(blk[bs_i], bs_num, buf2, nlow); - if (ret < 0) { - error_report("error while reading sector %" PRId64 ": %s", - bs_num, strerror(-ret)); - goto out; - } - - buf2 += nlow * 512; - bs_num += nlow; - - remainder -= nlow; - } - assert (remainder == 0); - - if (!buffer_is_zero(buf, n * BDRV_SECTOR_SIZE)) { - ret = blk_write_compressed(out_blk, sector_num, buf, n); - if (ret != 0) { - error_report("error while compressing sector %" PRId64 - ": %s", sector_num, strerror(-ret)); - goto out; - } - } - sector_num += n; - qemu_progress_print(100.0 * sector_num / total_sectors, 0); - } - /* signal EOF to align */ - blk_write_compressed(out_blk, 0, NULL, 0); - } else { - int64_t sectors_to_read, sectors_read, sector_num_next_status; - bool count_allocated_sectors; - int has_zero_init = min_sparse ? bdrv_has_zero_init(out_bs) : 0; - - if (!has_zero_init && bdrv_can_write_zeroes_with_unmap(out_bs)) { - ret = bdrv_make_zero(out_bs, BDRV_REQ_MAY_UNMAP); - if (ret < 0) { - goto out; - } - has_zero_init = 1; - } - - sectors_to_read = total_sectors; - count_allocated_sectors = progress && (out_baseimg || has_zero_init); -restart: - sector_num = 0; // total number of sectors converted so far - sectors_read = 0; - sector_num_next_status = 0; - - for(;;) { - nb_sectors = total_sectors - sector_num; - if (nb_sectors <= 0) { - if (count_allocated_sectors) { - sectors_to_read = sectors_read; - count_allocated_sectors = false; - goto restart; - } - ret = 0; - break; - } - - while (sector_num - bs_offset >= bs_sectors[bs_i]) { - bs_offset += bs_sectors[bs_i]; - bs_i ++; - assert (bs_i < bs_n); - /* printf("changing part: sector_num=%" PRId64 ", bs_i=%d, " - "bs_offset=%" PRId64 ", bs_sectors=%" PRId64 "\n", - sector_num, bs_i, bs_offset, bs_sectors[bs_i]); */ - } - - if ((out_baseimg || has_zero_init) && - sector_num >= sector_num_next_status) { - n = nb_sectors > INT_MAX ? 
INT_MAX : nb_sectors; - ret = bdrv_get_block_status(bs[bs_i], sector_num - bs_offset, - n, &n1); - if (ret < 0) { - error_report("error while reading block status of sector %" - PRId64 ": %s", sector_num - bs_offset, - strerror(-ret)); - goto out; - } - /* If the output image is zero initialized, we are not working - * on a shared base and the input is zero we can skip the next - * n1 sectors */ - if (has_zero_init && !out_baseimg && (ret & BDRV_BLOCK_ZERO)) { - sector_num += n1; - continue; - } - /* If the output image is being created as a copy on write - * image, assume that sectors which are unallocated in the - * input image are present in both the output's and input's - * base images (no need to copy them). */ - if (out_baseimg) { - if (!(ret & BDRV_BLOCK_DATA)) { - sector_num += n1; - continue; - } - /* The next 'n1' sectors are allocated in the input image. - * Copy only those as they may be followed by unallocated - * sectors. */ - nb_sectors = n1; - } - /* avoid redundant callouts to get_block_status */ - sector_num_next_status = sector_num + n1; - } - - n = MIN(nb_sectors, bufsectors); - - /* round down request length to an aligned sector, but - * do not bother doing this on short requests. They happen - * when we found an all-zero area, and the next sector to - * write will not be sector_num + n. */ - if (cluster_sectors > 0 && n >= cluster_sectors) { - int64_t next_aligned_sector = (sector_num + n); - next_aligned_sector -= next_aligned_sector % cluster_sectors; - if (sector_num + n > next_aligned_sector) { - n = next_aligned_sector - sector_num; - } - } - - n = MIN(n, bs_sectors[bs_i] - (sector_num - bs_offset)); - - sectors_read += n; - if (count_allocated_sectors) { - sector_num += n; - continue; - } + state = (ImgConvertState) { + .src = blk, + .src_sectors = bs_sectors, + .src_num = bs_n, + .total_sectors = total_sectors, + .target = out_blk, + .compressed = compress, + .target_has_backing = (bool) out_baseimg, + .min_sparse = min_sparse, + .cluster_sectors = cluster_sectors, + .buf_sectors = bufsectors, + }; + ret = convert_do_copy(&state); - n1 = n; - ret = blk_read(blk[bs_i], sector_num - bs_offset, buf, n); - if (ret < 0) { - error_report("error while reading sector %" PRId64 ": %s", - sector_num - bs_offset, strerror(-ret)); - goto out; - } - /* NOTE: at the same time we convert, we do not write zero - sectors to have a chance to compress the image. Ideally, we - should add a specific call to have the info to go faster */ - buf1 = buf; - while (n > 0) { - if (!has_zero_init || - is_allocated_sectors_min(buf1, n, &n1, min_sparse)) { - ret = blk_write(out_blk, sector_num, buf1, n1); - if (ret < 0) { - error_report("error while writing sector %" PRId64 - ": %s", sector_num, strerror(-ret)); - goto out; - } - } - sector_num += n1; - n -= n1; - buf1 += n1 * 512; - } - qemu_progress_print(100.0 * sectors_read / sectors_to_read, 0); - } - } out: if (!ret) { qemu_progress_print(100, 0); @@ -1870,7 +1975,6 @@ out: qemu_progress_end(); qemu_opts_del(opts); qemu_opts_free(create_opts); - qemu_vfree(buf); qemu_opts_del(sn_opts); blk_unref(out_blk); g_free(bs); diff --git a/qemu-options.hx b/qemu-options.hx index 319d971260..ec356f65c1 100644 --- a/qemu-options.hx +++ b/qemu-options.hx @@ -3106,7 +3106,7 @@ executed often has little or no correlation with actual performance. to synchronise the host clock and the virtual clock. The goal is to have a guest running at the real frequency imposed by the shift option. 
Whenever the guest clock is behind the host clock and if -@option{align=on} is specified then we print a messsage to the user +@option{align=on} is specified then we print a message to the user to inform about the delay. Currently this option does not work when @option{shift} is @code{auto}. Note: The sync algorithm will work for those shift values for which diff --git a/qga/qapi-schema.json b/qga/qapi-schema.json index 95f49e369c..b446dc729d 100644 --- a/qga/qapi-schema.json +++ b/qga/qapi-schema.json @@ -150,7 +150,7 @@ # # Since 1.1.0 ## -{ 'type': 'GuestAgentCommandInfo', +{ 'struct': 'GuestAgentCommandInfo', 'data': { 'name': 'str', 'enabled': 'bool', 'success-response': 'bool' } } ## @@ -164,7 +164,7 @@ # # Since 0.15.0 ## -{ 'type': 'GuestAgentInfo', +{ 'struct': 'GuestAgentInfo', 'data': { 'version': 'str', 'supported_commands': ['GuestAgentCommandInfo'] } } ## @@ -195,7 +195,7 @@ # Since: 0.15.0 ## { 'command': 'guest-shutdown', 'data': { '*mode': 'str' }, - 'success-response': 'no' } + 'success-response': false } ## # @guest-file-open: @@ -242,7 +242,7 @@ # # Since: 0.15.0 ## -{ 'type': 'GuestFileRead', +{ 'struct': 'GuestFileRead', 'data': { 'count': 'int', 'buf-b64': 'str', 'eof': 'bool' } } ## @@ -274,7 +274,7 @@ # # Since: 0.15.0 ## -{ 'type': 'GuestFileWrite', +{ 'struct': 'GuestFileWrite', 'data': { 'count': 'int', 'eof': 'bool' } } ## @@ -309,7 +309,7 @@ # # Since: 0.15.0 ## -{ 'type': 'GuestFileSeek', +{ 'struct': 'GuestFileSeek', 'data': { 'position': 'int', 'eof': 'bool' } } ## @@ -470,7 +470,7 @@ # # Since: 1.1 ## -{ 'command': 'guest-suspend-disk', 'success-response': 'no' } +{ 'command': 'guest-suspend-disk', 'success-response': false } ## # @guest-suspend-ram @@ -502,7 +502,7 @@ # # Since: 1.1 ## -{ 'command': 'guest-suspend-ram', 'success-response': 'no' } +{ 'command': 'guest-suspend-ram', 'success-response': false } ## # @guest-suspend-hybrid @@ -529,7 +529,7 @@ # # Since: 1.1 ## -{ 'command': 'guest-suspend-hybrid', 'success-response': 'no' } +{ 'command': 'guest-suspend-hybrid', 'success-response': false } ## # @GuestIpAddressType: @@ -556,7 +556,7 @@ # # Since: 1.1 ## -{ 'type': 'GuestIpAddress', +{ 'struct': 'GuestIpAddress', 'data': {'ip-address': 'str', 'ip-address-type': 'GuestIpAddressType', 'prefix': 'int'} } @@ -572,7 +572,7 @@ # # Since: 1.1 ## -{ 'type': 'GuestNetworkInterface', +{ 'struct': 'GuestNetworkInterface', 'data': {'name': 'str', '*hardware-address': 'str', '*ip-addresses': ['GuestIpAddress'] } } @@ -604,7 +604,7 @@ # # Since: 1.5 ## -{ 'type': 'GuestLogicalProcessor', +{ 'struct': 'GuestLogicalProcessor', 'data': {'logical-id': 'int', 'online': 'bool', '*can-offline': 'bool'} } @@ -694,7 +694,7 @@ # # Since: 2.2 ## -{ 'type': 'GuestPCIAddress', +{ 'struct': 'GuestPCIAddress', 'data': {'domain': 'int', 'bus': 'int', 'slot': 'int', 'function': 'int'} } @@ -709,7 +709,7 @@ # # Since: 2.2 ## -{ 'type': 'GuestDiskAddress', +{ 'struct': 'GuestDiskAddress', 'data': {'pci-controller': 'GuestPCIAddress', 'bus-type': 'GuestDiskBusType', 'bus': 'int', 'target': 'int', 'unit': 'int'} } @@ -725,7 +725,7 @@ # # Since: 2.2 ## -{ 'type': 'GuestFilesystemInfo', +{ 'struct': 'GuestFilesystemInfo', 'data': {'name': 'str', 'mountpoint': 'str', 'type': 'str', 'disk': ['GuestDiskAddress']} } @@ -782,7 +782,7 @@ # # Since: 2.3 ## -{ 'type': 'GuestMemoryBlock', +{ 'struct': 'GuestMemoryBlock', 'data': {'phys-index': 'uint64', 'online': 'bool', '*can-offline': 'bool'} } @@ -808,7 +808,7 @@ # # An enumeration of memory block operation result. 
# -# @sucess: the operation of online/offline memory block is successful. +# @success: the operation of online/offline memory block is successful. # @not-found: can't find the corresponding memoryXXX directory in sysfs. # @operation-not-supported: for some old kernels, it does not support # online or offline memory block. @@ -835,7 +835,7 @@ # # Since: 2.3 ## -{ 'type': 'GuestMemoryBlockResponse', +{ 'struct': 'GuestMemoryBlockResponse', 'data': { 'phys-index': 'uint64', 'response': 'GuestMemoryBlockResponseType', '*error-code': 'int' }} @@ -876,7 +876,7 @@ # # Since: 2.3 ## -{ 'type': 'GuestMemoryBlockInfo', +{ 'struct': 'GuestMemoryBlockInfo', 'data': {'size': 'uint64'} } ## diff --git a/qmp-commands.hx b/qmp-commands.hx index 09f48bada5..7506774afb 100644 --- a/qmp-commands.hx +++ b/qmp-commands.hx @@ -785,8 +785,7 @@ EQMP .args_type = "protocol:s,hostname:s,port:i?,tls-port:i?,cert-subject:s?", .params = "protocol hostname port tls-port cert-subject", .help = "send migration info to spice/vnc client", - .mhandler.cmd_async = client_migrate_info, - .flags = MONITOR_CMD_ASYNC, + .mhandler.cmd_new = client_migrate_info, }, SQMP @@ -1008,6 +1007,43 @@ EQMP .mhandler.cmd_new = qmp_marshal_input_block_stream, }, +SQMP +block-stream +------------ + +Copy data from a backing file into a block device. + +Arguments: + +- "device": The device's ID, must be unique (json-string) +- "base": The file name of the backing image above which copying starts + (json-string, optional) +- "backing-file": The backing file string to write into the active layer. This + filename is not validated. + + If a pathname string is such that it cannot be resolved by + QEMU, that means that subsequent QMP or HMP commands must use + node-names for the image in question, as filename lookup + methods will fail. + + If not specified, QEMU will automatically determine the + backing file string to use, or error out if there is no + obvious choice. Care should be taken when specifying the + string, to specify a valid filename or protocol. + (json-string, optional) (Since 2.1) +- "speed": the maximum speed, in bytes per second (json-int, optional) +- "on-error": the action to take on an error (default 'report'). 'stop' and + 'enospc' can only be used if the block device supports io-status. + (json-string, optional) (Since 2.1) + +Example: + +-> { "execute": "block-stream", "arguments": { "device": "virtio0", + "base": "/tmp/master.qcow2" } } +<- { "return": {} } + +EQMP + { .name = "block-commit", .args_type = "device:B,base:s?,top:s?,backing-file:s?,speed:o?", @@ -1074,7 +1110,7 @@ EQMP { .name = "drive-backup", .args_type = "sync:s,device:B,target:s,speed:i?,mode:s?,format:s?," - "on-source-error:s?,on-target-error:s?", + "bitmap:s?,on-source-error:s?,on-target-error:s?", .mhandler.cmd_new = qmp_marshal_input_drive_backup, }, @@ -1101,8 +1137,10 @@ Arguments: (json-string, optional) - "sync": what parts of the disk image should be copied to the destination; possibilities include "full" for all the disk, "top" for only the sectors - allocated in the topmost image, or "none" to only replicate new I/O - (MirrorSyncMode). + allocated in the topmost image, "dirty-bitmap" for only the dirty sectors in + the bitmap, or "none" to only replicate new I/O (MirrorSyncMode). +- "bitmap": dirty bitmap name for sync==dirty-bitmap. Must be present if sync + is "dirty-bitmap", must NOT be present otherwise. 
- "mode": whether and how QEMU should create a new image (NewImageMode, optional, default 'absolute-paths') - "speed": the maximum speed, in bytes per second (json-int, optional) @@ -1270,6 +1308,91 @@ Example: EQMP { + .name = "block-dirty-bitmap-add", + .args_type = "node:B,name:s,granularity:i?", + .mhandler.cmd_new = qmp_marshal_input_block_dirty_bitmap_add, + }, + +SQMP + +block-dirty-bitmap-add +---------------------- +Since 2.4 + +Create a dirty bitmap with a name on the device, and start tracking the writes. + +Arguments: + +- "node": device/node on which to create dirty bitmap (json-string) +- "name": name of the new dirty bitmap (json-string) +- "granularity": granularity to track writes with (int, optional) + +Example: + +-> { "execute": "block-dirty-bitmap-add", "arguments": { "node": "drive0", + "name": "bitmap0" } } +<- { "return": {} } + +EQMP + + { + .name = "block-dirty-bitmap-remove", + .args_type = "node:B,name:s", + .mhandler.cmd_new = qmp_marshal_input_block_dirty_bitmap_remove, + }, + +SQMP + +block-dirty-bitmap-remove +------------------------- +Since 2.4 + +Stop write tracking and remove the dirty bitmap that was created with +block-dirty-bitmap-add. + +Arguments: + +- "node": device/node on which to remove dirty bitmap (json-string) +- "name": name of the dirty bitmap to remove (json-string) + +Example: + +-> { "execute": "block-dirty-bitmap-remove", "arguments": { "node": "drive0", + "name": "bitmap0" } } +<- { "return": {} } + +EQMP + + { + .name = "block-dirty-bitmap-clear", + .args_type = "node:B,name:s", + .mhandler.cmd_new = qmp_marshal_input_block_dirty_bitmap_clear, + }, + +SQMP + +block-dirty-bitmap-clear +------------------------ +Since 2.4 + +Reset the dirty bitmap associated with a node so that an incremental backup +from this point in time forward will only backup clusters modified after this +clear operation. 
+ +Arguments: + +- "node": device/node on which to remove dirty bitmap (json-string) +- "name": name of the dirty bitmap to remove (json-string) + +Example: + +-> { "execute": "block-dirty-bitmap-clear", "arguments": { "node": "drive0", + "name": "bitmap0" } } +<- { "return": {} } + +EQMP + + { .name = "blockdev-snapshot-sync", .args_type = "device:s?,node-name:s?,snapshot-file:s,snapshot-node-name:s?,format:s?,mode:s?", .mhandler.cmd_new = qmp_marshal_input_blockdev_snapshot_sync, @@ -2257,7 +2380,7 @@ Example: "virtual-size":2048000, "backing_file":"base.qcow2", "full-backing-filename":"disks/base.qcow2", - "backing-filename-format:"qcow2", + "backing-filename-format":"qcow2", "snapshots":[ { "id": "1", @@ -3320,6 +3443,63 @@ EQMP }, SQMP +migrate-set-parameters +---------------------- + +Set migration parameters + +- "compress-level": set compression level during migration (json-int) +- "compress-threads": set compression thread count for migration (json-int) +- "decompress-threads": set decompression thread count for migration (json-int) + +Arguments: + +Example: + +-> { "execute": "migrate-set-parameters" , "arguments": + { "compress-level": 1 } } + +EQMP + + { + .name = "migrate-set-parameters", + .args_type = + "compress-level:i?,compress-threads:i?,decompress-threads:i?", + .mhandler.cmd_new = qmp_marshal_input_migrate_set_parameters, + }, +SQMP +query-migrate-parameters +------------------------ + +Query current migration parameters + +- "parameters": migration parameters value + - "compress-level" : compression level value (json-int) + - "compress-threads" : compression thread count value (json-int) + - "decompress-threads" : decompression thread count value (json-int) + +Arguments: + +Example: + +-> { "execute": "query-migrate-parameters" } +<- { + "return": { + "decompress-threads", 2, + "compress-threads", 8, + "compress-level", 1 + } + } + +EQMP + + { + .name = "query-migrate-parameters", + .args_type = "", + .mhandler.cmd_new = qmp_marshal_input_query_migrate_parameters, + }, + +SQMP query-balloon ------------- @@ -3724,7 +3904,7 @@ Example: "virtual-size":2048000, "backing_file":"base.qcow2", "full-backing-filename":"disks/base.qcow2", - "backing-filename-format:"qcow2", + "backing-filename-format":"qcow2", "snapshots":[ { "id": "1", @@ -45,15 +45,16 @@ NameInfo *qmp_query_name(Error **errp) VersionInfo *qmp_query_version(Error **errp) { - VersionInfo *info = g_malloc0(sizeof(*info)); + VersionInfo *info = g_new0(VersionInfo, 1); const char *version = QEMU_VERSION; char *tmp; - info->qemu.major = strtol(version, &tmp, 10); + info->qemu = g_new0(VersionTriple, 1); + info->qemu->major = strtol(version, &tmp, 10); tmp++; - info->qemu.minor = strtol(tmp, &tmp, 10); + info->qemu->minor = strtol(tmp, &tmp, 10); tmp++; - info->qemu.micro = strtol(tmp, &tmp, 10); + info->qemu->micro = strtol(tmp, &tmp, 10); info->package = g_strdup(QEMU_PKGVERSION); return info; @@ -102,7 +102,8 @@ endif %.o: %.dtrace $(call quiet-command,dtrace -o $@ -G -s $<, " GEN $(TARGET_DIR)$@") -%$(DSOSUF): CFLAGS += -fPIC -DBUILD_DSO +DSO_OBJ_CFLAGS := -fPIC -DBUILD_DSO +module-common.o: CFLAGS += $(DSO_OBJ_CFLAGS) %$(DSOSUF): LDFLAGS += $(LDFLAGS_SHARED) %$(DSOSUF): %.mo $(call LINK,$^) @@ -351,6 +352,7 @@ define unnest-vars # For non-module build, add -m to -y $(if $(CONFIG_MODULES), $(foreach o,$($v), + $(eval $($o-objs): CFLAGS += $(DSO_OBJ_CFLAGS)) $(eval $o: $($o-objs))) $(eval $(patsubst %-m,%-y,$v) += $($v)) $(eval modules: $($v:%.mo=%$(DSOSUF))), diff --git a/scripts/checkpatch.pl 
b/scripts/checkpatch.pl index 5df61f9aa9..7f0aae977d 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl @@ -2911,6 +2911,17 @@ sub process { if ($rawline =~ /\b(?:Qemu|QEmu)\b/) { WARN("use QEMU instead of Qemu or QEmu\n" . $herecurr); } + +# check for non-portable ffs() calls that have portable alternatives in QEMU + if ($line =~ /\bffs\(/) { + ERROR("use ctz32() instead of ffs()\n" . $herecurr); + } + if ($line =~ /\bffsl\(/) { + ERROR("use ctz32() or ctz64() instead of ffsl()\n" . $herecurr); + } + if ($line =~ /\bffsll\(/) { + ERROR("use ctz64() instead of ffsll()\n" . $herecurr); + } } # If we have no input at all, then there is nothing to report on diff --git a/scripts/coverity-model.c b/scripts/coverity-model.c index cdda2591d9..617f67d716 100644 --- a/scripts/coverity-model.c +++ b/scripts/coverity-model.c @@ -46,8 +46,10 @@ typedef struct va_list_str *va_list; typedef struct AddressSpace AddressSpace; typedef uint64_t hwaddr; +typedef uint32_t MemTxResult; +typedef uint64_t MemTxAttrs; -static void __write(uint8_t *buf, ssize_t len) +static void __bufwrite(uint8_t *buf, ssize_t len) { int first, last; __coverity_negative_sink__(len); @@ -57,7 +59,7 @@ static void __write(uint8_t *buf, ssize_t len) __coverity_writeall__(buf); } -static void __read(uint8_t *buf, ssize_t len) +static void __bufread(uint8_t *buf, ssize_t len) { __coverity_negative_sink__(len); if (len == 0) return; @@ -65,14 +67,14 @@ static void __read(uint8_t *buf, ssize_t len) int last = buf[len-1]; } -bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf, - int len, bool is_write) +MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs, + uint8_t *buf, int len, bool is_write) { - bool result; + MemTxResult result; // TODO: investigate impact of treating reads as producing // tainted data, with __coverity_tainted_data_argument__(buf). - if (is_write) __write(buf, len); else __read(buf, len); + if (is_write) __bufread(buf, len); else __bufwrite(buf, len); return result; } diff --git a/scripts/qapi-commands.py b/scripts/qapi-commands.py index 053ba85b5f..93e43f0e48 100644 --- a/scripts/qapi-commands.py +++ b/scripts/qapi-commands.py @@ -2,7 +2,7 @@ # QAPI command marshaller generator # # Copyright IBM, Corp. 2011 -# Copyright (C) 2014 Red Hat, Inc. +# Copyright (C) 2014-2015 Red Hat, Inc. 
# # Authors: # Anthony Liguori <aliguori@us.ibm.com> @@ -28,7 +28,7 @@ def type_visitor(name): def generate_command_decl(name, args, ret_type): arglist="" - for argname, argtype, optional, structured in parse_args(args): + for argname, argtype, optional in parse_args(args): argtype = c_type(argtype, is_param=True) if optional: arglist += "bool has_%s, " % c_var(argname) @@ -53,7 +53,7 @@ def gen_sync_call(name, args, ret_type, indent=0): retval="" if ret_type: retval = "retval = " - for argname, argtype, optional, structured in parse_args(args): + for argname, argtype, optional in parse_args(args): if optional: arglist += "has_%s, " % c_var(argname) arglist += "%s, " % (c_var(argname)) @@ -96,7 +96,7 @@ Visitor *v; def gen_visitor_input_vars_decl(args): ret = "" push_indent() - for argname, argtype, optional, structured in parse_args(args): + for argname, argtype, optional in parse_args(args): if optional: ret += mcgen(''' bool has_%(argname)s = false; @@ -139,7 +139,7 @@ v = qapi_dealloc_get_visitor(md); v = qmp_input_get_visitor(mi); ''') - for argname, argtype, optional, structured in parse_args(args): + for argname, argtype, optional in parse_args(args): if optional: ret += mcgen(''' visit_optional(v, &has_%(c_name)s, "%(name)s", %(errp)s); @@ -293,17 +293,12 @@ out: return ret -def option_value_matches(opt, val, cmd): - if opt in cmd and cmd[opt] == val: - return True - return False - def gen_registry(commands): registry="" push_indent() for cmd in commands: options = 'QCO_NO_OPTIONS' - if option_value_matches('success-response', 'no', cmd): + if not cmd.get('success-response', True): options = 'QCO_NO_SUCCESS_RESP' registry += mcgen(''' diff --git a/scripts/qapi-event.py b/scripts/qapi-event.py index 601e3076ab..47dc041805 100644 --- a/scripts/qapi-event.py +++ b/scripts/qapi-event.py @@ -21,7 +21,7 @@ def _generate_event_api_name(event_name, params): l = len(api_name) if params: - for argname, argentry, optional, structured in parse_args(params): + for argname, argentry, optional in parse_args(params): if optional: api_name += "bool has_%s,\n" % c_var(argname) api_name += "".ljust(l) @@ -93,7 +93,7 @@ def generate_event_implement(api_name, event_name, params): """, event_name = event_name) - for argname, argentry, optional, structured in parse_args(params): + for argname, argentry, optional in parse_args(params): if optional: ret += mcgen(""" if (has_%(var)s) { diff --git a/scripts/qapi-types.py b/scripts/qapi-types.py index db872180c6..2bf8145076 100644 --- a/scripts/qapi-types.py +++ b/scripts/qapi-types.py @@ -63,18 +63,13 @@ typedef struct %(name)sList def generate_struct_fields(members): ret = '' - for argname, argentry, optional, structured in parse_args(members): + for argname, argentry, optional in parse_args(members): if optional: ret += mcgen(''' bool has_%(c_name)s; ''', c_name=c_var(argname)) - if structured: - push_indent() - ret += generate_struct({ "field": argname, "data": argentry}) - pop_indent() - else: - ret += mcgen(''' + ret += mcgen(''' %(c_type)s %(c_name)s; ''', c_type=c_type(argentry), c_name=c_var(argname)) @@ -83,7 +78,7 @@ def generate_struct_fields(members): def generate_struct(expr): - structname = expr.get('type', "") + structname = expr.get('struct', "") fieldname = expr.get('field', "") members = expr['data'] base = expr.get('base') @@ -170,9 +165,9 @@ typedef enum %(name)s return lookup_decl + enum_decl -def generate_anon_union_qtypes(expr): +def generate_alternate_qtypes(expr): - name = expr['union'] + name = expr['alternate'] members = 
expr['data'] ret = mcgen(''' @@ -181,17 +176,8 @@ const int %(name)s_qtypes[QTYPE_MAX] = { name=name) for key in members: - qapi_type = members[key] - if builtin_type_qtypes.has_key(qapi_type): - qtype = builtin_type_qtypes[qapi_type] - elif find_struct(qapi_type): - qtype = "QTYPE_QDICT" - elif find_union(qapi_type): - qtype = "QTYPE_QDICT" - elif find_enum(qapi_type): - qtype = "QTYPE_QSTRING" - else: - assert False, "Invalid anonymous union member" + qtype = find_alternate_member_qtype(members[key]) + assert qtype, "Invalid alternate member" ret += mcgen(''' [ %(qtype)s ] = %(abbrev)s_KIND_%(enum)s, @@ -206,9 +192,9 @@ const int %(name)s_qtypes[QTYPE_MAX] = { return ret -def generate_union(expr): +def generate_union(expr, meta): - name = expr['union'] + name = expr[meta] typeinfo = expr['data'] base = expr.get('base') @@ -242,10 +228,9 @@ struct %(name)s ''') if base: - base_fields = find_struct(base)['data'] - if discriminator: - base_fields = base_fields.copy() - del base_fields[discriminator] + assert discriminator + base_fields = find_struct(base)['data'].copy() + del base_fields[discriminator] ret += generate_struct_fields(base_fields) else: assert not discriminator @@ -253,7 +238,7 @@ struct %(name)s ret += mcgen(''' }; ''') - if discriminator == {}: + if meta == 'alternate': ret += mcgen(''' extern const int %(name)s_qtypes[]; ''', @@ -398,14 +383,14 @@ exprs = parse_schema(input_file) exprs = filter(lambda expr: not expr.has_key('gen'), exprs) fdecl.write(guardstart("QAPI_TYPES_BUILTIN_STRUCT_DECL")) -for typename in builtin_types: +for typename in builtin_types.keys(): fdecl.write(generate_fwd_struct(typename, None, builtin_type=True)) fdecl.write(guardend("QAPI_TYPES_BUILTIN_STRUCT_DECL")) for expr in exprs: ret = "\n" - if expr.has_key('type'): - ret += generate_fwd_struct(expr['type'], expr['data']) + if expr.has_key('struct'): + ret += generate_fwd_struct(expr['struct'], expr['data']) elif expr.has_key('enum'): ret += generate_enum(expr['enum'], expr['data']) + "\n" ret += generate_fwd_enum_struct(expr['enum'], expr['data']) @@ -417,8 +402,12 @@ for expr in exprs: ret += generate_enum('%sKind' % expr['union'], expr['data'].keys()) fdef.write(generate_enum_lookup('%sKind' % expr['union'], expr['data'].keys())) - if expr.get('discriminator') == {}: - fdef.write(generate_anon_union_qtypes(expr)) + elif expr.has_key('alternate'): + ret += generate_fwd_struct(expr['alternate'], expr['data']) + "\n" + ret += generate_enum('%sKind' % expr['alternate'], expr['data'].keys()) + fdef.write(generate_enum_lookup('%sKind' % expr['alternate'], + expr['data'].keys())) + fdef.write(generate_alternate_qtypes(expr)) else: continue fdecl.write(ret) @@ -426,7 +415,7 @@ for expr in exprs: # to avoid header dependency hell, we always generate declarations # for built-in types in our header files and simply guard them fdecl.write(guardstart("QAPI_TYPES_BUILTIN_CLEANUP_DECL")) -for typename in builtin_types: +for typename in builtin_types.keys(): fdecl.write(generate_type_cleanup_decl(typename + "List")) fdecl.write(guardend("QAPI_TYPES_BUILTIN_CLEANUP_DECL")) @@ -435,24 +424,30 @@ fdecl.write(guardend("QAPI_TYPES_BUILTIN_CLEANUP_DECL")) # over these cases if do_builtins: fdef.write(guardstart("QAPI_TYPES_BUILTIN_CLEANUP_DEF")) - for typename in builtin_types: + for typename in builtin_types.keys(): fdef.write(generate_type_cleanup(typename + "List")) fdef.write(guardend("QAPI_TYPES_BUILTIN_CLEANUP_DEF")) for expr in exprs: ret = "\n" - if expr.has_key('type'): + if expr.has_key('struct'): ret += 
generate_struct(expr) + "\n" - ret += generate_type_cleanup_decl(expr['type'] + "List") - fdef.write(generate_type_cleanup(expr['type'] + "List") + "\n") - ret += generate_type_cleanup_decl(expr['type']) - fdef.write(generate_type_cleanup(expr['type']) + "\n") + ret += generate_type_cleanup_decl(expr['struct'] + "List") + fdef.write(generate_type_cleanup(expr['struct'] + "List") + "\n") + ret += generate_type_cleanup_decl(expr['struct']) + fdef.write(generate_type_cleanup(expr['struct']) + "\n") elif expr.has_key('union'): - ret += generate_union(expr) + ret += generate_union(expr, 'union') ret += generate_type_cleanup_decl(expr['union'] + "List") fdef.write(generate_type_cleanup(expr['union'] + "List") + "\n") ret += generate_type_cleanup_decl(expr['union']) fdef.write(generate_type_cleanup(expr['union']) + "\n") + elif expr.has_key('alternate'): + ret += generate_union(expr, 'alternate') + ret += generate_type_cleanup_decl(expr['alternate'] + "List") + fdef.write(generate_type_cleanup(expr['alternate'] + "List") + "\n") + ret += generate_type_cleanup_decl(expr['alternate']) + fdef.write(generate_type_cleanup(expr['alternate']) + "\n") elif expr.has_key('enum'): ret += generate_type_cleanup_decl(expr['enum'] + "List") fdef.write(generate_type_cleanup(expr['enum'] + "List") + "\n") diff --git a/scripts/qapi-visit.py b/scripts/qapi-visit.py index 1be4d67d8a..0e67b336fc 100644 --- a/scripts/qapi-visit.py +++ b/scripts/qapi-visit.py @@ -43,79 +43,45 @@ static void visit_type_implicit_%(c_type)s(Visitor *m, %(c_type)s **obj, Error * ''', c_type=type_name(type)) -def generate_visit_struct_fields(name, field_prefix, fn_prefix, members, base = None): +def generate_visit_struct_fields(name, members, base = None): substructs = [] ret = '' - if not fn_prefix: - full_name = name - else: - full_name = "%s_%s" % (name, fn_prefix) - - for argname, argentry, optional, structured in parse_args(members): - if structured: - if not fn_prefix: - nested_fn_prefix = argname - else: - nested_fn_prefix = "%s_%s" % (fn_prefix, argname) - - nested_field_prefix = "%s%s." 
% (field_prefix, argname) - ret += generate_visit_struct_fields(name, nested_field_prefix, - nested_fn_prefix, argentry) - ret += mcgen(''' - -static void visit_type_%(full_name)s_field_%(c_name)s(Visitor *m, %(name)s **obj, Error **errp) -{ -''', - name=name, full_name=full_name, c_name=c_var(argname)) - ret += generate_visit_struct_body(full_name, argname, argentry) - ret += mcgen(''' -} -''') if base: ret += generate_visit_implicit_struct(base) ret += mcgen(''' -static void visit_type_%(full_name)s_fields(Visitor *m, %(name)s **obj, Error **errp) +static void visit_type_%(name)s_fields(Visitor *m, %(name)s **obj, Error **errp) { Error *err = NULL; ''', - name=name, full_name=full_name) + name=name) push_indent() if base: ret += mcgen(''' -visit_type_implicit_%(type)s(m, &(*obj)->%(c_prefix)s%(c_name)s, &err); +visit_type_implicit_%(type)s(m, &(*obj)->%(c_name)s, &err); if (err) { goto out; } ''', - c_prefix=c_var(field_prefix), type=type_name(base), c_name=c_var('base')) - for argname, argentry, optional, structured in parse_args(members): + for argname, argentry, optional in parse_args(members): if optional: ret += mcgen(''' -visit_optional(m, &(*obj)->%(c_prefix)shas_%(c_name)s, "%(name)s", &err); -if (!err && (*obj)->%(prefix)shas_%(c_name)s) { +visit_optional(m, &(*obj)->has_%(c_name)s, "%(name)s", &err); +if (!err && (*obj)->has_%(c_name)s) { ''', - c_prefix=c_var(field_prefix), prefix=field_prefix, c_name=c_var(argname), name=argname) push_indent() - if structured: - ret += mcgen(''' -visit_type_%(full_name)s_field_%(c_name)s(m, obj, &err); -''', - full_name=full_name, c_name=c_var(argname)) - else: - ret += mcgen(''' -visit_type_%(type)s(m, &(*obj)->%(c_prefix)s%(c_name)s, "%(name)s", &err); + ret += mcgen(''' +visit_type_%(type)s(m, &(*obj)->%(c_name)s, "%(name)s", &err); ''', - c_prefix=c_var(field_prefix), prefix=field_prefix, - type=type_name(argentry), c_name=c_var(argname), - name=argname) + type=type_name(argentry), c_name=c_var(argname), + name=argname) if optional: pop_indent() @@ -141,29 +107,11 @@ out: return ret -def generate_visit_struct_body(field_prefix, name, members): +def generate_visit_struct_body(name, members): ret = mcgen(''' Error *err = NULL; -''') - - if not field_prefix: - full_name = name - else: - full_name = "%s_%s" % (field_prefix, name) - - if len(field_prefix): - ret += mcgen(''' - visit_start_struct(m, NULL, "", "%(name)s", 0, &err); -''', - name=name) - else: - ret += mcgen(''' visit_start_struct(m, (void **)obj, "%(name)s", name, sizeof(%(name)s), &err); -''', - name=name) - - ret += mcgen(''' if (!err) { if (*obj) { visit_type_%(name)s_fields(m, obj, errp); @@ -172,17 +120,17 @@ def generate_visit_struct_body(field_prefix, name, members): } error_propagate(errp, err); ''', - name=full_name) + name=name) return ret def generate_visit_struct(expr): - name = expr['type'] + name = expr['struct'] members = expr['data'] base = expr.get('base') - ret = generate_visit_struct_fields(name, "", "", members, base) + ret = generate_visit_struct_fields(name, members, base) ret += mcgen(''' @@ -191,7 +139,7 @@ void visit_type_%(name)s(Visitor *m, %(name)s **obj, const char *name, Error **e ''', name=name) - ret += generate_visit_struct_body("", name, members) + ret += generate_visit_struct_body(name, members) ret += mcgen(''' } @@ -237,7 +185,7 @@ void visit_type_%(name)s(Visitor *m, %(name)s *obj, const char *name, Error **er ''', name=name) -def generate_visit_anon_union(name, members): +def generate_visit_alternate(name, members): ret = mcgen(''' void 
visit_type_%(name)s(Visitor *m, %(name)s **obj, const char *name, Error **errp) @@ -256,15 +204,15 @@ void visit_type_%(name)s(Visitor *m, %(name)s **obj, const char *name, Error **e ''', name=name) - # For anon union, always use the default enum type automatically generated + # For alternate, always use the default enum type automatically generated # as "'%sKind' % (name)" disc_type = '%sKind' % (name) for key in members: - assert (members[key] in builtin_types + assert (members[key] in builtin_types.keys() or find_struct(members[key]) or find_union(members[key]) - or find_enum(members[key])), "Invalid anonymous union member" + or find_enum(members[key])), "Invalid alternate member" enum_full_value = generate_enum_full_value(disc_type, key) ret += mcgen(''' @@ -300,27 +248,22 @@ def generate_visit_union(expr): base = expr.get('base') discriminator = expr.get('discriminator') - if discriminator == {}: - assert not base - return generate_visit_anon_union(name, members) - enum_define = discriminator_find_enum_define(expr) if enum_define: # Use the enum type as discriminator ret = "" disc_type = enum_define['enum_name'] else: - # There will always be a discriminator in the C switch code, by default it - # is an enum type generated silently as "'%sKind' % (name)" + # There will always be a discriminator in the C switch code, by default + # it is an enum type generated silently as "'%sKind' % (name)" ret = generate_visit_enum('%sKind' % name, members.keys()) disc_type = '%sKind' % (name) if base: - base_fields = find_struct(base)['data'] - if discriminator: - base_fields = base_fields.copy() - del base_fields[discriminator] - ret += generate_visit_struct_fields(name, "", "", base_fields) + assert discriminator + base_fields = find_struct(base)['data'].copy() + del base_fields[discriminator] + ret += generate_visit_struct_fields(name, base_fields) if discriminator: for key in members: @@ -538,7 +481,7 @@ exprs = parse_schema(input_file) # to avoid header dependency hell, we always generate declarations # for built-in types in our header files and simply guard them fdecl.write(guardstart("QAPI_VISIT_BUILTIN_VISITOR_DECL")) -for typename in builtin_types: +for typename in builtin_types.keys(): fdecl.write(generate_declaration(typename, None, builtin_type=True)) fdecl.write(guardend("QAPI_VISIT_BUILTIN_VISITOR_DECL")) @@ -546,16 +489,16 @@ fdecl.write(guardend("QAPI_VISIT_BUILTIN_VISITOR_DECL")) # have the functions defined, so we use -b option to provide control # over these cases if do_builtins: - for typename in builtin_types: + for typename in builtin_types.keys(): fdef.write(generate_visit_list(typename, None)) for expr in exprs: - if expr.has_key('type'): + if expr.has_key('struct'): ret = generate_visit_struct(expr) - ret += generate_visit_list(expr['type'], expr['data']) + ret += generate_visit_list(expr['struct'], expr['data']) fdef.write(ret) - ret = generate_declaration(expr['type'], expr['data']) + ret = generate_declaration(expr['struct'], expr['data']) fdecl.write(ret) elif expr.has_key('union'): ret = generate_visit_union(expr) @@ -569,6 +512,15 @@ for expr in exprs: expr['data'].keys()) ret += generate_declaration(expr['union'], expr['data']) fdecl.write(ret) + elif expr.has_key('alternate'): + ret = generate_visit_alternate(expr['alternate'], expr['data']) + ret += generate_visit_list(expr['alternate'], expr['data']) + fdef.write(ret) + + ret = generate_decl_enum('%sKind' % expr['alternate'], + expr['data'].keys()) + ret += generate_declaration(expr['alternate'], expr['data']) + 
fdecl.write(ret) elif expr.has_key('enum'): ret = generate_visit_list(expr['enum'], expr['data']) ret += generate_visit_enum(expr['enum'], expr['data']) diff --git a/scripts/qapi.py b/scripts/qapi.py index 77d46aa995..166b74f644 100644 --- a/scripts/qapi.py +++ b/scripts/qapi.py @@ -2,7 +2,7 @@ # QAPI helper library # # Copyright IBM, Corp. 2011 -# Copyright (c) 2013 Red Hat Inc. +# Copyright (c) 2013-2015 Red Hat Inc. # # Authors: # Anthony Liguori <aliguori@us.ibm.com> @@ -16,13 +16,7 @@ from ordereddict import OrderedDict import os import sys -builtin_types = [ - 'str', 'int', 'number', 'bool', - 'int8', 'int16', 'int32', 'int64', - 'uint8', 'uint16', 'uint32', 'uint64' -] - -builtin_type_qtypes = { +builtin_types = { 'str': 'QTYPE_QSTRING', 'int': 'QTYPE_QINT', 'number': 'QTYPE_QFLOAT', @@ -35,8 +29,39 @@ builtin_type_qtypes = { 'uint16': 'QTYPE_QINT', 'uint32': 'QTYPE_QINT', 'uint64': 'QTYPE_QINT', + 'size': 'QTYPE_QINT', } +# Whitelist of commands allowed to return a non-dictionary +returns_whitelist = [ + # From QMP: + 'human-monitor-command', + 'query-migrate-cache-size', + 'query-tpm-models', + 'query-tpm-types', + 'ringbuf-read', + + # From QGA: + 'guest-file-open', + 'guest-fsfreeze-freeze', + 'guest-fsfreeze-freeze-list', + 'guest-fsfreeze-status', + 'guest-fsfreeze-thaw', + 'guest-get-time', + 'guest-set-vcpus', + 'guest-sync', + 'guest-sync-delimited', + + # From qapi-schema-test: + 'user_def_cmd3', +] + +enum_types = [] +struct_types = [] +union_types = [] +events = [] +all_names = {} + def error_path(parent): res = "" while parent: @@ -148,7 +173,41 @@ class QAPISchema: raise QAPISchemaError(self, 'Missing terminating "\'"') if esc: - string += ch + if ch == 'b': + string += '\b' + elif ch == 'f': + string += '\f' + elif ch == 'n': + string += '\n' + elif ch == 'r': + string += '\r' + elif ch == 't': + string += '\t' + elif ch == 'u': + value = 0 + for x in range(0, 4): + ch = self.src[self.cursor] + self.cursor += 1 + if ch not in "0123456789abcdefABCDEF": + raise QAPISchemaError(self, + '\\u escape needs 4 ' + 'hex digits') + value = (value << 4) + int(ch, 16) + # If Python 2 and 3 didn't disagree so much on + # how to handle Unicode, then we could allow + # Unicode string defaults. But most of QAPI is + # ASCII-only, so we aren't losing much for now. 
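Editor's note: the escape handling added above accepts \b, \f, \n, \r, \t, the quoting escapes, and a \uXXXX form restricted to non-zero ASCII values. As a quick reference, here is a standalone sketch of the same decoding rule in Python; decode_escape is a hypothetical helper, not part of qapi.py, and it assumes the four hex digits have already been collected:

    SIMPLE_ESCAPES = {'b': '\b', 'f': '\f', 'n': '\n', 'r': '\r', 't': '\t',
                      '\\': '\\', '/': '/', "'": "'", '"': '"'}

    def decode_escape(esc):
        """Decode the text following a backslash, e.g. 'n' or 'u0041'."""
        if esc[0] in SIMPLE_ESCAPES:
            return SIMPLE_ESCAPES[esc[0]]
        if esc[0] == 'u':
            value = int(esc[1:5], 16)            # exactly four hex digits
            if not value or value > 0x7f:
                raise ValueError(r'\u escape only supports non-zero values '
                                 r'up to \u007f')
            return chr(value)
        raise ValueError('Unknown escape \\%s' % esc[0])

    # decode_escape('u0041') == 'A'; decode_escape('n') == '\n'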
+ if not value or value > 0x7f: + raise QAPISchemaError(self, + 'For now, \\u escape ' + 'only supports non-zero ' + 'values up to \\u007f') + string += chr(value) + elif ch in "\\/'\"": + string += ch + else: + raise QAPISchemaError(self, + "Unknown escape \\%s" %ch) esc = False elif ch == "\\": esc = True @@ -157,6 +216,20 @@ class QAPISchema: return else: string += ch + elif self.tok in "tfn": + val = self.src[self.cursor - 1:] + if val.startswith("true"): + self.val = True + self.cursor += 3 + return + elif val.startswith("false"): + self.val = False + self.cursor += 4 + return + elif val.startswith("null"): + self.val = None + self.cursor += 3 + return elif self.tok == '\n': if self.cursor == len(self.src): self.tok = None @@ -196,8 +269,9 @@ class QAPISchema: if self.tok == ']': self.accept() return expr - if not self.tok in [ '{', '[', "'" ]: - raise QAPISchemaError(self, 'Expected "{", "[", "]" or string') + if not self.tok in "{['tfn": + raise QAPISchemaError(self, 'Expected "{", "[", "]", string, ' + 'boolean or "null"') while True: expr.append(self.get_expr(True)) if self.tok == ']': @@ -216,7 +290,7 @@ class QAPISchema: elif self.tok == '[': self.accept() expr = self.get_values() - elif self.tok == "'": + elif self.tok in "'tfn": expr = self.val self.accept() else: @@ -229,6 +303,18 @@ def find_base_fields(base): return None return base_struct_define['data'] +# Return the qtype of an alternate branch, or None on error. +def find_alternate_member_qtype(qapi_type): + if builtin_types.has_key(qapi_type): + return builtin_types[qapi_type] + elif find_struct(qapi_type): + return "QTYPE_QDICT" + elif find_enum(qapi_type): + return "QTYPE_QSTRING" + elif find_union(qapi_type): + return "QTYPE_QDICT" + return None + # Return the discriminator enum define if discriminator is specified as an # enum type, otherwise return None. 
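Editor's note: find_alternate_member_qtype(), just above, reduces any QAPI type name to the QType it occupies on the wire, which is what makes the alternate checks further down possible. A rough standalone model of that mapping, with small made-up sets standing in for the real find_struct/find_enum/find_union lookups:

    # Assumption: toy lookup tables; the real code consults the parsed schema.
    builtins = {'str': 'QTYPE_QSTRING', 'int': 'QTYPE_QINT',
                'number': 'QTYPE_QFLOAT', 'bool': 'QTYPE_QBOOL'}
    structs = {'SomeStruct'}
    enums = {'SomeEnum'}
    unions = {'SomeUnion'}

    def member_qtype(qapi_type):
        if qapi_type in builtins:
            return builtins[qapi_type]
        if qapi_type in structs or qapi_type in unions:
            return 'QTYPE_QDICT'      # objects arrive as JSON dictionaries
        if qapi_type in enums:
            return 'QTYPE_QSTRING'    # enums arrive as JSON strings
        return None                   # unknown type name

    # member_qtype('SomeEnum') -> 'QTYPE_QSTRING'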
def discriminator_find_enum_define(expr): @@ -248,56 +334,178 @@ def discriminator_find_enum_define(expr): return find_enum(discriminator_type) +valid_name = re.compile('^[a-zA-Z_][a-zA-Z0-9_.-]*$') +def check_name(expr_info, source, name, allow_optional = False, + enum_member = False): + global valid_name + membername = name + + if not isinstance(name, str): + raise QAPIExprError(expr_info, + "%s requires a string name" % source) + if name.startswith('*'): + membername = name[1:] + if not allow_optional: + raise QAPIExprError(expr_info, + "%s does not allow optional name '%s'" + % (source, name)) + # Enum members can start with a digit, because the generated C + # code always prefixes it with the enum name + if enum_member: + membername = '_' + membername + if not valid_name.match(membername): + raise QAPIExprError(expr_info, + "%s uses invalid name '%s'" % (source, name)) + +def check_type(expr_info, source, value, allow_array = False, + allow_dict = False, allow_optional = False, + allow_star = False, allow_metas = []): + global all_names + orig_value = value + + if value is None: + return + + if allow_star and value == '**': + return + + # Check if array type for value is okay + if isinstance(value, list): + if not allow_array: + raise QAPIExprError(expr_info, + "%s cannot be an array" % source) + if len(value) != 1 or not isinstance(value[0], str): + raise QAPIExprError(expr_info, + "%s: array type must contain single type name" + % source) + value = value[0] + orig_value = "array of %s" %value + + # Check if type name for value is okay + if isinstance(value, str): + if value == '**': + raise QAPIExprError(expr_info, + "%s uses '**' but did not request 'gen':false" + % source) + if not value in all_names: + raise QAPIExprError(expr_info, + "%s uses unknown type '%s'" + % (source, orig_value)) + if not all_names[value] in allow_metas: + raise QAPIExprError(expr_info, + "%s cannot use %s type '%s'" + % (source, all_names[value], orig_value)) + return + + # value is a dictionary, check that each member is okay + if not isinstance(value, OrderedDict): + raise QAPIExprError(expr_info, + "%s should be a dictionary" % source) + if not allow_dict: + raise QAPIExprError(expr_info, + "%s should be a type name" % source) + for (key, arg) in value.items(): + check_name(expr_info, "Member of %s" % source, key, + allow_optional=allow_optional) + # Todo: allow dictionaries to represent default values of + # an optional argument. 
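Editor's note: check_name() above is the single gatekeeper for identifiers: a leading '*' is only tolerated where optional members are allowed, and enum members may begin with a digit because the generated C constant is always prefixed. A minimal sketch of just that acceptance rule; name_ok is a hypothetical helper without the QAPIExprError reporting:

    import re

    VALID_NAME = re.compile(r'^[a-zA-Z_][a-zA-Z0-9_.-]*$')  # same pattern as above

    def name_ok(name, allow_optional=False, enum_member=False):
        if name.startswith('*'):
            if not allow_optional:
                return False          # '*' only marks optional members
            name = name[1:]
        if enum_member:
            name = '_' + name         # enum members may start with a digit
        return bool(VALID_NAME.match(name))

    # name_ok('*read-only', allow_optional=True) -> True
    # name_ok('8bit', enum_member=True) -> True, name_ok('8bit') -> False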
+ check_type(expr_info, "Member '%s' of %s" % (key, source), arg, + allow_array=True, allow_star=allow_star, + allow_metas=['built-in', 'union', 'alternate', 'struct', + 'enum']) + +def check_member_clash(expr_info, base_name, data, source = ""): + base = find_struct(base_name) + assert base + base_members = base['data'] + for key in data.keys(): + if key.startswith('*'): + key = key[1:] + if key in base_members or "*" + key in base_members: + raise QAPIExprError(expr_info, + "Member name '%s'%s clashes with base '%s'" + % (key, source, base_name)) + if base.get('base'): + check_member_clash(expr_info, base['base'], data, source) + +def check_command(expr, expr_info): + name = expr['command'] + allow_star = expr.has_key('gen') + + check_type(expr_info, "'data' for command '%s'" % name, + expr.get('data'), allow_dict=True, allow_optional=True, + allow_metas=['union', 'struct'], allow_star=allow_star) + returns_meta = ['union', 'struct'] + if name in returns_whitelist: + returns_meta += ['built-in', 'alternate', 'enum'] + check_type(expr_info, "'returns' for command '%s'" % name, + expr.get('returns'), allow_array=True, allow_dict=True, + allow_optional=True, allow_metas=returns_meta, + allow_star=allow_star) + def check_event(expr, expr_info): + global events + name = expr['event'] params = expr.get('data') - if params: - for argname, argentry, optional, structured in parse_args(params): - if structured: - raise QAPIExprError(expr_info, - "Nested structure define in event is not " - "supported, event '%s', argname '%s'" - % (expr['event'], argname)) + + if name.upper() == 'MAX': + raise QAPIExprError(expr_info, "Event name 'MAX' cannot be created") + events.append(name) + check_type(expr_info, "'data' for event '%s'" % name, + expr.get('data'), allow_dict=True, allow_optional=True, + allow_metas=['union', 'struct']) def check_union(expr, expr_info): name = expr['union'] base = expr.get('base') discriminator = expr.get('discriminator') members = expr['data'] + values = { 'MAX': '(automatic)' } - # If the object has a member 'base', its value must name a complex type. - if base: - base_fields = find_base_fields(base) - if not base_fields: + # If the object has a member 'base', its value must name a struct, + # and there must be a discriminator. + if base is not None: + if discriminator is None: raise QAPIExprError(expr_info, - "Base '%s' is not a valid type" - % base) + "Union '%s' requires a discriminator to go " + "along with base" %name) - # If the union object has no member 'discriminator', it's an - # ordinary union. - if not discriminator: - enum_define = None + # Two types of unions, determined by discriminator. - # Else if the value of member 'discriminator' is {}, it's an - # anonymous union. - elif discriminator == {}: + # With no discriminator it is a simple union. + if discriminator is None: enum_define = None + allow_metas=['built-in', 'union', 'alternate', 'struct', 'enum'] + if base is not None: + raise QAPIExprError(expr_info, + "Simple union '%s' must not have a base" + % name) # Else, it's a flat union. else: - # The object must have a member 'base'. - if not base: + # The object must have a string member 'base'. + if not isinstance(base, str): raise QAPIExprError(expr_info, - "Flat union '%s' must have a base field" + "Flat union '%s' must have a string base field" % name) - # The value of member 'discriminator' must name a member of the - # base type. 
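Editor's note: check_member_clash() above recurses through the base chain so a flat union branch or derived struct cannot reuse a member name that any ancestor already defines, with or without the '*' optional marker. A compact model of that walk, using plain dictionaries in place of the real find_struct() tables; all names here are made up:

    structs = {
        'Base':  {'data': {'id': 'str', '*extra': 'int'}, 'base': None},
        'Child': {'data': {'*flag': 'bool'},              'base': 'Base'},
    }

    def member_clash(base_name, data):
        """Return the first member of 'data' that clashes with the base chain."""
        base = structs[base_name]
        for key in data:
            key = key.lstrip('*')
            if key in base['data'] or '*' + key in base['data']:
                return key
        if base['base']:
            return member_clash(base['base'], data)
        return None

    # member_clash('Child', {'extra': 'str'}) -> 'extra' (defined by Base)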
+ base_fields = find_base_fields(base) + if not base_fields: + raise QAPIExprError(expr_info, + "Base '%s' is not a valid struct" + % base) + + # The value of member 'discriminator' must name a non-optional + # member of the base struct. + check_name(expr_info, "Discriminator of flat union '%s'" % name, + discriminator) discriminator_type = base_fields.get(discriminator) if not discriminator_type: raise QAPIExprError(expr_info, "Discriminator '%s' is not a member of base " - "type '%s'" + "struct '%s'" % (discriminator, base)) enum_define = find_enum(discriminator_type) + allow_metas=['struct'] # Do not allow string discriminator if not enum_define: raise QAPIExprError(expr_info, @@ -306,51 +514,196 @@ def check_union(expr, expr_info): # Check every branch for (key, value) in members.items(): - # If this named member's value names an enum type, then all members + check_name(expr_info, "Member of union '%s'" % name, key) + + # Each value must name a known type; furthermore, in flat unions, + # branches must be a struct with no overlapping member names + check_type(expr_info, "Member '%s' of union '%s'" % (key, name), + value, allow_array=True, allow_metas=allow_metas) + if base: + branch_struct = find_struct(value) + assert branch_struct + check_member_clash(expr_info, base, branch_struct['data'], + " of branch '%s'" % key) + + # If the discriminator names an enum type, then all members # of 'data' must also be members of the enum type. - if enum_define and not key in enum_define['enum_values']: + if enum_define: + if not key in enum_define['enum_values']: + raise QAPIExprError(expr_info, + "Discriminator value '%s' is not found in " + "enum '%s'" % + (key, enum_define["enum_name"])) + + # Otherwise, check for conflicts in the generated enum + else: + c_key = _generate_enum_string(key) + if c_key in values: + raise QAPIExprError(expr_info, + "Union '%s' member '%s' clashes with '%s'" + % (name, key, values[c_key])) + values[c_key] = key + +def check_alternate(expr, expr_info): + name = expr['alternate'] + members = expr['data'] + values = { 'MAX': '(automatic)' } + types_seen = {} + + # Check every branch + for (key, value) in members.items(): + check_name(expr_info, "Member of alternate '%s'" % name, key) + + # Check for conflicts in the generated enum + c_key = _generate_enum_string(key) + if c_key in values: raise QAPIExprError(expr_info, - "Discriminator value '%s' is not found in " - "enum '%s'" % - (key, enum_define["enum_name"])) - # Todo: add checking for values. Key is checked as above, value can be - # also checked here, but we need more functions to handle array case. + "Alternate '%s' member '%s' clashes with '%s'" + % (name, key, values[c_key])) + values[c_key] = key + + # Ensure alternates have no type conflicts. 
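Editor's note: the check that follows rejects two alternate branches that would be indistinguishable on the wire, because the visitor can only dispatch on the incoming QType. A toy model of that rule, reusing the member_qtype() idea sketched earlier; the helper names are hypothetical, not the real qapi.py API:

    def alternate_conflicts(members, member_qtype):
        """Return (key, earlier_key) for the first pair sharing a QType, else None."""
        types_seen = {}
        for key, value in members.items():
            qtype = member_qtype(value)
            assert qtype, 'unknown alternate member type'
            if qtype in types_seen:
                return key, types_seen[qtype]   # both parse as the same QType
            types_seen[qtype] = key
        return None

    # alternate_conflicts({'a': 'int', 'b': 'SomeEnum'}, member_qtype) -> None
    # alternate_conflicts({'a': 'str', 'b': 'SomeEnum'}, member_qtype)
    #     -> ('b', 'a')   # both would arrive as QTYPE_QSTRING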
+ check_type(expr_info, "Member '%s' of alternate '%s'" % (key, name), + value, + allow_metas=['built-in', 'union', 'struct', 'enum']) + qtype = find_alternate_member_qtype(value) + assert qtype + if qtype in types_seen: + raise QAPIExprError(expr_info, + "Alternate '%s' member '%s' can't " + "be distinguished from member '%s'" + % (name, key, types_seen[qtype])) + types_seen[qtype] = key + +def check_enum(expr, expr_info): + name = expr['enum'] + members = expr.get('data') + values = { 'MAX': '(automatic)' } + + if not isinstance(members, list): + raise QAPIExprError(expr_info, + "Enum '%s' requires an array for 'data'" % name) + for member in members: + check_name(expr_info, "Member of enum '%s'" %name, member, + enum_member=True) + key = _generate_enum_string(member) + if key in values: + raise QAPIExprError(expr_info, + "Enum '%s' member '%s' clashes with '%s'" + % (name, member, values[key])) + values[key] = member + +def check_struct(expr, expr_info): + name = expr['struct'] + members = expr['data'] + + check_type(expr_info, "'data' for struct '%s'" % name, members, + allow_dict=True, allow_optional=True) + check_type(expr_info, "'base' for struct '%s'" % name, expr.get('base'), + allow_metas=['struct']) + if expr.get('base'): + check_member_clash(expr_info, expr['base'], expr['data']) def check_exprs(schema): for expr_elem in schema.exprs: expr = expr_elem['expr'] - if expr.has_key('union'): - check_union(expr, expr_elem['info']) - if expr.has_key('event'): - check_event(expr, expr_elem['info']) + info = expr_elem['info'] + + if expr.has_key('enum'): + check_enum(expr, info) + elif expr.has_key('union'): + check_union(expr, info) + elif expr.has_key('alternate'): + check_alternate(expr, info) + elif expr.has_key('struct'): + check_struct(expr, info) + elif expr.has_key('command'): + check_command(expr, info) + elif expr.has_key('event'): + check_event(expr, info) + else: + assert False, 'unexpected meta type' + +def check_keys(expr_elem, meta, required, optional=[]): + expr = expr_elem['expr'] + info = expr_elem['info'] + name = expr[meta] + if not isinstance(name, str): + raise QAPIExprError(info, + "'%s' key must have a string value" % meta) + required = required + [ meta ] + for (key, value) in expr.items(): + if not key in required and not key in optional: + raise QAPIExprError(info, + "Unknown key '%s' in %s '%s'" + % (key, meta, name)) + if (key == 'gen' or key == 'success-response') and value != False: + raise QAPIExprError(info, + "'%s' of %s '%s' should only use false value" + % (key, meta, name)) + for key in required: + if not expr.has_key(key): + raise QAPIExprError(info, + "Key '%s' is missing from %s '%s'" + % (key, meta, name)) + def parse_schema(input_file): + global all_names + exprs = [] + + # First pass: read entire file into memory try: schema = QAPISchema(open(input_file, "r")) except (QAPISchemaError, QAPIExprError), e: print >>sys.stderr, e exit(1) - exprs = [] - - for expr_elem in schema.exprs: - expr = expr_elem['expr'] - if expr.has_key('enum'): - add_enum(expr['enum'], expr['data']) - elif expr.has_key('union'): - add_union(expr) - elif expr.has_key('type'): - add_struct(expr) - exprs.append(expr) - - # Try again for hidden UnionKind enum - for expr_elem in schema.exprs: - expr = expr_elem['expr'] - if expr.has_key('union'): - if not discriminator_find_enum_define(expr): - add_enum('%sKind' % expr['union']) - try: + # Next pass: learn the types and check for valid expression keys. At + # this point, top-level 'include' has already been flattened. 
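Editor's note: check_keys() above is what turns a stray or misspelled key in a schema expression into a hard error, and it also pins 'gen' and 'success-response' to the literal false. A minimal sketch of the same validation; keys_ok is a hypothetical helper that returns an error string instead of raising QAPIExprError:

    def keys_ok(expr, meta, required, optional=()):
        """Return None if the expression's keys are acceptable, else an error."""
        required = list(required) + [meta]
        for key, value in expr.items():
            if key not in required and key not in optional:
                return "unknown key '%s'" % key
            if key in ('gen', 'success-response') and value is not False:
                return "'%s' may only be given the value false" % key
        for key in required:
            if key not in expr:
                return "missing key '%s'" % key
        return None

    # keys_ok({'command': 'foo', 'data': {}}, 'command', [],
    #         optional=('data', 'returns', 'gen', 'success-response')) -> None
    # keys_ok({'struct': 'Foo'}, 'struct', ['data']) -> "missing key 'data'"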
+ for builtin in builtin_types.keys(): + all_names[builtin] = 'built-in' + for expr_elem in schema.exprs: + expr = expr_elem['expr'] + info = expr_elem['info'] + if expr.has_key('enum'): + check_keys(expr_elem, 'enum', ['data']) + add_enum(expr['enum'], info, expr['data']) + elif expr.has_key('union'): + check_keys(expr_elem, 'union', ['data'], + ['base', 'discriminator']) + add_union(expr, info) + elif expr.has_key('alternate'): + check_keys(expr_elem, 'alternate', ['data']) + add_name(expr['alternate'], info, 'alternate') + elif expr.has_key('struct'): + check_keys(expr_elem, 'struct', ['data'], ['base']) + add_struct(expr, info) + elif expr.has_key('command'): + check_keys(expr_elem, 'command', [], + ['data', 'returns', 'gen', 'success-response']) + add_name(expr['command'], info, 'command') + elif expr.has_key('event'): + check_keys(expr_elem, 'event', [], ['data']) + add_name(expr['event'], info, 'event') + else: + raise QAPIExprError(expr_elem['info'], + "Expression is missing metatype") + exprs.append(expr) + + # Try again for hidden UnionKind enum + for expr_elem in schema.exprs: + expr = expr_elem['expr'] + if expr.has_key('union'): + if not discriminator_find_enum_define(expr): + add_enum('%sKind' % expr['union'], expr_elem['info'], + implicit=True) + elif expr.has_key('alternate'): + add_enum('%sKind' % expr['alternate'], expr_elem['info'], + implicit=True) + + # Final pass - validate that exprs make sense check_exprs(schema) except QAPIExprError, e: print >>sys.stderr, e @@ -359,7 +712,7 @@ def parse_schema(input_file): return exprs def parse_args(typeinfo): - if isinstance(typeinfo, basestring): + if isinstance(typeinfo, str): struct = find_struct(typeinfo) assert struct != None typeinfo = struct['data'] @@ -368,13 +721,12 @@ def parse_args(typeinfo): argname = member argentry = typeinfo[member] optional = False - structured = False if member.startswith('*'): argname = member[1:] optional = True - if isinstance(argentry, OrderedDict): - structured = True - yield (argname, argentry, optional, structured) + # Todo: allow argentry to be OrderedDict, for providing the + # value of an optional argument. 
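Editor's note: with nested structs gone, parse_args() now only has to strip the '*' optional marker and yield flat (name, type, optional) triples, which is why every caller above dropped the fourth 'structured' element. A self-contained sketch of that reduced contract; iter_members is a hypothetical stand-in that takes the member dictionary directly:

    def iter_members(members):
        """Yield (name, type, optional) for each schema member."""
        for member, argtype in members.items():
            optional = member.startswith('*')
            name = member[1:] if optional else member
            yield name, argtype, optional

    # list(iter_members({'device': 'str', '*force': 'bool'}))
    #     -> [('device', 'str', False), ('force', 'bool', True)]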
+ yield (argname, argentry, optional) def de_camel_case(name): new_name = '' @@ -442,23 +794,36 @@ def type_name(name): return c_list_type(name[0]) return name -enum_types = [] -struct_types = [] -union_types = [] - -def add_struct(definition): +def add_name(name, info, meta, implicit = False): + global all_names + check_name(info, "'%s'" % meta, name) + if name in all_names: + raise QAPIExprError(info, + "%s '%s' is already defined" + % (all_names[name], name)) + if not implicit and name[-4:] == 'Kind': + raise QAPIExprError(info, + "%s '%s' should not end in 'Kind'" + % (meta, name)) + all_names[name] = meta + +def add_struct(definition, info): global struct_types + name = definition['struct'] + add_name(name, info, 'struct') struct_types.append(definition) def find_struct(name): global struct_types for struct in struct_types: - if struct['type'] == name: + if struct['struct'] == name: return struct return None -def add_union(definition): +def add_union(definition, info): global union_types + name = definition['union'] + add_name(name, info, 'union') union_types.append(definition) def find_union(name): @@ -468,8 +833,9 @@ def find_union(name): return union return None -def add_enum(name, enum_values = None): +def add_enum(name, info, enum_values = None, implicit = False): global enum_types + add_name(name, info, 'enum', implicit) enum_types.append({"enum_name": name, "enum_values": enum_values}) def find_enum(name): @@ -511,7 +877,7 @@ def c_type(name, is_param=False): return name elif name == None or len(name) == 0: return 'void' - elif name == name.upper(): + elif name in events: return '%sEvent *%s' % (camel_case(name), eatspace) else: return '%s *%s' % (name, eatspace) diff --git a/scripts/qemu-gdb.py b/scripts/qemu-gdb.py index 8a0f30534f..6c7f4fbe53 100644 --- a/scripts/qemu-gdb.py +++ b/scripts/qemu-gdb.py @@ -22,12 +22,86 @@ def isnull(ptr): def int128(p): return long(p['lo']) + (long(p['hi']) << 64) +def get_fs_base(): + '''Fetch %fs base value using arch_prctl(ARCH_GET_FS)''' + # %rsp - 120 is scratch space according to the SystemV ABI + old = gdb.parse_and_eval('*(uint64_t*)($rsp - 120)') + gdb.execute('call arch_prctl(0x1003, $rsp - 120)', False, True) + fs_base = gdb.parse_and_eval('*(uint64_t*)($rsp - 120)') + gdb.execute('set *(uint64_t*)($rsp - 120) = %s' % old, False, True) + return fs_base + +def get_glibc_pointer_guard(): + '''Fetch glibc pointer guard value''' + fs_base = get_fs_base() + return gdb.parse_and_eval('*(uint64_t*)((uint64_t)%s + 0x30)' % fs_base) + +def glibc_ptr_demangle(val, pointer_guard): + '''Undo effect of glibc's PTR_MANGLE()''' + return gdb.parse_and_eval('(((uint64_t)%s >> 0x11) | ((uint64_t)%s << (64 - 0x11))) ^ (uint64_t)%s' % (val, val, pointer_guard)) + +def bt_jmpbuf(jmpbuf): + '''Backtrace a jmpbuf''' + JB_RBX = 0 + JB_RBP = 1 + JB_R12 = 2 + JB_R13 = 3 + JB_R14 = 4 + JB_R15 = 5 + JB_RSP = 6 + JB_PC = 7 + + old_rbx = gdb.parse_and_eval('(uint64_t)$rbx') + old_rbp = gdb.parse_and_eval('(uint64_t)$rbp') + old_rsp = gdb.parse_and_eval('(uint64_t)$rsp') + old_r12 = gdb.parse_and_eval('(uint64_t)$r12') + old_r13 = gdb.parse_and_eval('(uint64_t)$r13') + old_r14 = gdb.parse_and_eval('(uint64_t)$r14') + old_r15 = gdb.parse_and_eval('(uint64_t)$r15') + old_rip = gdb.parse_and_eval('(uint64_t)$rip') + + pointer_guard = get_glibc_pointer_guard() + gdb.execute('set $rbx = %s' % jmpbuf[JB_RBX]) + gdb.execute('set $rbp = %s' % glibc_ptr_demangle(jmpbuf[JB_RBP], pointer_guard)) + gdb.execute('set $rsp = %s' % glibc_ptr_demangle(jmpbuf[JB_RSP], 
pointer_guard)) + gdb.execute('set $r12 = %s' % jmpbuf[JB_R12]) + gdb.execute('set $r13 = %s' % jmpbuf[JB_R13]) + gdb.execute('set $r14 = %s' % jmpbuf[JB_R14]) + gdb.execute('set $r15 = %s' % jmpbuf[JB_R15]) + gdb.execute('set $rip = %s' % glibc_ptr_demangle(jmpbuf[JB_PC], pointer_guard)) + + gdb.execute('bt') + + gdb.execute('set $rbx = %s' % old_rbx) + gdb.execute('set $rbp = %s' % old_rbp) + gdb.execute('set $rsp = %s' % old_rsp) + gdb.execute('set $r12 = %s' % old_r12) + gdb.execute('set $r13 = %s' % old_r13) + gdb.execute('set $r14 = %s' % old_r14) + gdb.execute('set $r15 = %s' % old_r15) + gdb.execute('set $rip = %s' % old_rip) + class QemuCommand(gdb.Command): '''Prefix for QEMU debug support commands''' def __init__(self): gdb.Command.__init__(self, 'qemu', gdb.COMMAND_DATA, gdb.COMPLETE_NONE, True) +class CoroutineCommand(gdb.Command): + '''Display coroutine backtrace''' + def __init__(self): + gdb.Command.__init__(self, 'qemu coroutine', gdb.COMMAND_DATA, + gdb.COMPLETE_NONE) + + def invoke(self, arg, from_tty): + argv = gdb.string_to_argv(arg) + if len(argv) != 1: + gdb.write('usage: qemu coroutine <coroutine-pointer>\n') + return + + coroutine_pointer = gdb.parse_and_eval(argv[0]).cast(gdb.lookup_type('CoroutineUContext').pointer()) + bt_jmpbuf(coroutine_pointer['env']['__jmpbuf']) + class MtreeCommand(gdb.Command): '''Display the memory tree hierarchy''' def __init__(self): @@ -86,4 +160,5 @@ class MtreeCommand(gdb.Command): subregion = subregion['subregions_link']['tqe_next'] QemuCommand() +CoroutineCommand() MtreeCommand() diff --git a/scripts/qmp/qmp.py b/scripts/qmp/qmp.py index 20b6ec795e..1d38e3e9e7 100644 --- a/scripts/qmp/qmp.py +++ b/scripts/qmp/qmp.py @@ -21,6 +21,9 @@ class QMPConnectError(QMPError): class QMPCapabilitiesError(QMPError): pass +class QMPTimeoutError(QMPError): + pass + class QEMUMonitorProtocol: def __init__(self, address, server=False): """ @@ -72,6 +75,44 @@ class QEMUMonitorProtocol: error = socket.error + def __get_events(self, wait=False): + """ + Check for new events in the stream and cache them in __events. + + @param wait (bool): block until an event is available. + @param wait (float): If wait is a float, treat it as a timeout value. + + @raise QMPTimeoutError: If a timeout float is provided and the timeout + period elapses. + @raise QMPConnectError: If wait is True but no events could be retrieved + or if some other error occurred. + """ + + # Check for new events regardless and pull them into the cache: + self.__sock.setblocking(0) + try: + self.__json_read() + except socket.error, err: + if err[0] == errno.EAGAIN: + # No data available + pass + self.__sock.setblocking(1) + + # Wait for new events, if needed. + # if wait is 0.0, this means "no wait" and is also implicitly false. + if not self.__events and wait: + if isinstance(wait, float): + self.__sock.settimeout(wait) + try: + ret = self.__json_read(only_event=True) + except socket.timeout: + raise QMPTimeoutError("Timeout waiting for event") + except: + raise QMPConnectError("Error while reading from socket") + if ret is None: + raise QMPConnectError("Error while reading from socket") + self.__sock.settimeout(None) + def connect(self, negotiate=True): """ Connect to the QMP Monitor and perform capabilities negotiation. @@ -140,43 +181,37 @@ class QEMUMonitorProtocol: """ Get and delete the first available QMP event. - @param wait: block until an event is available (bool) + @param wait (bool): block until an event is available. 
+ @param wait (float): If wait is a float, treat it as a timeout value. + + @raise QMPTimeoutError: If a timeout float is provided and the timeout + period elapses. + @raise QMPConnectError: If wait is True but no events could be retrieved + or if some other error occurred. + + @return The first available QMP event, or None. """ - self.__sock.setblocking(0) - try: - self.__json_read() - except socket.error, err: - if err[0] == errno.EAGAIN: - # No data available - pass - self.__sock.setblocking(1) - if not self.__events and wait: - self.__json_read(only_event=True) - event = self.__events[0] - del self.__events[0] - return event + self.__get_events(wait) + + if self.__events: + return self.__events.pop(0) + return None def get_events(self, wait=False): """ Get a list of available QMP events. - @param wait: block until an event is available (bool) - """ - self.__sock.setblocking(0) - try: - self.__json_read() - except socket.error, err: - if err[0] == errno.EAGAIN: - # No data available - pass - self.__sock.setblocking(1) - if not self.__events and wait: - ret = self.__json_read(only_event=True) - if ret == None: - # We are in blocking mode, if don't get anything, something - # went wrong - raise QMPConnectError("Error while reading from socket") + @param wait (bool): block until an event is available. + @param wait (float): If wait is a float, treat it as a timeout value. + @raise QMPTimeoutError: If a timeout float is provided and the timeout + period elapses. + @raise QMPConnectError: If wait is True but no events could be retrieved + or if some other error occurred. + + @return The list of available QMP events. + """ + self.__get_events(wait) return self.__events def clear_events(self): diff --git a/scripts/shaderinclude.pl b/scripts/shaderinclude.pl new file mode 100644 index 0000000000..81b5146332 --- /dev/null +++ b/scripts/shaderinclude.pl @@ -0,0 +1,16 @@ +#!/usr/bin/perl +use strict; +use warnings; + +my $file = shift; +open FILE, "<", $file or die "open $file: $!"; +my $name = $file; +$name =~ s|.*/||; +$name =~ s/[-.]/_/g; +print "static GLchar ${name}_src[] =\n"; +while (<FILE>) { + chomp; + printf " \"%s\\n\"\n", $_; +} +print " \"\\n\";\n"; +close FILE; diff --git a/softmmu_template.h b/softmmu_template.h index 0e3dd35fe1..16b08523e9 100644 --- a/softmmu_template.h +++ b/softmmu_template.h @@ -123,7 +123,7 @@ * victim tlb. try to refill from the victim tlb before walking the \ * page table. */ \ int vidx; \ - hwaddr tmpiotlb; \ + CPUIOTLBEntry tmpiotlb; \ CPUTLBEntry tmptlb; \ for (vidx = CPU_VTLB_SIZE-1; vidx >= 0; --vidx) { \ if (env->tlb_v_table[mmu_idx][vidx].ty == (addr & TARGET_PAGE_MASK)) {\ @@ -143,12 +143,13 @@ #ifndef SOFTMMU_CODE_ACCESS static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env, - hwaddr physaddr, + CPUIOTLBEntry *iotlbentry, target_ulong addr, uintptr_t retaddr) { uint64_t val; CPUState *cpu = ENV_GET_CPU(env); + hwaddr physaddr = iotlbentry->addr; MemoryRegion *mr = iotlb_to_region(cpu, physaddr); physaddr = (physaddr & TARGET_PAGE_MASK) + addr; @@ -158,7 +159,8 @@ static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env, } cpu->mem_io_vaddr = addr; - io_mem_read(mr, physaddr, &val, 1 << SHIFT); + memory_region_dispatch_read(mr, physaddr, &val, 1 << SHIFT, + iotlbentry->attrs); return val; } #endif @@ -195,15 +197,15 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx, /* Handle an IO access. 
*/ if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { - hwaddr ioaddr; + CPUIOTLBEntry *iotlbentry; if ((addr & (DATA_SIZE - 1)) != 0) { goto do_unaligned_access; } - ioaddr = env->iotlb[mmu_idx][index]; + iotlbentry = &env->iotlb[mmu_idx][index]; /* ??? Note that the io helpers always read data in the target byte ordering. We should push the LE/BE request down into io. */ - res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr); + res = glue(io_read, SUFFIX)(env, iotlbentry, addr, retaddr); res = TGT_LE(res); return res; } @@ -283,15 +285,15 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx, /* Handle an IO access. */ if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { - hwaddr ioaddr; + CPUIOTLBEntry *iotlbentry; if ((addr & (DATA_SIZE - 1)) != 0) { goto do_unaligned_access; } - ioaddr = env->iotlb[mmu_idx][index]; + iotlbentry = &env->iotlb[mmu_idx][index]; /* ??? Note that the io helpers always read data in the target byte ordering. We should push the LE/BE request down into io. */ - res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr); + res = glue(io_read, SUFFIX)(env, iotlbentry, addr, retaddr); res = TGT_BE(res); return res; } @@ -363,12 +365,13 @@ WORD_TYPE helper_be_lds_name(CPUArchState *env, target_ulong addr, #endif static inline void glue(io_write, SUFFIX)(CPUArchState *env, - hwaddr physaddr, + CPUIOTLBEntry *iotlbentry, DATA_TYPE val, target_ulong addr, uintptr_t retaddr) { CPUState *cpu = ENV_GET_CPU(env); + hwaddr physaddr = iotlbentry->addr; MemoryRegion *mr = iotlb_to_region(cpu, physaddr); physaddr = (physaddr & TARGET_PAGE_MASK) + addr; @@ -378,7 +381,8 @@ static inline void glue(io_write, SUFFIX)(CPUArchState *env, cpu->mem_io_vaddr = addr; cpu->mem_io_pc = retaddr; - io_mem_write(mr, physaddr, val, 1 << SHIFT); + memory_region_dispatch_write(mr, physaddr, val, 1 << SHIFT, + iotlbentry->attrs); } void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val, @@ -408,16 +412,16 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val, /* Handle an IO access. */ if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { - hwaddr ioaddr; + CPUIOTLBEntry *iotlbentry; if ((addr & (DATA_SIZE - 1)) != 0) { goto do_unaligned_access; } - ioaddr = env->iotlb[mmu_idx][index]; + iotlbentry = &env->iotlb[mmu_idx][index]; /* ??? Note that the io helpers always read data in the target byte ordering. We should push the LE/BE request down into io. */ val = TGT_LE(val); - glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr); + glue(io_write, SUFFIX)(env, iotlbentry, val, addr, retaddr); return; } @@ -489,16 +493,16 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val, /* Handle an IO access. */ if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { - hwaddr ioaddr; + CPUIOTLBEntry *iotlbentry; if ((addr & (DATA_SIZE - 1)) != 0) { goto do_unaligned_access; } - ioaddr = env->iotlb[mmu_idx][index]; + iotlbentry = &env->iotlb[mmu_idx][index]; /* ??? Note that the io helpers always read data in the target byte ordering. We should push the LE/BE request down into io. 
*/ val = TGT_BE(val); - glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr); + glue(io_write, SUFFIX)(env, iotlbentry, val, addr, retaddr); return; } diff --git a/target-arm/cpu.c b/target-arm/cpu.c index 986f04cfd6..3ca3fa8d21 100644 --- a/target-arm/cpu.c +++ b/target-arm/cpu.c @@ -111,7 +111,7 @@ static void arm_cpu_reset(CPUState *s) /* Userspace expects access to DC ZVA, CTL_EL0 and the cache ops */ env->cp15.sctlr_el[1] |= SCTLR_UCT | SCTLR_UCI | SCTLR_DZE; /* and to the FP/Neon instructions */ - env->cp15.c1_coproc = deposit64(env->cp15.c1_coproc, 20, 2, 3); + env->cp15.cpacr_el1 = deposit64(env->cp15.cpacr_el1, 20, 2, 3); #else /* Reset into the highest available EL */ if (arm_feature(env, ARM_FEATURE_EL3)) { @@ -126,7 +126,7 @@ static void arm_cpu_reset(CPUState *s) } else { #if defined(CONFIG_USER_ONLY) /* Userspace expects access to cp10 and cp11 for FP/Neon */ - env->cp15.c1_coproc = deposit64(env->cp15.c1_coproc, 20, 4, 0xf); + env->cp15.cpacr_el1 = deposit64(env->cp15.cpacr_el1, 20, 4, 0xf); #endif } @@ -524,9 +524,10 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) unset_feature(env, ARM_FEATURE_EL3); /* Disable the security extension feature bits in the processor feature - * register as well. This is id_pfr1[7:4]. + * registers as well. These are id_pfr1[7:4] and id_aa64pfr0[15:12]. */ cpu->id_pfr1 &= ~0xf0; + cpu->id_aa64pfr0 &= ~0xf000; } register_cp_regs_for_features(cpu); diff --git a/target-arm/cpu.h b/target-arm/cpu.h index 083211ce39..d4a589964e 100644 --- a/target-arm/cpu.h +++ b/target-arm/cpu.h @@ -93,11 +93,6 @@ #define ARM_CPU_VIRQ 2 #define ARM_CPU_VFIQ 3 -typedef void ARMWriteCPFunc(void *opaque, int cp_info, - int srcreg, int operand, uint32_t value); -typedef uint32_t ARMReadCPFunc(void *opaque, int cp_info, - int dstreg, int operand); - struct arm_boot_info; #define NB_MMU_MODES 7 @@ -201,7 +196,7 @@ typedef struct CPUARMState { }; uint64_t sctlr_el[4]; }; - uint64_t c1_coproc; /* Coprocessor access register. */ + uint64_t cpacr_el1; /* Architectural feature access control register */ uint32_t c1_xscaleauxcr; /* XScale auxiliary control register. */ uint64_t sder; /* Secure debug enable register. */ uint32_t nsacr; /* Non-secure access control register. 
*/ @@ -1813,7 +1808,7 @@ static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc, int fpen; if (arm_feature(env, ARM_FEATURE_V6)) { - fpen = extract32(env->cp15.c1_coproc, 20, 2); + fpen = extract32(env->cp15.cpacr_el1, 20, 2); } else { /* CPACR doesn't exist before v6, so VFP is always accessible */ fpen = 3; @@ -1879,15 +1874,6 @@ static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc, #include "exec/exec-all.h" -static inline void cpu_pc_from_tb(CPUARMState *env, TranslationBlock *tb) -{ - if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) { - env->pc = tb->pc; - } else { - env->regs[15] = tb->pc; - } -} - enum { QEMU_PSCI_CONDUIT_DISABLED = 0, QEMU_PSCI_CONDUIT_SMC = 1, diff --git a/target-arm/helper.c b/target-arm/helper.c index d77c6de40c..f8f8d76fc0 100644 --- a/target-arm/helper.c +++ b/target-arm/helper.c @@ -14,7 +14,7 @@ #ifndef CONFIG_USER_ONLY static inline int get_phys_addr(CPUARMState *env, target_ulong address, int access_type, ARMMMUIdx mmu_idx, - hwaddr *phys_ptr, int *prot, + hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot, target_ulong *page_size); /* Definitions for the PMCCNTR and PMCR registers */ @@ -589,7 +589,7 @@ static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri, } value &= mask; } - env->cp15.c1_coproc = value; + env->cp15.cpacr_el1 = value; } static const ARMCPRegInfo v6_cp_reginfo[] = { @@ -615,7 +615,7 @@ static const ARMCPRegInfo v6_cp_reginfo[] = { .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, }, { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, - .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c1_coproc), + .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1), .resetvalue = 0, .writefn = cpacr_write }, REGINFO_SENTINEL }; @@ -816,8 +816,10 @@ static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) * supported if EL2 exists. The bit is UNK/SBZP when * EL2 is unavailable. In QEMU ARMv7, we force it to always zero * when EL2 is unavailable. + * On ARMv8, this bit is always available. */ - if (arm_feature(env, ARM_FEATURE_V7)) { + if (arm_feature(env, ARM_FEATURE_V7) && + !arm_feature(env, ARM_FEATURE_V8)) { valid_mask &= ~SCR_SMD; } } @@ -1466,9 +1468,10 @@ static uint64_t do_ats_write(CPUARMState *env, uint64_t value, int prot; int ret; uint64_t par64; + MemTxAttrs attrs = {}; ret = get_phys_addr(env, value, access_type, mmu_idx, - &phys_addr, &prot, &page_size); + &phys_addr, &attrs, &prot, &page_size); if (extended_addresses_enabled(env)) { /* ret is a DFSR/IFSR value for the long descriptor * translation table format, but with WnR always clear. @@ -1477,6 +1480,9 @@ static uint64_t do_ats_write(CPUARMState *env, uint64_t value, par64 = (1 << 11); /* LPAE bit always set */ if (ret == 0) { par64 |= phys_addr & ~0xfffULL; + if (!attrs.secure) { + par64 |= (1 << 9); /* NS */ + } /* We don't set the ATTR or SH fields in the PAR. 
*/ } else { par64 |= 1; /* F */ @@ -1499,6 +1505,9 @@ static uint64_t do_ats_write(CPUARMState *env, uint64_t value, } else { par64 = phys_addr & 0xfffff000; } + if (!attrs.secure) { + par64 |= (1 << 9); /* NS */ + } } else { par64 = ((ret & (1 << 10)) >> 5) | ((ret & (1 << 12)) >> 6) | ((ret & 0xf) << 1) | 1; @@ -4858,6 +4867,26 @@ static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx) } } +/* Return true if this address translation regime is secure */ +static inline bool regime_is_secure(CPUARMState *env, ARMMMUIdx mmu_idx) +{ + switch (mmu_idx) { + case ARMMMUIdx_S12NSE0: + case ARMMMUIdx_S12NSE1: + case ARMMMUIdx_S1NSE0: + case ARMMMUIdx_S1NSE1: + case ARMMMUIdx_S1E2: + case ARMMMUIdx_S2NS: + return false; + case ARMMMUIdx_S1E3: + case ARMMMUIdx_S1SE0: + case ARMMMUIdx_S1SE1: + return true; + default: + g_assert_not_reached(); + } +} + /* Return the SCTLR value which controls this address translation regime */ static inline uint32_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx) { @@ -5102,6 +5131,29 @@ static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx, return true; } +/* All loads done in the course of a page table walk go through here. + * TODO: rather than ignoring errors from physical memory reads (which + * are external aborts in ARM terminology) we should propagate this + * error out so that we can turn it into a Data Abort if this walk + * was being done for a CPU load/store or an address translation instruction + * (but not if it was for a debug access). + */ +static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure) +{ + MemTxAttrs attrs = {}; + + attrs.secure = is_secure; + return address_space_ldl(cs->as, addr, attrs, NULL); +} + +static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure) +{ + MemTxAttrs attrs = {}; + + attrs.secure = is_secure; + return address_space_ldq(cs->as, addr, attrs, NULL); +} + static int get_phys_addr_v5(CPUARMState *env, uint32_t address, int access_type, ARMMMUIdx mmu_idx, hwaddr *phys_ptr, int *prot, target_ulong *page_size) @@ -5124,7 +5176,7 @@ static int get_phys_addr_v5(CPUARMState *env, uint32_t address, int access_type, code = 5; goto do_fault; } - desc = ldl_phys(cs->as, table); + desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx)); type = (desc & 3); domain = (desc >> 5) & 0x0f; if (regime_el(env, mmu_idx) == 1) { @@ -5160,7 +5212,7 @@ static int get_phys_addr_v5(CPUARMState *env, uint32_t address, int access_type, /* Fine pagetable. */ table = (desc & 0xfffff000) | ((address >> 8) & 0xffc); } - desc = ldl_phys(cs->as, table); + desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx)); switch (desc & 3) { case 0: /* Page translation fault. */ code = 7; @@ -5210,6 +5262,7 @@ do_fault: static int get_phys_addr_v6(CPUARMState *env, uint32_t address, int access_type, ARMMMUIdx mmu_idx, hwaddr *phys_ptr, + MemTxAttrs *attrs, int *prot, target_ulong *page_size) { CPUState *cs = CPU(arm_env_get_cpu(env)); @@ -5224,6 +5277,7 @@ static int get_phys_addr_v6(CPUARMState *env, uint32_t address, int access_type, int domain_prot; hwaddr phys_addr; uint32_t dacr; + bool ns; /* Pagetable walk. */ /* Lookup l1 descriptor. 
*/ @@ -5232,7 +5286,7 @@ static int get_phys_addr_v6(CPUARMState *env, uint32_t address, int access_type, code = 5; goto do_fault; } - desc = ldl_phys(cs->as, table); + desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx)); type = (desc & 3); if (type == 0 || (type == 3 && !arm_feature(env, ARM_FEATURE_PXN))) { /* Section translation fault, or attempt to use the encoding @@ -5273,13 +5327,15 @@ static int get_phys_addr_v6(CPUARMState *env, uint32_t address, int access_type, xn = desc & (1 << 4); pxn = desc & 1; code = 13; + ns = extract32(desc, 19, 1); } else { if (arm_feature(env, ARM_FEATURE_PXN)) { pxn = (desc >> 2) & 1; } + ns = extract32(desc, 3, 1); /* Lookup l2 entry. */ table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc); - desc = ldl_phys(cs->as, table); + desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx)); ap = ((desc >> 4) & 3) | ((desc >> 7) & 4); switch (desc & 3) { case 0: /* Page translation fault. */ @@ -5330,6 +5386,13 @@ static int get_phys_addr_v6(CPUARMState *env, uint32_t address, int access_type, goto do_fault; } } + if (ns) { + /* The NS bit will (as required by the architecture) have no effect if + * the CPU doesn't support TZ or this is a non-secure translation + * regime, because the attribute will already be non-secure. + */ + attrs->secure = false; + } *phys_ptr = phys_addr; return 0; do_fault: @@ -5347,7 +5410,7 @@ typedef enum { static int get_phys_addr_lpae(CPUARMState *env, target_ulong address, int access_type, ARMMMUIdx mmu_idx, - hwaddr *phys_ptr, int *prot, + hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot, target_ulong *page_size_ptr) { CPUState *cs = CPU(arm_env_get_cpu(env)); @@ -5487,13 +5550,20 @@ static int get_phys_addr_lpae(CPUARMState *env, target_ulong address, descaddr = extract64(ttbr, 0, 48); descaddr &= ~((1ULL << (va_size - tsz - (granule_sz * (4 - level)))) - 1); - tableattrs = 0; + /* Secure accesses start with the page table in secure memory and + * can be downgraded to non-secure at any step. Non-secure accesses + * remain non-secure. We implement this by just ORing in the NSTable/NS + * bits at each step. + */ + tableattrs = regime_is_secure(env, mmu_idx) ? 0 : (1 << 4); for (;;) { uint64_t descriptor; + bool nstable; descaddr |= (address >> (granule_sz * (4 - level))) & descmask; descaddr &= ~7ULL; - descriptor = ldq_phys(cs->as, descaddr); + nstable = extract32(tableattrs, 4, 1); + descriptor = arm_ldq_ptw(cs, descaddr, !nstable); if (!(descriptor & 1) || (!(descriptor & 2) && (level == 3))) { /* Invalid, or the Reserved level 3 encoding */ @@ -5528,7 +5598,7 @@ static int get_phys_addr_lpae(CPUARMState *env, target_ulong address, if (extract32(tableattrs, 2, 1)) { attrs &= ~(1 << 4); } - attrs |= extract32(tableattrs, 4, 1) << 3; /* NS */ + attrs |= nstable << 3; /* NS */ break; } /* Here descaddr is the final physical address, and attributes @@ -5552,6 +5622,13 @@ static int get_phys_addr_lpae(CPUARMState *env, target_ulong address, goto do_fault; } + if (ns) { + /* The NS bit will (as required by the architecture) have no effect if + * the CPU doesn't support TZ or this is a non-secure translation + * regime, because the attribute will already be non-secure. + */ + txattrs->secure = false; + } *phys_ptr = descaddr; *page_size_ptr = page_size; return 0; @@ -5635,8 +5712,8 @@ static int get_phys_addr_mpu(CPUARMState *env, uint32_t address, * by doing a translation table walk on MMU based systems or using the * MPU state on MPU based systems. * - * Returns 0 if the translation was successful. 
Otherwise, phys_ptr, - * prot and page_size are not filled in, and the return value provides + * Returns 0 if the translation was successful. Otherwise, phys_ptr, attrs, + * prot and page_size may not be filled in, and the return value provides * information on why the translation aborted, in the format of a * DFSR/IFSR fault register, with the following caveats: * * we honour the short vs long DFSR format differences. @@ -5649,24 +5726,33 @@ static int get_phys_addr_mpu(CPUARMState *env, uint32_t address, * @access_type: 0 for read, 1 for write, 2 for execute * @mmu_idx: MMU index indicating required translation regime * @phys_ptr: set to the physical address corresponding to the virtual address + * @attrs: set to the memory transaction attributes to use * @prot: set to the permissions for the page containing phys_ptr * @page_size: set to the size of the page containing phys_ptr */ static inline int get_phys_addr(CPUARMState *env, target_ulong address, int access_type, ARMMMUIdx mmu_idx, - hwaddr *phys_ptr, int *prot, + hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot, target_ulong *page_size) { if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) { /* TODO: when we support EL2 we should here call ourselves recursively - * to do the stage 1 and then stage 2 translations. The ldl_phys - * calls for stage 1 will also need changing. + * to do the stage 1 and then stage 2 translations. The arm_ld*_ptw + * functions will also need changing to perform ARMMMUIdx_S2NS loads + * rather than direct physical memory loads when appropriate. * For non-EL2 CPUs a stage1+stage2 translation is just stage 1. */ assert(!arm_feature(env, ARM_FEATURE_EL2)); mmu_idx += ARMMMUIdx_S1NSE0; } + /* The page table entries may downgrade secure to non-secure, but + * cannot upgrade an non-secure translation regime's attributes + * to secure. + */ + attrs->secure = regime_is_secure(env, mmu_idx); + attrs->user = regime_is_user(env, mmu_idx); + /* Fast Context Switch Extension. This doesn't exist at all in v8. * In v7 and earlier it affects all stage 1 translations. */ @@ -5695,10 +5781,10 @@ static inline int get_phys_addr(CPUARMState *env, target_ulong address, if (regime_using_lpae_format(env, mmu_idx)) { return get_phys_addr_lpae(env, address, access_type, mmu_idx, phys_ptr, - prot, page_size); + attrs, prot, page_size); } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) { return get_phys_addr_v6(env, address, access_type, mmu_idx, phys_ptr, - prot, page_size); + attrs, prot, page_size); } else { return get_phys_addr_v5(env, address, access_type, mmu_idx, phys_ptr, prot, page_size); @@ -5716,14 +5802,16 @@ int arm_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int ret; uint32_t syn; bool same_el = (arm_current_el(env) != 0); + MemTxAttrs attrs = {}; - ret = get_phys_addr(env, address, access_type, mmu_idx, &phys_addr, &prot, - &page_size); + ret = get_phys_addr(env, address, access_type, mmu_idx, &phys_addr, + &attrs, &prot, &page_size); if (ret == 0) { /* Map a single [sub]page. 
*/ phys_addr &= TARGET_PAGE_MASK; address &= TARGET_PAGE_MASK; - tlb_set_page(cs, address, phys_addr, prot, mmu_idx, page_size); + tlb_set_page_with_attrs(cs, address, phys_addr, attrs, + prot, mmu_idx, page_size); return 0; } @@ -5758,9 +5846,10 @@ hwaddr arm_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) target_ulong page_size; int prot; int ret; + MemTxAttrs attrs = {}; ret = get_phys_addr(env, addr, 0, cpu_mmu_index(env), &phys_addr, - &prot, &page_size); + &attrs, &prot, &page_size); if (ret != 0) { return -1; diff --git a/target-arm/kvm.c b/target-arm/kvm.c index fdd9ba3f1d..16abbf198c 100644 --- a/target-arm/kvm.c +++ b/target-arm/kvm.c @@ -23,6 +23,7 @@ #include "cpu.h" #include "internals.h" #include "hw/arm/arm.h" +#include "exec/memattrs.h" const KVMCapabilityInfo kvm_arch_required_capabilities[] = { KVM_CAP_LAST_INFO @@ -506,8 +507,9 @@ void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run) { } -void kvm_arch_post_run(CPUState *cs, struct kvm_run *run) +MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run) { + return MEMTXATTRS_UNSPECIFIED; } int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run) diff --git a/target-arm/op_helper.c b/target-arm/op_helper.c index 7713022752..3df9c57c91 100644 --- a/target-arm/op_helper.c +++ b/target-arm/op_helper.c @@ -600,15 +600,26 @@ static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp) CPUARMState *env = &cpu->env; uint64_t cr; int pac, hmc, ssc, wt, lbn; - /* TODO: check against CPU security state when we implement TrustZone */ - bool is_secure = false; + /* Note that for watchpoints the check is against the CPU security + * state, not the S/NS attribute on the offending data access. + */ + bool is_secure = arm_is_secure(env); + int access_el = arm_current_el(env); if (is_wp) { - if (!env->cpu_watchpoint[n] - || !(env->cpu_watchpoint[n]->flags & BP_WATCHPOINT_HIT)) { + CPUWatchpoint *wp = env->cpu_watchpoint[n]; + + if (!wp || !(wp->flags & BP_WATCHPOINT_HIT)) { return false; } cr = env->cp15.dbgwcr[n]; + if (wp->hitattrs.user) { + /* The LDRT/STRT/LDT/STT "unprivileged access" instructions should + * match watchpoints as if they were accesses done at EL0, even if + * the CPU is at EL1 or higher. + */ + access_el = 0; + } } else { uint64_t pc = is_a64(env) ? env->pc : env->regs[15]; @@ -649,15 +660,7 @@ static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp) break; } - /* TODO: this is not strictly correct because the LDRT/STRT/LDT/STT - * "unprivileged access" instructions should match watchpoints as if - * they were accesses done at EL0, even if the CPU is at EL1 or higher. - * Implementing this would require reworking the core watchpoint code - * to plumb the mmu_idx through to this point. Luckily Linux does not - * rely on this behaviour currently. - * For breakpoints we do want to use the current CPU state. 
- */ - switch (arm_current_el(env)) { + switch (access_el) { case 3: case 2: if (!hmc) { diff --git a/target-i386/arch_memory_mapping.c b/target-i386/arch_memory_mapping.c index 2d35f63e1e..01563fecce 100644 --- a/target-i386/arch_memory_mapping.c +++ b/target-i386/arch_memory_mapping.c @@ -27,7 +27,7 @@ static void walk_pte(MemoryMappingList *list, AddressSpace *as, for (i = 0; i < 512; i++) { pte_addr = (pte_start_addr + i * 8) & a20_mask; - pte = ldq_phys(as, pte_addr); + pte = address_space_ldq(as, pte_addr, MEMTXATTRS_UNSPECIFIED, NULL); if (!(pte & PG_PRESENT_MASK)) { /* not present */ continue; @@ -57,7 +57,7 @@ static void walk_pte2(MemoryMappingList *list, AddressSpace *as, for (i = 0; i < 1024; i++) { pte_addr = (pte_start_addr + i * 4) & a20_mask; - pte = ldl_phys(as, pte_addr); + pte = address_space_ldl(as, pte_addr, MEMTXATTRS_UNSPECIFIED, NULL); if (!(pte & PG_PRESENT_MASK)) { /* not present */ continue; @@ -89,7 +89,7 @@ static void walk_pde(MemoryMappingList *list, AddressSpace *as, for (i = 0; i < 512; i++) { pde_addr = (pde_start_addr + i * 8) & a20_mask; - pde = ldq_phys(as, pde_addr); + pde = address_space_ldq(as, pde_addr, MEMTXATTRS_UNSPECIFIED, NULL); if (!(pde & PG_PRESENT_MASK)) { /* not present */ continue; @@ -126,7 +126,7 @@ static void walk_pde2(MemoryMappingList *list, AddressSpace *as, for (i = 0; i < 1024; i++) { pde_addr = (pde_start_addr + i * 4) & a20_mask; - pde = ldl_phys(as, pde_addr); + pde = address_space_ldl(as, pde_addr, MEMTXATTRS_UNSPECIFIED, NULL); if (!(pde & PG_PRESENT_MASK)) { /* not present */ continue; @@ -167,7 +167,7 @@ static void walk_pdpe2(MemoryMappingList *list, AddressSpace *as, for (i = 0; i < 4; i++) { pdpe_addr = (pdpe_start_addr + i * 8) & a20_mask; - pdpe = ldq_phys(as, pdpe_addr); + pdpe = address_space_ldq(as, pdpe_addr, MEMTXATTRS_UNSPECIFIED, NULL); if (!(pdpe & PG_PRESENT_MASK)) { /* not present */ continue; @@ -192,7 +192,7 @@ static void walk_pdpe(MemoryMappingList *list, AddressSpace *as, for (i = 0; i < 512; i++) { pdpe_addr = (pdpe_start_addr + i * 8) & a20_mask; - pdpe = ldq_phys(as, pdpe_addr); + pdpe = address_space_ldq(as, pdpe_addr, MEMTXATTRS_UNSPECIFIED, NULL); if (!(pdpe & PG_PRESENT_MASK)) { /* not present */ continue; @@ -228,7 +228,8 @@ static void walk_pml4e(MemoryMappingList *list, AddressSpace *as, for (i = 0; i < 512; i++) { pml4e_addr = (pml4e_start_addr + i * 8) & a20_mask; - pml4e = ldq_phys(as, pml4e_addr); + pml4e = address_space_ldq(as, pml4e_addr, MEMTXATTRS_UNSPECIFIED, + NULL); if (!(pml4e & PG_PRESENT_MASK)) { /* not present */ continue; diff --git a/target-i386/cpu.c b/target-i386/cpu.c index 03b33cf3bd..3305e09413 100644 --- a/target-i386/cpu.c +++ b/target-i386/cpu.c @@ -688,7 +688,6 @@ static X86CPUDefinition builtin_x86_defs[] = { .features[FEAT_1_ECX] = CPUID_EXT_SSE3 | CPUID_EXT_CX16 | CPUID_EXT_POPCNT, .features[FEAT_8000_0001_EDX] = - (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) | CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, .features[FEAT_8000_0001_ECX] = CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | @@ -711,7 +710,6 @@ static X86CPUDefinition builtin_x86_defs[] = { CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 | CPUID_EXT_POPCNT, .features[FEAT_8000_0001_EDX] = - (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) | CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT | CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP, @@ -769,7 +767,6 @@ static X86CPUDefinition builtin_x86_defs[] = { CPUID_EXT_SSE3 | CPUID_EXT_CX16, /* 
Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */ .features[FEAT_8000_0001_EDX] = - (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) | CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC, CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A, @@ -805,8 +802,6 @@ static X86CPUDefinition builtin_x86_defs[] = { CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36, .features[FEAT_1_ECX] = CPUID_EXT_SSE3, - .features[FEAT_8000_0001_EDX] = - PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES, .features[FEAT_8000_0001_ECX] = 0, .xlevel = 0x80000008, @@ -888,7 +883,6 @@ static X86CPUDefinition builtin_x86_defs[] = { PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | CPUID_MCA, .features[FEAT_8000_0001_EDX] = - (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) | CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT, .xlevel = 0x80000008, }, @@ -912,7 +906,6 @@ static X86CPUDefinition builtin_x86_defs[] = { CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_MOVBE, .features[FEAT_8000_0001_EDX] = - (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) | CPUID_EXT2_NX, .features[FEAT_8000_0001_ECX] = CPUID_EXT3_LAHF_LM, @@ -1618,38 +1611,6 @@ static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v, env->cpuid_version |= value & 0xf; } -static void x86_cpuid_get_level(Object *obj, Visitor *v, void *opaque, - const char *name, Error **errp) -{ - X86CPU *cpu = X86_CPU(obj); - - visit_type_uint32(v, &cpu->env.cpuid_level, name, errp); -} - -static void x86_cpuid_set_level(Object *obj, Visitor *v, void *opaque, - const char *name, Error **errp) -{ - X86CPU *cpu = X86_CPU(obj); - - visit_type_uint32(v, &cpu->env.cpuid_level, name, errp); -} - -static void x86_cpuid_get_xlevel(Object *obj, Visitor *v, void *opaque, - const char *name, Error **errp) -{ - X86CPU *cpu = X86_CPU(obj); - - visit_type_uint32(v, &cpu->env.cpuid_xlevel, name, errp); -} - -static void x86_cpuid_set_xlevel(Object *obj, Visitor *v, void *opaque, - const char *name, Error **errp) -{ - X86CPU *cpu = X86_CPU(obj); - - visit_type_uint32(v, &cpu->env.cpuid_xlevel, name, errp); -} - static char *x86_cpuid_get_vendor(Object *obj, Error **errp) { X86CPU *cpu = X86_CPU(obj); @@ -2109,7 +2070,7 @@ static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp) object_property_set_int(OBJECT(cpu), def->model, "model", errp); object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp); object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", errp); - env->cpuid_xlevel2 = def->xlevel2; + object_property_set_int(OBJECT(cpu), def->xlevel2, "xlevel2", errp); cpu->cache_info_passthrough = def->cache_info_passthrough; object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp); for (w = 0; w < FEATURE_WORDS; w++) { @@ -2900,12 +2861,6 @@ static void x86_cpu_initfn(Object *obj) object_property_add(obj, "stepping", "int", x86_cpuid_version_get_stepping, x86_cpuid_version_set_stepping, NULL, NULL, NULL); - object_property_add(obj, "level", "int", - x86_cpuid_get_level, - x86_cpuid_set_level, NULL, NULL, NULL); - object_property_add(obj, "xlevel", "int", - x86_cpuid_get_xlevel, - x86_cpuid_set_xlevel, NULL, NULL, NULL); object_property_add_str(obj, "vendor", x86_cpuid_get_vendor, x86_cpuid_set_vendor, NULL); @@ -2998,6 +2953,9 @@ static Property x86_cpu_properties[] = { DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, false), DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false), DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true), + 
DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, 0), + DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, 0), + DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, 0), DEFINE_PROP_END_OF_LIST() }; diff --git a/target-i386/kvm.c b/target-i386/kvm.c index 41d09e52de..a26d25a81f 100644 --- a/target-i386/kvm.c +++ b/target-i386/kvm.c @@ -37,6 +37,7 @@ #include "hw/pci/pci.h" #include "migration/migration.h" #include "qapi/qmp/qerror.h" +#include "exec/memattrs.h" //#define DEBUG_KVM @@ -2246,7 +2247,7 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run) } } -void kvm_arch_post_run(CPUState *cpu, struct kvm_run *run) +MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run) { X86CPU *x86_cpu = X86_CPU(cpu); CPUX86State *env = &x86_cpu->env; @@ -2258,6 +2259,7 @@ void kvm_arch_post_run(CPUState *cpu, struct kvm_run *run) } cpu_set_apic_tpr(x86_cpu->apic_state, run->cr8); cpu_set_apic_base(x86_cpu->apic_state, run->apic_base); + return MEMTXATTRS_UNSPECIFIED; } int kvm_arch_process_async_events(CPUState *cs) diff --git a/target-microblaze/cpu.h b/target-microblaze/cpu.h index 7d06227730..4ea04acc4d 100644 --- a/target-microblaze/cpu.h +++ b/target-microblaze/cpu.h @@ -36,12 +36,11 @@ typedef struct CPUMBState CPUMBState; #define ELF_MACHINE EM_MICROBLAZE -#define EXCP_NMI 1 -#define EXCP_MMU 2 -#define EXCP_IRQ 3 -#define EXCP_BREAK 4 -#define EXCP_HW_BREAK 5 -#define EXCP_HW_EXCP 6 +#define EXCP_MMU 1 +#define EXCP_IRQ 2 +#define EXCP_BREAK 3 +#define EXCP_HW_BREAK 4 +#define EXCP_HW_EXCP 5 /* MicroBlaze-specific interrupt pending bits. */ #define CPU_INTERRUPT_NMI CPU_INTERRUPT_TGT_EXT_3 @@ -284,12 +283,6 @@ int cpu_mb_exec(CPUMBState *s); int cpu_mb_signal_handler(int host_signum, void *pinfo, void *puc); -enum { - CC_OP_DYNAMIC, /* Use env->cc_op */ - CC_OP_FLAGS, - CC_OP_CMP, -}; - /* FIXME: MB uses variable pages down to 1K but linux only uses 4k. 
*/ #define TARGET_PAGE_BITS 12 #define MMAP_SHIFT TARGET_PAGE_BITS @@ -326,18 +319,8 @@ static inline int cpu_mmu_index (CPUMBState *env) int mb_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw, int mmu_idx); -static inline int cpu_interrupts_enabled(CPUMBState *env) -{ - return env->sregs[SR_MSR] & MSR_IE; -} - #include "exec/cpu-all.h" -static inline target_ulong cpu_get_pc(CPUMBState *env) -{ - return env->sregs[SR_PC]; -} - static inline void cpu_get_tb_cpu_state(CPUMBState *env, target_ulong *pc, target_ulong *cs_base, int *flags) { diff --git a/target-microblaze/mmu.h b/target-microblaze/mmu.h index 3f74dda0b0..3b7a9983d5 100644 --- a/target-microblaze/mmu.h +++ b/target-microblaze/mmu.h @@ -82,7 +82,6 @@ struct microblaze_mmu_lookup } err; }; -void mmu_flip_um(CPUMBState *env, unsigned int um); unsigned int mmu_translate(struct microblaze_mmu *mmu, struct microblaze_mmu_lookup *lu, target_ulong vaddr, int rw, int mmu_idx); diff --git a/target-mips/kvm.c b/target-mips/kvm.c index 4d1f7ead81..59eb11105a 100644 --- a/target-mips/kvm.c +++ b/target-mips/kvm.c @@ -23,6 +23,7 @@ #include "cpu.h" #include "sysemu/cpus.h" #include "kvm_mips.h" +#include "exec/memattrs.h" #define DEBUG_KVM 0 @@ -110,9 +111,10 @@ void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run) } } -void kvm_arch_post_run(CPUState *cs, struct kvm_run *run) +MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run) { DPRINTF("%s\n", __func__); + return MEMTXATTRS_UNSPECIFIED; } int kvm_arch_process_async_events(CPUState *cs) diff --git a/target-openrisc/cpu.h b/target-openrisc/cpu.h index b25324bc89..9e23cd0500 100644 --- a/target-openrisc/cpu.h +++ b/target-openrisc/cpu.h @@ -415,9 +415,4 @@ static inline int cpu_mmu_index(CPUOpenRISCState *env) #include "exec/exec-all.h" -static inline target_ulong cpu_get_pc(CPUOpenRISCState *env) -{ - return env->pc; -} - #endif /* CPU_OPENRISC_H */ diff --git a/target-ppc/cpu.h b/target-ppc/cpu.h index f15815f11b..c05c503305 100644 --- a/target-ppc/cpu.h +++ b/target-ppc/cpu.h @@ -2251,8 +2251,8 @@ static inline ppcmas_tlb_t *booke206_get_tlbm(CPUPPCState *env, const int tlbn, { int r; uint32_t ways = booke206_tlb_ways(env, tlbn); - int ways_bits = ffs(ways) - 1; - int tlb_bits = ffs(booke206_tlb_size(env, tlbn)) - 1; + int ways_bits = ctz32(ways); + int tlb_bits = ctz32(booke206_tlb_size(env, tlbn)); int i; way &= ways - 1; diff --git a/target-ppc/kvm.c b/target-ppc/kvm.c index 12328a4027..1da9ea81e5 100644 --- a/target-ppc/kvm.c +++ b/target-ppc/kvm.c @@ -39,6 +39,7 @@ #include "sysemu/watchdog.h" #include "trace.h" #include "exec/gdbstub.h" +#include "exec/memattrs.h" //#define DEBUG_KVM @@ -1270,8 +1271,9 @@ void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run) * anyways, so we will get a chance to deliver the rest. 
*/ } -void kvm_arch_post_run(CPUState *cpu, struct kvm_run *run) +MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run) { + return MEMTXATTRS_UNSPECIFIED; } int kvm_arch_process_async_events(CPUState *cs) diff --git a/target-s390x/cpu-qom.h b/target-s390x/cpu-qom.h index 8b376df1b7..936ae21e06 100644 --- a/target-s390x/cpu-qom.h +++ b/target-s390x/cpu-qom.h @@ -66,6 +66,9 @@ typedef struct S390CPU { /*< public >*/ CPUS390XState env; + /* needed for live migration */ + void *irqstate; + uint32_t irqstate_saved_size; } S390CPU; static inline S390CPU *s390_env_get_cpu(CPUS390XState *env) diff --git a/target-s390x/cpu.c b/target-s390x/cpu.c index e0537fa222..d2f9836e86 100644 --- a/target-s390x/cpu.c +++ b/target-s390x/cpu.c @@ -213,6 +213,7 @@ static void s390_cpu_finalize(Object *obj) S390CPU *cpu = S390_CPU(obj); qemu_unregister_reset(s390_cpu_machine_reset_cb, cpu); + g_free(cpu->irqstate); #endif } diff --git a/target-s390x/cpu.h b/target-s390x/cpu.h index 8135dda318..c55721114e 100644 --- a/target-s390x/cpu.h +++ b/target-s390x/cpu.h @@ -356,7 +356,8 @@ int s390_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw, #ifndef CONFIG_USER_ONLY void do_restart_interrupt(CPUS390XState *env); -static inline hwaddr decode_basedisp_s(CPUS390XState *env, uint32_t ipb) +static inline hwaddr decode_basedisp_s(CPUS390XState *env, uint32_t ipb, + uint8_t *ar) { hwaddr addr = 0; uint8_t reg; @@ -366,6 +367,9 @@ static inline hwaddr decode_basedisp_s(CPUS390XState *env, uint32_t ipb) addr = env->regs[reg]; } addr += (ipb >> 16) & 0xfff; + if (ar) { + *ar = reg; + } return addr; } @@ -401,6 +405,8 @@ void kvm_s390_vcpu_interrupt(S390CPU *cpu, struct kvm_s390_irq *irq); void kvm_s390_floating_interrupt(struct kvm_s390_irq *irq); int kvm_s390_inject_flic(struct kvm_s390_irq *irq); void kvm_s390_access_exception(S390CPU *cpu, uint16_t code, uint64_t te_code); +int kvm_s390_mem_op(S390CPU *cpu, vaddr addr, uint8_t ar, void *hostbuf, + int len, bool is_write); int kvm_s390_get_clock(uint8_t *tod_high, uint64_t *tod_clock); int kvm_s390_set_clock(uint8_t *tod_high, uint64_t *tod_clock); #else @@ -418,6 +424,11 @@ static inline int kvm_s390_set_clock(uint8_t *tod_high, uint64_t *tod_low) { return -ENOSYS; } +static inline int kvm_s390_mem_op(S390CPU *cpu, vaddr addr, uint8_t ar, + void *hostbuf, int len, bool is_write) +{ + return -ENOSYS; +} static inline void kvm_s390_access_exception(S390CPU *cpu, uint16_t code, uint64_t te_code) { @@ -865,9 +876,13 @@ struct sysib_322 { uint8_t name[8]; uint32_t caf; uint8_t cpi[16]; - uint8_t res3[24]; + uint8_t res5[3]; + uint8_t ext_name_encoding; + uint32_t res3; + uint8_t uuid[16]; } vm[8]; - uint8_t res4[3552]; + uint8_t res4[1504]; + uint8_t ext_names[8][256]; }; /* MMU defines */ @@ -952,15 +967,15 @@ int sclp_service_call(CPUS390XState *env, uint64_t sccb, uint32_t code); uint32_t calc_cc(CPUS390XState *env, uint32_t cc_op, uint64_t src, uint64_t dst, uint64_t vr); -int s390_cpu_virt_mem_rw(S390CPU *cpu, vaddr laddr, void *hostbuf, int len, - bool is_write); +int s390_cpu_virt_mem_rw(S390CPU *cpu, vaddr laddr, uint8_t ar, void *hostbuf, + int len, bool is_write); -#define s390_cpu_virt_mem_read(cpu, laddr, dest, len) \ - s390_cpu_virt_mem_rw(cpu, laddr, dest, len, false) -#define s390_cpu_virt_mem_write(cpu, laddr, dest, len) \ - s390_cpu_virt_mem_rw(cpu, laddr, dest, len, true) -#define s390_cpu_virt_mem_check_write(cpu, laddr, len) \ - s390_cpu_virt_mem_rw(cpu, laddr, NULL, len, true) +#define s390_cpu_virt_mem_read(cpu, laddr, ar, dest, len) \ + 
s390_cpu_virt_mem_rw(cpu, laddr, ar, dest, len, false) +#define s390_cpu_virt_mem_write(cpu, laddr, ar, dest, len) \ + s390_cpu_virt_mem_rw(cpu, laddr, ar, dest, len, true) +#define s390_cpu_virt_mem_check_write(cpu, laddr, ar, len) \ + s390_cpu_virt_mem_rw(cpu, laddr, ar, NULL, len, true) /* The value of the TOD clock for 1.1.1970. */ #define TOD_UNIX_EPOCH 0x7d91048bca000000ULL @@ -1064,6 +1079,8 @@ void kvm_s390_clear_cmma_callback(void *opaque); int kvm_s390_set_cpu_state(S390CPU *cpu, uint8_t cpu_state); void kvm_s390_reset_vcpu(S390CPU *cpu); int kvm_s390_set_mem_limit(KVMState *s, uint64_t new_limit, uint64_t *hw_limit); +void kvm_s390_vcpu_interrupt_pre_save(S390CPU *cpu); +int kvm_s390_vcpu_interrupt_post_load(S390CPU *cpu); #else static inline void kvm_s390_io_interrupt(uint16_t subchannel_id, uint16_t subchannel_nr, @@ -1106,6 +1123,13 @@ static inline int kvm_s390_set_mem_limit(KVMState *s, uint64_t new_limit, { return 0; } +static inline void kvm_s390_vcpu_interrupt_pre_save(S390CPU *cpu) +{ +} +static inline int kvm_s390_vcpu_interrupt_post_load(S390CPU *cpu) +{ + return 0; +} #endif static inline int s390_set_memory_limit(uint64_t new_limit, uint64_t *hw_limit) diff --git a/target-s390x/helper.c b/target-s390x/helper.c index f1060c2bce..041c9c7429 100644 --- a/target-s390x/helper.c +++ b/target-s390x/helper.c @@ -162,7 +162,7 @@ hwaddr s390_cpu_get_phys_page_debug(CPUState *cs, vaddr vaddr) vaddr &= 0x7fffffff; } - mmu_translate(env, vaddr, 2, asc, &raddr, &prot, false); + mmu_translate(env, vaddr, MMU_INST_FETCH, asc, &raddr, &prot, false); return raddr; } diff --git a/target-s390x/ioinst.c b/target-s390x/ioinst.c index b00a00ca2b..e220cea8ab 100644 --- a/target-s390x/ioinst.c +++ b/target-s390x/ioinst.c @@ -149,13 +149,14 @@ void ioinst_handle_msch(S390CPU *cpu, uint64_t reg1, uint32_t ipb) int ret = -ENODEV; int cc; CPUS390XState *env = &cpu->env; + uint8_t ar; - addr = decode_basedisp_s(env, ipb); + addr = decode_basedisp_s(env, ipb, &ar); if (addr & 3) { program_interrupt(env, PGM_SPECIFICATION, 2); return; } - if (s390_cpu_virt_mem_read(cpu, addr, &schib, sizeof(schib))) { + if (s390_cpu_virt_mem_read(cpu, addr, ar, &schib, sizeof(schib))) { return; } if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid) || @@ -215,13 +216,14 @@ void ioinst_handle_ssch(S390CPU *cpu, uint64_t reg1, uint32_t ipb) int ret = -ENODEV; int cc; CPUS390XState *env = &cpu->env; + uint8_t ar; - addr = decode_basedisp_s(env, ipb); + addr = decode_basedisp_s(env, ipb, &ar); if (addr & 3) { program_interrupt(env, PGM_SPECIFICATION, 2); return; } - if (s390_cpu_virt_mem_read(cpu, addr, &orig_orb, sizeof(orb))) { + if (s390_cpu_virt_mem_read(cpu, addr, ar, &orig_orb, sizeof(orb))) { return; } copy_orb_from_guest(&orb, &orig_orb); @@ -258,8 +260,9 @@ void ioinst_handle_stcrw(S390CPU *cpu, uint32_t ipb) uint64_t addr; int cc; CPUS390XState *env = &cpu->env; + uint8_t ar; - addr = decode_basedisp_s(env, ipb); + addr = decode_basedisp_s(env, ipb, &ar); if (addr & 3) { program_interrupt(env, PGM_SPECIFICATION, 2); return; @@ -268,7 +271,7 @@ void ioinst_handle_stcrw(S390CPU *cpu, uint32_t ipb) cc = css_do_stcrw(&crw); /* 0 - crw stored, 1 - zeroes stored */ - if (s390_cpu_virt_mem_write(cpu, addr, &crw, sizeof(crw)) == 0) { + if (s390_cpu_virt_mem_write(cpu, addr, ar, &crw, sizeof(crw)) == 0) { setcc(cpu, cc); } else if (cc == 0) { /* Write failed: requeue CRW since STCRW is a suppressing instruction */ @@ -284,8 +287,9 @@ void ioinst_handle_stsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb) int 
cc; SCHIB schib; CPUS390XState *env = &cpu->env; + uint8_t ar; - addr = decode_basedisp_s(env, ipb); + addr = decode_basedisp_s(env, ipb, &ar); if (addr & 3) { program_interrupt(env, PGM_SPECIFICATION, 2); return; @@ -297,7 +301,7 @@ void ioinst_handle_stsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb) * we check whether the memory area is writeable (injecting the * access execption if it is not) first. */ - if (!s390_cpu_virt_mem_check_write(cpu, addr, sizeof(schib))) { + if (!s390_cpu_virt_mem_check_write(cpu, addr, ar, sizeof(schib))) { program_interrupt(env, PGM_OPERAND, 2); } return; @@ -322,12 +326,13 @@ void ioinst_handle_stsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb) } } if (cc != 3) { - if (s390_cpu_virt_mem_write(cpu, addr, &schib, sizeof(schib)) != 0) { + if (s390_cpu_virt_mem_write(cpu, addr, ar, &schib, + sizeof(schib)) != 0) { return; } } else { /* Access exceptions have a higher priority than cc3 */ - if (s390_cpu_virt_mem_check_write(cpu, addr, sizeof(schib)) != 0) { + if (s390_cpu_virt_mem_check_write(cpu, addr, ar, sizeof(schib)) != 0) { return; } } @@ -342,13 +347,14 @@ int ioinst_handle_tsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb) IRB irb; uint64_t addr; int cc, irb_len; + uint8_t ar; if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) { program_interrupt(env, PGM_OPERAND, 2); return -EIO; } trace_ioinst_sch_id("tsch", cssid, ssid, schid); - addr = decode_basedisp_s(env, ipb); + addr = decode_basedisp_s(env, ipb, &ar); if (addr & 3) { program_interrupt(env, PGM_SPECIFICATION, 2); return -EIO; @@ -362,14 +368,14 @@ int ioinst_handle_tsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb) } /* 0 - status pending, 1 - not status pending, 3 - not operational */ if (cc != 3) { - if (s390_cpu_virt_mem_write(cpu, addr, &irb, irb_len) != 0) { + if (s390_cpu_virt_mem_write(cpu, addr, ar, &irb, irb_len) != 0) { return -EFAULT; } css_do_tsch_update_subch(sch); } else { irb_len = sizeof(irb) - sizeof(irb.emw); /* Access exceptions have a higher priority than cc3 */ - if (s390_cpu_virt_mem_check_write(cpu, addr, irb_len) != 0) { + if (s390_cpu_virt_mem_check_write(cpu, addr, ar, irb_len) != 0) { return -EFAULT; } } @@ -645,7 +651,7 @@ void ioinst_handle_chsc(S390CPU *cpu, uint32_t ipb) * present CHSC sub-handlers ... if we ever need more, we should take * care of req->len here first. */ - if (s390_cpu_virt_mem_read(cpu, addr, buf, sizeof(ChscReq))) { + if (s390_cpu_virt_mem_read(cpu, addr, reg, buf, sizeof(ChscReq))) { return; } req = (ChscReq *)buf; @@ -677,7 +683,8 @@ void ioinst_handle_chsc(S390CPU *cpu, uint32_t ipb) break; } - if (!s390_cpu_virt_mem_write(cpu, addr + len, res, be16_to_cpu(res->len))) { + if (!s390_cpu_virt_mem_write(cpu, addr + len, reg, res, + be16_to_cpu(res->len))) { setcc(cpu, 0); /* Command execution complete */ } } @@ -690,9 +697,10 @@ int ioinst_handle_tpi(S390CPU *cpu, uint32_t ipb) IOIntCode int_code; hwaddr len; int ret; + uint8_t ar; trace_ioinst("tpi"); - addr = decode_basedisp_s(env, ipb); + addr = decode_basedisp_s(env, ipb, &ar); if (addr & 3) { program_interrupt(env, PGM_SPECIFICATION, 2); return -EIO; @@ -702,7 +710,7 @@ int ioinst_handle_tpi(S390CPU *cpu, uint32_t ipb) len = lowcore ? 8 /* two words */ : 12 /* three words */; ret = css_do_tpi(&int_code, lowcore); if (ret == 1) { - s390_cpu_virt_mem_write(cpu, lowcore ? 184 : addr, &int_code, len); + s390_cpu_virt_mem_write(cpu, lowcore ? 
184 : addr, ar, &int_code, len); } return ret; } diff --git a/target-s390x/kvm.c b/target-s390x/kvm.c index b48c643b36..ea18015793 100644 --- a/target-s390x/kvm.c +++ b/target-s390x/kvm.c @@ -44,6 +44,8 @@ #include "hw/s390x/s390-pci-inst.h" #include "hw/s390x/s390-pci-bus.h" #include "hw/s390x/ipl.h" +#include "hw/s390x/ebcdic.h" +#include "exec/memattrs.h" /* #define DEBUG_KVM */ @@ -108,6 +110,14 @@ #define ICPT_CPU_STOP 0x28 #define ICPT_IO 0x40 +#define NR_LOCAL_IRQS 32 +/* + * Needs to be big enough to contain max_cpus emergency signals + * and in addition NR_LOCAL_IRQS interrupts + */ +#define VCPU_IRQ_BUF_SIZE (sizeof(struct kvm_s390_irq) * \ + (max_cpus + NR_LOCAL_IRQS)) + static CPUWatchpoint hw_watchpoint; /* * We don't use a list because this structure is also used to transmit the @@ -122,6 +132,8 @@ const KVMCapabilityInfo kvm_arch_required_capabilities[] = { static int cap_sync_regs; static int cap_async_pf; +static int cap_mem_op; +static int cap_s390_irq; static void *legacy_s390_alloc(size_t size, uint64_t *align); @@ -246,6 +258,8 @@ int kvm_arch_init(MachineState *ms, KVMState *s) { cap_sync_regs = kvm_check_extension(s, KVM_CAP_SYNC_REGS); cap_async_pf = kvm_check_extension(s, KVM_CAP_ASYNC_PF); + cap_mem_op = kvm_check_extension(s, KVM_CAP_S390_MEM_OP); + cap_s390_irq = kvm_check_extension(s, KVM_CAP_S390_INJECT_IRQ); kvm_s390_enable_cmma(s); @@ -255,6 +269,7 @@ int kvm_arch_init(MachineState *ms, KVMState *s) } kvm_vm_enable_cap(s, KVM_CAP_S390_USER_SIGP, 0); + kvm_vm_enable_cap(s, KVM_CAP_S390_USER_STSI, 0); return 0; } @@ -268,6 +283,7 @@ int kvm_arch_init_vcpu(CPUState *cs) { S390CPU *cpu = S390_CPU(cs); kvm_s390_set_cpu_state(cpu, cpu->env.cpu_state); + cpu->irqstate = g_malloc0(VCPU_IRQ_BUF_SIZE); return 0; } @@ -548,6 +564,46 @@ int kvm_s390_set_clock(uint8_t *tod_high, uint64_t *tod_low) return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr); } +/** + * kvm_s390_mem_op: + * @addr: the logical start address in guest memory + * @ar: the access register number + * @hostbuf: buffer in host memory. NULL = do only checks w/o copying + * @len: length that should be transfered + * @is_write: true = write, false = read + * Returns: 0 on success, non-zero if an exception or error occured + * + * Use KVM ioctl to read/write from/to guest memory. An access exception + * is injected into the vCPU in case of translation errors. + */ +int kvm_s390_mem_op(S390CPU *cpu, vaddr addr, uint8_t ar, void *hostbuf, + int len, bool is_write) +{ + struct kvm_s390_mem_op mem_op = { + .gaddr = addr, + .flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION, + .size = len, + .op = is_write ? 
KVM_S390_MEMOP_LOGICAL_WRITE + : KVM_S390_MEMOP_LOGICAL_READ, + .buf = (uint64_t)hostbuf, + .ar = ar, + }; + int ret; + + if (!cap_mem_op) { + return -ENOSYS; + } + if (!hostbuf) { + mem_op.flags |= KVM_S390_MEMOP_F_CHECK_ONLY; + } + + ret = kvm_vcpu_ioctl(CPU(cpu), KVM_S390_MEM_OP, &mem_op); + if (ret < 0) { + error_printf("KVM_S390_MEM_OP failed: %s\n", strerror(-ret)); + } + return ret; +} + /* * Legacy layout for s390: * Older S390 KVM requires the topmost vma of the RAM to be @@ -725,8 +781,9 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run) { } -void kvm_arch_post_run(CPUState *cpu, struct kvm_run *run) +MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run) { + return MEMTXATTRS_UNSPECIFIED; } int kvm_arch_process_async_events(CPUState *cs) @@ -783,10 +840,9 @@ static int s390_kvm_irq_to_interrupt(struct kvm_s390_irq *irq, return r; } -void kvm_s390_vcpu_interrupt(S390CPU *cpu, struct kvm_s390_irq *irq) +static void inject_vcpu_irq_legacy(CPUState *cs, struct kvm_s390_irq *irq) { struct kvm_s390_interrupt kvmint = {}; - CPUState *cs = CPU(cpu); int r; r = s390_kvm_irq_to_interrupt(irq, &kvmint); @@ -802,6 +858,23 @@ void kvm_s390_vcpu_interrupt(S390CPU *cpu, struct kvm_s390_irq *irq) } } +void kvm_s390_vcpu_interrupt(S390CPU *cpu, struct kvm_s390_irq *irq) +{ + CPUState *cs = CPU(cpu); + int r; + + if (cap_s390_irq) { + r = kvm_vcpu_ioctl(cs, KVM_S390_IRQ, irq); + if (!r) { + return; + } + error_report("KVM failed to inject interrupt %llx", irq->type); + exit(1); + } + + inject_vcpu_irq_legacy(cs, irq); +} + static void __kvm_s390_floating_interrupt(struct kvm_s390_irq *irq) { struct kvm_s390_interrupt kvmint = {}; @@ -975,7 +1048,8 @@ static int handle_b2(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1) return rc; } -static uint64_t get_base_disp_rxy(S390CPU *cpu, struct kvm_run *run) +static uint64_t get_base_disp_rxy(S390CPU *cpu, struct kvm_run *run, + uint8_t *ar) { CPUS390XState *env = &cpu->env; uint32_t x2 = (run->s390_sieic.ipa & 0x000f); @@ -986,12 +1060,16 @@ static uint64_t get_base_disp_rxy(S390CPU *cpu, struct kvm_run *run) if (disp2 & 0x80000) { disp2 += 0xfff00000; } + if (ar) { + *ar = base2; + } return (base2 ? env->regs[base2] : 0) + (x2 ? env->regs[x2] : 0) + (long)(int)disp2; } -static uint64_t get_base_disp_rsy(S390CPU *cpu, struct kvm_run *run) +static uint64_t get_base_disp_rsy(S390CPU *cpu, struct kvm_run *run, + uint8_t *ar) { CPUS390XState *env = &cpu->env; uint32_t base2 = run->s390_sieic.ipb >> 28; @@ -1001,6 +1079,9 @@ static uint64_t get_base_disp_rsy(S390CPU *cpu, struct kvm_run *run) if (disp2 & 0x80000) { disp2 += 0xfff00000; } + if (ar) { + *ar = base2; + } return (base2 ? 
env->regs[base2] : 0) + (long)(int)disp2; } @@ -1032,11 +1113,12 @@ static int kvm_stpcifc_service_call(S390CPU *cpu, struct kvm_run *run) { uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4; uint64_t fiba; + uint8_t ar; cpu_synchronize_state(CPU(cpu)); - fiba = get_base_disp_rxy(cpu, run); + fiba = get_base_disp_rxy(cpu, run, &ar); - return stpcifc_service_call(cpu, r1, fiba); + return stpcifc_service_call(cpu, r1, fiba, ar); } static int kvm_sic_service_call(S390CPU *cpu, struct kvm_run *run) @@ -1058,22 +1140,24 @@ static int kvm_pcistb_service_call(S390CPU *cpu, struct kvm_run *run) uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4; uint8_t r3 = run->s390_sieic.ipa & 0x000f; uint64_t gaddr; + uint8_t ar; cpu_synchronize_state(CPU(cpu)); - gaddr = get_base_disp_rsy(cpu, run); + gaddr = get_base_disp_rsy(cpu, run, &ar); - return pcistb_service_call(cpu, r1, r3, gaddr); + return pcistb_service_call(cpu, r1, r3, gaddr, ar); } static int kvm_mpcifc_service_call(S390CPU *cpu, struct kvm_run *run) { uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4; uint64_t fiba; + uint8_t ar; cpu_synchronize_state(CPU(cpu)); - fiba = get_base_disp_rxy(cpu, run); + fiba = get_base_disp_rxy(cpu, run, &ar); - return mpcifc_service_call(cpu, r1, fiba); + return mpcifc_service_call(cpu, r1, fiba, ar); } static int handle_b9(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1) @@ -1202,7 +1286,7 @@ static int handle_diag(S390CPU *cpu, struct kvm_run *run, uint32_t ipb) * For any diagnose call we support, bits 48-63 of the resulting * address specify the function code; the remainder is ignored. */ - func_code = decode_basedisp_rs(&cpu->env, ipb) & DIAG_KVM_CODE_MASK; + func_code = decode_basedisp_rs(&cpu->env, ipb, NULL) & DIAG_KVM_CODE_MASK; switch (func_code) { case DIAG_IPL: kvm_handle_diag_308(cpu, run); @@ -1549,7 +1633,8 @@ static int handle_sigp(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1) cpu_synchronize_state(CPU(cpu)); /* get order code */ - order = decode_basedisp_rs(env, run->s390_sieic.ipb) & SIGP_ORDER_MASK; + order = decode_basedisp_rs(env, run->s390_sieic.ipb, NULL) + & SIGP_ORDER_MASK; status_reg = &env->regs[r1]; param = (r1 % 2) ? env->regs[r1] : env->regs[r1 + 1]; @@ -1723,6 +1808,72 @@ static int handle_tsch(S390CPU *cpu) return ret; } +static void insert_stsi_3_2_2(S390CPU *cpu, __u64 addr, uint8_t ar) +{ + struct sysib_322 sysib; + int del; + + if (s390_cpu_virt_mem_read(cpu, addr, ar, &sysib, sizeof(sysib))) { + return; + } + /* Shift the stack of Extended Names to prepare for our own data */ + memmove(&sysib.ext_names[1], &sysib.ext_names[0], + sizeof(sysib.ext_names[0]) * (sysib.count - 1)); + /* First virt level, that doesn't provide Ext Names delimits stack. It is + * assumed it's not capable of managing Extended Names for lower levels. + */ + for (del = 1; del < sysib.count; del++) { + if (!sysib.vm[del].ext_name_encoding || !sysib.ext_names[del][0]) { + break; + } + } + if (del < sysib.count) { + memset(sysib.ext_names[del], 0, + sizeof(sysib.ext_names[0]) * (sysib.count - del)); + } + /* Insert short machine name in EBCDIC, padded with blanks */ + if (qemu_name) { + memset(sysib.vm[0].name, 0x40, sizeof(sysib.vm[0].name)); + ebcdic_put(sysib.vm[0].name, qemu_name, MIN(sizeof(sysib.vm[0].name), + strlen(qemu_name))); + } + sysib.vm[0].ext_name_encoding = 2; /* 2 = UTF-8 */ + memset(sysib.ext_names[0], 0, sizeof(sysib.ext_names[0])); + /* If hypervisor specifies zero Extended Name in STSI322 SYSIB, it's + * considered by s390 as not capable of providing any Extended Name. 
+ * Therefore if no name was specified on qemu invocation, we go with the + * same "KVMguest" default, which KVM has filled into short name field. + */ + if (qemu_name) { + strncpy((char *)sysib.ext_names[0], qemu_name, + sizeof(sysib.ext_names[0])); + } else { + strcpy((char *)sysib.ext_names[0], "KVMguest"); + } + /* Insert UUID */ + memcpy(sysib.vm[0].uuid, qemu_uuid, sizeof(sysib.vm[0].uuid)); + + s390_cpu_virt_mem_write(cpu, addr, ar, &sysib, sizeof(sysib)); +} + +static int handle_stsi(S390CPU *cpu) +{ + CPUState *cs = CPU(cpu); + struct kvm_run *run = cs->kvm_run; + + switch (run->s390_stsi.fc) { + case 3: + if (run->s390_stsi.sel1 != 2 || run->s390_stsi.sel2 != 2) { + return 0; + } + /* Only sysib 3.2.2 needs post-handling for now. */ + insert_stsi_3_2_2(cpu, run->s390_stsi.addr, run->s390_stsi.ar); + return 0; + default: + return 0; + } +} + static int kvm_arch_handle_debug_exit(S390CPU *cpu) { CPUState *cs = CPU(cpu); @@ -1772,6 +1923,9 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run) case KVM_EXIT_S390_TSCH: ret = handle_tsch(cpu); break; + case KVM_EXIT_S390_STSI: + ret = handle_stsi(cpu); + break; case KVM_EXIT_DEBUG: ret = kvm_arch_handle_debug_exit(cpu); break; @@ -1916,6 +2070,52 @@ int kvm_s390_set_cpu_state(S390CPU *cpu, uint8_t cpu_state) return ret; } +void kvm_s390_vcpu_interrupt_pre_save(S390CPU *cpu) +{ + struct kvm_s390_irq_state irq_state; + CPUState *cs = CPU(cpu); + int32_t bytes; + + if (!kvm_check_extension(kvm_state, KVM_CAP_S390_IRQ_STATE)) { + return; + } + + irq_state.buf = (uint64_t) cpu->irqstate; + irq_state.len = VCPU_IRQ_BUF_SIZE; + + bytes = kvm_vcpu_ioctl(cs, KVM_S390_GET_IRQ_STATE, &irq_state); + if (bytes < 0) { + cpu->irqstate_saved_size = 0; + error_report("Migration of interrupt state failed"); + return; + } + + cpu->irqstate_saved_size = bytes; +} + +int kvm_s390_vcpu_interrupt_post_load(S390CPU *cpu) +{ + CPUState *cs = CPU(cpu); + struct kvm_s390_irq_state irq_state; + int r; + + if (!kvm_check_extension(kvm_state, KVM_CAP_S390_IRQ_STATE)) { + return -ENOSYS; + } + + if (cpu->irqstate_saved_size == 0) { + return 0; + } + irq_state.buf = (uint64_t) cpu->irqstate; + irq_state.len = cpu->irqstate_saved_size; + + r = kvm_vcpu_ioctl(cs, KVM_S390_SET_IRQ_STATE, &irq_state); + if (r) { + error_report("Setting interrupt state failed %d", r); + } + return r; +} + int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route, uint64_t address, uint32_t data) { diff --git a/target-s390x/machine.c b/target-s390x/machine.c index bd4cea726d..7853e3c989 100644 --- a/target-s390x/machine.c +++ b/target-s390x/machine.c @@ -28,17 +28,25 @@ static int cpu_post_load(void *opaque, int version_id) */ if (kvm_enabled()) { kvm_s390_set_cpu_state(cpu, cpu->env.cpu_state); + return kvm_s390_vcpu_interrupt_post_load(cpu); } return 0; } +static void cpu_pre_save(void *opaque) +{ + S390CPU *cpu = opaque; -const VMStateDescription vmstate_s390_cpu = { - .name = "cpu", - .post_load = cpu_post_load, - .version_id = 2, - .minimum_version_id = 2, - .fields = (VMStateField[]) { + if (kvm_enabled()) { + kvm_s390_vcpu_interrupt_pre_save(cpu); + } +} + +const VMStateDescription vmstate_fpu = { + .name = "cpu/fpu", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { VMSTATE_UINT64(env.fregs[0].ll, S390CPU), VMSTATE_UINT64(env.fregs[1].ll, S390CPU), VMSTATE_UINT64(env.fregs[2].ll, S390CPU), @@ -55,11 +63,27 @@ const VMStateDescription vmstate_s390_cpu = { VMSTATE_UINT64(env.fregs[13].ll, S390CPU), VMSTATE_UINT64(env.fregs[14].ll, S390CPU), 
VMSTATE_UINT64(env.fregs[15].ll, S390CPU), + VMSTATE_UINT32(env.fpc, S390CPU), + VMSTATE_END_OF_LIST() + } +}; + +static inline bool fpu_needed(void *opaque) +{ + return true; +} + +const VMStateDescription vmstate_s390_cpu = { + .name = "cpu", + .post_load = cpu_post_load, + .pre_save = cpu_pre_save, + .version_id = 4, + .minimum_version_id = 3, + .fields = (VMStateField[]) { VMSTATE_UINT64_ARRAY(env.regs, S390CPU, 16), VMSTATE_UINT64(env.psw.mask, S390CPU), VMSTATE_UINT64(env.psw.addr, S390CPU), VMSTATE_UINT64(env.psa, S390CPU), - VMSTATE_UINT32(env.fpc, S390CPU), VMSTATE_UINT32(env.todpr, S390CPU), VMSTATE_UINT64(env.pfault_token, S390CPU), VMSTATE_UINT64(env.pfault_compare, S390CPU), @@ -72,6 +96,17 @@ const VMStateDescription vmstate_s390_cpu = { VMSTATE_UINT64_ARRAY(env.cregs, S390CPU, 16), VMSTATE_UINT8(env.cpu_state, S390CPU), VMSTATE_UINT8(env.sigp_order, S390CPU), + VMSTATE_UINT32_V(irqstate_saved_size, S390CPU, 4), + VMSTATE_VBUFFER_UINT32(irqstate, S390CPU, 4, NULL, 0, + irqstate_saved_size), VMSTATE_END_OF_LIST() }, + .subsections = (VMStateSubsection[]) { + { + .vmsd = &vmstate_fpu, + .needed = fpu_needed, + } , { + /* empty */ + } + }, }; diff --git a/target-s390x/mmu_helper.c b/target-s390x/mmu_helper.c index b061c85aff..e8dcd0c18f 100644 --- a/target-s390x/mmu_helper.c +++ b/target-s390x/mmu_helper.c @@ -68,7 +68,7 @@ static void trigger_prot_fault(CPUS390XState *env, target_ulong vaddr, { uint64_t tec; - tec = vaddr | (rw == 1 ? FS_WRITE : FS_READ) | 4 | asc >> 46; + tec = vaddr | (rw == MMU_DATA_STORE ? FS_WRITE : FS_READ) | 4 | asc >> 46; DPRINTF("%s: trans_exc_code=%016" PRIx64 "\n", __func__, tec); @@ -85,7 +85,7 @@ static void trigger_page_fault(CPUS390XState *env, target_ulong vaddr, int ilen = ILEN_LATER; uint64_t tec; - tec = vaddr | (rw == 1 ? FS_WRITE : FS_READ) | asc >> 46; + tec = vaddr | (rw == MMU_DATA_STORE ? FS_WRITE : FS_READ) | asc >> 46; DPRINTF("%s: vaddr=%016" PRIx64 " bits=%d\n", __func__, vaddr, bits); @@ -94,7 +94,7 @@ static void trigger_page_fault(CPUS390XState *env, target_ulong vaddr, } /* Code accesses have an undefined ilc. 
*/ - if (rw == 2) { + if (rw == MMU_INST_FETCH) { ilen = 2; } @@ -288,7 +288,7 @@ static int mmu_translate_asce(CPUS390XState *env, target_ulong vaddr, r = mmu_translate_region(env, vaddr, asc, asce, level, raddr, flags, rw, exc); - if ((rw == 1) && !(*flags & PAGE_WRITE)) { + if (rw == MMU_DATA_STORE && !(*flags & PAGE_WRITE)) { trigger_prot_fault(env, vaddr, asc, rw, exc); return -1; } @@ -303,8 +303,8 @@ static int mmu_translate_asce(CPUS390XState *env, target_ulong vaddr, * @param asc address space control (one of the PSW_ASC_* modes) * @param raddr the translated address is stored to this pointer * @param flags the PAGE_READ/WRITE/EXEC flags are stored to this pointer - * @param exc true = inject a program check if a fault occured - * @return 0 if the translation was successfull, -1 if a fault occured + * @param exc true = inject a program check if a fault occurred + * @return 0 if the translation was successful, -1 if a fault occurred */ int mmu_translate(CPUS390XState *env, target_ulong vaddr, int rw, uint64_t asc, target_ulong *raddr, int *flags, bool exc) @@ -338,7 +338,7 @@ int mmu_translate(CPUS390XState *env, target_ulong vaddr, int rw, uint64_t asc, * Instruction: Primary * Data: Secondary */ - if (rw == 2) { + if (rw == MMU_INST_FETCH) { r = mmu_translate_asce(env, vaddr, PSW_ASC_PRIMARY, env->cregs[1], raddr, flags, rw, exc); *flags &= ~(PAGE_READ | PAGE_WRITE); @@ -435,21 +435,29 @@ static int translate_pages(S390CPU *cpu, vaddr addr, int nr_pages, /** * s390_cpu_virt_mem_rw: * @laddr: the logical start address + * @ar: the access register number * @hostbuf: buffer in host memory. NULL = do only checks w/o copying - * @len: length that should be transfered + * @len: length that should be transferred * @is_write: true = write, false = read - * Returns: 0 on success, non-zero if an exception occured + * Returns: 0 on success, non-zero if an exception occurred * * Copy from/to guest memory using logical addresses. Note that we inject a * program interrupt in case there is an error while accessing the memory. */ -int s390_cpu_virt_mem_rw(S390CPU *cpu, vaddr laddr, void *hostbuf, +int s390_cpu_virt_mem_rw(S390CPU *cpu, vaddr laddr, uint8_t ar, void *hostbuf, int len, bool is_write) { int currlen, nr_pages, i; target_ulong *pages; int ret; + if (kvm_enabled()) { + ret = kvm_s390_mem_op(cpu, laddr, ar, hostbuf, len, is_write); + if (ret >= 0) { + return ret; + } + } + nr_pages = (((laddr & ~TARGET_PAGE_MASK) + len - 1) >> TARGET_PAGE_BITS) + 1; pages = g_malloc(nr_pages * sizeof(*pages)); diff --git a/target-s390x/translate.c b/target-s390x/translate.c index 4f82edde5b..8784112f4e 100644 --- a/target-s390x/translate.c +++ b/target-s390x/translate.c @@ -2990,7 +2990,7 @@ static ExitStatus op_sam(DisasContext *s, DisasOps *o) break; } - /* Bizzare but true, we check the address of the current insn for the + /* Bizarre but true, we check the address of the current insn for the specification exception, not the next to be executed. Thus the PoO documents that Bad Things Happen two bytes before the end. 
*/ if (s->pc & ~mask) { diff --git a/target-tricore/cpu.h b/target-tricore/cpu.h index 90bf0069b5..c14b5f9016 100644 --- a/target-tricore/cpu.h +++ b/target-tricore/cpu.h @@ -395,9 +395,4 @@ int cpu_tricore_handle_mmu_fault(CPUState *cpu, target_ulong address, #include "exec/exec-all.h" -static inline void cpu_pc_from_tb(CPUTriCoreState *env, TranslationBlock *tb) -{ - env->PC = tb->pc; -} - #endif /*__TRICORE_CPU_H__ */ diff --git a/target-tricore/op_helper.c b/target-tricore/op_helper.c index 9907e07e22..9919b5b17b 100644 --- a/target-tricore/op_helper.c +++ b/target-tricore/op_helper.c @@ -2458,6 +2458,7 @@ void helper_rfe(CPUTriCoreState *env) if (!cdc_zero(&(env->PSW)) && (env->PSW & MASK_PSW_CDE)) { /* raise MNG trap */ } + env->PC = env->gpr_a[11] & ~0x1; /* ICR.IE = PCXI.PIE; */ env->ICR = (env->ICR & ~MASK_ICR_IE) + ((env->PCXI & MASK_PCXI_PIE) >> 15); /* ICR.CCPN = PCXI.PCPN; */ @@ -2581,7 +2582,7 @@ void helper_rslcx(CPUTriCoreState *env) ((env->PCXI & MASK_PCXI_PCXO) << 6); /* {new_PCXI, A[11], A[10], A[11], D[8], D[9], D[10], D[11], A[12], A[13], A[14], A[15], D[12], D[13], D[14], D[15]} = M(EA, 16 * word); */ - restore_context_upper(env, ea, &new_PCXI, &env->gpr_a[11]); + restore_context_lower(env, ea, &env->gpr_a[11], &new_PCXI); /* M(EA, word) = FCX; */ cpu_stl_data(env, ea, env->FCX); /* M(EA, word) = FCX; */ diff --git a/target-tricore/translate.c b/target-tricore/translate.c index 54a48cd694..663b2a0796 100644 --- a/target-tricore/translate.c +++ b/target-tricore/translate.c @@ -3440,7 +3440,7 @@ static void gen_compute_branch(DisasContext *ctx, uint32_t opc, int r1, break; case OPCM_32_BRR_LOOP: if (MASK_OP_BRR_OP2(ctx->opcode) == OPC2_32_BRR_LOOP) { - gen_loop(ctx, r1, offset * 2); + gen_loop(ctx, r2, offset * 2); } else { /* OPC2_32_BRR_LOOPU */ gen_goto_tb(ctx, 0, ctx->pc + offset * 2); @@ -3745,10 +3745,10 @@ static void decode_slr_opc(DisasContext *ctx, int op1) tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 2); break; case OPC1_16_SLR_LD_W: - tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESW); + tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESL); break; case OPC1_16_SLR_LD_W_POSTINC: - tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESW); + tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESL); tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 4); break; } diff --git a/target-tricore/tricore-opcodes.h b/target-tricore/tricore-opcodes.h index d3a9bc158b..2291f75fd9 100644 --- a/target-tricore/tricore-opcodes.h +++ b/target-tricore/tricore-opcodes.h @@ -107,7 +107,7 @@ /* BO Format */ #define MASK_OP_BO_OFF10(op) (MASK_BITS_SHIFT(op, 16, 21) + \ (MASK_BITS_SHIFT(op, 28, 31) << 6)) -#define MASK_OP_BO_OFF10_SEXT(op) (MASK_BITS_SHIFT_SEXT(op, 16, 21) + \ +#define MASK_OP_BO_OFF10_SEXT(op) (MASK_BITS_SHIFT(op, 16, 21) + \ (MASK_BITS_SHIFT_SEXT(op, 28, 31) << 6)) #define MASK_OP_BO_OP2(op) MASK_BITS_SHIFT(op, 22, 27) #define MASK_OP_BO_S2(op) MASK_BITS_SHIFT(op, 12, 15) @@ -417,20 +417,19 @@ static inline TCGCond tcg_high_cond(TCGCond c) } } -#define TEMP_VAL_DEAD 0 -#define TEMP_VAL_REG 1 -#define TEMP_VAL_MEM 2 -#define TEMP_VAL_CONST 3 +typedef enum TCGTempVal { + TEMP_VAL_DEAD, + TEMP_VAL_REG, + TEMP_VAL_MEM, + TEMP_VAL_CONST, +} TCGTempVal; -/* XXX: optimize memory layout */ typedef struct TCGTemp { - TCGType base_type; - TCGType type; - int val_type; - int reg; - tcg_target_long val; - int mem_reg; - intptr_t mem_offset; + unsigned int reg:8; + unsigned int mem_reg:8; + TCGTempVal 
val_type:8; + TCGType base_type:8; + TCGType type:8; unsigned int fixed_reg:1; unsigned int mem_coherent:1; unsigned int mem_allocated:1; @@ -438,6 +437,9 @@ typedef struct TCGTemp { basic blocks. Otherwise, it is not preserved across basic blocks. */ unsigned int temp_allocated:1; /* never used for code gen */ + + tcg_target_long val; + intptr_t mem_offset; const char *name; } TCGTemp; @@ -859,8 +861,10 @@ static inline size_t tcg_current_code_size(TCGContext *s) * state is correctly synchronised and ready for execution of the next * TB (and in particular the guest PC is the address to execute next). * Otherwise, we gave up on execution of this TB before it started, and - * the caller must fix up the CPU state by calling cpu_pc_from_tb() - * with the next-TB pointer we return. + * the caller must fix up the CPU state by calling the CPU's + * synchronize_from_tb() method with the next-TB pointer we return (falling + * back to calling the CPU's set_pc method with tb->pb if no + * synchronize_from_tb() method exists). * * Note that TCG targets may use a different definition of tcg_qemu_tb_exec * to this default (which just calls the prologue.code emitted by diff --git a/tests/Makefile b/tests/Makefile index 55aa7452b4..666aee2ac3 100644 --- a/tests/Makefile +++ b/tests/Makefile @@ -207,20 +207,44 @@ $(foreach target,$(SYSEMU_TARGET_LIST), \ $(eval check-qtest-$(target)-y += tests/qom-test$(EXESUF)))) check-qapi-schema-y := $(addprefix tests/qapi-schema/, \ - comments.json empty.json funny-char.json indented-expr.json \ - missing-colon.json missing-comma-list.json \ - missing-comma-object.json non-objects.json \ + comments.json empty.json enum-empty.json enum-missing-data.json \ + enum-wrong-data.json enum-int-member.json enum-dict-member.json \ + enum-clash-member.json enum-max-member.json enum-union-clash.json \ + enum-bad-name.json funny-char.json indented-expr.json \ + missing-type.json bad-ident.json ident-with-escape.json \ + escape-outside-string.json unknown-escape.json \ + escape-too-short.json escape-too-big.json unicode-str.json \ + double-type.json bad-base.json bad-type-bool.json bad-type-int.json \ + bad-type-dict.json double-data.json unknown-expr-key.json \ + redefined-type.json redefined-command.json redefined-builtin.json \ + redefined-event.json command-int.json bad-data.json event-max.json \ + type-bypass.json type-bypass-no-gen.json type-bypass-bad-gen.json \ + data-array-empty.json data-array-unknown.json data-int.json \ + data-unknown.json data-member-unknown.json data-member-array.json \ + data-member-array-bad.json returns-array-bad.json returns-int.json \ + returns-unknown.json returns-alternate.json returns-whitelist.json \ + missing-colon.json missing-comma-list.json missing-comma-object.json \ + nested-struct-data.json nested-struct-returns.json non-objects.json \ qapi-schema-test.json quoted-structural-chars.json \ trailing-comma-list.json trailing-comma-object.json \ unclosed-list.json unclosed-object.json unclosed-string.json \ - duplicate-key.json union-invalid-base.json flat-union-no-base.json \ - flat-union-invalid-discriminator.json \ + duplicate-key.json union-invalid-base.json union-bad-branch.json \ + union-optional-branch.json union-unknown.json union-max.json \ + flat-union-optional-discriminator.json flat-union-no-base.json \ + flat-union-invalid-discriminator.json flat-union-inline.json \ flat-union-invalid-branch-key.json flat-union-reverse-define.json \ - flat-union-string-discriminator.json \ + flat-union-string-discriminator.json 
union-base-no-discriminator.json \ + flat-union-bad-discriminator.json flat-union-bad-base.json \ + flat-union-base-star.json flat-union-int-branch.json \ + flat-union-base-union.json flat-union-branch-clash.json \ + alternate-nested.json alternate-unknown.json alternate-clash.json \ + alternate-good.json alternate-base.json alternate-array.json \ + alternate-conflict-string.json alternate-conflict-dict.json \ include-simple.json include-relpath.json include-format-err.json \ include-non-file.json include-no-file.json include-before-err.json \ include-nested-err.json include-self-cycle.json include-cycle.json \ - include-repetition.json event-nest-struct.json) + include-repetition.json event-nest-struct.json event-case.json \ + struct-base-clash.json struct-base-clash-deep.json ) GENERATED_HEADERS += tests/test-qapi-types.h tests/test-qapi-visit.h \ tests/test-qmp-commands.h tests/test-qapi-event.h @@ -415,6 +439,7 @@ GCOV_OPTIONS = -n $(if $(V),-f,) $(patsubst %, check-qtest-%, $(QTEST_TARGETS)): check-qtest-%: $(check-qtest-y) $(if $(CONFIG_GCOV),@rm -f *.gcda */*.gcda */*/*.gcda */*/*/*.gcda,) $(call quiet-command,QTEST_QEMU_BINARY=$*-softmmu/qemu-system-$* \ + QTEST_QEMU_IMG=qemu-img$(EXESUF) \ MALLOC_PERTURB_=$${MALLOC_PERTURB_:-$$((RANDOM % 255 + 1))} \ gtester $(GTESTER_OPTIONS) -m=$(SPEED) $(check-qtest-$*-y),"GTESTER $@") $(if $(CONFIG_GCOV),@for f in $(gcov-files-$*-y); do \ diff --git a/tests/ahci-test.c b/tests/ahci-test.c index ea62e249f5..7c23bb2180 100644 --- a/tests/ahci-test.c +++ b/tests/ahci-test.c @@ -39,11 +39,14 @@ #include "hw/pci/pci_ids.h" #include "hw/pci/pci_regs.h" -/* Test-specific defines. */ -#define TEST_IMAGE_SIZE (64 * 1024 * 1024) +/* Test-specific defines -- in MiB */ +#define TEST_IMAGE_SIZE_MB (200 * 1024) +#define TEST_IMAGE_SECTORS ((TEST_IMAGE_SIZE_MB / AHCI_SECTOR_SIZE) \ + * 1024 * 1024) /*** Globals ***/ static char tmp_path[] = "/tmp/qtest.XXXXXX"; +static char debug_path[] = "/tmp/qtest-blkdebug.XXXXXX"; static bool ahci_pedantic; /*** Function Declarations ***/ @@ -99,19 +102,12 @@ static void generate_pattern(void *buffer, size_t len, size_t cycle_len) /** * Start a Q35 machine and bookmark a handle to the AHCI device. */ -static AHCIQState *ahci_boot(void) +static AHCIQState *ahci_vboot(const char *cli, va_list ap) { AHCIQState *s; - const char *cli; s = g_malloc0(sizeof(AHCIQState)); - - cli = "-drive if=none,id=drive0,file=%s,cache=writeback,serial=%s" - ",format=raw" - " -M q35 " - "-device ide-hd,drive=drive0 " - "-global ide-hd.ver=%s"; - s->parent = qtest_pc_boot(cli, tmp_path, "testdisk", "version"); + s->parent = qtest_pc_vboot(cli, ap); alloc_set_flags(s->parent->alloc, ALLOC_LEAK_ASSERT); /* Verify that we have an AHCI device present. */ @@ -121,12 +117,35 @@ static AHCIQState *ahci_boot(void) } /** + * Start a Q35 machine and bookmark a handle to the AHCI device. + */ +static AHCIQState *ahci_boot(const char *cli, ...) +{ + AHCIQState *s; + va_list ap; + + if (cli) { + va_start(ap, cli); + s = ahci_vboot(cli, ap); + va_end(ap); + } else { + cli = "-drive if=none,id=drive0,file=%s,cache=writeback,serial=%s" + ",format=qcow2" + " -M q35 " + "-device ide-hd,drive=drive0 " + "-global ide-hd.ver=%s"; + s = ahci_boot(cli, tmp_path, "testdisk", "version"); + } + + return s; +} + +/** * Clean up the PCI device, then terminate the QEMU instance. 
*/ static void ahci_shutdown(AHCIQState *ahci) { QOSState *qs = ahci->parent; - ahci_clean_mem(ahci); free_ahci_device(ahci->dev); g_free(ahci); @@ -137,10 +156,18 @@ static void ahci_shutdown(AHCIQState *ahci) * Boot and fully enable the HBA device. * @see ahci_boot, ahci_pci_enable and ahci_hba_enable. */ -static AHCIQState *ahci_boot_and_enable(void) +static AHCIQState *ahci_boot_and_enable(const char *cli, ...) { AHCIQState *ahci; - ahci = ahci_boot(); + va_list ap; + + if (cli) { + va_start(ap, cli); + ahci = ahci_vboot(cli, ap); + va_end(ap); + } else { + ahci = ahci_boot(NULL); + } ahci_pci_enable(ahci); ahci_hba_enable(ahci); @@ -738,7 +765,7 @@ static void ahci_test_identify(AHCIQState *ahci) ahci_port_clear(ahci, px); /* "Read" 512 bytes using CMD_IDENTIFY into the host buffer. */ - ahci_io(ahci, px, CMD_IDENTIFY, &buff, buffsize); + ahci_io(ahci, px, CMD_IDENTIFY, &buff, buffsize, 0); /* Check serial number/version in the buffer */ /* NB: IDENTIFY strings are packed in 16bit little endian chunks. @@ -754,11 +781,12 @@ static void ahci_test_identify(AHCIQState *ahci) g_assert_cmphex(rc, ==, 0); sect_size = le16_to_cpu(*((uint16_t *)(&buff[5]))); - g_assert_cmphex(sect_size, ==, 0x200); + g_assert_cmphex(sect_size, ==, AHCI_SECTOR_SIZE); } static void ahci_test_io_rw_simple(AHCIQState *ahci, unsigned bufsize, - uint8_t read_cmd, uint8_t write_cmd) + uint64_t sector, uint8_t read_cmd, + uint8_t write_cmd) { uint64_t ptr; uint8_t port; @@ -781,9 +809,9 @@ static void ahci_test_io_rw_simple(AHCIQState *ahci, unsigned bufsize, memwrite(ptr, tx, bufsize); /* Write this buffer to disk, then read it back to the DMA buffer. */ - ahci_guest_io(ahci, port, write_cmd, ptr, bufsize); + ahci_guest_io(ahci, port, write_cmd, ptr, bufsize, sector); qmemset(ptr, 0x00, bufsize); - ahci_guest_io(ahci, port, read_cmd, ptr, bufsize); + ahci_guest_io(ahci, port, read_cmd, ptr, bufsize, sector); /*** Read back the Data ***/ memread(ptr, rx, bufsize); @@ -794,6 +822,29 @@ static void ahci_test_io_rw_simple(AHCIQState *ahci, unsigned bufsize, g_free(rx); } +static void ahci_test_nondata(AHCIQState *ahci, uint8_t ide_cmd) +{ + uint8_t px; + AHCICommand *cmd; + + /* Sanitize */ + px = ahci_port_select(ahci); + ahci_port_clear(ahci, px); + + /* Issue Command */ + cmd = ahci_command_create(ide_cmd); + ahci_command_commit(ahci, cmd, px); + ahci_command_issue(ahci, cmd); + ahci_command_verify(ahci, cmd); + ahci_command_free(cmd); +} + +static void ahci_test_flush(AHCIQState *ahci) +{ + ahci_test_nondata(ahci, CMD_FLUSH_CACHE); +} + + /******************************************************************************/ /* Test Interfaces */ /******************************************************************************/ @@ -804,7 +855,7 @@ static void ahci_test_io_rw_simple(AHCIQState *ahci, unsigned bufsize, static void test_sanity(void) { AHCIQState *ahci; - ahci = ahci_boot(); + ahci = ahci_boot(NULL); ahci_shutdown(ahci); } @@ -815,7 +866,7 @@ static void test_sanity(void) static void test_pci_spec(void) { AHCIQState *ahci; - ahci = ahci_boot(); + ahci = ahci_boot(NULL); ahci_test_pci_spec(ahci); ahci_shutdown(ahci); } @@ -827,8 +878,7 @@ static void test_pci_spec(void) static void test_pci_enable(void) { AHCIQState *ahci; - - ahci = ahci_boot(); + ahci = ahci_boot(NULL); ahci_pci_enable(ahci); ahci_shutdown(ahci); } @@ -841,7 +891,7 @@ static void test_hba_spec(void) { AHCIQState *ahci; - ahci = ahci_boot(); + ahci = ahci_boot(NULL); ahci_pci_enable(ahci); ahci_test_hba_spec(ahci); ahci_shutdown(ahci); @@ -855,7 
+905,7 @@ static void test_hba_enable(void) { AHCIQState *ahci; - ahci = ahci_boot(); + ahci = ahci_boot(NULL); ahci_pci_enable(ahci); ahci_hba_enable(ahci); ahci_shutdown(ahci); @@ -869,7 +919,7 @@ static void test_identify(void) { AHCIQState *ahci; - ahci = ahci_boot_and_enable(); + ahci = ahci_boot_and_enable(NULL); ahci_test_identify(ahci); ahci_shutdown(ahci); } @@ -890,7 +940,7 @@ static void test_dma_fragmented(void) unsigned char *rx = g_malloc0(bufsize); uint64_t ptr; - ahci = ahci_boot_and_enable(); + ahci = ahci_boot_and_enable(NULL); px = ahci_port_select(ahci); ahci_port_clear(ahci, px); @@ -928,6 +978,50 @@ static void test_dma_fragmented(void) g_free(tx); } +static void test_flush(void) +{ + AHCIQState *ahci; + + ahci = ahci_boot_and_enable(NULL); + ahci_test_flush(ahci); + ahci_shutdown(ahci); +} + +static void test_flush_retry(void) +{ + AHCIQState *ahci; + AHCICommand *cmd; + uint8_t port; + const char *s; + + prepare_blkdebug_script(debug_path, "flush_to_disk"); + ahci = ahci_boot_and_enable("-drive file=blkdebug:%s:%s,if=none,id=drive0," + "format=qcow2,cache=writeback," + "rerror=stop,werror=stop " + "-M q35 " + "-device ide-hd,drive=drive0 ", + debug_path, + tmp_path); + + /* Issue Flush Command */ + port = ahci_port_select(ahci); + ahci_port_clear(ahci, port); + cmd = ahci_command_create(CMD_FLUSH_CACHE); + ahci_command_commit(ahci, cmd, port); + ahci_command_issue_async(ahci, cmd); + qmp_eventwait("STOP"); + + /* Complete the command */ + s = "{'execute':'cont' }"; + qmp_async(s); + qmp_eventwait("RESUME"); + ahci_command_wait(ahci, cmd); + ahci_command_verify(ahci, cmd); + + ahci_command_free(cmd); + ahci_shutdown(ahci); +} + /******************************************************************************/ /* AHCI I/O Test Matrix Definitions */ @@ -968,12 +1062,45 @@ enum IOOps { NUM_IO_OPS }; +enum OffsetType { + OFFSET_BEGIN = 0, + OFFSET_ZERO = OFFSET_BEGIN, + OFFSET_LOW, + OFFSET_HIGH, + NUM_OFFSETS +}; + +static const char *offset_str[NUM_OFFSETS] = { "zero", "low", "high" }; + typedef struct AHCIIOTestOptions { enum BuffLen length; enum AddrMode address_type; enum IOMode io_type; + enum OffsetType offset; } AHCIIOTestOptions; +static uint64_t offset_sector(enum OffsetType ofst, + enum AddrMode addr_type, + uint64_t buffsize) +{ + uint64_t ceil; + uint64_t nsectors; + + switch (ofst) { + case OFFSET_ZERO: + return 0; + case OFFSET_LOW: + return 1; + case OFFSET_HIGH: + ceil = (addr_type == ADDR_MODE_LBA28) ? 0xfffffff : 0xffffffffffff; + ceil = MIN(ceil, TEST_IMAGE_SECTORS - 1); + nsectors = buffsize / AHCI_SECTOR_SIZE; + return ceil - nsectors + 1; + default: + g_assert_not_reached(); + } +} + /** * Table of possible I/O ATA commands given a set of enumerations. */ @@ -1001,12 +1128,12 @@ static const uint8_t io_cmds[NUM_MODES][NUM_ADDR_MODES][NUM_IO_OPS] = { * transfer modes, and buffer sizes. 
*/ static void test_io_rw_interface(enum AddrMode lba48, enum IOMode dma, - unsigned bufsize) + unsigned bufsize, uint64_t sector) { AHCIQState *ahci; - ahci = ahci_boot_and_enable(); - ahci_test_io_rw_simple(ahci, bufsize, + ahci = ahci_boot_and_enable(NULL); + ahci_test_io_rw_simple(ahci, bufsize, sector, io_cmds[dma][lba48][IO_READ], io_cmds[dma][lba48][IO_WRITE]); ahci_shutdown(ahci); @@ -1019,6 +1146,7 @@ static void test_io_interface(gconstpointer opaque) { AHCIIOTestOptions *opts = (AHCIIOTestOptions *)opaque; unsigned bufsize; + uint64_t sector; switch (opts->length) { case LEN_SIMPLE: @@ -1037,13 +1165,14 @@ static void test_io_interface(gconstpointer opaque) g_assert_not_reached(); } - test_io_rw_interface(opts->address_type, opts->io_type, bufsize); + sector = offset_sector(opts->offset, opts->address_type, bufsize); + test_io_rw_interface(opts->address_type, opts->io_type, bufsize, sector); g_free(opts); return; } static void create_ahci_io_test(enum IOMode type, enum AddrMode addr, - enum BuffLen len) + enum BuffLen len, enum OffsetType offset) { static const char *arch; char *name; @@ -1052,15 +1181,17 @@ static void create_ahci_io_test(enum IOMode type, enum AddrMode addr, opts->length = len; opts->address_type = addr; opts->io_type = type; + opts->offset = offset; if (!arch) { arch = qtest_get_arch(); } - name = g_strdup_printf("/%s/ahci/io/%s/%s/%s", arch, + name = g_strdup_printf("/%s/ahci/io/%s/%s/%s/%s", arch, io_mode_str[type], addr_mode_str[addr], - buff_len_str[len]); + buff_len_str[len], + offset_str[offset]); g_test_add_data_func(name, opts, test_io_interface); g_free(name); @@ -1071,10 +1202,10 @@ static void create_ahci_io_test(enum IOMode type, enum AddrMode addr, int main(int argc, char **argv) { const char *arch; - int fd; int ret; + int fd; int c; - int i, j, k; + int i, j, k, m; static struct option long_options[] = { {"pedantic", no_argument, 0, 'p' }, @@ -1108,11 +1239,13 @@ int main(int argc, char **argv) return 0; } - /* Create a temporary raw image */ - fd = mkstemp(tmp_path); + /* Create a temporary qcow2 image */ + close(mkstemp(tmp_path)); + mkqcow2(tmp_path, TEST_IMAGE_SIZE_MB); + + /* Create temporary blkdebug instructions */ + fd = mkstemp(debug_path); g_assert(fd >= 0); - ret = ftruncate(fd, TEST_IMAGE_SIZE); - g_assert(ret == 0); close(fd); /* Run the tests */ @@ -1126,17 +1259,23 @@ int main(int argc, char **argv) for (i = MODE_BEGIN; i < NUM_MODES; i++) { for (j = ADDR_MODE_BEGIN; j < NUM_ADDR_MODES; j++) { for (k = LEN_BEGIN; k < NUM_LENGTHS; k++) { - create_ahci_io_test(i, j, k); + for (m = OFFSET_BEGIN; m < NUM_OFFSETS; m++) { + create_ahci_io_test(i, j, k, m); + } } } } qtest_add_func("/ahci/io/dma/lba28/fragmented", test_dma_fragmented); + qtest_add_func("/ahci/flush/simple", test_flush); + qtest_add_func("/ahci/flush/retry", test_flush_retry); + ret = g_test_run(); /* Cleanup */ unlink(tmp_path); + unlink(debug_path); return ret; } diff --git a/tests/ide-test.c b/tests/ide-test.c index b28a3023c2..78382e9c75 100644 --- a/tests/ide-test.c +++ b/tests/ide-test.c @@ -29,6 +29,7 @@ #include <glib.h> #include "libqtest.h" +#include "libqos/libqos.h" #include "libqos/pci-pc.h" #include "libqos/malloc-pc.h" @@ -494,33 +495,10 @@ static void test_flush(void) ide_test_quit(); } -static void prepare_blkdebug_script(const char *debug_fn, const char *event) -{ - FILE *debug_file = fopen(debug_fn, "w"); - int ret; - - fprintf(debug_file, "[inject-error]\n"); - fprintf(debug_file, "event = \"%s\"\n", event); - fprintf(debug_file, "errno = \"5\"\n"); - 
fprintf(debug_file, "state = \"1\"\n"); - fprintf(debug_file, "immediately = \"off\"\n"); - fprintf(debug_file, "once = \"on\"\n"); - - fprintf(debug_file, "[set-state]\n"); - fprintf(debug_file, "event = \"%s\"\n", event); - fprintf(debug_file, "new_state = \"2\"\n"); - fflush(debug_file); - g_assert(!ferror(debug_file)); - - ret = fclose(debug_file); - g_assert(ret == 0); -} - static void test_retry_flush(const char *machine) { uint8_t data; const char *s; - QDict *response; prepare_blkdebug_script(debug_path, "flush_to_disk"); @@ -539,15 +517,7 @@ static void test_retry_flush(const char *machine) assert_bit_set(data, BSY | DRDY); assert_bit_clear(data, DF | ERR | DRQ); - for (;; response = NULL) { - response = qmp_receive(); - if ((qdict_haskey(response, "event")) && - (strcmp(qdict_get_str(response, "event"), "STOP") == 0)) { - QDECREF(response); - break; - } - QDECREF(response); - } + qmp_eventwait("STOP"); /* Complete the command */ s = "{'execute':'cont' }"; diff --git a/tests/libqos/ahci.c b/tests/libqos/ahci.c index b0f39a5e32..843cf72980 100644 --- a/tests/libqos/ahci.c +++ b/tests/libqos/ahci.c @@ -364,7 +364,7 @@ void ahci_port_clear(AHCIQState *ahci, uint8_t port) ahci_px_wreg(ahci, port, AHCI_PX_IS, reg); g_assert_cmphex(ahci_px_rreg(ahci, port, AHCI_PX_IS), ==, 0); - /* Wipe the FIS-Recieve Buffer */ + /* Wipe the FIS-Receive Buffer */ qmemset(ahci->port[port].fb, 0x00, 0x100); } @@ -442,7 +442,7 @@ void ahci_port_check_pio_sanity(AHCIQState *ahci, uint8_t port, { PIOSetupFIS *pio = g_malloc0(0x20); - /* We cannot check the Status or E_Status registers, becuase + /* We cannot check the Status or E_Status registers, because * the status may have again changed between the PIO Setup FIS * and the conclusion of the command with the D2H Register FIS. */ memread(ahci->port[port].fb + 0x20, pio, 0x20); @@ -568,13 +568,15 @@ inline unsigned size_to_prdtl(unsigned bytes, unsigned bytes_per_prd) /* Given a guest buffer address, perform an IO operation */ void ahci_guest_io(AHCIQState *ahci, uint8_t port, uint8_t ide_cmd, - uint64_t buffer, size_t bufsize) + uint64_t buffer, size_t bufsize, uint64_t sector) { AHCICommand *cmd; - cmd = ahci_command_create(ide_cmd); ahci_command_set_buffer(cmd, buffer); ahci_command_set_size(cmd, bufsize); + if (sector) { + ahci_command_set_offset(cmd, sector); + } ahci_command_commit(ahci, cmd, port); ahci_command_issue(ahci, cmd); ahci_command_verify(ahci, cmd); @@ -612,7 +614,7 @@ static AHCICommandProp *ahci_command_find(uint8_t command_name) /* Given a HOST buffer, create a buffer address and perform an IO operation. 
*/ void ahci_io(AHCIQState *ahci, uint8_t port, uint8_t ide_cmd, - void *buffer, size_t bufsize) + void *buffer, size_t bufsize, uint64_t sector) { uint64_t ptr; AHCICommandProp *props; @@ -626,7 +628,7 @@ void ahci_io(AHCIQState *ahci, uint8_t port, uint8_t ide_cmd, memwrite(ptr, buffer, bufsize); } - ahci_guest_io(ahci, port, ide_cmd, ptr, bufsize); + ahci_guest_io(ahci, port, ide_cmd, ptr, bufsize, sector); if (props->read) { memread(ptr, buffer, bufsize); diff --git a/tests/libqos/ahci.h b/tests/libqos/ahci.h index 888545d5a2..40e8ca48ba 100644 --- a/tests/libqos/ahci.h +++ b/tests/libqos/ahci.h @@ -523,9 +523,9 @@ void ahci_write_fis(AHCIQState *ahci, RegH2DFIS *fis, uint64_t addr); unsigned ahci_pick_cmd(AHCIQState *ahci, uint8_t port); unsigned size_to_prdtl(unsigned bytes, unsigned bytes_per_prd); void ahci_guest_io(AHCIQState *ahci, uint8_t port, uint8_t ide_cmd, - uint64_t gbuffer, size_t size); + uint64_t gbuffer, size_t size, uint64_t sector); void ahci_io(AHCIQState *ahci, uint8_t port, uint8_t ide_cmd, - void *buffer, size_t bufsize); + void *buffer, size_t bufsize, uint64_t sector); /* Command Lifecycle */ AHCICommand *ahci_command_create(uint8_t command_name); diff --git a/tests/libqos/libqos-pc.c b/tests/libqos/libqos-pc.c index bbace893fb..1403699377 100644 --- a/tests/libqos/libqos-pc.c +++ b/tests/libqos/libqos-pc.c @@ -6,6 +6,11 @@ static QOSOps qos_ops = { .uninit_allocator = pc_alloc_uninit }; +QOSState *qtest_pc_vboot(const char *cmdline_fmt, va_list ap) +{ + return qtest_vboot(&qos_ops, cmdline_fmt, ap); +} + QOSState *qtest_pc_boot(const char *cmdline_fmt, ...) { QOSState *qs; diff --git a/tests/libqos/libqos-pc.h b/tests/libqos/libqos-pc.h index 316857d32f..b1820c5739 100644 --- a/tests/libqos/libqos-pc.h +++ b/tests/libqos/libqos-pc.h @@ -3,6 +3,7 @@ #include "libqos/libqos.h" +QOSState *qtest_pc_vboot(const char *cmdline_fmt, va_list ap); QOSState *qtest_pc_boot(const char *cmdline_fmt, ...); void qtest_pc_shutdown(QOSState *qs); diff --git a/tests/libqos/libqos.c b/tests/libqos/libqos.c index bc8beb281f..7e7207856e 100644 --- a/tests/libqos/libqos.c +++ b/tests/libqos/libqos.c @@ -61,3 +61,69 @@ void qtest_shutdown(QOSState *qs) qtest_quit(qs->qts); g_free(qs); } + +void mkimg(const char *file, const char *fmt, unsigned size_mb) +{ + gchar *cli; + bool ret; + int rc; + GError *err = NULL; + char *qemu_img_path; + gchar *out, *out2; + char *abs_path; + + qemu_img_path = getenv("QTEST_QEMU_IMG"); + abs_path = realpath(qemu_img_path, NULL); + assert(qemu_img_path); + + cli = g_strdup_printf("%s create -f %s %s %uM", abs_path, + fmt, file, size_mb); + ret = g_spawn_command_line_sync(cli, &out, &out2, &rc, &err); + if (err) { + fprintf(stderr, "%s\n", err->message); + g_error_free(err); + } + g_assert(ret && !err); + + /* In glib 2.34, we have g_spawn_check_exit_status. in 2.12, we don't. + * glib 2.43.91 implementation assumes that any non-zero is an error for + * windows, but uses extra precautions for Linux. However, + * 0 is only possible if the program exited normally, so that should be + * sufficient for our purposes on all platforms, here. 
*/ + if (rc) { + fprintf(stderr, "qemu-img returned status code %d\n", rc); + } + g_assert(!rc); + + g_free(out); + g_free(out2); + g_free(cli); + free(abs_path); +} + +void mkqcow2(const char *file, unsigned size_mb) +{ + return mkimg(file, "qcow2", size_mb); +} + +void prepare_blkdebug_script(const char *debug_fn, const char *event) +{ + FILE *debug_file = fopen(debug_fn, "w"); + int ret; + + fprintf(debug_file, "[inject-error]\n"); + fprintf(debug_file, "event = \"%s\"\n", event); + fprintf(debug_file, "errno = \"5\"\n"); + fprintf(debug_file, "state = \"1\"\n"); + fprintf(debug_file, "immediately = \"off\"\n"); + fprintf(debug_file, "once = \"on\"\n"); + + fprintf(debug_file, "[set-state]\n"); + fprintf(debug_file, "event = \"%s\"\n", event); + fprintf(debug_file, "new_state = \"2\"\n"); + fflush(debug_file); + g_assert(!ferror(debug_file)); + + ret = fclose(debug_file); + g_assert(ret == 0); +} diff --git a/tests/libqos/libqos.h b/tests/libqos/libqos.h index 612d41e5e9..f57362b688 100644 --- a/tests/libqos/libqos.h +++ b/tests/libqos/libqos.h @@ -19,6 +19,9 @@ typedef struct QOSState { QOSState *qtest_vboot(QOSOps *ops, const char *cmdline_fmt, va_list ap); QOSState *qtest_boot(QOSOps *ops, const char *cmdline_fmt, ...); void qtest_shutdown(QOSState *qs); +void mkimg(const char *file, const char *fmt, unsigned size_mb); +void mkqcow2(const char *file, unsigned size_mb); +void prepare_blkdebug_script(const char *debug_fn, const char *event); static inline uint64_t qmalloc(QOSState *q, size_t bytes) { diff --git a/tests/libqtest.c b/tests/libqtest.c index 12d65bd1e6..a525dc532c 100644 --- a/tests/libqtest.c +++ b/tests/libqtest.c @@ -388,7 +388,12 @@ QDict *qtest_qmp_receive(QTestState *s) return qmp.response; } -QDict *qtest_qmpv(QTestState *s, const char *fmt, va_list ap) +/** + * Allow users to send a message without waiting for the reply, + * in the case that they choose to discard all replies up until + * a particular EVENT is received. + */ +void qtest_async_qmpv(QTestState *s, const char *fmt, va_list ap) { va_list ap_copy; QObject *qobj; @@ -417,6 +422,11 @@ QDict *qtest_qmpv(QTestState *s, const char *fmt, va_list ap) QDECREF(qstr); qobject_decref(qobj); } +} + +QDict *qtest_qmpv(QTestState *s, const char *fmt, va_list ap) +{ + qtest_async_qmpv(s, fmt, ap); /* Receive reply */ return qtest_qmp_receive(s); @@ -433,6 +443,15 @@ QDict *qtest_qmp(QTestState *s, const char *fmt, ...) return response; } +void qtest_async_qmp(QTestState *s, const char *fmt, ...) +{ + va_list ap; + + va_start(ap, fmt); + qtest_async_qmpv(s, fmt, ap); + va_end(ap); +} + void qtest_qmpv_discard_response(QTestState *s, const char *fmt, va_list ap) { QDict *response = qtest_qmpv(s, fmt, ap); @@ -450,9 +469,26 @@ void qtest_qmp_discard_response(QTestState *s, const char *fmt, ...) QDECREF(response); } +void qtest_qmp_eventwait(QTestState *s, const char *event) +{ + QDict *response; + + for (;;) { + response = qtest_qmp_receive(s); + if ((qdict_haskey(response, "event")) && + (strcmp(qdict_get_str(response, "event"), event) == 0)) { + QDECREF(response); + break; + } + QDECREF(response); + } +} + + const char *qtest_get_arch(void) { const char *qemu = getenv("QTEST_QEMU_BINARY"); + g_assert(qemu != NULL); const char *end = strrchr(qemu, '/'); return end + strlen("/qemu-system-"); @@ -695,6 +731,15 @@ QDict *qmp(const char *fmt, ...) return response; } +void qmp_async(const char *fmt, ...) 
+{ + va_list ap; + + va_start(ap, fmt); + qtest_async_qmpv(global_qtest, fmt, ap); + va_end(ap); +} + void qmp_discard_response(const char *fmt, ...) { va_list ap; diff --git a/tests/libqtest.h b/tests/libqtest.h index 03469b8781..4b54b5da9e 100644 --- a/tests/libqtest.h +++ b/tests/libqtest.h @@ -64,6 +64,15 @@ void qtest_qmp_discard_response(QTestState *s, const char *fmt, ...); QDict *qtest_qmp(QTestState *s, const char *fmt, ...); /** + * qtest_async_qmp: + * @s: #QTestState instance to operate on. + * @fmt...: QMP message to send to qemu + * + * Sends a QMP message to QEMU and leaves the response in the stream. + */ +void qtest_async_qmp(QTestState *s, const char *fmt, ...); + +/** * qtest_qmpv_discard_response: * @s: #QTestState instance to operate on. * @fmt: QMP message to send to QEMU @@ -84,6 +93,16 @@ void qtest_qmpv_discard_response(QTestState *s, const char *fmt, va_list ap); QDict *qtest_qmpv(QTestState *s, const char *fmt, va_list ap); /** + * qtest_async_qmpv: + * @s: #QTestState instance to operate on. + * @fmt: QMP message to send to QEMU + * @ap: QMP message arguments + * + * Sends a QMP message to QEMU and leaves the response in the stream. + */ +void qtest_async_qmpv(QTestState *s, const char *fmt, va_list ap); + +/** * qtest_receive: * @s: #QTestState instance to operate on. * @@ -92,6 +111,15 @@ QDict *qtest_qmpv(QTestState *s, const char *fmt, va_list ap); QDict *qtest_qmp_receive(QTestState *s); /** + * qtest_qmp_eventwait: + * @s: #QTestState instance to operate on. + * @event: event to wait for. + * + * Continuously polls for QMP responses until it receives the desired event. + */ +void qtest_qmp_eventwait(QTestState *s, const char *event); + +/** * qtest_get_irq: * @s: #QTestState instance to operate on. * @num: Interrupt to observe. @@ -411,6 +439,14 @@ static inline void qtest_end(void) QDict *qmp(const char *fmt, ...); /** + * qmp_async: + * @fmt...: QMP message to send to qemu + * + * Sends a QMP message to QEMU and leaves the response in the stream. + */ +void qmp_async(const char *fmt, ...); + +/** * qmp_discard_response: * @fmt...: QMP message to send to qemu * @@ -429,6 +465,17 @@ static inline QDict *qmp_receive(void) } /** + * qmp_eventwait: + * @event: event to wait for. + * + * Continuously polls for QMP responses until it receives the desired event. + */ +static inline void qmp_eventwait(const char *event) +{ + return qtest_qmp_eventwait(global_qtest, event); +} + +/** * get_irq: * @num: Interrupt to observe. * diff --git a/tests/qapi-schema/alternate-array.err b/tests/qapi-schema/alternate-array.err new file mode 100644 index 0000000000..7b930c64ab --- /dev/null +++ b/tests/qapi-schema/alternate-array.err @@ -0,0 +1 @@ +tests/qapi-schema/alternate-array.json:5: Member 'two' of alternate 'Alt' cannot be an array diff --git a/tests/qapi-schema/alternate-array.exit b/tests/qapi-schema/alternate-array.exit new file mode 100644 index 0000000000..d00491fd7e --- /dev/null +++ b/tests/qapi-schema/alternate-array.exit @@ -0,0 +1 @@ +1 diff --git a/tests/qapi-schema/alternate-array.json b/tests/qapi-schema/alternate-array.json new file mode 100644 index 0000000000..f241aac122 --- /dev/null +++ b/tests/qapi-schema/alternate-array.json @@ -0,0 +1,7 @@ +# we do not allow array branches in alternates +# TODO: should we support this?
+{ 'struct': 'One', + 'data': { 'name': 'str' } } +{ 'alternate': 'Alt', + 'data': { 'one': 'One', + 'two': [ 'int' ] } } diff --git a/tests/qapi-schema/alternate-array.out b/tests/qapi-schema/alternate-array.out new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/alternate-array.out diff --git a/tests/qapi-schema/alternate-base.err b/tests/qapi-schema/alternate-base.err new file mode 100644 index 0000000000..30d8a34373 --- /dev/null +++ b/tests/qapi-schema/alternate-base.err @@ -0,0 +1 @@ +tests/qapi-schema/alternate-base.json:4: Unknown key 'base' in alternate 'Alt' diff --git a/tests/qapi-schema/alternate-base.exit b/tests/qapi-schema/alternate-base.exit new file mode 100644 index 0000000000..d00491fd7e --- /dev/null +++ b/tests/qapi-schema/alternate-base.exit @@ -0,0 +1 @@ +1 diff --git a/tests/qapi-schema/alternate-base.json b/tests/qapi-schema/alternate-base.json new file mode 100644 index 0000000000..529430ecf2 --- /dev/null +++ b/tests/qapi-schema/alternate-base.json @@ -0,0 +1,6 @@ +# we reject alternate with base type +{ 'struct': 'Base', + 'data': { 'string': 'str' } } +{ 'alternate': 'Alt', + 'base': 'Base', + 'data': { 'number': 'int' } } diff --git a/tests/qapi-schema/alternate-base.out b/tests/qapi-schema/alternate-base.out new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/alternate-base.out diff --git a/tests/qapi-schema/alternate-clash.err b/tests/qapi-schema/alternate-clash.err new file mode 100644 index 0000000000..51bea3e272 --- /dev/null +++ b/tests/qapi-schema/alternate-clash.err @@ -0,0 +1 @@ +tests/qapi-schema/alternate-clash.json:2: Alternate 'Alt1' member 'ONE' clashes with 'one' diff --git a/tests/qapi-schema/alternate-clash.exit b/tests/qapi-schema/alternate-clash.exit new file mode 100644 index 0000000000..d00491fd7e --- /dev/null +++ b/tests/qapi-schema/alternate-clash.exit @@ -0,0 +1 @@ +1 diff --git a/tests/qapi-schema/alternate-clash.json b/tests/qapi-schema/alternate-clash.json new file mode 100644 index 0000000000..39479353bb --- /dev/null +++ b/tests/qapi-schema/alternate-clash.json @@ -0,0 +1,3 @@ +# we detect C enum collisions in an alternate +{ 'alternate': 'Alt1', + 'data': { 'one': 'str', 'ONE': 'int' } } diff --git a/tests/qapi-schema/alternate-clash.out b/tests/qapi-schema/alternate-clash.out new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/alternate-clash.out diff --git a/tests/qapi-schema/alternate-conflict-dict.err b/tests/qapi-schema/alternate-conflict-dict.err new file mode 100644 index 0000000000..0f411f4faf --- /dev/null +++ b/tests/qapi-schema/alternate-conflict-dict.err @@ -0,0 +1 @@ +tests/qapi-schema/alternate-conflict-dict.json:6: Alternate 'Alt' member 'two' can't be distinguished from member 'one' diff --git a/tests/qapi-schema/alternate-conflict-dict.exit b/tests/qapi-schema/alternate-conflict-dict.exit new file mode 100644 index 0000000000..d00491fd7e --- /dev/null +++ b/tests/qapi-schema/alternate-conflict-dict.exit @@ -0,0 +1 @@ +1 diff --git a/tests/qapi-schema/alternate-conflict-dict.json b/tests/qapi-schema/alternate-conflict-dict.json new file mode 100644 index 0000000000..d566cca816 --- /dev/null +++ b/tests/qapi-schema/alternate-conflict-dict.json @@ -0,0 +1,8 @@ +# we reject alternates with multiple object branches +{ 'struct': 'One', + 'data': { 'name': 'str' } } +{ 'struct': 'Two', + 'data': { 'value': 'int' } } +{ 'alternate': 'Alt', + 'data': { 'one': 'One', + 'two': 'Two' } } diff --git 
a/tests/qapi-schema/alternate-conflict-dict.out b/tests/qapi-schema/alternate-conflict-dict.out new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/alternate-conflict-dict.out diff --git a/tests/qapi-schema/alternate-conflict-string.err b/tests/qapi-schema/alternate-conflict-string.err new file mode 100644 index 0000000000..fc523b0879 --- /dev/null +++ b/tests/qapi-schema/alternate-conflict-string.err @@ -0,0 +1 @@ +tests/qapi-schema/alternate-conflict-string.json:4: Alternate 'Alt' member 'two' can't be distinguished from member 'one' diff --git a/tests/qapi-schema/alternate-conflict-string.exit b/tests/qapi-schema/alternate-conflict-string.exit new file mode 100644 index 0000000000..d00491fd7e --- /dev/null +++ b/tests/qapi-schema/alternate-conflict-string.exit @@ -0,0 +1 @@ +1 diff --git a/tests/qapi-schema/alternate-conflict-string.json b/tests/qapi-schema/alternate-conflict-string.json new file mode 100644 index 0000000000..72f04a820a --- /dev/null +++ b/tests/qapi-schema/alternate-conflict-string.json @@ -0,0 +1,6 @@ +# we reject alternates with multiple string-like branches +{ 'enum': 'Enum', + 'data': [ 'hello', 'world' ] } +{ 'alternate': 'Alt', + 'data': { 'one': 'str', + 'two': 'Enum' } } diff --git a/tests/qapi-schema/alternate-conflict-string.out b/tests/qapi-schema/alternate-conflict-string.out new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/alternate-conflict-string.out diff --git a/tests/qapi-schema/alternate-good.err b/tests/qapi-schema/alternate-good.err new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/alternate-good.err diff --git a/tests/qapi-schema/alternate-good.exit b/tests/qapi-schema/alternate-good.exit new file mode 100644 index 0000000000..573541ac97 --- /dev/null +++ b/tests/qapi-schema/alternate-good.exit @@ -0,0 +1 @@ +0 diff --git a/tests/qapi-schema/alternate-good.json b/tests/qapi-schema/alternate-good.json new file mode 100644 index 0000000000..33717704ce --- /dev/null +++ b/tests/qapi-schema/alternate-good.json @@ -0,0 +1,9 @@ +# Working example of alternate +{ 'struct': 'Data', + 'data': { '*number': 'int', '*name': 'str' } } +{ 'enum': 'Enum', + 'data': [ 'hello', 'world' ] } +{ 'alternate': 'Alt', + 'data': { 'value': 'int', + 'string': 'Enum', + 'struct': 'Data' } } diff --git a/tests/qapi-schema/alternate-good.out b/tests/qapi-schema/alternate-good.out new file mode 100644 index 0000000000..99848eefbb --- /dev/null +++ b/tests/qapi-schema/alternate-good.out @@ -0,0 +1,6 @@ +[OrderedDict([('struct', 'Data'), ('data', OrderedDict([('*number', 'int'), ('*name', 'str')]))]), + OrderedDict([('enum', 'Enum'), ('data', ['hello', 'world'])]), + OrderedDict([('alternate', 'Alt'), ('data', OrderedDict([('value', 'int'), ('string', 'Enum'), ('struct', 'Data')]))])] +[{'enum_name': 'Enum', 'enum_values': ['hello', 'world']}, + {'enum_name': 'AltKind', 'enum_values': None}] +[OrderedDict([('struct', 'Data'), ('data', OrderedDict([('*number', 'int'), ('*name', 'str')]))])] diff --git a/tests/qapi-schema/alternate-nested.err b/tests/qapi-schema/alternate-nested.err new file mode 100644 index 0000000000..4d1187e60e --- /dev/null +++ b/tests/qapi-schema/alternate-nested.err @@ -0,0 +1 @@ +tests/qapi-schema/alternate-nested.json:4: Member 'nested' of alternate 'Alt2' cannot use alternate type 'Alt1' diff --git a/tests/qapi-schema/alternate-nested.exit b/tests/qapi-schema/alternate-nested.exit new file mode 100644 index 0000000000..d00491fd7e --- /dev/null 
+++ b/tests/qapi-schema/alternate-nested.exit @@ -0,0 +1 @@ +1 diff --git a/tests/qapi-schema/alternate-nested.json b/tests/qapi-schema/alternate-nested.json new file mode 100644 index 0000000000..c4233b9f33 --- /dev/null +++ b/tests/qapi-schema/alternate-nested.json @@ -0,0 +1,5 @@ +# we reject a nested alternate branch +{ 'alternate': 'Alt1', + 'data': { 'name': 'str', 'value': 'int' } } +{ 'alternate': 'Alt2', + 'data': { 'nested': 'Alt1' } } diff --git a/tests/qapi-schema/alternate-nested.out b/tests/qapi-schema/alternate-nested.out new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/alternate-nested.out diff --git a/tests/qapi-schema/alternate-unknown.err b/tests/qapi-schema/alternate-unknown.err new file mode 100644 index 0000000000..dea45dc730 --- /dev/null +++ b/tests/qapi-schema/alternate-unknown.err @@ -0,0 +1 @@ +tests/qapi-schema/alternate-unknown.json:2: Member 'unknown' of alternate 'Alt' uses unknown type 'MissingType' diff --git a/tests/qapi-schema/alternate-unknown.exit b/tests/qapi-schema/alternate-unknown.exit new file mode 100644 index 0000000000..d00491fd7e --- /dev/null +++ b/tests/qapi-schema/alternate-unknown.exit @@ -0,0 +1 @@ +1 diff --git a/tests/qapi-schema/alternate-unknown.json b/tests/qapi-schema/alternate-unknown.json new file mode 100644 index 0000000000..ad5c103028 --- /dev/null +++ b/tests/qapi-schema/alternate-unknown.json @@ -0,0 +1,3 @@ +# we reject an alternate with unknown type in branch +{ 'alternate': 'Alt', + 'data': { 'unknown': 'MissingType' } } diff --git a/tests/qapi-schema/alternate-unknown.out b/tests/qapi-schema/alternate-unknown.out new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/alternate-unknown.out diff --git a/tests/qapi-schema/bad-base.err b/tests/qapi-schema/bad-base.err new file mode 100644 index 0000000000..154274bdd3 --- /dev/null +++ b/tests/qapi-schema/bad-base.err @@ -0,0 +1 @@ +tests/qapi-schema/bad-base.json:3: 'base' for struct 'MyType' cannot use union type 'Union' diff --git a/tests/qapi-schema/bad-base.exit b/tests/qapi-schema/bad-base.exit new file mode 100644 index 0000000000..d00491fd7e --- /dev/null +++ b/tests/qapi-schema/bad-base.exit @@ -0,0 +1 @@ +1 diff --git a/tests/qapi-schema/bad-base.json b/tests/qapi-schema/bad-base.json new file mode 100644 index 0000000000..a634331cdd --- /dev/null +++ b/tests/qapi-schema/bad-base.json @@ -0,0 +1,3 @@ +# we reject a base that is not a struct +{ 'union': 'Union', 'data': { 'a': 'int', 'b': 'str' } } +{ 'struct': 'MyType', 'base': 'Union', 'data': { 'c': 'int' } } diff --git a/tests/qapi-schema/bad-base.out b/tests/qapi-schema/bad-base.out new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/bad-base.out diff --git a/tests/qapi-schema/bad-data.err b/tests/qapi-schema/bad-data.err new file mode 100644 index 0000000000..8523ac4f46 --- /dev/null +++ b/tests/qapi-schema/bad-data.err @@ -0,0 +1 @@ +tests/qapi-schema/bad-data.json:2: 'data' for command 'oops' cannot be an array diff --git a/tests/qapi-schema/bad-data.exit b/tests/qapi-schema/bad-data.exit new file mode 100644 index 0000000000..d00491fd7e --- /dev/null +++ b/tests/qapi-schema/bad-data.exit @@ -0,0 +1 @@ +1 diff --git a/tests/qapi-schema/bad-data.json b/tests/qapi-schema/bad-data.json new file mode 100644 index 0000000000..832eeb76f4 --- /dev/null +++ b/tests/qapi-schema/bad-data.json @@ -0,0 +1,2 @@ +# we ensure 'data' is a dictionary for all but enums +{ 'command': 'oops', 'data': [ ] } diff --git 
a/tests/qapi-schema/bad-data.out b/tests/qapi-schema/bad-data.out new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/bad-data.out diff --git a/tests/qapi-schema/bad-ident.err b/tests/qapi-schema/bad-ident.err new file mode 100644 index 0000000000..c4190602b5 --- /dev/null +++ b/tests/qapi-schema/bad-ident.err @@ -0,0 +1 @@ +tests/qapi-schema/bad-ident.json:2: 'struct' does not allow optional name '*oops' diff --git a/tests/qapi-schema/bad-ident.exit b/tests/qapi-schema/bad-ident.exit new file mode 100644 index 0000000000..d00491fd7e --- /dev/null +++ b/tests/qapi-schema/bad-ident.exit @@ -0,0 +1 @@ +1 diff --git a/tests/qapi-schema/bad-ident.json b/tests/qapi-schema/bad-ident.json new file mode 100644 index 0000000000..763627ad23 --- /dev/null +++ b/tests/qapi-schema/bad-ident.json @@ -0,0 +1,2 @@ +# we reject creating a type name with bad name +{ 'struct': '*oops', 'data': { 'i': 'int' } } diff --git a/tests/qapi-schema/bad-ident.out b/tests/qapi-schema/bad-ident.out new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/bad-ident.out diff --git a/tests/qapi-schema/bad-type-bool.err b/tests/qapi-schema/bad-type-bool.err new file mode 100644 index 0000000000..62fd70baaf --- /dev/null +++ b/tests/qapi-schema/bad-type-bool.err @@ -0,0 +1 @@ +tests/qapi-schema/bad-type-bool.json:2: 'struct' key must have a string value diff --git a/tests/qapi-schema/bad-type-bool.exit b/tests/qapi-schema/bad-type-bool.exit new file mode 100644 index 0000000000..d00491fd7e --- /dev/null +++ b/tests/qapi-schema/bad-type-bool.exit @@ -0,0 +1 @@ +1 diff --git a/tests/qapi-schema/bad-type-bool.json b/tests/qapi-schema/bad-type-bool.json new file mode 100644 index 0000000000..bde17b56c4 --- /dev/null +++ b/tests/qapi-schema/bad-type-bool.json @@ -0,0 +1,2 @@ +# we reject an expression with a metatype that is not a string +{ 'struct': true, 'data': { } } diff --git a/tests/qapi-schema/bad-type-bool.out b/tests/qapi-schema/bad-type-bool.out new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/bad-type-bool.out diff --git a/tests/qapi-schema/bad-type-dict.err b/tests/qapi-schema/bad-type-dict.err new file mode 100644 index 0000000000..0b2a2aeac4 --- /dev/null +++ b/tests/qapi-schema/bad-type-dict.err @@ -0,0 +1 @@ +tests/qapi-schema/bad-type-dict.json:2: 'command' key must have a string value diff --git a/tests/qapi-schema/bad-type-dict.exit b/tests/qapi-schema/bad-type-dict.exit new file mode 100644 index 0000000000..d00491fd7e --- /dev/null +++ b/tests/qapi-schema/bad-type-dict.exit @@ -0,0 +1 @@ +1 diff --git a/tests/qapi-schema/bad-type-dict.json b/tests/qapi-schema/bad-type-dict.json new file mode 100644 index 0000000000..2a91b241f8 --- /dev/null +++ b/tests/qapi-schema/bad-type-dict.json @@ -0,0 +1,2 @@ +# we reject an expression with a metatype that is not a string +{ 'command': { } } diff --git a/tests/qapi-schema/bad-type-dict.out b/tests/qapi-schema/bad-type-dict.out new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/bad-type-dict.out diff --git a/tests/qapi-schema/bad-type-int.err b/tests/qapi-schema/bad-type-int.err new file mode 100644 index 0000000000..da89895404 --- /dev/null +++ b/tests/qapi-schema/bad-type-int.err @@ -0,0 +1 @@ +tests/qapi-schema/bad-type-int.json:3:13: Stray "1" diff --git a/tests/qapi-schema/bad-type-int.exit b/tests/qapi-schema/bad-type-int.exit new file mode 100644 index 0000000000..d00491fd7e --- /dev/null +++ 
b/tests/qapi-schema/bad-type-int.exit @@ -0,0 +1 @@ +1 diff --git a/tests/qapi-schema/bad-type-int.json b/tests/qapi-schema/bad-type-int.json new file mode 100644 index 0000000000..56fc6f8126 --- /dev/null +++ b/tests/qapi-schema/bad-type-int.json @@ -0,0 +1,3 @@ +# we reject an expression with a metatype that is not a string +# FIXME: once the parser understands integer inputs, improve the error message +{ 'struct': 1, 'data': { } } diff --git a/tests/qapi-schema/bad-type-int.out b/tests/qapi-schema/bad-type-int.out new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/bad-type-int.out diff --git a/tests/qapi-schema/command-int.err b/tests/qapi-schema/command-int.err new file mode 100644 index 0000000000..0f9300679b --- /dev/null +++ b/tests/qapi-schema/command-int.err @@ -0,0 +1 @@ +tests/qapi-schema/command-int.json:2: built-in 'int' is already defined diff --git a/tests/qapi-schema/command-int.exit b/tests/qapi-schema/command-int.exit new file mode 100644 index 0000000000..d00491fd7e --- /dev/null +++ b/tests/qapi-schema/command-int.exit @@ -0,0 +1 @@ +1 diff --git a/tests/qapi-schema/command-int.json b/tests/qapi-schema/command-int.json new file mode 100644 index 0000000000..c90d408abe --- /dev/null +++ b/tests/qapi-schema/command-int.json @@ -0,0 +1,3 @@ +# we reject collisions between commands and types +{ 'command': 'int', 'data': { 'character': 'str' }, + 'returns': { 'value': 'int' } } diff --git a/tests/qapi-schema/command-int.out b/tests/qapi-schema/command-int.out new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/command-int.out diff --git a/tests/qapi-schema/data-array-empty.err b/tests/qapi-schema/data-array-empty.err new file mode 100644 index 0000000000..f713f14893 --- /dev/null +++ b/tests/qapi-schema/data-array-empty.err @@ -0,0 +1 @@ +tests/qapi-schema/data-array-empty.json:2: Member 'empty' of 'data' for command 'oops': array type must contain single type name diff --git a/tests/qapi-schema/data-array-empty.exit b/tests/qapi-schema/data-array-empty.exit new file mode 100644 index 0000000000..d00491fd7e --- /dev/null +++ b/tests/qapi-schema/data-array-empty.exit @@ -0,0 +1 @@ +1 diff --git a/tests/qapi-schema/data-array-empty.json b/tests/qapi-schema/data-array-empty.json new file mode 100644 index 0000000000..652dcfb24a --- /dev/null +++ b/tests/qapi-schema/data-array-empty.json @@ -0,0 +1,2 @@ +# we reject an array for data if it does not contain a known type +{ 'command': 'oops', 'data': { 'empty': [ ] } } diff --git a/tests/qapi-schema/data-array-empty.out b/tests/qapi-schema/data-array-empty.out new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/data-array-empty.out diff --git a/tests/qapi-schema/data-array-unknown.err b/tests/qapi-schema/data-array-unknown.err new file mode 100644 index 0000000000..8b731bbcc8 --- /dev/null +++ b/tests/qapi-schema/data-array-unknown.err @@ -0,0 +1 @@ +tests/qapi-schema/data-array-unknown.json:2: Member 'array' of 'data' for command 'oops' uses unknown type 'array of NoSuchType' diff --git a/tests/qapi-schema/data-array-unknown.exit b/tests/qapi-schema/data-array-unknown.exit new file mode 100644 index 0000000000..d00491fd7e --- /dev/null +++ b/tests/qapi-schema/data-array-unknown.exit @@ -0,0 +1 @@ +1 diff --git a/tests/qapi-schema/data-array-unknown.json b/tests/qapi-schema/data-array-unknown.json new file mode 100644 index 0000000000..6f3e883315 --- /dev/null +++ b/tests/qapi-schema/data-array-unknown.json @@ -0,0 +1,2 @@ 
+# we reject an array for data if it does not contain a known type +{ 'command': 'oops', 'data': { 'array': [ 'NoSuchType' ] } } diff --git a/tests/qapi-schema/data-array-unknown.out b/tests/qapi-schema/data-array-unknown.out new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/data-array-unknown.out diff --git a/tests/qapi-schema/data-int.err b/tests/qapi-schema/data-int.err new file mode 100644 index 0000000000..1a9b077c06 --- /dev/null +++ b/tests/qapi-schema/data-int.err @@ -0,0 +1 @@ +tests/qapi-schema/data-int.json:2: 'data' for command 'oops' cannot use built-in type 'int' diff --git a/tests/qapi-schema/data-int.exit b/tests/qapi-schema/data-int.exit new file mode 100644 index 0000000000..d00491fd7e --- /dev/null +++ b/tests/qapi-schema/data-int.exit @@ -0,0 +1 @@ +1 diff --git a/tests/qapi-schema/data-int.json b/tests/qapi-schema/data-int.json new file mode 100644 index 0000000000..a334d92e8c --- /dev/null +++ b/tests/qapi-schema/data-int.json @@ -0,0 +1,2 @@ +# we reject commands where data is not an array or complex type +{ 'command': 'oops', 'data': 'int' } diff --git a/tests/qapi-schema/data-int.out b/tests/qapi-schema/data-int.out new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/data-int.out diff --git a/tests/qapi-schema/data-member-array-bad.err b/tests/qapi-schema/data-member-array-bad.err new file mode 100644 index 0000000000..2c072d5986 --- /dev/null +++ b/tests/qapi-schema/data-member-array-bad.err @@ -0,0 +1 @@ +tests/qapi-schema/data-member-array-bad.json:2: Member 'member' of 'data' for command 'oops': array type must contain single type name diff --git a/tests/qapi-schema/data-member-array-bad.exit b/tests/qapi-schema/data-member-array-bad.exit new file mode 100644 index 0000000000..d00491fd7e --- /dev/null +++ b/tests/qapi-schema/data-member-array-bad.exit @@ -0,0 +1 @@ +1 diff --git a/tests/qapi-schema/data-member-array-bad.json b/tests/qapi-schema/data-member-array-bad.json new file mode 100644 index 0000000000..b2ff144ec6 --- /dev/null +++ b/tests/qapi-schema/data-member-array-bad.json @@ -0,0 +1,2 @@ +# we reject data if it does not contain a valid array type +{ 'command': 'oops', 'data': { 'member': [ { 'nested': 'str' } ] } } diff --git a/tests/qapi-schema/data-member-array-bad.out b/tests/qapi-schema/data-member-array-bad.out new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/data-member-array-bad.out diff --git a/tests/qapi-schema/data-member-array.err b/tests/qapi-schema/data-member-array.err new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/data-member-array.err diff --git a/tests/qapi-schema/data-member-array.exit b/tests/qapi-schema/data-member-array.exit new file mode 100644 index 0000000000..573541ac97 --- /dev/null +++ b/tests/qapi-schema/data-member-array.exit @@ -0,0 +1 @@ +0 diff --git a/tests/qapi-schema/data-member-array.json b/tests/qapi-schema/data-member-array.json new file mode 100644 index 0000000000..e6f7f5da13 --- /dev/null +++ b/tests/qapi-schema/data-member-array.json @@ -0,0 +1,4 @@ +# valid array members +{ 'enum': 'abc', 'data': [ 'a', 'b', 'c' ] } +{ 'struct': 'def', 'data': { 'array': [ 'abc' ] } } +{ 'command': 'okay', 'data': { 'member1': [ 'int' ], 'member2': [ 'def' ] } } diff --git a/tests/qapi-schema/data-member-array.out b/tests/qapi-schema/data-member-array.out new file mode 100644 index 0000000000..c39fa25484 --- /dev/null +++ b/tests/qapi-schema/data-member-array.out @@ -0,0 
+1,5 @@ +[OrderedDict([('enum', 'abc'), ('data', ['a', 'b', 'c'])]), + OrderedDict([('struct', 'def'), ('data', OrderedDict([('array', ['abc'])]))]), + OrderedDict([('command', 'okay'), ('data', OrderedDict([('member1', ['int']), ('member2', ['def'])]))])] +[{'enum_name': 'abc', 'enum_values': ['a', 'b', 'c']}] +[OrderedDict([('struct', 'def'), ('data', OrderedDict([('array', ['abc'])]))])] diff --git a/tests/qapi-schema/data-member-unknown.err b/tests/qapi-schema/data-member-unknown.err new file mode 100644 index 0000000000..ab905db802 --- /dev/null +++ b/tests/qapi-schema/data-member-unknown.err @@ -0,0 +1 @@ +tests/qapi-schema/data-member-unknown.json:2: Member 'member' of 'data' for command 'oops' uses unknown type 'NoSuchType' diff --git a/tests/qapi-schema/data-member-unknown.exit b/tests/qapi-schema/data-member-unknown.exit new file mode 100644 index 0000000000..d00491fd7e --- /dev/null +++ b/tests/qapi-schema/data-member-unknown.exit @@ -0,0 +1 @@ +1 diff --git a/tests/qapi-schema/data-member-unknown.json b/tests/qapi-schema/data-member-unknown.json new file mode 100644 index 0000000000..342a41ec90 --- /dev/null +++ b/tests/qapi-schema/data-member-unknown.json @@ -0,0 +1,2 @@ +# we reject data if it does not contain a known type +{ 'command': 'oops', 'data': { 'member': 'NoSuchType' } } diff --git a/tests/qapi-schema/data-member-unknown.out b/tests/qapi-schema/data-member-unknown.out new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/data-member-unknown.out diff --git a/tests/qapi-schema/data-unknown.err b/tests/qapi-schema/data-unknown.err new file mode 100644 index 0000000000..5b07277a95 --- /dev/null +++ b/tests/qapi-schema/data-unknown.err @@ -0,0 +1 @@ +tests/qapi-schema/data-unknown.json:2: 'data' for command 'oops' uses unknown type 'NoSuchType' diff --git a/tests/qapi-schema/data-unknown.exit b/tests/qapi-schema/data-unknown.exit new file mode 100644 index 0000000000..d00491fd7e --- /dev/null +++ b/tests/qapi-schema/data-unknown.exit @@ -0,0 +1 @@ +1 diff --git a/tests/qapi-schema/data-unknown.json b/tests/qapi-schema/data-unknown.json new file mode 100644 index 0000000000..32aba43b3f --- /dev/null +++ b/tests/qapi-schema/data-unknown.json @@ -0,0 +1,2 @@ +# we reject data if it does not contain a known type +{ 'command': 'oops', 'data': 'NoSuchType' } diff --git a/tests/qapi-schema/data-unknown.out b/tests/qapi-schema/data-unknown.out new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/data-unknown.out diff --git a/tests/qapi-schema/double-data.err b/tests/qapi-schema/double-data.err new file mode 100644 index 0000000000..cc765c4ff2 --- /dev/null +++ b/tests/qapi-schema/double-data.err @@ -0,0 +1 @@ +tests/qapi-schema/double-data.json:2:41: Duplicate key "data" diff --git a/tests/qapi-schema/double-data.exit b/tests/qapi-schema/double-data.exit new file mode 100644 index 0000000000..d00491fd7e --- /dev/null +++ b/tests/qapi-schema/double-data.exit @@ -0,0 +1 @@ +1 diff --git a/tests/qapi-schema/double-data.json b/tests/qapi-schema/double-data.json new file mode 100644 index 0000000000..e76b519538 --- /dev/null +++ b/tests/qapi-schema/double-data.json @@ -0,0 +1,2 @@ +# we reject an expression with duplicate top-level keys +{ 'struct': 'bar', 'data': { }, 'data': { 'string': 'str'} } diff --git a/tests/qapi-schema/double-data.out b/tests/qapi-schema/double-data.out new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/double-data.out diff --git 
a/tests/qapi-schema/double-type.err b/tests/qapi-schema/double-type.err new file mode 100644 index 0000000000..f9613c6d6b --- /dev/null +++ b/tests/qapi-schema/double-type.err @@ -0,0 +1 @@ +tests/qapi-schema/double-type.json:2: Unknown key 'command' in struct 'bar' diff --git a/tests/qapi-schema/double-type.exit b/tests/qapi-schema/double-type.exit new file mode 100644 index 0000000000..d00491fd7e --- /dev/null +++ b/tests/qapi-schema/double-type.exit @@ -0,0 +1 @@ +1 diff --git a/tests/qapi-schema/double-type.json b/tests/qapi-schema/double-type.json new file mode 100644 index 0000000000..911fa7af50 --- /dev/null +++ b/tests/qapi-schema/double-type.json @@ -0,0 +1,2 @@ +# we reject an expression with ambiguous metatype +{ 'command': 'foo', 'struct': 'bar', 'data': { } } diff --git a/tests/qapi-schema/double-type.out b/tests/qapi-schema/double-type.out new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/double-type.out diff --git a/tests/qapi-schema/enum-bad-name.err b/tests/qapi-schema/enum-bad-name.err new file mode 100644 index 0000000000..9c3c1002b7 --- /dev/null +++ b/tests/qapi-schema/enum-bad-name.err @@ -0,0 +1 @@ +tests/qapi-schema/enum-bad-name.json:2: Member of enum 'MyEnum' uses invalid name 'not^possible' diff --git a/tests/qapi-schema/enum-bad-name.exit b/tests/qapi-schema/enum-bad-name.exit new file mode 100644 index 0000000000..d00491fd7e --- /dev/null +++ b/tests/qapi-schema/enum-bad-name.exit @@ -0,0 +1 @@ +1 diff --git a/tests/qapi-schema/enum-bad-name.json b/tests/qapi-schema/enum-bad-name.json new file mode 100644 index 0000000000..8506562b31 --- /dev/null +++ b/tests/qapi-schema/enum-bad-name.json @@ -0,0 +1,2 @@ +# we ensure all enum names can map to C +{ 'enum': 'MyEnum', 'data': [ 'not^possible' ] } diff --git a/tests/qapi-schema/enum-bad-name.out b/tests/qapi-schema/enum-bad-name.out new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/enum-bad-name.out diff --git a/tests/qapi-schema/enum-clash-member.err b/tests/qapi-schema/enum-clash-member.err new file mode 100644 index 0000000000..48bd1360e7 --- /dev/null +++ b/tests/qapi-schema/enum-clash-member.err @@ -0,0 +1 @@ +tests/qapi-schema/enum-clash-member.json:2: Enum 'MyEnum' member 'ONE' clashes with 'one' diff --git a/tests/qapi-schema/enum-clash-member.exit b/tests/qapi-schema/enum-clash-member.exit new file mode 100644 index 0000000000..d00491fd7e --- /dev/null +++ b/tests/qapi-schema/enum-clash-member.exit @@ -0,0 +1 @@ +1 diff --git a/tests/qapi-schema/enum-clash-member.json b/tests/qapi-schema/enum-clash-member.json new file mode 100644 index 0000000000..b7dc02a28d --- /dev/null +++ b/tests/qapi-schema/enum-clash-member.json @@ -0,0 +1,2 @@ +# we reject enums where members will clash when mapped to C enum +{ 'enum': 'MyEnum', 'data': [ 'one', 'ONE' ] } diff --git a/tests/qapi-schema/enum-clash-member.out b/tests/qapi-schema/enum-clash-member.out new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/enum-clash-member.out diff --git a/tests/qapi-schema/enum-dict-member.err b/tests/qapi-schema/enum-dict-member.err new file mode 100644 index 0000000000..8ca146ea59 --- /dev/null +++ b/tests/qapi-schema/enum-dict-member.err @@ -0,0 +1 @@ +tests/qapi-schema/enum-dict-member.json:2: Member of enum 'MyEnum' requires a string name diff --git a/tests/qapi-schema/enum-dict-member.exit b/tests/qapi-schema/enum-dict-member.exit new file mode 100644 index 0000000000..d00491fd7e --- /dev/null +++ 
b/tests/qapi-schema/enum-dict-member.exit @@ -0,0 +1 @@ +1 diff --git a/tests/qapi-schema/enum-dict-member.json b/tests/qapi-schema/enum-dict-member.json new file mode 100644 index 0000000000..79672e0f09 --- /dev/null +++ b/tests/qapi-schema/enum-dict-member.json @@ -0,0 +1,2 @@ +# we reject any enum member that is not a string +{ 'enum': 'MyEnum', 'data': [ { 'value': 'str' } ] } diff --git a/tests/qapi-schema/enum-dict-member.out b/tests/qapi-schema/enum-dict-member.out new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/enum-dict-member.out diff --git a/tests/qapi-schema/enum-empty.err b/tests/qapi-schema/enum-empty.err new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/enum-empty.err diff --git a/tests/qapi-schema/enum-empty.exit b/tests/qapi-schema/enum-empty.exit new file mode 100644 index 0000000000..573541ac97 --- /dev/null +++ b/tests/qapi-schema/enum-empty.exit @@ -0,0 +1 @@ +0 diff --git a/tests/qapi-schema/enum-empty.json b/tests/qapi-schema/enum-empty.json new file mode 100644 index 0000000000..40d4e85a2f --- /dev/null +++ b/tests/qapi-schema/enum-empty.json @@ -0,0 +1,2 @@ +# An empty enum, although unusual, is currently acceptable +{ 'enum': 'MyEnum', 'data': [ ] } diff --git a/tests/qapi-schema/enum-empty.out b/tests/qapi-schema/enum-empty.out new file mode 100644 index 0000000000..3b75c1613c --- /dev/null +++ b/tests/qapi-schema/enum-empty.out @@ -0,0 +1,3 @@ +[OrderedDict([('enum', 'MyEnum'), ('data', [])])] +[{'enum_name': 'MyEnum', 'enum_values': []}] +[] diff --git a/tests/qapi-schema/enum-int-member.err b/tests/qapi-schema/enum-int-member.err new file mode 100644 index 0000000000..071c5213d8 --- /dev/null +++ b/tests/qapi-schema/enum-int-member.err @@ -0,0 +1 @@ +tests/qapi-schema/enum-int-member.json:3:31: Stray "1" diff --git a/tests/qapi-schema/enum-int-member.exit b/tests/qapi-schema/enum-int-member.exit new file mode 100644 index 0000000000..d00491fd7e --- /dev/null +++ b/tests/qapi-schema/enum-int-member.exit @@ -0,0 +1 @@ +1 diff --git a/tests/qapi-schema/enum-int-member.json b/tests/qapi-schema/enum-int-member.json new file mode 100644 index 0000000000..6c9c32e149 --- /dev/null +++ b/tests/qapi-schema/enum-int-member.json @@ -0,0 +1,3 @@ +# we reject any enum member that is not a string +# FIXME: once the parser understands integer inputs, improve the error message +{ 'enum': 'MyEnum', 'data': [ 1 ] } diff --git a/tests/qapi-schema/enum-int-member.out b/tests/qapi-schema/enum-int-member.out new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/enum-int-member.out diff --git a/tests/qapi-schema/enum-max-member.err b/tests/qapi-schema/enum-max-member.err new file mode 100644 index 0000000000..f77837fb45 --- /dev/null +++ b/tests/qapi-schema/enum-max-member.err @@ -0,0 +1 @@ +tests/qapi-schema/enum-max-member.json:3: Enum 'MyEnum' member 'max' clashes with '(automatic)' diff --git a/tests/qapi-schema/enum-max-member.exit b/tests/qapi-schema/enum-max-member.exit new file mode 100644 index 0000000000..d00491fd7e --- /dev/null +++ b/tests/qapi-schema/enum-max-member.exit @@ -0,0 +1 @@ +1 diff --git a/tests/qapi-schema/enum-max-member.json b/tests/qapi-schema/enum-max-member.json new file mode 100644 index 0000000000..4bcda0bf07 --- /dev/null +++ b/tests/qapi-schema/enum-max-member.json @@ -0,0 +1,3 @@ +# we reject user-supplied 'max' for clashing with implicit enum end +# TODO: should we instead munge the implicit value to avoid the clash? 
+{ 'enum': 'MyEnum', 'data': [ 'max' ] } diff --git a/tests/qapi-schema/enum-max-member.out b/tests/qapi-schema/enum-max-member.out new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/enum-max-member.out diff --git a/tests/qapi-schema/enum-missing-data.err b/tests/qapi-schema/enum-missing-data.err new file mode 100644 index 0000000000..ba4873ae69 --- /dev/null +++ b/tests/qapi-schema/enum-missing-data.err @@ -0,0 +1 @@ +tests/qapi-schema/enum-missing-data.json:2: Key 'data' is missing from enum 'MyEnum' diff --git a/tests/qapi-schema/enum-missing-data.exit b/tests/qapi-schema/enum-missing-data.exit new file mode 100644 index 0000000000..d00491fd7e --- /dev/null +++ b/tests/qapi-schema/enum-missing-data.exit @@ -0,0 +1 @@ +1 diff --git a/tests/qapi-schema/enum-missing-data.json b/tests/qapi-schema/enum-missing-data.json new file mode 100644 index 0000000000..558fd35e93 --- /dev/null +++ b/tests/qapi-schema/enum-missing-data.json @@ -0,0 +1,2 @@ +# we require that all QAPI enums have a data array +{ 'enum': 'MyEnum' } diff --git a/tests/qapi-schema/enum-missing-data.out b/tests/qapi-schema/enum-missing-data.out new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/enum-missing-data.out diff --git a/tests/qapi-schema/enum-union-clash.err b/tests/qapi-schema/enum-union-clash.err new file mode 100644 index 0000000000..c04e1a8064 --- /dev/null +++ b/tests/qapi-schema/enum-union-clash.err @@ -0,0 +1 @@ +tests/qapi-schema/enum-union-clash.json:2: enum 'UnionKind' should not end in 'Kind' diff --git a/tests/qapi-schema/enum-union-clash.exit b/tests/qapi-schema/enum-union-clash.exit new file mode 100644 index 0000000000..d00491fd7e --- /dev/null +++ b/tests/qapi-schema/enum-union-clash.exit @@ -0,0 +1 @@ +1 diff --git a/tests/qapi-schema/enum-union-clash.json b/tests/qapi-schema/enum-union-clash.json new file mode 100644 index 0000000000..593282b6cf --- /dev/null +++ b/tests/qapi-schema/enum-union-clash.json @@ -0,0 +1,4 @@ +# we reject types that would conflict with implicit union enum +{ 'enum': 'UnionKind', 'data': [ 'oops' ] } +{ 'union': 'Union', + 'data': { 'a': 'int' } } diff --git a/tests/qapi-schema/enum-union-clash.out b/tests/qapi-schema/enum-union-clash.out new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/enum-union-clash.out diff --git a/tests/qapi-schema/enum-wrong-data.err b/tests/qapi-schema/enum-wrong-data.err new file mode 100644 index 0000000000..11b43471cf --- /dev/null +++ b/tests/qapi-schema/enum-wrong-data.err @@ -0,0 +1 @@ +tests/qapi-schema/enum-wrong-data.json:2: Enum 'MyEnum' requires an array for 'data' diff --git a/tests/qapi-schema/enum-wrong-data.exit b/tests/qapi-schema/enum-wrong-data.exit new file mode 100644 index 0000000000..d00491fd7e --- /dev/null +++ b/tests/qapi-schema/enum-wrong-data.exit @@ -0,0 +1 @@ +1 diff --git a/tests/qapi-schema/enum-wrong-data.json b/tests/qapi-schema/enum-wrong-data.json new file mode 100644 index 0000000000..7b3e255c14 --- /dev/null +++ b/tests/qapi-schema/enum-wrong-data.json @@ -0,0 +1,2 @@ +# we require that all qapi enums have an array for data +{ 'enum': 'MyEnum', 'data': { 'value': 'str' } } diff --git a/tests/qapi-schema/enum-wrong-data.out b/tests/qapi-schema/enum-wrong-data.out new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/enum-wrong-data.out diff --git a/tests/qapi-schema/escape-outside-string.err b/tests/qapi-schema/escape-outside-string.err new file mode 100644 index 
0000000000..b9b8837fd2 --- /dev/null +++ b/tests/qapi-schema/escape-outside-string.err @@ -0,0 +1 @@ +tests/qapi-schema/escape-outside-string.json:3:27: Stray "\" diff --git a/tests/qapi-schema/escape-outside-string.exit b/tests/qapi-schema/escape-outside-string.exit new file mode 100644 index 0000000000..d00491fd7e --- /dev/null +++ b/tests/qapi-schema/escape-outside-string.exit @@ -0,0 +1 @@ +1 diff --git a/tests/qapi-schema/escape-outside-string.json b/tests/qapi-schema/escape-outside-string.json new file mode 100644 index 0000000000..482f79554b --- /dev/null +++ b/tests/qapi-schema/escape-outside-string.json @@ -0,0 +1,3 @@ +# escape sequences are permitted only inside strings +# { 'command': 'foo', 'data': {} } +{ 'command': 'foo', 'data'\u003a{} } diff --git a/tests/qapi-schema/escape-outside-string.out b/tests/qapi-schema/escape-outside-string.out new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/escape-outside-string.out diff --git a/tests/qapi-schema/escape-too-big.err b/tests/qapi-schema/escape-too-big.err new file mode 100644 index 0000000000..d9aeb5dc38 --- /dev/null +++ b/tests/qapi-schema/escape-too-big.err @@ -0,0 +1 @@ +tests/qapi-schema/escape-too-big.json:3:14: For now, \u escape only supports non-zero values up to \u007f diff --git a/tests/qapi-schema/escape-too-big.exit b/tests/qapi-schema/escape-too-big.exit new file mode 100644 index 0000000000..d00491fd7e --- /dev/null +++ b/tests/qapi-schema/escape-too-big.exit @@ -0,0 +1 @@ +1 diff --git a/tests/qapi-schema/escape-too-big.json b/tests/qapi-schema/escape-too-big.json new file mode 100644 index 0000000000..62bcecd557 --- /dev/null +++ b/tests/qapi-schema/escape-too-big.json @@ -0,0 +1,3 @@ +# we don't support full Unicode strings, yet +# { 'command': 'é' } +{ 'command': '\u00e9' } diff --git a/tests/qapi-schema/escape-too-big.out b/tests/qapi-schema/escape-too-big.out new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/escape-too-big.out diff --git a/tests/qapi-schema/escape-too-short.err b/tests/qapi-schema/escape-too-short.err new file mode 100644 index 0000000000..934de598ee --- /dev/null +++ b/tests/qapi-schema/escape-too-short.err @@ -0,0 +1 @@ +tests/qapi-schema/escape-too-short.json:3:14: \u escape needs 4 hex digits diff --git a/tests/qapi-schema/escape-too-short.exit b/tests/qapi-schema/escape-too-short.exit new file mode 100644 index 0000000000..d00491fd7e --- /dev/null +++ b/tests/qapi-schema/escape-too-short.exit @@ -0,0 +1 @@ +1 diff --git a/tests/qapi-schema/escape-too-short.json b/tests/qapi-schema/escape-too-short.json new file mode 100644 index 0000000000..6cb1dec8f7 --- /dev/null +++ b/tests/qapi-schema/escape-too-short.json @@ -0,0 +1,3 @@ +# the \u escape requires 4 hex digits +# { 'command': 'a' } +{ 'command': '\u61' } diff --git a/tests/qapi-schema/escape-too-short.out b/tests/qapi-schema/escape-too-short.out new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/escape-too-short.out diff --git a/tests/qapi-schema/event-case.err b/tests/qapi-schema/event-case.err new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/event-case.err diff --git a/tests/qapi-schema/event-case.exit b/tests/qapi-schema/event-case.exit new file mode 100644 index 0000000000..573541ac97 --- /dev/null +++ b/tests/qapi-schema/event-case.exit @@ -0,0 +1 @@ +0 diff --git a/tests/qapi-schema/event-case.json b/tests/qapi-schema/event-case.json new file mode 100644 index 
0000000000..3a92d8b610 --- /dev/null +++ b/tests/qapi-schema/event-case.json @@ -0,0 +1,3 @@ +# TODO: might be nice to enforce naming conventions; but until then this works +# even though events should usually be ALL_CAPS +{ 'event': 'oops' } diff --git a/tests/qapi-schema/event-case.out b/tests/qapi-schema/event-case.out new file mode 100644 index 0000000000..3764bc781d --- /dev/null +++ b/tests/qapi-schema/event-case.out @@ -0,0 +1,3 @@ +[OrderedDict([('event', 'oops')])] +[] +[] diff --git a/tests/qapi-schema/event-max.err b/tests/qapi-schema/event-max.err new file mode 100644 index 0000000000..c856534379 --- /dev/null +++ b/tests/qapi-schema/event-max.err @@ -0,0 +1 @@ +tests/qapi-schema/event-max.json:2: Event name 'MAX' cannot be created diff --git a/tests/qapi-schema/event-max.exit b/tests/qapi-schema/event-max.exit new file mode 100644 index 0000000000..d00491fd7e --- /dev/null +++ b/tests/qapi-schema/event-max.exit @@ -0,0 +1 @@ +1 diff --git a/tests/qapi-schema/event-max.json b/tests/qapi-schema/event-max.json new file mode 100644 index 0000000000..f3d7de2a30 --- /dev/null +++ b/tests/qapi-schema/event-max.json @@ -0,0 +1,2 @@ +# an event named 'MAX' would conflict with implicit C enum +{ 'event': 'MAX' } diff --git a/tests/qapi-schema/event-max.out b/tests/qapi-schema/event-max.out new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/event-max.out diff --git a/tests/qapi-schema/event-nest-struct.err b/tests/qapi-schema/event-nest-struct.err index 91bde1c967..5a42701b8f 100644 --- a/tests/qapi-schema/event-nest-struct.err +++ b/tests/qapi-schema/event-nest-struct.err @@ -1 +1 @@ -tests/qapi-schema/event-nest-struct.json:1: Nested structure define in event is not supported, event 'EVENT_A', argname 'a' +tests/qapi-schema/event-nest-struct.json:1: Member 'a' of 'data' for event 'EVENT_A' should be a type name diff --git a/tests/qapi-schema/flat-union-bad-base.err b/tests/qapi-schema/flat-union-bad-base.err new file mode 100644 index 0000000000..f9c31b2bf5 --- /dev/null +++ b/tests/qapi-schema/flat-union-bad-base.err @@ -0,0 +1 @@ +tests/qapi-schema/flat-union-bad-base.json:9: Flat union 'TestUnion' must have a string base field diff --git a/tests/qapi-schema/flat-union-bad-base.exit b/tests/qapi-schema/flat-union-bad-base.exit new file mode 100644 index 0000000000..d00491fd7e --- /dev/null +++ b/tests/qapi-schema/flat-union-bad-base.exit @@ -0,0 +1 @@ +1 diff --git a/tests/qapi-schema/flat-union-bad-base.json b/tests/qapi-schema/flat-union-bad-base.json new file mode 100644 index 0000000000..e2e622bb6e --- /dev/null +++ b/tests/qapi-schema/flat-union-bad-base.json @@ -0,0 +1,13 @@ +# we require the base to be an existing struct +# TODO: should we allow an anonymous inline base type? 
+{ 'enum': 'TestEnum', + 'data': [ 'value1', 'value2' ] } +{ 'struct': 'TestTypeA', + 'data': { 'string': 'str' } } +{ 'struct': 'TestTypeB', + 'data': { 'integer': 'int' } } +{ 'union': 'TestUnion', + 'base': { 'enum1': 'TestEnum', 'kind': 'str' }, + 'discriminator': 'enum1', + 'data': { 'value1': 'TestTypeA', + 'value2': 'TestTypeB' } } diff --git a/tests/qapi-schema/flat-union-bad-base.out b/tests/qapi-schema/flat-union-bad-base.out new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/flat-union-bad-base.out diff --git a/tests/qapi-schema/flat-union-bad-discriminator.err b/tests/qapi-schema/flat-union-bad-discriminator.err new file mode 100644 index 0000000000..c38cc8e4df --- /dev/null +++ b/tests/qapi-schema/flat-union-bad-discriminator.err @@ -0,0 +1 @@ +tests/qapi-schema/flat-union-bad-discriminator.json:11: Discriminator of flat union 'TestUnion' requires a string name diff --git a/tests/qapi-schema/flat-union-bad-discriminator.exit b/tests/qapi-schema/flat-union-bad-discriminator.exit new file mode 100644 index 0000000000..d00491fd7e --- /dev/null +++ b/tests/qapi-schema/flat-union-bad-discriminator.exit @@ -0,0 +1 @@ +1 diff --git a/tests/qapi-schema/flat-union-bad-discriminator.json b/tests/qapi-schema/flat-union-bad-discriminator.json new file mode 100644 index 0000000000..cd10b9d901 --- /dev/null +++ b/tests/qapi-schema/flat-union-bad-discriminator.json @@ -0,0 +1,15 @@ +# we require the discriminator to be a string naming a base-type member +# this tests the old syntax for anonymous unions before we added alternates +{ 'enum': 'TestEnum', + 'data': [ 'value1', 'value2' ] } +{ 'struct': 'TestBase', + 'data': { 'enum1': 'TestEnum', 'kind': 'str' } } +{ 'struct': 'TestTypeA', + 'data': { 'string': 'str' } } +{ 'struct': 'TestTypeB', + 'data': { 'integer': 'int' } } +{ 'union': 'TestUnion', + 'base': 'TestBase', + 'discriminator': {}, + 'data': { 'kind1': 'TestTypeA', + 'kind2': 'TestTypeB' } } diff --git a/tests/qapi-schema/flat-union-bad-discriminator.out b/tests/qapi-schema/flat-union-bad-discriminator.out new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/flat-union-bad-discriminator.out diff --git a/tests/qapi-schema/flat-union-base-star.err b/tests/qapi-schema/flat-union-base-star.err new file mode 100644 index 0000000000..b7748f08bf --- /dev/null +++ b/tests/qapi-schema/flat-union-base-star.err @@ -0,0 +1 @@ +tests/qapi-schema/flat-union-base-star.json:8: Base '**' is not a valid struct diff --git a/tests/qapi-schema/flat-union-base-star.exit b/tests/qapi-schema/flat-union-base-star.exit new file mode 100644 index 0000000000..d00491fd7e --- /dev/null +++ b/tests/qapi-schema/flat-union-base-star.exit @@ -0,0 +1 @@ +1 diff --git a/tests/qapi-schema/flat-union-base-star.json b/tests/qapi-schema/flat-union-base-star.json new file mode 100644 index 0000000000..5099439a9d --- /dev/null +++ b/tests/qapi-schema/flat-union-base-star.json @@ -0,0 +1,12 @@ +# we require the base to be an existing struct +{ 'enum': 'TestEnum', + 'data': [ 'value1', 'value2' ] } +{ 'struct': 'TestTypeA', + 'data': { 'string': 'str' } } +{ 'struct': 'TestTypeB', + 'data': { 'integer': 'int' } } +{ 'union': 'TestUnion', + 'base': '**', + 'discriminator': 'enum1', + 'data': { 'value1': 'TestTypeA', + 'value2': 'TestTypeB' } } diff --git a/tests/qapi-schema/flat-union-base-star.out b/tests/qapi-schema/flat-union-base-star.out new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/flat-union-base-star.out 
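For contrast with the rejected schemas around this point, a minimal flat union that these checks accept would look like the sketch below; the type names are illustrative and not taken from the test suite. The base must be a previously defined struct, the discriminator must name an enum-typed member of that base, and each branch must be a struct keyed by one of the enum's values.

# illustrative sketch only, not part of the patch above
{ 'enum': 'TestEnum',
  'data': [ 'value1', 'value2' ] }
{ 'struct': 'ExampleBase',
  'data': { 'enum1': 'TestEnum' } }
{ 'struct': 'BranchA',
  'data': { 'string': 'str' } }
{ 'struct': 'BranchB',
  'data': { 'integer': 'int' } }
{ 'union': 'ExampleUnion',
  'base': 'ExampleBase',
  'discriminator': 'enum1',
  'data': { 'value1': 'BranchA',
            'value2': 'BranchB' } }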
diff --git a/tests/qapi-schema/flat-union-base-union.err b/tests/qapi-schema/flat-union-base-union.err new file mode 100644 index 0000000000..ede9859a39 --- /dev/null +++ b/tests/qapi-schema/flat-union-base-union.err @@ -0,0 +1 @@ +tests/qapi-schema/flat-union-base-union.json:11: Base 'UnionBase' is not a valid struct diff --git a/tests/qapi-schema/flat-union-base-union.exit b/tests/qapi-schema/flat-union-base-union.exit new file mode 100644 index 0000000000..d00491fd7e --- /dev/null +++ b/tests/qapi-schema/flat-union-base-union.exit @@ -0,0 +1 @@ +1 diff --git a/tests/qapi-schema/flat-union-base-union.json b/tests/qapi-schema/flat-union-base-union.json new file mode 100644 index 0000000000..6a8ea687a9 --- /dev/null +++ b/tests/qapi-schema/flat-union-base-union.json @@ -0,0 +1,15 @@ +# we require the base to be a struct +{ 'enum': 'TestEnum', + 'data': [ 'value1', 'value2' ] } +{ 'struct': 'TestTypeA', + 'data': { 'string': 'str' } } +{ 'struct': 'TestTypeB', + 'data': { 'integer': 'int' } } +{ 'union': 'UnionBase', + 'data': { 'kind1': 'TestTypeA', + 'kind2': 'TestTypeB' } } +{ 'union': 'TestUnion', + 'base': 'UnionBase', + 'discriminator': 'type', + 'data': { 'kind1': 'TestTypeA', + 'kind2': 'TestTypeB' } } diff --git a/tests/qapi-schema/flat-union-base-union.out b/tests/qapi-schema/flat-union-base-union.out new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/flat-union-base-union.out diff --git a/tests/qapi-schema/flat-union-branch-clash.err b/tests/qapi-schema/flat-union-branch-clash.err new file mode 100644 index 0000000000..f11276688c --- /dev/null +++ b/tests/qapi-schema/flat-union-branch-clash.err @@ -0,0 +1 @@ +tests/qapi-schema/flat-union-branch-clash.json:10: Member name 'name' of branch 'value1' clashes with base 'Base' diff --git a/tests/qapi-schema/flat-union-branch-clash.exit b/tests/qapi-schema/flat-union-branch-clash.exit new file mode 100644 index 0000000000..d00491fd7e --- /dev/null +++ b/tests/qapi-schema/flat-union-branch-clash.exit @@ -0,0 +1 @@ +1 diff --git a/tests/qapi-schema/flat-union-branch-clash.json b/tests/qapi-schema/flat-union-branch-clash.json new file mode 100644 index 0000000000..8fb054f004 --- /dev/null +++ b/tests/qapi-schema/flat-union-branch-clash.json @@ -0,0 +1,14 @@ +# we check for no duplicate keys between branches and base +{ 'enum': 'TestEnum', + 'data': [ 'value1', 'value2' ] } +{ 'struct': 'Base', + 'data': { 'enum1': 'TestEnum', '*name': 'str' } } +{ 'struct': 'Branch1', + 'data': { 'name': 'str' } } +{ 'struct': 'Branch2', + 'data': { 'value': 'int' } } +{ 'union': 'TestUnion', + 'base': 'Base', + 'discriminator': 'enum1', + 'data': { 'value1': 'Branch1', + 'value2': 'Branch2' } } diff --git a/tests/qapi-schema/flat-union-branch-clash.out b/tests/qapi-schema/flat-union-branch-clash.out new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/flat-union-branch-clash.out diff --git a/tests/qapi-schema/flat-union-inline.err b/tests/qapi-schema/flat-union-inline.err new file mode 100644 index 0000000000..ec586277b7 --- /dev/null +++ b/tests/qapi-schema/flat-union-inline.err @@ -0,0 +1 @@ +tests/qapi-schema/flat-union-inline.json:7: Flat union 'TestUnion' must have a string base field diff --git a/tests/qapi-schema/flat-union-inline.exit b/tests/qapi-schema/flat-union-inline.exit new file mode 100644 index 0000000000..d00491fd7e --- /dev/null +++ b/tests/qapi-schema/flat-union-inline.exit @@ -0,0 +1 @@ +1 diff --git a/tests/qapi-schema/flat-union-inline.json 
b/tests/qapi-schema/flat-union-inline.json new file mode 100644 index 0000000000..6bfdd65811 --- /dev/null +++ b/tests/qapi-schema/flat-union-inline.json @@ -0,0 +1,11 @@ +# we require branches to be a struct name +# TODO: should we allow anonymous inline types? +{ 'enum': 'TestEnum', + 'data': [ 'value1', 'value2' ] } +{ 'struct': 'Base', + 'data': { 'enum1': 'TestEnum', 'kind': 'str' } } +{ 'union': 'TestUnion', + 'base': { 'enum1': 'TestEnum', 'kind': 'str' }, + 'discriminator': 'enum1', + 'data': { 'value1': { 'string': 'str' }, + 'value2': { 'integer': 'int' } } } diff --git a/tests/qapi-schema/flat-union-inline.out b/tests/qapi-schema/flat-union-inline.out new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/flat-union-inline.out diff --git a/tests/qapi-schema/flat-union-int-branch.err b/tests/qapi-schema/flat-union-int-branch.err new file mode 100644 index 0000000000..faf01573b7 --- /dev/null +++ b/tests/qapi-schema/flat-union-int-branch.err @@ -0,0 +1 @@ +tests/qapi-schema/flat-union-int-branch.json:8: Member 'value1' of union 'TestUnion' cannot use built-in type 'int' diff --git a/tests/qapi-schema/flat-union-int-branch.exit b/tests/qapi-schema/flat-union-int-branch.exit new file mode 100644 index 0000000000..d00491fd7e --- /dev/null +++ b/tests/qapi-schema/flat-union-int-branch.exit @@ -0,0 +1 @@ +1 diff --git a/tests/qapi-schema/flat-union-int-branch.json b/tests/qapi-schema/flat-union-int-branch.json new file mode 100644 index 0000000000..9370c349e8 --- /dev/null +++ b/tests/qapi-schema/flat-union-int-branch.json @@ -0,0 +1,12 @@ +# we require flat union branches to be a struct +{ 'enum': 'TestEnum', + 'data': [ 'value1', 'value2' ] } +{ 'struct': 'Base', + 'data': { 'enum1': 'TestEnum' } } +{ 'struct': 'TestTypeB', + 'data': { 'integer': 'int' } } +{ 'union': 'TestUnion', + 'base': 'Base', + 'discriminator': 'enum1', + 'data': { 'value1': 'int', + 'value2': 'TestTypeB' } } diff --git a/tests/qapi-schema/flat-union-int-branch.out b/tests/qapi-schema/flat-union-int-branch.out new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/flat-union-int-branch.out diff --git a/tests/qapi-schema/flat-union-invalid-branch-key.json b/tests/qapi-schema/flat-union-invalid-branch-key.json index a6242823ed..95ff7746bf 100644 --- a/tests/qapi-schema/flat-union-invalid-branch-key.json +++ b/tests/qapi-schema/flat-union-invalid-branch-key.json @@ -1,13 +1,13 @@ { 'enum': 'TestEnum', 'data': [ 'value1', 'value2' ] } -{ 'type': 'TestBase', +{ 'struct': 'TestBase', 'data': { 'enum1': 'TestEnum' } } -{ 'type': 'TestTypeA', +{ 'struct': 'TestTypeA', 'data': { 'string': 'str' } } -{ 'type': 'TestTypeB', +{ 'struct': 'TestTypeB', 'data': { 'integer': 'int' } } { 'union': 'TestUnion', diff --git a/tests/qapi-schema/flat-union-invalid-discriminator.err b/tests/qapi-schema/flat-union-invalid-discriminator.err index 790b6759b8..5f4055614e 100644 --- a/tests/qapi-schema/flat-union-invalid-discriminator.err +++ b/tests/qapi-schema/flat-union-invalid-discriminator.err @@ -1 +1 @@ -tests/qapi-schema/flat-union-invalid-discriminator.json:13: Discriminator 'enum_wrong' is not a member of base type 'TestBase' +tests/qapi-schema/flat-union-invalid-discriminator.json:13: Discriminator 'enum_wrong' is not a member of base struct 'TestBase' diff --git a/tests/qapi-schema/flat-union-invalid-discriminator.json b/tests/qapi-schema/flat-union-invalid-discriminator.json index 887157e173..48b94c3a4d 100644 --- 
a/tests/qapi-schema/flat-union-invalid-discriminator.json +++ b/tests/qapi-schema/flat-union-invalid-discriminator.json @@ -1,13 +1,13 @@ { 'enum': 'TestEnum', 'data': [ 'value1', 'value2' ] } -{ 'type': 'TestBase', +{ 'struct': 'TestBase', 'data': { 'enum1': 'TestEnum' } } -{ 'type': 'TestTypeA', +{ 'struct': 'TestTypeA', 'data': { 'string': 'str' } } -{ 'type': 'TestTypeB', +{ 'struct': 'TestTypeB', 'data': { 'integer': 'int' } } { 'union': 'TestUnion', diff --git a/tests/qapi-schema/flat-union-no-base.err b/tests/qapi-schema/flat-union-no-base.err index a59749eb84..bb3f708747 100644 --- a/tests/qapi-schema/flat-union-no-base.err +++ b/tests/qapi-schema/flat-union-no-base.err @@ -1 +1 @@ -tests/qapi-schema/flat-union-no-base.json:7: Flat union 'TestUnion' must have a base field +tests/qapi-schema/flat-union-no-base.json:9: Flat union 'TestUnion' must have a string base field diff --git a/tests/qapi-schema/flat-union-no-base.json b/tests/qapi-schema/flat-union-no-base.json index 50f267323b..ffc4c6f0e6 100644 --- a/tests/qapi-schema/flat-union-no-base.json +++ b/tests/qapi-schema/flat-union-no-base.json @@ -1,10 +1,12 @@ -{ 'type': 'TestTypeA', +# flat unions require a base +# TODO: simple unions should be able to use an enum discriminator +{ 'struct': 'TestTypeA', 'data': { 'string': 'str' } } - -{ 'type': 'TestTypeB', +{ 'struct': 'TestTypeB', 'data': { 'integer': 'int' } } - +{ 'enum': 'Enum', + 'data': [ 'value1', 'value2' ] } { 'union': 'TestUnion', - 'discriminator': 'enum1', + 'discriminator': 'Enum', 'data': { 'value1': 'TestTypeA', 'value2': 'TestTypeB' } } diff --git a/tests/qapi-schema/flat-union-optional-discriminator.err b/tests/qapi-schema/flat-union-optional-discriminator.err new file mode 100644 index 0000000000..aaabedb3bd --- /dev/null +++ b/tests/qapi-schema/flat-union-optional-discriminator.err @@ -0,0 +1 @@ +tests/qapi-schema/flat-union-optional-discriminator.json:6: Discriminator of flat union 'MyUnion' does not allow optional name '*switch' diff --git a/tests/qapi-schema/flat-union-optional-discriminator.exit b/tests/qapi-schema/flat-union-optional-discriminator.exit new file mode 100644 index 0000000000..d00491fd7e --- /dev/null +++ b/tests/qapi-schema/flat-union-optional-discriminator.exit @@ -0,0 +1 @@ +1 diff --git a/tests/qapi-schema/flat-union-optional-discriminator.json b/tests/qapi-schema/flat-union-optional-discriminator.json new file mode 100644 index 0000000000..08a8f7ef8b --- /dev/null +++ b/tests/qapi-schema/flat-union-optional-discriminator.json @@ -0,0 +1,10 @@ +# we require the discriminator to be non-optional +{ 'enum': 'Enum', 'data': [ 'one', 'two' ] } +{ 'struct': 'Base', + 'data': { '*switch': 'Enum' } } +{ 'struct': 'Branch', 'data': { 'name': 'str' } } +{ 'union': 'MyUnion', + 'base': 'Base', + 'discriminator': '*switch', + 'data': { 'one': 'Branch', + 'two': 'Branch' } } diff --git a/tests/qapi-schema/flat-union-optional-discriminator.out b/tests/qapi-schema/flat-union-optional-discriminator.out new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/flat-union-optional-discriminator.out diff --git a/tests/qapi-schema/flat-union-reverse-define.json b/tests/qapi-schema/flat-union-reverse-define.json index 9ea7e72201..648bbfe2b7 100644 --- a/tests/qapi-schema/flat-union-reverse-define.json +++ b/tests/qapi-schema/flat-union-reverse-define.json @@ -4,14 +4,14 @@ 'data': { 'value1': 'TestTypeA', 'value2': 'TestTypeB' } } -{ 'type': 'TestBase', +{ 'struct': 'TestBase', 'data': { 'enum1': 'TestEnum' } } { 'enum': 
'TestEnum', 'data': [ 'value1', 'value2' ] } -{ 'type': 'TestTypeA', +{ 'struct': 'TestTypeA', 'data': { 'string': 'str' } } -{ 'type': 'TestTypeB', +{ 'struct': 'TestTypeB', 'data': { 'integer': 'int' } } diff --git a/tests/qapi-schema/flat-union-reverse-define.out b/tests/qapi-schema/flat-union-reverse-define.out index 03c952e28a..1ed7b8a519 100644 --- a/tests/qapi-schema/flat-union-reverse-define.out +++ b/tests/qapi-schema/flat-union-reverse-define.out @@ -1,9 +1,9 @@ [OrderedDict([('union', 'TestUnion'), ('base', 'TestBase'), ('discriminator', 'enum1'), ('data', OrderedDict([('value1', 'TestTypeA'), ('value2', 'TestTypeB')]))]), - OrderedDict([('type', 'TestBase'), ('data', OrderedDict([('enum1', 'TestEnum')]))]), + OrderedDict([('struct', 'TestBase'), ('data', OrderedDict([('enum1', 'TestEnum')]))]), OrderedDict([('enum', 'TestEnum'), ('data', ['value1', 'value2'])]), - OrderedDict([('type', 'TestTypeA'), ('data', OrderedDict([('string', 'str')]))]), - OrderedDict([('type', 'TestTypeB'), ('data', OrderedDict([('integer', 'int')]))])] + OrderedDict([('struct', 'TestTypeA'), ('data', OrderedDict([('string', 'str')]))]), + OrderedDict([('struct', 'TestTypeB'), ('data', OrderedDict([('integer', 'int')]))])] [{'enum_name': 'TestEnum', 'enum_values': ['value1', 'value2']}] -[OrderedDict([('type', 'TestBase'), ('data', OrderedDict([('enum1', 'TestEnum')]))]), - OrderedDict([('type', 'TestTypeA'), ('data', OrderedDict([('string', 'str')]))]), - OrderedDict([('type', 'TestTypeB'), ('data', OrderedDict([('integer', 'int')]))])] +[OrderedDict([('struct', 'TestBase'), ('data', OrderedDict([('enum1', 'TestEnum')]))]), + OrderedDict([('struct', 'TestTypeA'), ('data', OrderedDict([('string', 'str')]))]), + OrderedDict([('struct', 'TestTypeB'), ('data', OrderedDict([('integer', 'int')]))])] diff --git a/tests/qapi-schema/flat-union-string-discriminator.json b/tests/qapi-schema/flat-union-string-discriminator.json index e966aeb395..8af60333b6 100644 --- a/tests/qapi-schema/flat-union-string-discriminator.json +++ b/tests/qapi-schema/flat-union-string-discriminator.json @@ -1,13 +1,13 @@ { 'enum': 'TestEnum', 'data': [ 'value1', 'value2' ] } -{ 'type': 'TestBase', +{ 'struct': 'TestBase', 'data': { 'enum1': 'TestEnum', 'kind': 'str' } } -{ 'type': 'TestTypeA', +{ 'struct': 'TestTypeA', 'data': { 'string': 'str' } } -{ 'type': 'TestTypeB', +{ 'struct': 'TestTypeB', 'data': { 'integer': 'int' } } { 'union': 'TestUnion', diff --git a/tests/qapi-schema/ident-with-escape.err b/tests/qapi-schema/ident-with-escape.err new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/ident-with-escape.err diff --git a/tests/qapi-schema/ident-with-escape.exit b/tests/qapi-schema/ident-with-escape.exit new file mode 100644 index 0000000000..573541ac97 --- /dev/null +++ b/tests/qapi-schema/ident-with-escape.exit @@ -0,0 +1 @@ +0 diff --git a/tests/qapi-schema/ident-with-escape.json b/tests/qapi-schema/ident-with-escape.json new file mode 100644 index 0000000000..56617501e7 --- /dev/null +++ b/tests/qapi-schema/ident-with-escape.json @@ -0,0 +1,4 @@ +# we allow escape sequences in strings, if they map back to ASCII +# { 'command': 'fooA', 'data': { 'bar1': 'str' } } +{ 'c\u006fmmand': '\u0066\u006f\u006FA', + 'd\u0061ta': { '\u0062\u0061\u00721': '\u0073\u0074\u0072' } } diff --git a/tests/qapi-schema/ident-with-escape.out b/tests/qapi-schema/ident-with-escape.out new file mode 100644 index 0000000000..402843081b --- /dev/null +++ b/tests/qapi-schema/ident-with-escape.out @@ -0,0 +1,3 @@ 
+[OrderedDict([('command', 'fooA'), ('data', OrderedDict([('bar1', 'str')]))])] +[] +[] diff --git a/tests/qapi-schema/indented-expr.json b/tests/qapi-schema/indented-expr.json index d80af60564..7115d3131e 100644 --- a/tests/qapi-schema/indented-expr.json +++ b/tests/qapi-schema/indented-expr.json @@ -1,2 +1,2 @@ -{ 'id' : 'eins' } - { 'id' : 'zwei' } +{ 'command' : 'eins' } + { 'command' : 'zwei' } diff --git a/tests/qapi-schema/indented-expr.out b/tests/qapi-schema/indented-expr.out index 98af89aa1d..b5ce9151bc 100644 --- a/tests/qapi-schema/indented-expr.out +++ b/tests/qapi-schema/indented-expr.out @@ -1,3 +1,3 @@ -[OrderedDict([('id', 'eins')]), OrderedDict([('id', 'zwei')])] +[OrderedDict([('command', 'eins')]), OrderedDict([('command', 'zwei')])] [] [] diff --git a/tests/qapi-schema/missing-type.err b/tests/qapi-schema/missing-type.err new file mode 100644 index 0000000000..b3e7b14e42 --- /dev/null +++ b/tests/qapi-schema/missing-type.err @@ -0,0 +1 @@ +tests/qapi-schema/missing-type.json:2: Expression is missing metatype diff --git a/tests/qapi-schema/missing-type.exit b/tests/qapi-schema/missing-type.exit new file mode 100644 index 0000000000..d00491fd7e --- /dev/null +++ b/tests/qapi-schema/missing-type.exit @@ -0,0 +1 @@ +1 diff --git a/tests/qapi-schema/missing-type.json b/tests/qapi-schema/missing-type.json new file mode 100644 index 0000000000..ff5349d3fe --- /dev/null +++ b/tests/qapi-schema/missing-type.json @@ -0,0 +1,2 @@ +# we reject an expression with missing metatype +{ 'data': { } } diff --git a/tests/qapi-schema/missing-type.out b/tests/qapi-schema/missing-type.out new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/missing-type.out diff --git a/tests/qapi-schema/nested-struct-data.err b/tests/qapi-schema/nested-struct-data.err new file mode 100644 index 0000000000..da767bade2 --- /dev/null +++ b/tests/qapi-schema/nested-struct-data.err @@ -0,0 +1 @@ +tests/qapi-schema/nested-struct-data.json:2: Member 'a' of 'data' for command 'foo' should be a type name diff --git a/tests/qapi-schema/nested-struct-data.exit b/tests/qapi-schema/nested-struct-data.exit new file mode 100644 index 0000000000..d00491fd7e --- /dev/null +++ b/tests/qapi-schema/nested-struct-data.exit @@ -0,0 +1 @@ +1 diff --git a/tests/qapi-schema/nested-struct-data.json b/tests/qapi-schema/nested-struct-data.json new file mode 100644 index 0000000000..3d52d2b398 --- /dev/null +++ b/tests/qapi-schema/nested-struct-data.json @@ -0,0 +1,4 @@ +# inline subtypes collide with our desired future use of defaults +{ 'command': 'foo', + 'data': { 'a' : { 'string' : 'str', 'integer': 'int' }, 'b' : 'str' }, + 'returns': {} } diff --git a/tests/qapi-schema/nested-struct-data.out b/tests/qapi-schema/nested-struct-data.out new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/nested-struct-data.out diff --git a/tests/qapi-schema/nested-struct-returns.err b/tests/qapi-schema/nested-struct-returns.err new file mode 100644 index 0000000000..5238d075b7 --- /dev/null +++ b/tests/qapi-schema/nested-struct-returns.err @@ -0,0 +1 @@ +tests/qapi-schema/nested-struct-returns.json:2: Member 'a' of 'returns' for command 'foo' should be a type name diff --git a/tests/qapi-schema/nested-struct-returns.exit b/tests/qapi-schema/nested-struct-returns.exit new file mode 100644 index 0000000000..d00491fd7e --- /dev/null +++ b/tests/qapi-schema/nested-struct-returns.exit @@ -0,0 +1 @@ +1 diff --git a/tests/qapi-schema/nested-struct-returns.json 
b/tests/qapi-schema/nested-struct-returns.json new file mode 100644 index 0000000000..d2cd047f0d --- /dev/null +++ b/tests/qapi-schema/nested-struct-returns.json @@ -0,0 +1,3 @@ +# inline subtypes collide with our desired future use of defaults +{ 'command': 'foo', + 'returns': { 'a' : { 'string' : 'str', 'integer': 'int' }, 'b' : 'str' } } diff --git a/tests/qapi-schema/nested-struct-returns.out b/tests/qapi-schema/nested-struct-returns.out new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/nested-struct-returns.out diff --git a/tests/qapi-schema/qapi-schema-test.json b/tests/qapi-schema/qapi-schema-test.json index d43b5fd2e9..8193dc13a9 100644 --- a/tests/qapi-schema/qapi-schema-test.json +++ b/tests/qapi-schema/qapi-schema-test.json @@ -3,44 +3,40 @@ # for testing enums { 'enum': 'EnumOne', 'data': [ 'value1', 'value2', 'value3' ] } -{ 'type': 'NestedEnumsOne', +{ 'struct': 'NestedEnumsOne', 'data': { 'enum1': 'EnumOne', '*enum2': 'EnumOne', 'enum3': 'EnumOne', '*enum4': 'EnumOne' } } # for testing nested structs -{ 'type': 'UserDefZero', +{ 'struct': 'UserDefZero', 'data': { 'integer': 'int' } } -{ 'type': 'UserDefOne', +{ 'struct': 'UserDefOne', 'base': 'UserDefZero', 'data': { 'string': 'str', '*enum1': 'EnumOne' } } -{ 'type': 'UserDefTwo', - 'data': { 'string': 'str', - 'dict': { 'string': 'str', - 'dict': { 'userdef': 'UserDefOne', 'string': 'str' }, - '*dict2': { 'userdef': 'UserDefOne', 'string': 'str' } } } } +{ 'struct': 'UserDefTwoDictDict', + 'data': { 'userdef': 'UserDefOne', 'string': 'str' } } -{ 'type': 'UserDefNested', +{ 'struct': 'UserDefTwoDict', + 'data': { 'string1': 'str', + 'dict2': 'UserDefTwoDictDict', + '*dict3': 'UserDefTwoDictDict' } } + +{ 'struct': 'UserDefTwo', 'data': { 'string0': 'str', - 'dict1': { 'string1': 'str', - 'dict2': { 'userdef1': 'UserDefOne', 'string2': 'str' }, - '*dict3': { 'userdef2': 'UserDefOne', 'string3': 'str' } } } } + 'dict1': 'UserDefTwoDict' } } # for testing unions -{ 'type': 'UserDefA', +{ 'struct': 'UserDefA', 'data': { 'boolean': 'bool' } } -{ 'type': 'UserDefB', +{ 'struct': 'UserDefB', 'data': { 'integer': 'int' } } -{ 'type': 'UserDefC', +{ 'struct': 'UserDefC', 'data': { 'string1': 'str', 'string2': 'str' } } -{ 'union': 'UserDefUnion', - 'base': 'UserDefZero', - 'data': { 'a' : 'UserDefA', 'b' : 'UserDefB' } } - -{ 'type': 'UserDefUnionBase', +{ 'struct': 'UserDefUnionBase', 'data': { 'string': 'str', 'enum1': 'EnumOne' } } { 'union': 'UserDefFlatUnion', @@ -57,8 +53,7 @@ 'discriminator': 'enum1', 'data': { 'value1' : 'UserDefC', 'value2' : 'UserDefB', 'value3' : 'UserDefA' } } -{ 'union': 'UserDefAnonUnion', - 'discriminator': {}, +{ 'alternate': 'UserDefAlternate', 'data': { 'uda': 'UserDefA', 's': 'str', 'i': 'int' } } # for testing native lists @@ -74,7 +69,8 @@ 'u64': ['uint64'], 'number': ['number'], 'boolean': ['bool'], - 'string': ['str'] } } + 'string': ['str'], + 'sizes': ['size'] } } # testing commands { 'command': 'user_def_cmd', 'data': {} } @@ -92,7 +88,7 @@ # # For simplicity, this example doesn't use [type=]discriminator nor optargs # specific to discriminator values. 
-{ 'type': 'UserDefOptions', +{ 'struct': 'UserDefOptions', 'data': { '*i64' : [ 'int' ], '*u64' : [ 'uint64' ], @@ -101,7 +97,7 @@ '*u64x': 'uint64' } } # testing event -{ 'type': 'EventStructOne', +{ 'struct': 'EventStructOne', 'data': { 'struct1': 'UserDefOne', 'string': 'str', '*enum2': 'EnumOne' } } { 'event': 'EVENT_A' } diff --git a/tests/qapi-schema/qapi-schema-test.out b/tests/qapi-schema/qapi-schema-test.out index 08d7304dfa..93c49635eb 100644 --- a/tests/qapi-schema/qapi-schema-test.out +++ b/tests/qapi-schema/qapi-schema-test.out @@ -1,40 +1,40 @@ [OrderedDict([('enum', 'EnumOne'), ('data', ['value1', 'value2', 'value3'])]), - OrderedDict([('type', 'NestedEnumsOne'), ('data', OrderedDict([('enum1', 'EnumOne'), ('*enum2', 'EnumOne'), ('enum3', 'EnumOne'), ('*enum4', 'EnumOne')]))]), - OrderedDict([('type', 'UserDefZero'), ('data', OrderedDict([('integer', 'int')]))]), - OrderedDict([('type', 'UserDefOne'), ('base', 'UserDefZero'), ('data', OrderedDict([('string', 'str'), ('*enum1', 'EnumOne')]))]), - OrderedDict([('type', 'UserDefTwo'), ('data', OrderedDict([('string', 'str'), ('dict', OrderedDict([('string', 'str'), ('dict', OrderedDict([('userdef', 'UserDefOne'), ('string', 'str')])), ('*dict2', OrderedDict([('userdef', 'UserDefOne'), ('string', 'str')]))]))]))]), - OrderedDict([('type', 'UserDefNested'), ('data', OrderedDict([('string0', 'str'), ('dict1', OrderedDict([('string1', 'str'), ('dict2', OrderedDict([('userdef1', 'UserDefOne'), ('string2', 'str')])), ('*dict3', OrderedDict([('userdef2', 'UserDefOne'), ('string3', 'str')]))]))]))]), - OrderedDict([('type', 'UserDefA'), ('data', OrderedDict([('boolean', 'bool')]))]), - OrderedDict([('type', 'UserDefB'), ('data', OrderedDict([('integer', 'int')]))]), - OrderedDict([('type', 'UserDefC'), ('data', OrderedDict([('string1', 'str'), ('string2', 'str')]))]), - OrderedDict([('union', 'UserDefUnion'), ('base', 'UserDefZero'), ('data', OrderedDict([('a', 'UserDefA'), ('b', 'UserDefB')]))]), - OrderedDict([('type', 'UserDefUnionBase'), ('data', OrderedDict([('string', 'str'), ('enum1', 'EnumOne')]))]), + OrderedDict([('struct', 'NestedEnumsOne'), ('data', OrderedDict([('enum1', 'EnumOne'), ('*enum2', 'EnumOne'), ('enum3', 'EnumOne'), ('*enum4', 'EnumOne')]))]), + OrderedDict([('struct', 'UserDefZero'), ('data', OrderedDict([('integer', 'int')]))]), + OrderedDict([('struct', 'UserDefOne'), ('base', 'UserDefZero'), ('data', OrderedDict([('string', 'str'), ('*enum1', 'EnumOne')]))]), + OrderedDict([('struct', 'UserDefTwoDictDict'), ('data', OrderedDict([('userdef', 'UserDefOne'), ('string', 'str')]))]), + OrderedDict([('struct', 'UserDefTwoDict'), ('data', OrderedDict([('string1', 'str'), ('dict2', 'UserDefTwoDictDict'), ('*dict3', 'UserDefTwoDictDict')]))]), + OrderedDict([('struct', 'UserDefTwo'), ('data', OrderedDict([('string0', 'str'), ('dict1', 'UserDefTwoDict')]))]), + OrderedDict([('struct', 'UserDefA'), ('data', OrderedDict([('boolean', 'bool')]))]), + OrderedDict([('struct', 'UserDefB'), ('data', OrderedDict([('integer', 'int')]))]), + OrderedDict([('struct', 'UserDefC'), ('data', OrderedDict([('string1', 'str'), ('string2', 'str')]))]), + OrderedDict([('struct', 'UserDefUnionBase'), ('data', OrderedDict([('string', 'str'), ('enum1', 'EnumOne')]))]), OrderedDict([('union', 'UserDefFlatUnion'), ('base', 'UserDefUnionBase'), ('discriminator', 'enum1'), ('data', OrderedDict([('value1', 'UserDefA'), ('value2', 'UserDefB'), ('value3', 'UserDefB')]))]), OrderedDict([('union', 'UserDefFlatUnion2'), ('base', 'UserDefUnionBase'), 
('discriminator', 'enum1'), ('data', OrderedDict([('value1', 'UserDefC'), ('value2', 'UserDefB'), ('value3', 'UserDefA')]))]), - OrderedDict([('union', 'UserDefAnonUnion'), ('discriminator', OrderedDict()), ('data', OrderedDict([('uda', 'UserDefA'), ('s', 'str'), ('i', 'int')]))]), - OrderedDict([('union', 'UserDefNativeListUnion'), ('data', OrderedDict([('integer', ['int']), ('s8', ['int8']), ('s16', ['int16']), ('s32', ['int32']), ('s64', ['int64']), ('u8', ['uint8']), ('u16', ['uint16']), ('u32', ['uint32']), ('u64', ['uint64']), ('number', ['number']), ('boolean', ['bool']), ('string', ['str'])]))]), + OrderedDict([('alternate', 'UserDefAlternate'), ('data', OrderedDict([('uda', 'UserDefA'), ('s', 'str'), ('i', 'int')]))]), + OrderedDict([('union', 'UserDefNativeListUnion'), ('data', OrderedDict([('integer', ['int']), ('s8', ['int8']), ('s16', ['int16']), ('s32', ['int32']), ('s64', ['int64']), ('u8', ['uint8']), ('u16', ['uint16']), ('u32', ['uint32']), ('u64', ['uint64']), ('number', ['number']), ('boolean', ['bool']), ('string', ['str']), ('sizes', ['size'])]))]), OrderedDict([('command', 'user_def_cmd'), ('data', OrderedDict())]), OrderedDict([('command', 'user_def_cmd1'), ('data', OrderedDict([('ud1a', 'UserDefOne')]))]), OrderedDict([('command', 'user_def_cmd2'), ('data', OrderedDict([('ud1a', 'UserDefOne'), ('*ud1b', 'UserDefOne')])), ('returns', 'UserDefTwo')]), OrderedDict([('command', 'user_def_cmd3'), ('data', OrderedDict([('a', 'int'), ('*b', 'int')])), ('returns', 'int')]), - OrderedDict([('type', 'UserDefOptions'), ('data', OrderedDict([('*i64', ['int']), ('*u64', ['uint64']), ('*u16', ['uint16']), ('*i64x', 'int'), ('*u64x', 'uint64')]))]), - OrderedDict([('type', 'EventStructOne'), ('data', OrderedDict([('struct1', 'UserDefOne'), ('string', 'str'), ('*enum2', 'EnumOne')]))]), + OrderedDict([('struct', 'UserDefOptions'), ('data', OrderedDict([('*i64', ['int']), ('*u64', ['uint64']), ('*u16', ['uint16']), ('*i64x', 'int'), ('*u64x', 'uint64')]))]), + OrderedDict([('struct', 'EventStructOne'), ('data', OrderedDict([('struct1', 'UserDefOne'), ('string', 'str'), ('*enum2', 'EnumOne')]))]), OrderedDict([('event', 'EVENT_A')]), OrderedDict([('event', 'EVENT_B'), ('data', OrderedDict())]), OrderedDict([('event', 'EVENT_C'), ('data', OrderedDict([('*a', 'int'), ('*b', 'UserDefOne'), ('c', 'str')]))]), OrderedDict([('event', 'EVENT_D'), ('data', OrderedDict([('a', 'EventStructOne'), ('b', 'str'), ('*c', 'str'), ('*enum3', 'EnumOne')]))])] [{'enum_name': 'EnumOne', 'enum_values': ['value1', 'value2', 'value3']}, - {'enum_name': 'UserDefUnionKind', 'enum_values': None}, - {'enum_name': 'UserDefAnonUnionKind', 'enum_values': None}, + {'enum_name': 'UserDefAlternateKind', 'enum_values': None}, {'enum_name': 'UserDefNativeListUnionKind', 'enum_values': None}] -[OrderedDict([('type', 'NestedEnumsOne'), ('data', OrderedDict([('enum1', 'EnumOne'), ('*enum2', 'EnumOne'), ('enum3', 'EnumOne'), ('*enum4', 'EnumOne')]))]), - OrderedDict([('type', 'UserDefZero'), ('data', OrderedDict([('integer', 'int')]))]), - OrderedDict([('type', 'UserDefOne'), ('base', 'UserDefZero'), ('data', OrderedDict([('string', 'str'), ('*enum1', 'EnumOne')]))]), - OrderedDict([('type', 'UserDefTwo'), ('data', OrderedDict([('string', 'str'), ('dict', OrderedDict([('string', 'str'), ('dict', OrderedDict([('userdef', 'UserDefOne'), ('string', 'str')])), ('*dict2', OrderedDict([('userdef', 'UserDefOne'), ('string', 'str')]))]))]))]), - OrderedDict([('type', 'UserDefNested'), ('data', OrderedDict([('string0', 'str'), 
('dict1', OrderedDict([('string1', 'str'), ('dict2', OrderedDict([('userdef1', 'UserDefOne'), ('string2', 'str')])), ('*dict3', OrderedDict([('userdef2', 'UserDefOne'), ('string3', 'str')]))]))]))]), - OrderedDict([('type', 'UserDefA'), ('data', OrderedDict([('boolean', 'bool')]))]), - OrderedDict([('type', 'UserDefB'), ('data', OrderedDict([('integer', 'int')]))]), - OrderedDict([('type', 'UserDefC'), ('data', OrderedDict([('string1', 'str'), ('string2', 'str')]))]), - OrderedDict([('type', 'UserDefUnionBase'), ('data', OrderedDict([('string', 'str'), ('enum1', 'EnumOne')]))]), - OrderedDict([('type', 'UserDefOptions'), ('data', OrderedDict([('*i64', ['int']), ('*u64', ['uint64']), ('*u16', ['uint16']), ('*i64x', 'int'), ('*u64x', 'uint64')]))]), - OrderedDict([('type', 'EventStructOne'), ('data', OrderedDict([('struct1', 'UserDefOne'), ('string', 'str'), ('*enum2', 'EnumOne')]))])] +[OrderedDict([('struct', 'NestedEnumsOne'), ('data', OrderedDict([('enum1', 'EnumOne'), ('*enum2', 'EnumOne'), ('enum3', 'EnumOne'), ('*enum4', 'EnumOne')]))]), + OrderedDict([('struct', 'UserDefZero'), ('data', OrderedDict([('integer', 'int')]))]), + OrderedDict([('struct', 'UserDefOne'), ('base', 'UserDefZero'), ('data', OrderedDict([('string', 'str'), ('*enum1', 'EnumOne')]))]), + OrderedDict([('struct', 'UserDefTwoDictDict'), ('data', OrderedDict([('userdef', 'UserDefOne'), ('string', 'str')]))]), + OrderedDict([('struct', 'UserDefTwoDict'), ('data', OrderedDict([('string1', 'str'), ('dict2', 'UserDefTwoDictDict'), ('*dict3', 'UserDefTwoDictDict')]))]), + OrderedDict([('struct', 'UserDefTwo'), ('data', OrderedDict([('string0', 'str'), ('dict1', 'UserDefTwoDict')]))]), + OrderedDict([('struct', 'UserDefA'), ('data', OrderedDict([('boolean', 'bool')]))]), + OrderedDict([('struct', 'UserDefB'), ('data', OrderedDict([('integer', 'int')]))]), + OrderedDict([('struct', 'UserDefC'), ('data', OrderedDict([('string1', 'str'), ('string2', 'str')]))]), + OrderedDict([('struct', 'UserDefUnionBase'), ('data', OrderedDict([('string', 'str'), ('enum1', 'EnumOne')]))]), + OrderedDict([('struct', 'UserDefOptions'), ('data', OrderedDict([('*i64', ['int']), ('*u64', ['uint64']), ('*u16', ['uint16']), ('*i64x', 'int'), ('*u64x', 'uint64')]))]), + OrderedDict([('struct', 'EventStructOne'), ('data', OrderedDict([('struct1', 'UserDefOne'), ('string', 'str'), ('*enum2', 'EnumOne')]))])] diff --git a/tests/qapi-schema/redefined-builtin.err b/tests/qapi-schema/redefined-builtin.err new file mode 100644 index 0000000000..b2757225c4 --- /dev/null +++ b/tests/qapi-schema/redefined-builtin.err @@ -0,0 +1 @@ +tests/qapi-schema/redefined-builtin.json:2: built-in 'size' is already defined diff --git a/tests/qapi-schema/redefined-builtin.exit b/tests/qapi-schema/redefined-builtin.exit new file mode 100644 index 0000000000..d00491fd7e --- /dev/null +++ b/tests/qapi-schema/redefined-builtin.exit @@ -0,0 +1 @@ +1 diff --git a/tests/qapi-schema/redefined-builtin.json b/tests/qapi-schema/redefined-builtin.json new file mode 100644 index 0000000000..45b8a550ad --- /dev/null +++ b/tests/qapi-schema/redefined-builtin.json @@ -0,0 +1,2 @@ +# we reject types that duplicate builtin names +{ 'struct': 'size', 'data': { 'myint': 'size' } } diff --git a/tests/qapi-schema/redefined-builtin.out b/tests/qapi-schema/redefined-builtin.out new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/redefined-builtin.out diff --git a/tests/qapi-schema/redefined-command.err b/tests/qapi-schema/redefined-command.err new file mode 
100644 index 0000000000..82ae256e63 --- /dev/null +++ b/tests/qapi-schema/redefined-command.err @@ -0,0 +1 @@ +tests/qapi-schema/redefined-command.json:3: command 'foo' is already defined diff --git a/tests/qapi-schema/redefined-command.exit b/tests/qapi-schema/redefined-command.exit new file mode 100644 index 0000000000..d00491fd7e --- /dev/null +++ b/tests/qapi-schema/redefined-command.exit @@ -0,0 +1 @@ +1 diff --git a/tests/qapi-schema/redefined-command.json b/tests/qapi-schema/redefined-command.json new file mode 100644 index 0000000000..247e401948 --- /dev/null +++ b/tests/qapi-schema/redefined-command.json @@ -0,0 +1,3 @@ +# we reject commands defined more than once +{ 'command': 'foo', 'data': { 'one': 'str' } } +{ 'command': 'foo', 'data': { '*two': 'str' } } diff --git a/tests/qapi-schema/redefined-command.out b/tests/qapi-schema/redefined-command.out new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/redefined-command.out diff --git a/tests/qapi-schema/redefined-event.err b/tests/qapi-schema/redefined-event.err new file mode 100644 index 0000000000..35429cb481 --- /dev/null +++ b/tests/qapi-schema/redefined-event.err @@ -0,0 +1 @@ +tests/qapi-schema/redefined-event.json:3: event 'EVENT_A' is already defined diff --git a/tests/qapi-schema/redefined-event.exit b/tests/qapi-schema/redefined-event.exit new file mode 100644 index 0000000000..d00491fd7e --- /dev/null +++ b/tests/qapi-schema/redefined-event.exit @@ -0,0 +1 @@ +1 diff --git a/tests/qapi-schema/redefined-event.json b/tests/qapi-schema/redefined-event.json new file mode 100644 index 0000000000..7717e91c18 --- /dev/null +++ b/tests/qapi-schema/redefined-event.json @@ -0,0 +1,3 @@ +# we reject duplicate events +{ 'event': 'EVENT_A', 'data': { 'myint': 'int' } } +{ 'event': 'EVENT_A', 'data': { 'myint': 'int' } } diff --git a/tests/qapi-schema/redefined-event.out b/tests/qapi-schema/redefined-event.out new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/redefined-event.out diff --git a/tests/qapi-schema/redefined-type.err b/tests/qapi-schema/redefined-type.err new file mode 100644 index 0000000000..06ea78c478 --- /dev/null +++ b/tests/qapi-schema/redefined-type.err @@ -0,0 +1 @@ +tests/qapi-schema/redefined-type.json:3: struct 'foo' is already defined diff --git a/tests/qapi-schema/redefined-type.exit b/tests/qapi-schema/redefined-type.exit new file mode 100644 index 0000000000..d00491fd7e --- /dev/null +++ b/tests/qapi-schema/redefined-type.exit @@ -0,0 +1 @@ +1 diff --git a/tests/qapi-schema/redefined-type.json b/tests/qapi-schema/redefined-type.json new file mode 100644 index 0000000000..a09e768bae --- /dev/null +++ b/tests/qapi-schema/redefined-type.json @@ -0,0 +1,3 @@ +# we reject types defined more than once +{ 'struct': 'foo', 'data': { 'one': 'str' } } +{ 'enum': 'foo', 'data': [ 'two' ] } diff --git a/tests/qapi-schema/redefined-type.out b/tests/qapi-schema/redefined-type.out new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/redefined-type.out diff --git a/tests/qapi-schema/returns-alternate.err b/tests/qapi-schema/returns-alternate.err new file mode 100644 index 0000000000..dfbb419cac --- /dev/null +++ b/tests/qapi-schema/returns-alternate.err @@ -0,0 +1 @@ +tests/qapi-schema/returns-alternate.json:3: 'returns' for command 'oops' cannot use alternate type 'Alt' diff --git a/tests/qapi-schema/returns-alternate.exit b/tests/qapi-schema/returns-alternate.exit new file mode 100644 index 0000000000..d00491fd7e 
--- /dev/null +++ b/tests/qapi-schema/returns-alternate.exit @@ -0,0 +1 @@ +1 diff --git a/tests/qapi-schema/returns-alternate.json b/tests/qapi-schema/returns-alternate.json new file mode 100644 index 0000000000..972390c06b --- /dev/null +++ b/tests/qapi-schema/returns-alternate.json @@ -0,0 +1,3 @@ +# we reject returns if it is an alternate type +{ 'alternate': 'Alt', 'data': { 'a': 'int', 'b': 'str' } } +{ 'command': 'oops', 'returns': 'Alt' } diff --git a/tests/qapi-schema/returns-alternate.out b/tests/qapi-schema/returns-alternate.out new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/returns-alternate.out diff --git a/tests/qapi-schema/returns-array-bad.err b/tests/qapi-schema/returns-array-bad.err new file mode 100644 index 0000000000..138095ccde --- /dev/null +++ b/tests/qapi-schema/returns-array-bad.err @@ -0,0 +1 @@ +tests/qapi-schema/returns-array-bad.json:2: 'returns' for command 'oops': array type must contain single type name diff --git a/tests/qapi-schema/returns-array-bad.exit b/tests/qapi-schema/returns-array-bad.exit new file mode 100644 index 0000000000..d00491fd7e --- /dev/null +++ b/tests/qapi-schema/returns-array-bad.exit @@ -0,0 +1 @@ +1 diff --git a/tests/qapi-schema/returns-array-bad.json b/tests/qapi-schema/returns-array-bad.json new file mode 100644 index 0000000000..09b0b1f182 --- /dev/null +++ b/tests/qapi-schema/returns-array-bad.json @@ -0,0 +1,2 @@ +# we reject an array return that is not a single type +{ 'command': 'oops', 'returns': [ 'str', 'str' ] } diff --git a/tests/qapi-schema/returns-array-bad.out b/tests/qapi-schema/returns-array-bad.out new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/returns-array-bad.out diff --git a/tests/qapi-schema/returns-int.err b/tests/qapi-schema/returns-int.err new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/returns-int.err diff --git a/tests/qapi-schema/returns-int.exit b/tests/qapi-schema/returns-int.exit new file mode 100644 index 0000000000..573541ac97 --- /dev/null +++ b/tests/qapi-schema/returns-int.exit @@ -0,0 +1 @@ +0 diff --git a/tests/qapi-schema/returns-int.json b/tests/qapi-schema/returns-int.json new file mode 100644 index 0000000000..870ec6366b --- /dev/null +++ b/tests/qapi-schema/returns-int.json @@ -0,0 +1,3 @@ +# It is okay (although not extensible) to return a non-dictionary +# But to make it work, the name must be in a whitelist +{ 'command': 'guest-get-time', 'returns': 'int' } diff --git a/tests/qapi-schema/returns-int.out b/tests/qapi-schema/returns-int.out new file mode 100644 index 0000000000..70b3ac5e6f --- /dev/null +++ b/tests/qapi-schema/returns-int.out @@ -0,0 +1,3 @@ +[OrderedDict([('command', 'guest-get-time'), ('returns', 'int')])] +[] +[] diff --git a/tests/qapi-schema/returns-unknown.err b/tests/qapi-schema/returns-unknown.err new file mode 100644 index 0000000000..1f43e3ac9f --- /dev/null +++ b/tests/qapi-schema/returns-unknown.err @@ -0,0 +1 @@ +tests/qapi-schema/returns-unknown.json:2: 'returns' for command 'oops' uses unknown type 'NoSuchType' diff --git a/tests/qapi-schema/returns-unknown.exit b/tests/qapi-schema/returns-unknown.exit new file mode 100644 index 0000000000..d00491fd7e --- /dev/null +++ b/tests/qapi-schema/returns-unknown.exit @@ -0,0 +1 @@ +1 diff --git a/tests/qapi-schema/returns-unknown.json b/tests/qapi-schema/returns-unknown.json new file mode 100644 index 0000000000..25bd498bff --- /dev/null +++ b/tests/qapi-schema/returns-unknown.json @@ -0,0 +1,2 
@@ +# we reject returns if it does not contain a known type +{ 'command': 'oops', 'returns': 'NoSuchType' } diff --git a/tests/qapi-schema/returns-unknown.out b/tests/qapi-schema/returns-unknown.out new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/returns-unknown.out diff --git a/tests/qapi-schema/returns-whitelist.err b/tests/qapi-schema/returns-whitelist.err new file mode 100644 index 0000000000..a41f019a52 --- /dev/null +++ b/tests/qapi-schema/returns-whitelist.err @@ -0,0 +1 @@ +tests/qapi-schema/returns-whitelist.json:10: 'returns' for command 'no-way-this-will-get-whitelisted' cannot use built-in type 'array of int' diff --git a/tests/qapi-schema/returns-whitelist.exit b/tests/qapi-schema/returns-whitelist.exit new file mode 100644 index 0000000000..d00491fd7e --- /dev/null +++ b/tests/qapi-schema/returns-whitelist.exit @@ -0,0 +1 @@ +1 diff --git a/tests/qapi-schema/returns-whitelist.json b/tests/qapi-schema/returns-whitelist.json new file mode 100644 index 0000000000..e8b3cea396 --- /dev/null +++ b/tests/qapi-schema/returns-whitelist.json @@ -0,0 +1,11 @@ +# we enforce that 'returns' be a dict or array of dict unless whitelisted +{ 'command': 'human-monitor-command', + 'data': {'command-line': 'str', '*cpu-index': 'int'}, + 'returns': 'str' } +{ 'enum': 'TpmModel', 'data': [ 'tpm-tis' ] } +{ 'command': 'query-tpm-models', 'returns': ['TpmModel'] } +{ 'command': 'guest-get-time', + 'returns': 'int' } + +{ 'command': 'no-way-this-will-get-whitelisted', + 'returns': [ 'int' ] } diff --git a/tests/qapi-schema/returns-whitelist.out b/tests/qapi-schema/returns-whitelist.out new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/returns-whitelist.out diff --git a/tests/qapi-schema/struct-base-clash-deep.err b/tests/qapi-schema/struct-base-clash-deep.err new file mode 100644 index 0000000000..e3e9f8d289 --- /dev/null +++ b/tests/qapi-schema/struct-base-clash-deep.err @@ -0,0 +1 @@ +tests/qapi-schema/struct-base-clash-deep.json:7: Member name 'name' clashes with base 'Base' diff --git a/tests/qapi-schema/struct-base-clash-deep.exit b/tests/qapi-schema/struct-base-clash-deep.exit new file mode 100644 index 0000000000..d00491fd7e --- /dev/null +++ b/tests/qapi-schema/struct-base-clash-deep.exit @@ -0,0 +1 @@ +1 diff --git a/tests/qapi-schema/struct-base-clash-deep.json b/tests/qapi-schema/struct-base-clash-deep.json new file mode 100644 index 0000000000..552fe94317 --- /dev/null +++ b/tests/qapi-schema/struct-base-clash-deep.json @@ -0,0 +1,9 @@ +# we check for no duplicate keys with indirect base +{ 'struct': 'Base', + 'data': { 'name': 'str' } } +{ 'struct': 'Mid', + 'base': 'Base', + 'data': { 'value': 'int' } } +{ 'struct': 'Sub', + 'base': 'Mid', + 'data': { '*name': 'str' } } diff --git a/tests/qapi-schema/struct-base-clash-deep.out b/tests/qapi-schema/struct-base-clash-deep.out new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/struct-base-clash-deep.out diff --git a/tests/qapi-schema/struct-base-clash.err b/tests/qapi-schema/struct-base-clash.err new file mode 100644 index 0000000000..3ac37fb26a --- /dev/null +++ b/tests/qapi-schema/struct-base-clash.err @@ -0,0 +1 @@ +tests/qapi-schema/struct-base-clash.json:4: Member name 'name' clashes with base 'Base' diff --git a/tests/qapi-schema/struct-base-clash.exit b/tests/qapi-schema/struct-base-clash.exit new file mode 100644 index 0000000000..d00491fd7e --- /dev/null +++ b/tests/qapi-schema/struct-base-clash.exit @@ -0,0 +1 @@ +1 
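The struct-base-clash-deep.json case above rejects a subtype member that repeats a (direct or indirect) base member name; the accepted pattern gives the subtype a distinct member, as in this sketch reusing the Base/Sub naming from those tests (illustrative only, not part of the patch):

# accepted: 'value' in the subtype does not collide with base member 'name'
{ 'struct': 'Base',
  'data': { 'name': 'str' } }
{ 'struct': 'Sub',
  'base': 'Base',
  'data': { 'value': 'int' } }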
diff --git a/tests/qapi-schema/struct-base-clash.json b/tests/qapi-schema/struct-base-clash.json new file mode 100644 index 0000000000..f2afc9b6f6 --- /dev/null +++ b/tests/qapi-schema/struct-base-clash.json @@ -0,0 +1,6 @@ +# we check for no duplicate keys with base +{ 'struct': 'Base', + 'data': { 'name': 'str' } } +{ 'struct': 'Sub', + 'base': 'Base', + 'data': { 'name': 'str' } } diff --git a/tests/qapi-schema/struct-base-clash.out b/tests/qapi-schema/struct-base-clash.out new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/struct-base-clash.out diff --git a/tests/qapi-schema/type-bypass-bad-gen.err b/tests/qapi-schema/type-bypass-bad-gen.err new file mode 100644 index 0000000000..a83c3c655d --- /dev/null +++ b/tests/qapi-schema/type-bypass-bad-gen.err @@ -0,0 +1 @@ +tests/qapi-schema/type-bypass-bad-gen.json:2: 'gen' of command 'foo' should only use false value diff --git a/tests/qapi-schema/type-bypass-bad-gen.exit b/tests/qapi-schema/type-bypass-bad-gen.exit new file mode 100644 index 0000000000..d00491fd7e --- /dev/null +++ b/tests/qapi-schema/type-bypass-bad-gen.exit @@ -0,0 +1 @@ +1 diff --git a/tests/qapi-schema/type-bypass-bad-gen.json b/tests/qapi-schema/type-bypass-bad-gen.json new file mode 100644 index 0000000000..e8dec34249 --- /dev/null +++ b/tests/qapi-schema/type-bypass-bad-gen.json @@ -0,0 +1,2 @@ +# 'gen' should only appear with value false +{ 'command': 'foo', 'gen': 'whatever' } diff --git a/tests/qapi-schema/type-bypass-bad-gen.out b/tests/qapi-schema/type-bypass-bad-gen.out new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/type-bypass-bad-gen.out diff --git a/tests/qapi-schema/type-bypass-no-gen.err b/tests/qapi-schema/type-bypass-no-gen.err new file mode 100644 index 0000000000..20cef0a8a7 --- /dev/null +++ b/tests/qapi-schema/type-bypass-no-gen.err @@ -0,0 +1 @@ +tests/qapi-schema/type-bypass-no-gen.json:2: Member 'arg' of 'data' for command 'unsafe' uses '**' but did not request 'gen':false diff --git a/tests/qapi-schema/type-bypass-no-gen.exit b/tests/qapi-schema/type-bypass-no-gen.exit new file mode 100644 index 0000000000..d00491fd7e --- /dev/null +++ b/tests/qapi-schema/type-bypass-no-gen.exit @@ -0,0 +1 @@ +1 diff --git a/tests/qapi-schema/type-bypass-no-gen.json b/tests/qapi-schema/type-bypass-no-gen.json new file mode 100644 index 0000000000..4feae3719c --- /dev/null +++ b/tests/qapi-schema/type-bypass-no-gen.json @@ -0,0 +1,2 @@ +# type bypass only works with 'gen':false +{ 'command': 'unsafe', 'data': { 'arg': '**' }, 'returns': '**' } diff --git a/tests/qapi-schema/type-bypass-no-gen.out b/tests/qapi-schema/type-bypass-no-gen.out new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/type-bypass-no-gen.out diff --git a/tests/qapi-schema/type-bypass.err b/tests/qapi-schema/type-bypass.err new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/type-bypass.err diff --git a/tests/qapi-schema/type-bypass.exit b/tests/qapi-schema/type-bypass.exit new file mode 100644 index 0000000000..573541ac97 --- /dev/null +++ b/tests/qapi-schema/type-bypass.exit @@ -0,0 +1 @@ +0 diff --git a/tests/qapi-schema/type-bypass.json b/tests/qapi-schema/type-bypass.json new file mode 100644 index 0000000000..48b2137833 --- /dev/null +++ b/tests/qapi-schema/type-bypass.json @@ -0,0 +1,2 @@ +# Use of 'gen':false allows bypassing type system +{ 'command': 'unsafe', 'data': { 'arg': '**' }, 'returns': '**', 'gen': false } diff --git 
a/tests/qapi-schema/type-bypass.out b/tests/qapi-schema/type-bypass.out new file mode 100644 index 0000000000..eaf20f8344 --- /dev/null +++ b/tests/qapi-schema/type-bypass.out @@ -0,0 +1,3 @@ +[OrderedDict([('command', 'unsafe'), ('data', OrderedDict([('arg', '**')])), ('returns', '**'), ('gen', False)])] +[] +[] diff --git a/tests/qapi-schema/unicode-str.err b/tests/qapi-schema/unicode-str.err new file mode 100644 index 0000000000..f621cd6448 --- /dev/null +++ b/tests/qapi-schema/unicode-str.err @@ -0,0 +1 @@ +tests/qapi-schema/unicode-str.json:2: 'command' uses invalid name 'é' diff --git a/tests/qapi-schema/unicode-str.exit b/tests/qapi-schema/unicode-str.exit new file mode 100644 index 0000000000..d00491fd7e --- /dev/null +++ b/tests/qapi-schema/unicode-str.exit @@ -0,0 +1 @@ +1 diff --git a/tests/qapi-schema/unicode-str.json b/tests/qapi-schema/unicode-str.json new file mode 100644 index 0000000000..5253a1b9f3 --- /dev/null +++ b/tests/qapi-schema/unicode-str.json @@ -0,0 +1,2 @@ +# we don't support full Unicode strings, yet +{ 'command': 'é' } diff --git a/tests/qapi-schema/unicode-str.out b/tests/qapi-schema/unicode-str.out new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/unicode-str.out diff --git a/tests/qapi-schema/union-bad-branch.err b/tests/qapi-schema/union-bad-branch.err new file mode 100644 index 0000000000..8822735561 --- /dev/null +++ b/tests/qapi-schema/union-bad-branch.err @@ -0,0 +1 @@ +tests/qapi-schema/union-bad-branch.json:6: Union 'MyUnion' member 'ONE' clashes with 'one' diff --git a/tests/qapi-schema/union-bad-branch.exit b/tests/qapi-schema/union-bad-branch.exit new file mode 100644 index 0000000000..d00491fd7e --- /dev/null +++ b/tests/qapi-schema/union-bad-branch.exit @@ -0,0 +1 @@ +1 diff --git a/tests/qapi-schema/union-bad-branch.json b/tests/qapi-schema/union-bad-branch.json new file mode 100644 index 0000000000..913aa38bc8 --- /dev/null +++ b/tests/qapi-schema/union-bad-branch.json @@ -0,0 +1,8 @@ +# we reject normal unions where branches would collide in C +{ 'struct': 'One', + 'data': { 'string': 'str' } } +{ 'struct': 'Two', + 'data': { 'number': 'int' } } +{ 'union': 'MyUnion', + 'data': { 'one': 'One', + 'ONE': 'Two' } } diff --git a/tests/qapi-schema/union-bad-branch.out b/tests/qapi-schema/union-bad-branch.out new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/union-bad-branch.out diff --git a/tests/qapi-schema/union-base-no-discriminator.err b/tests/qapi-schema/union-base-no-discriminator.err new file mode 100644 index 0000000000..fc8b79c459 --- /dev/null +++ b/tests/qapi-schema/union-base-no-discriminator.err @@ -0,0 +1 @@ +tests/qapi-schema/union-base-no-discriminator.json:11: Union 'TestUnion' requires a discriminator to go along with base diff --git a/tests/qapi-schema/union-base-no-discriminator.exit b/tests/qapi-schema/union-base-no-discriminator.exit new file mode 100644 index 0000000000..d00491fd7e --- /dev/null +++ b/tests/qapi-schema/union-base-no-discriminator.exit @@ -0,0 +1 @@ +1 diff --git a/tests/qapi-schema/union-base-no-discriminator.json b/tests/qapi-schema/union-base-no-discriminator.json new file mode 100644 index 0000000000..1409cf5c9e --- /dev/null +++ b/tests/qapi-schema/union-base-no-discriminator.json @@ -0,0 +1,14 @@ +# we reject simple unions with a base (or flat unions without discriminator) +{ 'struct': 'TestTypeA', + 'data': { 'string': 'str' } } + +{ 'struct': 'TestTypeB', + 'data': { 'integer': 'int' } } + +{ 'struct': 'Base', + 'data': { 
'string': 'str' } } + +{ 'union': 'TestUnion', + 'base': 'Base', + 'data': { 'value1': 'TestTypeA', + 'value2': 'TestTypeB' } } diff --git a/tests/qapi-schema/union-base-no-discriminator.out b/tests/qapi-schema/union-base-no-discriminator.out new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/union-base-no-discriminator.out diff --git a/tests/qapi-schema/union-invalid-base.err b/tests/qapi-schema/union-invalid-base.err index 938f96962b..9f637963e8 100644 --- a/tests/qapi-schema/union-invalid-base.err +++ b/tests/qapi-schema/union-invalid-base.err @@ -1 +1 @@ -tests/qapi-schema/union-invalid-base.json:7: Base 'TestBaseWrong' is not a valid type +tests/qapi-schema/union-invalid-base.json:8: Base 'int' is not a valid struct diff --git a/tests/qapi-schema/union-invalid-base.json b/tests/qapi-schema/union-invalid-base.json index 1fa4930010..92be39df69 100644 --- a/tests/qapi-schema/union-invalid-base.json +++ b/tests/qapi-schema/union-invalid-base.json @@ -1,10 +1,12 @@ -{ 'type': 'TestTypeA', +# a union base type must be a struct +{ 'struct': 'TestTypeA', 'data': { 'string': 'str' } } -{ 'type': 'TestTypeB', +{ 'struct': 'TestTypeB', 'data': { 'integer': 'int' } } { 'union': 'TestUnion', - 'base': 'TestBaseWrong', + 'base': 'int', + 'discriminator': 'int', 'data': { 'value1': 'TestTypeA', 'value2': 'TestTypeB' } } diff --git a/tests/qapi-schema/union-max.err b/tests/qapi-schema/union-max.err new file mode 100644 index 0000000000..55ce4399d6 --- /dev/null +++ b/tests/qapi-schema/union-max.err @@ -0,0 +1 @@ +tests/qapi-schema/union-max.json:2: Union 'Union' member 'max' clashes with '(automatic)' diff --git a/tests/qapi-schema/union-max.exit b/tests/qapi-schema/union-max.exit new file mode 100644 index 0000000000..d00491fd7e --- /dev/null +++ b/tests/qapi-schema/union-max.exit @@ -0,0 +1 @@ +1 diff --git a/tests/qapi-schema/union-max.json b/tests/qapi-schema/union-max.json new file mode 100644 index 0000000000..d6ad986999 --- /dev/null +++ b/tests/qapi-schema/union-max.json @@ -0,0 +1,3 @@ +# we reject 'max' branch in a union, for collision with C enum +{ 'union': 'Union', + 'data': { 'max': 'int' } } diff --git a/tests/qapi-schema/union-max.out b/tests/qapi-schema/union-max.out new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/union-max.out diff --git a/tests/qapi-schema/union-optional-branch.err b/tests/qapi-schema/union-optional-branch.err new file mode 100644 index 0000000000..3ada1334dc --- /dev/null +++ b/tests/qapi-schema/union-optional-branch.err @@ -0,0 +1 @@ +tests/qapi-schema/union-optional-branch.json:2: Member of union 'Union' does not allow optional name '*a' diff --git a/tests/qapi-schema/union-optional-branch.exit b/tests/qapi-schema/union-optional-branch.exit new file mode 100644 index 0000000000..d00491fd7e --- /dev/null +++ b/tests/qapi-schema/union-optional-branch.exit @@ -0,0 +1 @@ +1 diff --git a/tests/qapi-schema/union-optional-branch.json b/tests/qapi-schema/union-optional-branch.json new file mode 100644 index 0000000000..591615fc68 --- /dev/null +++ b/tests/qapi-schema/union-optional-branch.json @@ -0,0 +1,2 @@ +# union branches cannot be optional +{ 'union': 'Union', 'data': { '*a': 'int', 'b': 'str' } } diff --git a/tests/qapi-schema/union-optional-branch.out b/tests/qapi-schema/union-optional-branch.out new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/union-optional-branch.out diff --git a/tests/qapi-schema/union-unknown.err 
b/tests/qapi-schema/union-unknown.err new file mode 100644 index 0000000000..54fe456f9c --- /dev/null +++ b/tests/qapi-schema/union-unknown.err @@ -0,0 +1 @@ +tests/qapi-schema/union-unknown.json:2: Member 'unknown' of union 'Union' uses unknown type 'MissingType' diff --git a/tests/qapi-schema/union-unknown.exit b/tests/qapi-schema/union-unknown.exit new file mode 100644 index 0000000000..d00491fd7e --- /dev/null +++ b/tests/qapi-schema/union-unknown.exit @@ -0,0 +1 @@ +1 diff --git a/tests/qapi-schema/union-unknown.json b/tests/qapi-schema/union-unknown.json new file mode 100644 index 0000000000..aa7e8143d8 --- /dev/null +++ b/tests/qapi-schema/union-unknown.json @@ -0,0 +1,3 @@ +# we reject a union with unknown type in branch +{ 'union': 'Union', + 'data': { 'unknown': 'MissingType' } } diff --git a/tests/qapi-schema/union-unknown.out b/tests/qapi-schema/union-unknown.out new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/union-unknown.out diff --git a/tests/qapi-schema/unknown-escape.err b/tests/qapi-schema/unknown-escape.err new file mode 100644 index 0000000000..000e30ddf3 --- /dev/null +++ b/tests/qapi-schema/unknown-escape.err @@ -0,0 +1 @@ +tests/qapi-schema/unknown-escape.json:3:21: Unknown escape \x diff --git a/tests/qapi-schema/unknown-escape.exit b/tests/qapi-schema/unknown-escape.exit new file mode 100644 index 0000000000..d00491fd7e --- /dev/null +++ b/tests/qapi-schema/unknown-escape.exit @@ -0,0 +1 @@ +1 diff --git a/tests/qapi-schema/unknown-escape.json b/tests/qapi-schema/unknown-escape.json new file mode 100644 index 0000000000..8e6891e52a --- /dev/null +++ b/tests/qapi-schema/unknown-escape.json @@ -0,0 +1,3 @@ +# we only recognize JSON escape sequences, plus our \' extension (no \x) +# { 'command': 'foo', 'data': {} } +{ 'command': 'foo', 'dat\x61':{} } diff --git a/tests/qapi-schema/unknown-escape.out b/tests/qapi-schema/unknown-escape.out new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/unknown-escape.out diff --git a/tests/qapi-schema/unknown-expr-key.err b/tests/qapi-schema/unknown-expr-key.err new file mode 100644 index 0000000000..12f5ed5b43 --- /dev/null +++ b/tests/qapi-schema/unknown-expr-key.err @@ -0,0 +1 @@ +tests/qapi-schema/unknown-expr-key.json:2: Unknown key 'bogus' in struct 'bar' diff --git a/tests/qapi-schema/unknown-expr-key.exit b/tests/qapi-schema/unknown-expr-key.exit new file mode 100644 index 0000000000..d00491fd7e --- /dev/null +++ b/tests/qapi-schema/unknown-expr-key.exit @@ -0,0 +1 @@ +1 diff --git a/tests/qapi-schema/unknown-expr-key.json b/tests/qapi-schema/unknown-expr-key.json new file mode 100644 index 0000000000..3b2be00cc4 --- /dev/null +++ b/tests/qapi-schema/unknown-expr-key.json @@ -0,0 +1,2 @@ +# we reject an expression with unknown top-level keys +{ 'struct': 'bar', 'data': { 'string': 'str'}, 'bogus': { } } diff --git a/tests/qapi-schema/unknown-expr-key.out b/tests/qapi-schema/unknown-expr-key.out new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/qapi-schema/unknown-expr-key.out diff --git a/tests/qemu-iotests/122 b/tests/qemu-iotests/122 new file mode 100755 index 0000000000..350ca9c466 --- /dev/null +++ b/tests/qemu-iotests/122 @@ -0,0 +1,223 @@ +#!/bin/bash +# +# Test some qemu-img convert cases +# +# Copyright (C) 2015 Red Hat, Inc. 
+# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. +# + +# creator +owner=kwolf@redhat.com + +seq="$(basename $0)" +echo "QA output created by $seq" + +here="$PWD" +tmp=/tmp/$$ +status=1 # failure is the default! + +_cleanup() +{ + rm -f "$TEST_IMG".[123] + _cleanup_test_img +} +trap "_cleanup; exit \$status" 0 1 2 3 15 + +# get standard environment, filters and checks +. ./common.rc +. ./common.filter + +_supported_fmt qcow2 +_supported_proto file +_supported_os Linux + + +TEST_IMG="$TEST_IMG".base _make_test_img 64M +$QEMU_IO -c "write -P 0x11 0 64M" "$TEST_IMG".base 2>&1 | _filter_qemu_io | _filter_testdir + + +echo +echo "=== Check allocation status regression with -B ===" +echo + +_make_test_img -b "$TEST_IMG".base +$QEMU_IO -c "write -P 0x22 0 3M" "$TEST_IMG" 2>&1 | _filter_qemu_io | _filter_testdir +$QEMU_IMG convert -O $IMGFMT -B "$TEST_IMG".base "$TEST_IMG" "$TEST_IMG".orig +$QEMU_IMG map "$TEST_IMG".orig | _filter_qemu_img_map + + +echo +echo "=== Check that zero clusters are kept in overlay ===" +echo + +_make_test_img -b "$TEST_IMG".base + +$QEMU_IO -c "write -P 0 0 3M" "$TEST_IMG" 2>&1 | _filter_qemu_io | _filter_testdir +$QEMU_IMG convert -O $IMGFMT -B "$TEST_IMG".base "$TEST_IMG" "$TEST_IMG".orig +$QEMU_IO -c "read -P 0 0 3M" "$TEST_IMG".orig 2>&1 | _filter_qemu_io | _filter_testdir +$QEMU_IMG convert -O $IMGFMT -c -B "$TEST_IMG".base "$TEST_IMG" "$TEST_IMG".orig +$QEMU_IO -c "read -P 0 0 3M" "$TEST_IMG".orig 2>&1 | _filter_qemu_io | _filter_testdir + +$QEMU_IO -c "write -z 0 3M" "$TEST_IMG" 2>&1 | _filter_qemu_io | _filter_testdir +$QEMU_IMG convert -O $IMGFMT -B "$TEST_IMG".base "$TEST_IMG" "$TEST_IMG".orig +$QEMU_IO -c "read -P 0 0 3M" "$TEST_IMG".orig 2>&1 | _filter_qemu_io | _filter_testdir +$QEMU_IMG convert -O $IMGFMT -c -B "$TEST_IMG".base "$TEST_IMG" "$TEST_IMG".orig +$QEMU_IO -c "read -P 0 0 3M" "$TEST_IMG".orig 2>&1 | _filter_qemu_io | _filter_testdir + + +echo +echo "=== Concatenate multiple source images ===" +echo + +TEST_IMG="$TEST_IMG".1 _make_test_img 4M +TEST_IMG="$TEST_IMG".2 _make_test_img 4M +TEST_IMG="$TEST_IMG".3 _make_test_img 4M + +$QEMU_IO -c "write -P 0x11 0 64k" "$TEST_IMG".1 2>&1 | _filter_qemu_io | _filter_testdir +$QEMU_IO -c "write -P 0x22 0 64k" "$TEST_IMG".2 2>&1 | _filter_qemu_io | _filter_testdir +$QEMU_IO -c "write -P 0x33 0 64k" "$TEST_IMG".3 2>&1 | _filter_qemu_io | _filter_testdir + +$QEMU_IMG convert -O $IMGFMT "$TEST_IMG".[123] "$TEST_IMG" +$QEMU_IMG map "$TEST_IMG" | _filter_qemu_img_map +$QEMU_IO -c "read -P 0x11 0 64k" "$TEST_IMG" 2>&1 | _filter_qemu_io | _filter_testdir +$QEMU_IO -c "read -P 0x22 4M 64k" "$TEST_IMG" 2>&1 | _filter_qemu_io | _filter_testdir +$QEMU_IO -c "read -P 0x33 8M 64k" "$TEST_IMG" 2>&1 | _filter_qemu_io | _filter_testdir + +$QEMU_IMG convert -c -O $IMGFMT "$TEST_IMG".[123] "$TEST_IMG" +$QEMU_IMG map --output=json "$TEST_IMG" | _filter_qemu_img_map +$QEMU_IO -c "read -P 0x11 0 64k" "$TEST_IMG" 2>&1 | _filter_qemu_io | 
_filter_testdir +$QEMU_IO -c "read -P 0x22 4M 64k" "$TEST_IMG" 2>&1 | _filter_qemu_io | _filter_testdir +$QEMU_IO -c "read -P 0x33 8M 64k" "$TEST_IMG" 2>&1 | _filter_qemu_io | _filter_testdir + +# -B can't be combined with concatenation +$QEMU_IMG convert -O $IMGFMT -B "$TEST_IMG".base "$TEST_IMG".[123] "$TEST_IMG" +$QEMU_IMG convert -O $IMGFMT -c -B "$TEST_IMG".base "$TEST_IMG".[123] "$TEST_IMG" + + +echo +echo "=== Compression with misaligned allocations and image sizes ===" +echo + +TEST_IMG="$TEST_IMG".1 _make_test_img 1023k -o cluster_size=1024 +TEST_IMG="$TEST_IMG".2 _make_test_img 1023k -o cluster_size=1024 + +$QEMU_IO -c "write -P 0x11 16k 16k" "$TEST_IMG".1 2>&1 | _filter_qemu_io | _filter_testdir +$QEMU_IO -c "write -P 0x22 130k 130k" "$TEST_IMG".1 2>&1 | _filter_qemu_io | _filter_testdir +$QEMU_IO -c "write -P 0x33 1022k 1k" "$TEST_IMG".1 2>&1 | _filter_qemu_io | _filter_testdir +$QEMU_IO -c "write -P 0x44 0k 1k" "$TEST_IMG".2 2>&1 | _filter_qemu_io | _filter_testdir + +$QEMU_IMG convert -c -O $IMGFMT "$TEST_IMG".[12] "$TEST_IMG" +$QEMU_IMG map --output=json "$TEST_IMG" | _filter_qemu_img_map +$QEMU_IO -c "read -P 0 0k 16k" "$TEST_IMG" 2>&1 | _filter_qemu_io | _filter_testdir +$QEMU_IO -c "read -P 0x11 16k 16k" "$TEST_IMG" 2>&1 | _filter_qemu_io | _filter_testdir +$QEMU_IO -c "read -P 0 32k 98k" "$TEST_IMG" 2>&1 | _filter_qemu_io | _filter_testdir +$QEMU_IO -c "read -P 0x22 130k 130k" "$TEST_IMG" 2>&1 | _filter_qemu_io | _filter_testdir +$QEMU_IO -c "read -P 0 260k 762k" "$TEST_IMG" 2>&1 | _filter_qemu_io | _filter_testdir +$QEMU_IO -c "read -P 0x33 1022k 1k" "$TEST_IMG" 2>&1 | _filter_qemu_io | _filter_testdir +$QEMU_IO -c "read -P 0x44 1023k 1k" "$TEST_IMG" 2>&1 | _filter_qemu_io | _filter_testdir +$QEMU_IO -c "read -P 0 1024k 1022k" "$TEST_IMG" 2>&1 | _filter_qemu_io | _filter_testdir + + +echo +echo "=== Full allocation with -S 0 ===" +echo + +# Standalone image +_make_test_img 64M +$QEMU_IO -c "write -P 0x22 0 3M" "$TEST_IMG" 2>&1 | _filter_qemu_io | _filter_testdir +$QEMU_IO -c "write -P 0 3M 3M" "$TEST_IMG" 2>&1 | _filter_qemu_io | _filter_testdir + +echo +echo convert -S 0: +$QEMU_IMG convert -O $IMGFMT -S 0 "$TEST_IMG" "$TEST_IMG".orig +$QEMU_IO -c "read -P 0x22 0 3M" "$TEST_IMG".orig 2>&1 | _filter_qemu_io | _filter_testdir +$QEMU_IO -c "read -P 0 3M 61M" "$TEST_IMG".orig 2>&1 | _filter_qemu_io | _filter_testdir +$QEMU_IMG map --output=json "$TEST_IMG".orig | _filter_qemu_img_map + +echo +echo convert -c -S 0: +$QEMU_IMG convert -O $IMGFMT -c -S 0 "$TEST_IMG" "$TEST_IMG".orig +$QEMU_IO -c "read -P 0x22 0 3M" "$TEST_IMG".orig 2>&1 | _filter_qemu_io | _filter_testdir +$QEMU_IO -c "read -P 0 3M 61M" "$TEST_IMG".orig 2>&1 | _filter_qemu_io | _filter_testdir +$QEMU_IMG map --output=json "$TEST_IMG".orig | _filter_qemu_img_map + +# With backing file +TEST_IMG="$TEST_IMG".base _make_test_img 64M +$QEMU_IO -c "write -P 0x11 0 32M" "$TEST_IMG".base 2>&1 | _filter_qemu_io | _filter_testdir + +_make_test_img -b "$TEST_IMG".base 64M +$QEMU_IO -c "write -P 0x22 0 3M" "$TEST_IMG" 2>&1 | _filter_qemu_io | _filter_testdir + +echo +echo convert -S 0 with source backing file: +$QEMU_IMG convert -O $IMGFMT -S 0 "$TEST_IMG" "$TEST_IMG".orig +$QEMU_IO -c "read -P 0x22 0 3M" "$TEST_IMG".orig 2>&1 | _filter_qemu_io | _filter_testdir +$QEMU_IO -c "read -P 0x11 3M 29M" "$TEST_IMG".orig 2>&1 | _filter_qemu_io | _filter_testdir +$QEMU_IO -c "read -P 0 32M 32M" "$TEST_IMG".orig 2>&1 | _filter_qemu_io | _filter_testdir +$QEMU_IMG map --output=json "$TEST_IMG".orig | _filter_qemu_img_map + +echo 
+echo convert -c -S 0 with source backing file: +$QEMU_IMG convert -O $IMGFMT -c -S 0 "$TEST_IMG" "$TEST_IMG".orig +$QEMU_IO -c "read -P 0x22 0 3M" "$TEST_IMG".orig 2>&1 | _filter_qemu_io | _filter_testdir +$QEMU_IO -c "read -P 0x11 3M 29M" "$TEST_IMG".orig 2>&1 | _filter_qemu_io | _filter_testdir +$QEMU_IO -c "read -P 0 32M 32M" "$TEST_IMG".orig 2>&1 | _filter_qemu_io | _filter_testdir +$QEMU_IMG map --output=json "$TEST_IMG".orig | _filter_qemu_img_map + +# With keeping the backing file +echo +echo convert -S 0 -B ... +$QEMU_IMG convert -O $IMGFMT -S 0 "$TEST_IMG" "$TEST_IMG".orig +$QEMU_IO -c "read -P 0x22 0 3M" "$TEST_IMG".orig 2>&1 | _filter_qemu_io | _filter_testdir +$QEMU_IO -c "read -P 0x11 3M 29M" "$TEST_IMG".orig 2>&1 | _filter_qemu_io | _filter_testdir +$QEMU_IO -c "read -P 0 32M 32M" "$TEST_IMG".orig 2>&1 | _filter_qemu_io | _filter_testdir +$QEMU_IMG map --output=json "$TEST_IMG".orig | _filter_qemu_img_map + +echo +echo convert -c -S 0 -B ... +$QEMU_IMG convert -O $IMGFMT -c -S 0 "$TEST_IMG" "$TEST_IMG".orig +$QEMU_IO -c "read -P 0x22 0 3M" "$TEST_IMG".orig 2>&1 | _filter_qemu_io | _filter_testdir +$QEMU_IO -c "read -P 0x11 3M 29M" "$TEST_IMG".orig 2>&1 | _filter_qemu_io | _filter_testdir +$QEMU_IO -c "read -P 0 32M 32M" "$TEST_IMG".orig 2>&1 | _filter_qemu_io | _filter_testdir +$QEMU_IMG map --output=json "$TEST_IMG".orig | _filter_qemu_img_map + + +echo +echo "=== Non-zero -S ===" +echo + +_make_test_img 64M -o cluster_size=1k +$QEMU_IO -c "write -P 0 0 64k" "$TEST_IMG" 2>&1 | _filter_qemu_io | _filter_testdir +$QEMU_IO -c "write 0 1k" "$TEST_IMG" 2>&1 | _filter_qemu_io | _filter_testdir +$QEMU_IO -c "write 8k 1k" "$TEST_IMG" 2>&1 | _filter_qemu_io | _filter_testdir +$QEMU_IO -c "write 17k 1k" "$TEST_IMG" 2>&1 | _filter_qemu_io | _filter_testdir + +for min_sparse in 4k 8k; do + echo + echo convert -S $min_sparse + $QEMU_IMG convert -O $IMGFMT -o cluster_size=1k -S $min_sparse "$TEST_IMG" "$TEST_IMG".orig + $QEMU_IMG map --output=json "$TEST_IMG".orig | _filter_qemu_img_map + + echo + echo convert -c -S $min_sparse + # For compressed images, -S values other than 0 are ignored + $QEMU_IMG convert -O $IMGFMT -o cluster_size=1k -c -S $min_sparse "$TEST_IMG" "$TEST_IMG".orig + $QEMU_IMG map --output=json "$TEST_IMG".orig | _filter_qemu_img_map +done + +# success, all done +echo '*** done' +rm -f $seq.full +status=0 diff --git a/tests/qemu-iotests/122.out b/tests/qemu-iotests/122.out new file mode 100644 index 0000000000..1f853b9e93 --- /dev/null +++ b/tests/qemu-iotests/122.out @@ -0,0 +1,209 @@ +QA output created by 122 +Formatting 'TEST_DIR/t.IMGFMT.base', fmt=IMGFMT size=67108864 +wrote 67108864/67108864 bytes at offset 0 +64 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + +=== Check allocation status regression with -B === + +Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864 backing_file='TEST_DIR/t.IMGFMT.base' +wrote 3145728/3145728 bytes at offset 0 +3 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +Offset Length File +0 0x300000 TEST_DIR/t.IMGFMT.orig +0x300000 0x3d00000 TEST_DIR/t.IMGFMT.base + +=== Check that zero clusters are kept in overlay === + +Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864 backing_file='TEST_DIR/t.IMGFMT.base' +wrote 3145728/3145728 bytes at offset 0 +3 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +read 3145728/3145728 bytes at offset 0 +3 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +read 3145728/3145728 bytes at offset 0 +3 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 3145728/3145728 
bytes at offset 0 +3 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +read 3145728/3145728 bytes at offset 0 +3 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +read 3145728/3145728 bytes at offset 0 +3 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + +=== Concatenate multiple source images === + +Formatting 'TEST_DIR/t.IMGFMT.1', fmt=IMGFMT size=4194304 +Formatting 'TEST_DIR/t.IMGFMT.2', fmt=IMGFMT size=4194304 +Formatting 'TEST_DIR/t.IMGFMT.3', fmt=IMGFMT size=4194304 +wrote 65536/65536 bytes at offset 0 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset 0 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 65536/65536 bytes at offset 0 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +Offset Length File +0 0x10000 TEST_DIR/t.IMGFMT +0x400000 0x10000 TEST_DIR/t.IMGFMT +0x800000 0x10000 TEST_DIR/t.IMGFMT +read 65536/65536 bytes at offset 0 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +read 65536/65536 bytes at offset 4194304 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +read 65536/65536 bytes at offset 8388608 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +[{ "start": 0, "length": 65536, "depth": 0, "zero": false, "data": true}, +{ "start": 65536, "length": 4128768, "depth": 0, "zero": true, "data": false}, +{ "start": 4194304, "length": 65536, "depth": 0, "zero": false, "data": true}, +{ "start": 4259840, "length": 4128768, "depth": 0, "zero": true, "data": false}, +{ "start": 8388608, "length": 65536, "depth": 0, "zero": false, "data": true}, +{ "start": 8454144, "length": 4128768, "depth": 0, "zero": true, "data": false}] +read 65536/65536 bytes at offset 0 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +read 65536/65536 bytes at offset 4194304 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +read 65536/65536 bytes at offset 8388608 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +qemu-img: -B makes no sense when concatenating multiple input images +qemu-img: -B makes no sense when concatenating multiple input images + +=== Compression with misaligned allocations and image sizes === + +Formatting 'TEST_DIR/t.IMGFMT.1', fmt=IMGFMT size=1047552 +Formatting 'TEST_DIR/t.IMGFMT.2', fmt=IMGFMT size=1047552 +wrote 16384/16384 bytes at offset 16384 +16 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 133120/133120 bytes at offset 133120 +130 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 1024/1024 bytes at offset 1046528 +1 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 1024/1024 bytes at offset 0 +1 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +[{ "start": 0, "length": 65536, "depth": 0, "zero": false, "data": true}, +{ "start": 65536, "length": 65536, "depth": 0, "zero": true, "data": false}, +{ "start": 131072, "length": 196608, "depth": 0, "zero": false, "data": true}, +{ "start": 327680, "length": 655360, "depth": 0, "zero": true, "data": false}, +{ "start": 983040, "length": 65536, "depth": 0, "zero": false, "data": true}, +{ "start": 1048576, "length": 1046528, "depth": 0, "zero": true, "data": false}] +read 16384/16384 bytes at offset 0 +16 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +read 16384/16384 bytes at offset 16384 +16 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +read 100352/100352 bytes at offset 32768 +98 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +read 133120/133120 bytes at offset 133120 +130 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +read 780288/780288 
bytes at offset 266240 +762 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +read 1024/1024 bytes at offset 1046528 +1 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +read 1024/1024 bytes at offset 1047552 +1 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +read 1046528/1046528 bytes at offset 1048576 +1022 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + +=== Full allocation with -S 0 === + +Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864 +wrote 3145728/3145728 bytes at offset 0 +3 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 3145728/3145728 bytes at offset 3145728 +3 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + +convert -S 0: +read 3145728/3145728 bytes at offset 0 +3 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +read 63963136/63963136 bytes at offset 3145728 +61 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +[{ "start": 0, "length": 6291456, "depth": 0, "zero": false, "data": true, "offset": 327680}, +{ "start": 6291456, "length": 60817408, "depth": 0, "zero": true, "data": false}] + +convert -c -S 0: +read 3145728/3145728 bytes at offset 0 +3 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +read 63963136/63963136 bytes at offset 3145728 +61 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +[{ "start": 0, "length": 6291456, "depth": 0, "zero": false, "data": true}, +{ "start": 6291456, "length": 60817408, "depth": 0, "zero": true, "data": false}] +Formatting 'TEST_DIR/t.IMGFMT.base', fmt=IMGFMT size=67108864 +wrote 33554432/33554432 bytes at offset 0 +32 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864 backing_file='TEST_DIR/t.IMGFMT.base' +wrote 3145728/3145728 bytes at offset 0 +3 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + +convert -S 0 with source backing file: +read 3145728/3145728 bytes at offset 0 +3 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +read 30408704/30408704 bytes at offset 3145728 +29 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +read 33554432/33554432 bytes at offset 33554432 +32 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +[{ "start": 0, "length": 67108864, "depth": 0, "zero": false, "data": true, "offset": 327680}] + +convert -c -S 0 with source backing file: +read 3145728/3145728 bytes at offset 0 +3 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +read 30408704/30408704 bytes at offset 3145728 +29 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +read 33554432/33554432 bytes at offset 33554432 +32 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +[{ "start": 0, "length": 67108864, "depth": 0, "zero": false, "data": true}] + +convert -S 0 -B ... +read 3145728/3145728 bytes at offset 0 +3 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +read 30408704/30408704 bytes at offset 3145728 +29 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +read 33554432/33554432 bytes at offset 33554432 +32 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +[{ "start": 0, "length": 67108864, "depth": 0, "zero": false, "data": true, "offset": 327680}] + +convert -c -S 0 -B ... 
+read 3145728/3145728 bytes at offset 0 +3 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +read 30408704/30408704 bytes at offset 3145728 +29 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +read 33554432/33554432 bytes at offset 33554432 +32 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +[{ "start": 0, "length": 67108864, "depth": 0, "zero": false, "data": true}] + +=== Non-zero -S === + +Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864 +wrote 65536/65536 bytes at offset 0 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 1024/1024 bytes at offset 0 +1 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 1024/1024 bytes at offset 8192 +1 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 1024/1024 bytes at offset 17408 +1 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + +convert -S 4k +[{ "start": 0, "length": 1024, "depth": 0, "zero": false, "data": true, "offset": 8192}, +{ "start": 1024, "length": 7168, "depth": 0, "zero": true, "data": false}, +{ "start": 8192, "length": 1024, "depth": 0, "zero": false, "data": true, "offset": 9216}, +{ "start": 9216, "length": 8192, "depth": 0, "zero": true, "data": false}, +{ "start": 17408, "length": 1024, "depth": 0, "zero": false, "data": true, "offset": 10240}, +{ "start": 18432, "length": 67090432, "depth": 0, "zero": true, "data": false}] + +convert -c -S 4k +[{ "start": 0, "length": 1024, "depth": 0, "zero": false, "data": true}, +{ "start": 1024, "length": 7168, "depth": 0, "zero": true, "data": false}, +{ "start": 8192, "length": 1024, "depth": 0, "zero": false, "data": true}, +{ "start": 9216, "length": 8192, "depth": 0, "zero": true, "data": false}, +{ "start": 17408, "length": 1024, "depth": 0, "zero": false, "data": true}, +{ "start": 18432, "length": 67090432, "depth": 0, "zero": true, "data": false}] + +convert -S 8k +[{ "start": 0, "length": 9216, "depth": 0, "zero": false, "data": true, "offset": 8192}, +{ "start": 9216, "length": 8192, "depth": 0, "zero": true, "data": false}, +{ "start": 17408, "length": 1024, "depth": 0, "zero": false, "data": true, "offset": 17408}, +{ "start": 18432, "length": 67090432, "depth": 0, "zero": true, "data": false}] + +convert -c -S 8k +[{ "start": 0, "length": 1024, "depth": 0, "zero": false, "data": true}, +{ "start": 1024, "length": 7168, "depth": 0, "zero": true, "data": false}, +{ "start": 8192, "length": 1024, "depth": 0, "zero": false, "data": true}, +{ "start": 9216, "length": 8192, "depth": 0, "zero": true, "data": false}, +{ "start": 17408, "length": 1024, "depth": 0, "zero": false, "data": true}, +{ "start": 18432, "length": 67090432, "depth": 0, "zero": true, "data": false}] +*** done diff --git a/tests/qemu-iotests/124 b/tests/qemu-iotests/124 new file mode 100644 index 0000000000..3ee78cd1f1 --- /dev/null +++ b/tests/qemu-iotests/124 @@ -0,0 +1,363 @@ +#!/usr/bin/env python +# +# Tests for incremental drive-backup +# +# Copyright (C) 2015 John Snow for Red Hat, Inc. +# +# Based on 056. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. +# + +import os +import iotests + + +def io_write_patterns(img, patterns): + for pattern in patterns: + iotests.qemu_io('-c', 'write -P%s %s %s' % pattern, img) + + +def try_remove(img): + try: + os.remove(img) + except OSError: + pass + + +class Bitmap: + def __init__(self, name, drive): + self.name = name + self.drive = drive + self.num = 0 + self.backups = list() + + def base_target(self): + return (self.drive['backup'], None) + + def new_target(self, num=None): + if num is None: + num = self.num + self.num = num + 1 + base = os.path.join(iotests.test_dir, + "%s.%s." % (self.drive['id'], self.name)) + suff = "%i.%s" % (num, self.drive['fmt']) + target = base + "inc" + suff + reference = base + "ref" + suff + self.backups.append((target, reference)) + return (target, reference) + + def last_target(self): + if self.backups: + return self.backups[-1] + return self.base_target() + + def del_target(self): + for image in self.backups.pop(): + try_remove(image) + self.num -= 1 + + def cleanup(self): + for backup in self.backups: + for image in backup: + try_remove(image) + + +class TestIncrementalBackup(iotests.QMPTestCase): + def setUp(self): + self.bitmaps = list() + self.files = list() + self.drives = list() + self.vm = iotests.VM() + self.err_img = os.path.join(iotests.test_dir, 'err.%s' % iotests.imgfmt) + + # Create a base image with a distinctive patterning + drive0 = self.add_node('drive0') + self.img_create(drive0['file'], drive0['fmt']) + self.vm.add_drive(drive0['file']) + io_write_patterns(drive0['file'], (('0x41', 0, 512), + ('0xd5', '1M', '32k'), + ('0xdc', '32M', '124k'))) + self.vm.launch() + + + def add_node(self, node_id, fmt=iotests.imgfmt, path=None, backup=None): + if path is None: + path = os.path.join(iotests.test_dir, '%s.%s' % (node_id, fmt)) + if backup is None: + backup = os.path.join(iotests.test_dir, + '%s.full.backup.%s' % (node_id, fmt)) + + self.drives.append({ + 'id': node_id, + 'file': path, + 'backup': backup, + 'fmt': fmt }) + return self.drives[-1] + + + def img_create(self, img, fmt=iotests.imgfmt, size='64M', + parent=None, parentFormat=None): + if parent: + if parentFormat is None: + parentFormat = fmt + iotests.qemu_img('create', '-f', fmt, img, size, + '-b', parent, '-F', parentFormat) + else: + iotests.qemu_img('create', '-f', fmt, img, size) + self.files.append(img) + + + def do_qmp_backup(self, error='Input/output error', **kwargs): + res = self.vm.qmp('drive-backup', **kwargs) + self.assert_qmp(res, 'return', {}) + + event = self.vm.event_wait(name="BLOCK_JOB_COMPLETED", + match={'data': {'device': kwargs['device']}}) + self.assertIsNotNone(event) + + try: + failure = self.dictpath(event, 'data/error') + except AssertionError: + # Backup succeeded. + self.assert_qmp(event, 'data/offset', event['data']['len']) + return True + else: + # Backup failed. 
+ self.assert_qmp(event, 'data/error', error) + return False + + + def create_anchor_backup(self, drive=None): + if drive is None: + drive = self.drives[-1] + res = self.do_qmp_backup(device=drive['id'], sync='full', + format=drive['fmt'], target=drive['backup']) + self.assertTrue(res) + self.files.append(drive['backup']) + return drive['backup'] + + + def make_reference_backup(self, bitmap=None): + if bitmap is None: + bitmap = self.bitmaps[-1] + _, reference = bitmap.last_target() + res = self.do_qmp_backup(device=bitmap.drive['id'], sync='full', + format=bitmap.drive['fmt'], target=reference) + self.assertTrue(res) + + + def add_bitmap(self, name, drive, **kwargs): + bitmap = Bitmap(name, drive) + self.bitmaps.append(bitmap) + result = self.vm.qmp('block-dirty-bitmap-add', node=drive['id'], + name=bitmap.name, **kwargs) + self.assert_qmp(result, 'return', {}) + return bitmap + + + def prepare_backup(self, bitmap=None, parent=None): + if bitmap is None: + bitmap = self.bitmaps[-1] + if parent is None: + parent, _ = bitmap.last_target() + + target, _ = bitmap.new_target() + self.img_create(target, bitmap.drive['fmt'], parent=parent) + return target + + + def create_incremental(self, bitmap=None, parent=None, + parentFormat=None, validate=True): + if bitmap is None: + bitmap = self.bitmaps[-1] + if parent is None: + parent, _ = bitmap.last_target() + + target = self.prepare_backup(bitmap, parent) + res = self.do_qmp_backup(device=bitmap.drive['id'], + sync='dirty-bitmap', bitmap=bitmap.name, + format=bitmap.drive['fmt'], target=target, + mode='existing') + if not res: + bitmap.del_target(); + self.assertFalse(validate) + else: + self.make_reference_backup(bitmap) + return res + + + def check_backups(self): + for bitmap in self.bitmaps: + for incremental, reference in bitmap.backups: + self.assertTrue(iotests.compare_images(incremental, reference)) + last = bitmap.last_target()[0] + self.assertTrue(iotests.compare_images(last, bitmap.drive['file'])) + + + def hmp_io_writes(self, drive, patterns): + for pattern in patterns: + self.vm.hmp_qemu_io(drive, 'write -P%s %s %s' % pattern) + self.vm.hmp_qemu_io(drive, 'flush') + + + def do_incremental_simple(self, **kwargs): + self.create_anchor_backup() + self.add_bitmap('bitmap0', self.drives[0], **kwargs) + + # Sanity: Create a "hollow" incremental backup + self.create_incremental() + # Three writes: One complete overwrite, one new segment, + # and one partial overlap. + self.hmp_io_writes(self.drives[0]['id'], (('0xab', 0, 512), + ('0xfe', '16M', '256k'), + ('0x64', '32736k', '64k'))) + self.create_incremental() + # Three more writes, one of each kind, like above + self.hmp_io_writes(self.drives[0]['id'], (('0x9a', 0, 512), + ('0x55', '8M', '352k'), + ('0x78', '15872k', '1M'))) + self.create_incremental() + self.vm.shutdown() + self.check_backups() + + + def test_incremental_simple(self): + ''' + Test: Create and verify three incremental backups. + + Create a bitmap and a full backup before VM execution begins, + then create a series of three incremental backups "during execution," + i.e.; after IO requests begin modifying the drive. + ''' + return self.do_incremental_simple() + + + def test_small_granularity(self): + ''' + Test: Create and verify backups made with a small granularity bitmap. + + Perform the same test as test_incremental_simple, but with a granularity + of only 32KiB instead of the present default of 64KiB. 
+ ''' + return self.do_incremental_simple(granularity=32768) + + + def test_large_granularity(self): + ''' + Test: Create and verify backups made with a large granularity bitmap. + + Perform the same test as test_incremental_simple, but with a granularity + of 128KiB instead of the present default of 64KiB. + ''' + return self.do_incremental_simple(granularity=131072) + + + def test_incremental_failure(self): + '''Test: Verify backups made after a failure are correct. + + Simulate a failure during an incremental backup block job, + emulate additional writes, then create another incremental backup + afterwards and verify that the backup created is correct. + ''' + + # Create a blkdebug interface to this img as 'drive1', + # but don't actually create a new image. + drive1 = self.add_node('drive1', self.drives[0]['fmt'], + path=self.drives[0]['file'], + backup=self.drives[0]['backup']) + result = self.vm.qmp('blockdev-add', options={ + 'id': drive1['id'], + 'driver': drive1['fmt'], + 'file': { + 'driver': 'blkdebug', + 'image': { + 'driver': 'file', + 'filename': drive1['file'] + }, + 'set-state': [{ + 'event': 'flush_to_disk', + 'state': 1, + 'new_state': 2 + }], + 'inject-error': [{ + 'event': 'read_aio', + 'errno': 5, + 'state': 2, + 'immediately': False, + 'once': True + }], + } + }) + self.assert_qmp(result, 'return', {}) + + self.create_anchor_backup(self.drives[0]) + self.add_bitmap('bitmap0', drive1) + # Note: at this point, during a normal execution, + # Assume that the VM resumes and begins issuing IO requests here. + + self.hmp_io_writes(drive1['id'], (('0xab', 0, 512), + ('0xfe', '16M', '256k'), + ('0x64', '32736k', '64k'))) + + result = self.create_incremental(validate=False) + self.assertFalse(result) + self.hmp_io_writes(drive1['id'], (('0x9a', 0, 512), + ('0x55', '8M', '352k'), + ('0x78', '15872k', '1M'))) + self.create_incremental() + self.vm.shutdown() + self.check_backups() + + + def test_sync_dirty_bitmap_missing(self): + self.assert_no_active_block_jobs() + self.files.append(self.err_img) + result = self.vm.qmp('drive-backup', device=self.drives[0]['id'], + sync='dirty-bitmap', format=self.drives[0]['fmt'], + target=self.err_img) + self.assert_qmp(result, 'error/class', 'GenericError') + + + def test_sync_dirty_bitmap_not_found(self): + self.assert_no_active_block_jobs() + self.files.append(self.err_img) + result = self.vm.qmp('drive-backup', device=self.drives[0]['id'], + sync='dirty-bitmap', bitmap='unknown', + format=self.drives[0]['fmt'], target=self.err_img) + self.assert_qmp(result, 'error/class', 'GenericError') + + + def test_sync_dirty_bitmap_bad_granularity(self): + ''' + Test: Test what happens if we provide an improper granularity. + + The granularity must always be a power of 2. + ''' + self.assert_no_active_block_jobs() + self.assertRaises(AssertionError, self.add_bitmap, + 'bitmap0', self.drives[0], + granularity=64000) + + + def tearDown(self): + self.vm.shutdown() + for bitmap in self.bitmaps: + bitmap.cleanup() + for filename in self.files: + try_remove(filename) + + +if __name__ == '__main__': + iotests.main(supported_fmts=['qcow2']) diff --git a/tests/qemu-iotests/124.out b/tests/qemu-iotests/124.out new file mode 100644 index 0000000000..2f7d3902f2 --- /dev/null +++ b/tests/qemu-iotests/124.out @@ -0,0 +1,5 @@ +....... 
+---------------------------------------------------------------------- +Ran 7 tests + +OK diff --git a/tests/qemu-iotests/129 b/tests/qemu-iotests/129 new file mode 100644 index 0000000000..9e87e1c8d9 --- /dev/null +++ b/tests/qemu-iotests/129 @@ -0,0 +1,86 @@ +#!/usr/bin/env python +# +# Tests that "bdrv_drain_all" doesn't drain block jobs +# +# Copyright (C) 2015 Red Hat, Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. +# + +import os +import iotests +import time + +class TestStopWithBlockJob(iotests.QMPTestCase): + test_img = os.path.join(iotests.test_dir, 'test.img') + target_img = os.path.join(iotests.test_dir, 'target.img') + base_img = os.path.join(iotests.test_dir, 'base.img') + + def setUp(self): + iotests.qemu_img('create', '-f', iotests.imgfmt, self.base_img, "1G") + iotests.qemu_img('create', '-f', iotests.imgfmt, self.test_img, "-b", self.base_img) + iotests.qemu_io('-f', iotests.imgfmt, '-c', 'write -P0x5d 1M 128M', self.test_img) + self.vm = iotests.VM().add_drive(self.test_img) + self.vm.launch() + + def tearDown(self): + params = {"device": "drive0", + "bps": 0, + "bps_rd": 0, + "bps_wr": 0, + "iops": 0, + "iops_rd": 0, + "iops_wr": 0, + } + result = self.vm.qmp("block_set_io_throttle", conv_keys=False, + **params) + self.vm.shutdown() + + def do_test_stop(self, cmd, **args): + """Test 'stop' while block job is running on a throttled drive. + The 'stop' command shouldn't drain the job""" + params = {"device": "drive0", + "bps": 1024, + "bps_rd": 0, + "bps_wr": 0, + "iops": 0, + "iops_rd": 0, + "iops_wr": 0, + } + result = self.vm.qmp("block_set_io_throttle", conv_keys=False, + **params) + self.assert_qmp(result, 'return', {}) + result = self.vm.qmp(cmd, **args) + self.assert_qmp(result, 'return', {}) + result = self.vm.qmp("stop") + self.assert_qmp(result, 'return', {}) + result = self.vm.qmp("query-block-jobs") + self.assert_qmp(result, 'return[0]/busy', True) + self.assert_qmp(result, 'return[0]/ready', False) + + def test_drive_mirror(self): + self.do_test_stop("drive-mirror", device="drive0", + target=self.target_img, + sync="full") + + def test_drive_backup(self): + self.do_test_stop("drive-backup", device="drive0", + target=self.target_img, + sync="full") + + def test_block_commit(self): + self.do_test_stop("block-commit", device="drive0") + +if __name__ == '__main__': + iotests.main(supported_fmts=["qcow2"]) diff --git a/tests/qemu-iotests/129.out b/tests/qemu-iotests/129.out new file mode 100644 index 0000000000..8d7e996700 --- /dev/null +++ b/tests/qemu-iotests/129.out @@ -0,0 +1,5 @@ +... 
+---------------------------------------------------------------------- +Ran 3 tests + +OK diff --git a/tests/qemu-iotests/group b/tests/qemu-iotests/group index bcf25786ab..6ca3466ec5 100644 --- a/tests/qemu-iotests/group +++ b/tests/qemu-iotests/group @@ -122,6 +122,9 @@ 115 rw auto 116 rw auto quick 121 rw auto +122 rw auto 123 rw auto quick +124 rw auto backing 128 rw auto quick +129 rw auto quick 130 rw auto quick diff --git a/tests/qemu-iotests/iotests.py b/tests/qemu-iotests/iotests.py index 14028540b3..e93e62387b 100644 --- a/tests/qemu-iotests/iotests.py +++ b/tests/qemu-iotests/iotests.py @@ -78,6 +78,23 @@ def create_image(name, size): i = i + 512 file.close() +# Test if 'match' is a recursive subset of 'event' +def event_match(event, match=None): + if match is None: + return True + + for key in match: + if key in event: + if isinstance(event[key], dict): + if not event_match(event[key], match[key]): + return False + elif event[key] != match[key]: + return False + else: + return False + + return True + class VM(object): '''A QEMU VM''' @@ -92,6 +109,7 @@ class VM(object): '-machine', 'accel=qtest', '-display', 'none', '-vga', 'none'] self._num_drives = 0 + self._events = [] # This can be used to add an unused monitor instance. def add_monitor_telnet(self, ip, port): @@ -202,14 +220,34 @@ class VM(object): def get_qmp_event(self, wait=False): '''Poll for one queued QMP events and return it''' + if len(self._events) > 0: + return self._events.pop(0) return self._qmp.pull_event(wait=wait) def get_qmp_events(self, wait=False): '''Poll for queued QMP events and return a list of dicts''' events = self._qmp.get_events(wait=wait) + events.extend(self._events) + del self._events[:] self._qmp.clear_events() return events + def event_wait(self, name='BLOCK_JOB_COMPLETED', timeout=60.0, match=None): + # Search cached events + for event in self._events: + if (event['event'] == name) and event_match(event, match): + self._events.remove(event) + return event + + # Poll for new events + while True: + event = self._qmp.pull_event(wait=timeout) + if (event['event'] == name) and event_match(event, match): + return event + self._events.append(event) + + return None + index_re = re.compile(r'([^\[]+)\[([^\]]+)\]') class QMPTestCase(unittest.TestCase): diff --git a/tests/test-aio.c b/tests/test-aio.c index a7cb5c9915..4b0cb45d31 100644 --- a/tests/test-aio.c +++ b/tests/test-aio.c @@ -107,6 +107,7 @@ static void test_notify(void) typedef struct { QemuMutex start_lock; + EventNotifier notifier; bool thread_acquired; } AcquireTestData; @@ -118,6 +119,8 @@ static void *test_acquire_thread(void *opaque) qemu_mutex_lock(&data->start_lock); qemu_mutex_unlock(&data->start_lock); + g_usleep(500000); + event_notifier_set(&data->notifier); aio_context_acquire(ctx); aio_context_release(ctx); @@ -126,20 +129,19 @@ static void *test_acquire_thread(void *opaque) return NULL; } -static void dummy_notifier_read(EventNotifier *unused) +static void dummy_notifier_read(EventNotifier *n) { - g_assert(false); /* should never be invoked */ + event_notifier_test_and_clear(n); } static void test_acquire(void) { QemuThread thread; - EventNotifier notifier; AcquireTestData data; /* Dummy event notifier ensures aio_poll() will block */ - event_notifier_init(¬ifier, false); - aio_set_event_notifier(ctx, ¬ifier, dummy_notifier_read); + event_notifier_init(&data.notifier, false); + aio_set_event_notifier(ctx, &data.notifier, dummy_notifier_read); g_assert(!aio_poll(ctx, false)); /* consume aio_notify() */ 
qemu_mutex_init(&data.start_lock); @@ -153,12 +155,13 @@ static void test_acquire(void) /* Block in aio_poll(), let other thread kick us and acquire context */ aio_context_acquire(ctx); qemu_mutex_unlock(&data.start_lock); /* let the thread run */ - g_assert(!aio_poll(ctx, true)); + g_assert(aio_poll(ctx, true)); + g_assert(!data.thread_acquired); aio_context_release(ctx); qemu_thread_join(&thread); - aio_set_event_notifier(ctx, ¬ifier, NULL); - event_notifier_cleanup(¬ifier); + aio_set_event_notifier(ctx, &data.notifier, NULL); + event_notifier_cleanup(&data.notifier); g_assert(data.thread_acquired); } diff --git a/tests/test-hbitmap.c b/tests/test-hbitmap.c index 8c902f2055..9f41b5fd2e 100644 --- a/tests/test-hbitmap.c +++ b/tests/test-hbitmap.c @@ -11,6 +11,8 @@ #include <glib.h> #include <stdarg.h> +#include <string.h> +#include <sys/types.h> #include "qemu/hbitmap.h" #define LOG_BITS_PER_LONG (BITS_PER_LONG == 32 ? 5 : 6) @@ -23,6 +25,7 @@ typedef struct TestHBitmapData { HBitmap *hb; unsigned long *bits; size_t size; + size_t old_size; int granularity; } TestHBitmapData; @@ -91,6 +94,44 @@ static void hbitmap_test_init(TestHBitmapData *data, } } +static inline size_t hbitmap_test_array_size(size_t bits) +{ + size_t n = (bits + BITS_PER_LONG - 1) / BITS_PER_LONG; + return n ? n : 1; +} + +static void hbitmap_test_truncate_impl(TestHBitmapData *data, + size_t size) +{ + size_t n; + size_t m; + data->old_size = data->size; + data->size = size; + + if (data->size == data->old_size) { + return; + } + + n = hbitmap_test_array_size(size); + m = hbitmap_test_array_size(data->old_size); + data->bits = g_realloc(data->bits, sizeof(unsigned long) * n); + if (n > m) { + memset(&data->bits[m], 0x00, sizeof(unsigned long) * (n - m)); + } + + /* If we shrink to an uneven multiple of sizeof(unsigned long), + * scrub the leftover memory. */ + if (data->size < data->old_size) { + m = size % (sizeof(unsigned long) * 8); + if (m) { + unsigned long mask = (1ULL << m) - 1; + data->bits[n-1] &= mask; + } + } + + hbitmap_truncate(data->hb, size); +} + static void hbitmap_test_teardown(TestHBitmapData *data, const void *unused) { @@ -369,6 +410,198 @@ static void test_hbitmap_iter_granularity(TestHBitmapData *data, g_assert_cmpint(hbitmap_iter_next(&hbi), <, 0); } +static void hbitmap_test_set_boundary_bits(TestHBitmapData *data, ssize_t diff) +{ + size_t size = data->size; + + /* First bit */ + hbitmap_test_set(data, 0, 1); + if (diff < 0) { + /* Last bit in new, shortened map */ + hbitmap_test_set(data, size + diff - 1, 1); + + /* First bit to be truncated away */ + hbitmap_test_set(data, size + diff, 1); + } + /* Last bit */ + hbitmap_test_set(data, size - 1, 1); + if (data->granularity == 0) { + hbitmap_test_check_get(data); + } +} + +static void hbitmap_test_check_boundary_bits(TestHBitmapData *data) +{ + size_t size = MIN(data->size, data->old_size); + + if (data->granularity == 0) { + hbitmap_test_check_get(data); + hbitmap_test_check(data, 0); + } else { + /* If a granularity was set, note that every distinct + * (bit >> granularity) value that was set will increase + * the bit pop count by 2^granularity, not just 1. + * + * The hbitmap_test_check facility does not currently tolerate + * non-zero granularities, so test the boundaries and the population + * count manually. + */ + g_assert(hbitmap_get(data->hb, 0)); + g_assert(hbitmap_get(data->hb, size - 1)); + g_assert_cmpint(2 << data->granularity, ==, hbitmap_count(data->hb)); + } +} + +/* Generic truncate test. 
*/ +static void hbitmap_test_truncate(TestHBitmapData *data, + size_t size, + ssize_t diff, + int granularity) +{ + hbitmap_test_init(data, size, granularity); + hbitmap_test_set_boundary_bits(data, diff); + hbitmap_test_truncate_impl(data, size + diff); + hbitmap_test_check_boundary_bits(data); +} + +static void test_hbitmap_truncate_nop(TestHBitmapData *data, + const void *unused) +{ + hbitmap_test_truncate(data, L2, 0, 0); +} + +/** + * Grow by an amount smaller than the granularity, without crossing + * a granularity alignment boundary. Effectively a NOP. + */ +static void test_hbitmap_truncate_grow_negligible(TestHBitmapData *data, + const void *unused) +{ + size_t size = L2 - 1; + size_t diff = 1; + int granularity = 1; + + hbitmap_test_truncate(data, size, diff, granularity); +} + +/** + * Shrink by an amount smaller than the granularity, without crossing + * a granularity alignment boundary. Effectively a NOP. + */ +static void test_hbitmap_truncate_shrink_negligible(TestHBitmapData *data, + const void *unused) +{ + size_t size = L2; + ssize_t diff = -1; + int granularity = 1; + + hbitmap_test_truncate(data, size, diff, granularity); +} + +/** + * Grow by an amount smaller than the granularity, but crossing over + * a granularity alignment boundary. + */ +static void test_hbitmap_truncate_grow_tiny(TestHBitmapData *data, + const void *unused) +{ + size_t size = L2 - 2; + ssize_t diff = 1; + int granularity = 1; + + hbitmap_test_truncate(data, size, diff, granularity); +} + +/** + * Shrink by an amount smaller than the granularity, but crossing over + * a granularity alignment boundary. + */ +static void test_hbitmap_truncate_shrink_tiny(TestHBitmapData *data, + const void *unused) +{ + size_t size = L2 - 1; + ssize_t diff = -1; + int granularity = 1; + + hbitmap_test_truncate(data, size, diff, granularity); +} + +/** + * Grow by an amount smaller than sizeof(long), and not crossing over + * a sizeof(long) alignment boundary. + */ +static void test_hbitmap_truncate_grow_small(TestHBitmapData *data, + const void *unused) +{ + size_t size = L2 + 1; + size_t diff = sizeof(long) / 2; + + hbitmap_test_truncate(data, size, diff, 0); +} + +/** + * Shrink by an amount smaller than sizeof(long), and not crossing over + * a sizeof(long) alignment boundary. + */ +static void test_hbitmap_truncate_shrink_small(TestHBitmapData *data, + const void *unused) +{ + size_t size = L2; + size_t diff = sizeof(long) / 2; + + hbitmap_test_truncate(data, size, -diff, 0); +} + +/** + * Grow by an amount smaller than sizeof(long), while crossing over + * a sizeof(long) alignment boundary. + */ +static void test_hbitmap_truncate_grow_medium(TestHBitmapData *data, + const void *unused) +{ + size_t size = L2 - 1; + size_t diff = sizeof(long) / 2; + + hbitmap_test_truncate(data, size, diff, 0); +} + +/** + * Shrink by an amount smaller than sizeof(long), while crossing over + * a sizeof(long) alignment boundary. + */ +static void test_hbitmap_truncate_shrink_medium(TestHBitmapData *data, + const void *unused) +{ + size_t size = L2 + 1; + size_t diff = sizeof(long) / 2; + + hbitmap_test_truncate(data, size, -diff, 0); +} + +/** + * Grow by an amount larger than sizeof(long). + */ +static void test_hbitmap_truncate_grow_large(TestHBitmapData *data, + const void *unused) +{ + size_t size = L2; + size_t diff = 8 * sizeof(long); + + hbitmap_test_truncate(data, size, diff, 0); +} + +/** + * Shrink by an amount larger than sizeof(long). 
+ */ +static void test_hbitmap_truncate_shrink_large(TestHBitmapData *data, + const void *unused) +{ + size_t size = L2; + size_t diff = 8 * sizeof(long); + + hbitmap_test_truncate(data, size, -diff, 0); +} + static void hbitmap_test_add(const char *testpath, void (*test_func)(TestHBitmapData *data, const void *user_data)) { @@ -395,6 +628,28 @@ int main(int argc, char **argv) hbitmap_test_add("/hbitmap/reset/empty", test_hbitmap_reset_empty); hbitmap_test_add("/hbitmap/reset/general", test_hbitmap_reset); hbitmap_test_add("/hbitmap/granularity", test_hbitmap_granularity); + + hbitmap_test_add("/hbitmap/truncate/nop", test_hbitmap_truncate_nop); + hbitmap_test_add("/hbitmap/truncate/grow/negligible", + test_hbitmap_truncate_grow_negligible); + hbitmap_test_add("/hbitmap/truncate/shrink/negligible", + test_hbitmap_truncate_shrink_negligible); + hbitmap_test_add("/hbitmap/truncate/grow/tiny", + test_hbitmap_truncate_grow_tiny); + hbitmap_test_add("/hbitmap/truncate/shrink/tiny", + test_hbitmap_truncate_shrink_tiny); + hbitmap_test_add("/hbitmap/truncate/grow/small", + test_hbitmap_truncate_grow_small); + hbitmap_test_add("/hbitmap/truncate/shrink/small", + test_hbitmap_truncate_shrink_small); + hbitmap_test_add("/hbitmap/truncate/grow/medium", + test_hbitmap_truncate_grow_medium); + hbitmap_test_add("/hbitmap/truncate/shrink/medium", + test_hbitmap_truncate_shrink_medium); + hbitmap_test_add("/hbitmap/truncate/grow/large", + test_hbitmap_truncate_grow_large); + hbitmap_test_add("/hbitmap/truncate/shrink/large", + test_hbitmap_truncate_shrink_large); g_test_run(); return 0; diff --git a/tests/test-qmp-commands.c b/tests/test-qmp-commands.c index 554e222b32..ad2e4030b2 100644 --- a/tests/test-qmp-commands.c +++ b/tests/test-qmp-commands.c @@ -31,14 +31,17 @@ UserDefTwo *qmp_user_def_cmd2(UserDefOne *ud1a, ud1d->base = g_new0(UserDefZero, 1); ud1d->base->integer = has_udb1 ? 
ud1b->base->integer : 0; - ret = g_malloc0(sizeof(UserDefTwo)); - ret->string = strdup("blah1"); - ret->dict.string = strdup("blah2"); - ret->dict.dict.userdef = ud1c; - ret->dict.dict.string = strdup("blah3"); - ret->dict.has_dict2 = true; - ret->dict.dict2.userdef = ud1d; - ret->dict.dict2.string = strdup("blah4"); + ret = g_new0(UserDefTwo, 1); + ret->string0 = strdup("blah1"); + ret->dict1 = g_new0(UserDefTwoDict, 1); + ret->dict1->string1 = strdup("blah2"); + ret->dict1->dict2 = g_new0(UserDefTwoDictDict, 1); + ret->dict1->dict2->userdef = ud1c; + ret->dict1->dict2->string = strdup("blah3"); + ret->dict1->dict3 = g_new0(UserDefTwoDictDict, 1); + ret->dict1->has_dict3 = true; + ret->dict1->dict3->userdef = ud1d; + ret->dict1->dict3->string = strdup("blah4"); return ret; } @@ -120,15 +123,15 @@ static void test_dispatch_cmd_io(void) ret = qobject_to_qdict(test_qmp_dispatch(req)); - assert(!strcmp(qdict_get_str(ret, "string"), "blah1")); - ret_dict = qdict_get_qdict(ret, "dict"); - assert(!strcmp(qdict_get_str(ret_dict, "string"), "blah2")); - ret_dict_dict = qdict_get_qdict(ret_dict, "dict"); + assert(!strcmp(qdict_get_str(ret, "string0"), "blah1")); + ret_dict = qdict_get_qdict(ret, "dict1"); + assert(!strcmp(qdict_get_str(ret_dict, "string1"), "blah2")); + ret_dict_dict = qdict_get_qdict(ret_dict, "dict2"); ret_dict_dict_userdef = qdict_get_qdict(ret_dict_dict, "userdef"); assert(qdict_get_int(ret_dict_dict_userdef, "integer") == 42); assert(!strcmp(qdict_get_str(ret_dict_dict_userdef, "string"), "hello")); assert(!strcmp(qdict_get_str(ret_dict_dict, "string"), "blah3")); - ret_dict_dict2 = qdict_get_qdict(ret_dict, "dict2"); + ret_dict_dict2 = qdict_get_qdict(ret_dict, "dict3"); ret_dict_dict2_userdef = qdict_get_qdict(ret_dict_dict2, "userdef"); assert(qdict_get_int(ret_dict_dict2_userdef, "integer") == 422); assert(!strcmp(qdict_get_str(ret_dict_dict2_userdef, "string"), "hello2")); @@ -192,7 +195,7 @@ static void test_dealloc_partial(void) QmpInputVisitor *qiv; ud2_dict = qdict_new(); - qdict_put_obj(ud2_dict, "string", QOBJECT(qstring_from_str(text))); + qdict_put_obj(ud2_dict, "string0", QOBJECT(qstring_from_str(text))); qiv = qmp_input_visitor_new(QOBJECT(ud2_dict)); visit_type_UserDefTwo(qmp_input_get_visitor(qiv), &ud2, NULL, &err); @@ -202,9 +205,9 @@ static void test_dealloc_partial(void) /* verify partial success */ assert(ud2 != NULL); - assert(ud2->string != NULL); - assert(strcmp(ud2->string, text) == 0); - assert(ud2->dict.dict.userdef == NULL); + assert(ud2->string0 != NULL); + assert(strcmp(ud2->string0, text) == 0); + assert(ud2->dict1 == NULL); /* confirm & release construction error */ assert(err != NULL); diff --git a/tests/test-qmp-input-strict.c b/tests/test-qmp-input-strict.c index d5360c6a87..68f855bdf3 100644 --- a/tests/test-qmp-input-strict.c +++ b/tests/test-qmp-input-strict.c @@ -1,7 +1,7 @@ /* * QMP Input Visitor unit-tests (strict mode). * - * Copyright (C) 2011-2012 Red Hat Inc. + * Copyright (C) 2011-2012, 2015 Red Hat Inc. 
* * Authors: * Luiz Capitulino <lcapitulino@redhat.com> @@ -116,15 +116,18 @@ static void test_validate_struct(TestInputVisitorData *data, static void test_validate_struct_nested(TestInputVisitorData *data, const void *unused) { - UserDefNested *udp = NULL; + UserDefTwo *udp = NULL; Error *err = NULL; Visitor *v; - v = validate_test_init(data, "{ 'string0': 'string0', 'dict1': { 'string1': 'string1', 'dict2': { 'userdef1': { 'integer': 42, 'string': 'string' }, 'string2': 'string2'}}}"); + v = validate_test_init(data, "{ 'string0': 'string0', " + "'dict1': { 'string1': 'string1', " + "'dict2': { 'userdef': { 'integer': 42, " + "'string': 'string' }, 'string': 'string2'}}}"); - visit_type_UserDefNested(v, &udp, NULL, &err); + visit_type_UserDefTwo(v, &udp, NULL, &err); g_assert(!err); - qapi_free_UserDefNested(udp); + qapi_free_UserDefTwo(udp); } static void test_validate_list(TestInputVisitorData *data, @@ -141,18 +144,18 @@ static void test_validate_list(TestInputVisitorData *data, qapi_free_UserDefOneList(head); } -static void test_validate_union(TestInputVisitorData *data, - const void *unused) +static void test_validate_union_native_list(TestInputVisitorData *data, + const void *unused) { - UserDefUnion *tmp = NULL; + UserDefNativeListUnion *tmp = NULL; Visitor *v; Error *err = NULL; - v = validate_test_init(data, "{ 'type': 'b', 'integer': 41, 'data' : { 'integer': 42 } }"); + v = validate_test_init(data, "{ 'type': 'integer', 'data' : [ 1, 2 ] }"); - visit_type_UserDefUnion(v, &tmp, NULL, &err); + visit_type_UserDefNativeListUnion(v, &tmp, NULL, &err); g_assert(!err); - qapi_free_UserDefUnion(tmp); + qapi_free_UserDefNativeListUnion(tmp); } static void test_validate_union_flat(TestInputVisitorData *data, @@ -173,18 +176,18 @@ static void test_validate_union_flat(TestInputVisitorData *data, qapi_free_UserDefFlatUnion(tmp); } -static void test_validate_union_anon(TestInputVisitorData *data, - const void *unused) +static void test_validate_alternate(TestInputVisitorData *data, + const void *unused) { - UserDefAnonUnion *tmp = NULL; + UserDefAlternate *tmp = NULL; Visitor *v; Error *err = NULL; v = validate_test_init(data, "42"); - visit_type_UserDefAnonUnion(v, &tmp, NULL, &err); + visit_type_UserDefAlternate(v, &tmp, NULL, &err); g_assert(!err); - qapi_free_UserDefAnonUnion(tmp); + qapi_free_UserDefAlternate(tmp); } static void test_validate_fail_struct(TestInputVisitorData *data, @@ -207,15 +210,15 @@ static void test_validate_fail_struct(TestInputVisitorData *data, static void test_validate_fail_struct_nested(TestInputVisitorData *data, const void *unused) { - UserDefNested *udp = NULL; + UserDefTwo *udp = NULL; Error *err = NULL; Visitor *v; v = validate_test_init(data, "{ 'string0': 'string0', 'dict1': { 'string1': 'string1', 'dict2': { 'userdef1': { 'integer': 42, 'string': 'string', 'extra': [42, 23, {'foo':'bar'}] }, 'string2': 'string2'}}}"); - visit_type_UserDefNested(v, &udp, NULL, &err); + visit_type_UserDefTwo(v, &udp, NULL, &err); g_assert(err); - qapi_free_UserDefNested(udp); + qapi_free_UserDefTwo(udp); } static void test_validate_fail_list(TestInputVisitorData *data, @@ -232,18 +235,19 @@ static void test_validate_fail_list(TestInputVisitorData *data, qapi_free_UserDefOneList(head); } -static void test_validate_fail_union(TestInputVisitorData *data, - const void *unused) +static void test_validate_fail_union_native_list(TestInputVisitorData *data, + const void *unused) { - UserDefUnion *tmp = NULL; + UserDefNativeListUnion *tmp = NULL; Error *err = NULL; Visitor *v; - v 
= validate_test_init(data, "{ 'type': 'b', 'data' : { 'integer': 42 } }"); + v = validate_test_init(data, + "{ 'type': 'integer', 'data' : [ 'string' ] }"); - visit_type_UserDefUnion(v, &tmp, NULL, &err); + visit_type_UserDefNativeListUnion(v, &tmp, NULL, &err); g_assert(err); - qapi_free_UserDefUnion(tmp); + qapi_free_UserDefNativeListUnion(tmp); } static void test_validate_fail_union_flat(TestInputVisitorData *data, @@ -275,18 +279,18 @@ static void test_validate_fail_union_flat_no_discrim(TestInputVisitorData *data, qapi_free_UserDefFlatUnion2(tmp); } -static void test_validate_fail_union_anon(TestInputVisitorData *data, - const void *unused) +static void test_validate_fail_alternate(TestInputVisitorData *data, + const void *unused) { - UserDefAnonUnion *tmp = NULL; + UserDefAlternate *tmp = NULL; Visitor *v; Error *err = NULL; v = validate_test_init(data, "3.14"); - visit_type_UserDefAnonUnion(v, &tmp, NULL, &err); + visit_type_UserDefAlternate(v, &tmp, NULL, &err); g_assert(err); - qapi_free_UserDefAnonUnion(tmp); + qapi_free_UserDefAlternate(tmp); } static void validate_test_add(const char *testpath, @@ -304,31 +308,31 @@ int main(int argc, char **argv) g_test_init(&argc, &argv, NULL); validate_test_add("/visitor/input-strict/pass/struct", - &testdata, test_validate_struct); + &testdata, test_validate_struct); validate_test_add("/visitor/input-strict/pass/struct-nested", - &testdata, test_validate_struct_nested); + &testdata, test_validate_struct_nested); validate_test_add("/visitor/input-strict/pass/list", - &testdata, test_validate_list); - validate_test_add("/visitor/input-strict/pass/union", - &testdata, test_validate_union); + &testdata, test_validate_list); validate_test_add("/visitor/input-strict/pass/union-flat", - &testdata, test_validate_union_flat); - validate_test_add("/visitor/input-strict/pass/union-anon", - &testdata, test_validate_union_anon); + &testdata, test_validate_union_flat); + validate_test_add("/visitor/input-strict/pass/alternate", + &testdata, test_validate_alternate); + validate_test_add("/visitor/input-strict/pass/union-native-list", + &testdata, test_validate_union_native_list); validate_test_add("/visitor/input-strict/fail/struct", - &testdata, test_validate_fail_struct); + &testdata, test_validate_fail_struct); validate_test_add("/visitor/input-strict/fail/struct-nested", - &testdata, test_validate_fail_struct_nested); + &testdata, test_validate_fail_struct_nested); validate_test_add("/visitor/input-strict/fail/list", - &testdata, test_validate_fail_list); - validate_test_add("/visitor/input-strict/fail/union", - &testdata, test_validate_fail_union); + &testdata, test_validate_fail_list); validate_test_add("/visitor/input-strict/fail/union-flat", - &testdata, test_validate_fail_union_flat); + &testdata, test_validate_fail_union_flat); validate_test_add("/visitor/input-strict/fail/union-flat-no-discriminator", - &testdata, test_validate_fail_union_flat_no_discrim); - validate_test_add("/visitor/input-strict/fail/union-anon", - &testdata, test_validate_fail_union_anon); + &testdata, test_validate_fail_union_flat_no_discrim); + validate_test_add("/visitor/input-strict/fail/alternate", + &testdata, test_validate_fail_alternate); + validate_test_add("/visitor/input-strict/fail/union-native-list", + &testdata, test_validate_fail_union_native_list); g_test_run(); diff --git a/tests/test-qmp-input-visitor.c b/tests/test-qmp-input-visitor.c index 1c8e87295c..b96195309b 100644 --- a/tests/test-qmp-input-visitor.c +++ b/tests/test-qmp-input-visitor.c @@ -1,7 +1,7 
@@ /* * QMP Input Visitor unit-tests. * - * Copyright (C) 2011 Red Hat Inc. + * Copyright (C) 2011, 2015 Red Hat Inc. * * Authors: * Luiz Capitulino <lcapitulino@redhat.com> @@ -248,23 +248,28 @@ static void check_and_free_str(char *str, const char *cmp) static void test_visitor_in_struct_nested(TestInputVisitorData *data, const void *unused) { - UserDefNested *udp = NULL; + UserDefTwo *udp = NULL; Error *err = NULL; Visitor *v; - v = visitor_input_test_init(data, "{ 'string0': 'string0', 'dict1': { 'string1': 'string1', 'dict2': { 'userdef1': { 'integer': 42, 'string': 'string' }, 'string2': 'string2'}}}"); + v = visitor_input_test_init(data, "{ 'string0': 'string0', " + "'dict1': { 'string1': 'string1', " + "'dict2': { 'userdef': { 'integer': 42, " + "'string': 'string' }, 'string': 'string2'}}}"); - visit_type_UserDefNested(v, &udp, NULL, &err); + visit_type_UserDefTwo(v, &udp, NULL, &err); g_assert(!err); check_and_free_str(udp->string0, "string0"); - check_and_free_str(udp->dict1.string1, "string1"); - g_assert_cmpint(udp->dict1.dict2.userdef1->base->integer, ==, 42); - check_and_free_str(udp->dict1.dict2.userdef1->string, "string"); - check_and_free_str(udp->dict1.dict2.string2, "string2"); - g_assert(udp->dict1.has_dict3 == false); - - g_free(udp->dict1.dict2.userdef1); + check_and_free_str(udp->dict1->string1, "string1"); + g_assert_cmpint(udp->dict1->dict2->userdef->base->integer, ==, 42); + check_and_free_str(udp->dict1->dict2->userdef->string, "string"); + check_and_free_str(udp->dict1->dict2->string, "string2"); + g_assert(udp->dict1->has_dict3 == false); + + g_free(udp->dict1->dict2->userdef); + g_free(udp->dict1->dict2); + g_free(udp->dict1); g_free(udp); } @@ -293,23 +298,6 @@ static void test_visitor_in_list(TestInputVisitorData *data, qapi_free_UserDefOneList(head); } -static void test_visitor_in_union(TestInputVisitorData *data, - const void *unused) -{ - Visitor *v; - Error *err = NULL; - UserDefUnion *tmp; - - v = visitor_input_test_init(data, "{ 'type': 'b', 'integer': 41, 'data' : { 'integer': 42 } }"); - - visit_type_UserDefUnion(v, &tmp, NULL, &err); - g_assert(err == NULL); - g_assert_cmpint(tmp->kind, ==, USER_DEF_UNION_KIND_B); - g_assert_cmpint(tmp->integer, ==, 41); - g_assert_cmpint(tmp->b->integer, ==, 42); - qapi_free_UserDefUnion(tmp); -} - static void test_visitor_in_union_flat(TestInputVisitorData *data, const void *unused) { @@ -332,20 +320,20 @@ static void test_visitor_in_union_flat(TestInputVisitorData *data, qapi_free_UserDefFlatUnion(tmp); } -static void test_visitor_in_union_anon(TestInputVisitorData *data, - const void *unused) +static void test_visitor_in_alternate(TestInputVisitorData *data, + const void *unused) { Visitor *v; Error *err = NULL; - UserDefAnonUnion *tmp; + UserDefAlternate *tmp; v = visitor_input_test_init(data, "42"); - visit_type_UserDefAnonUnion(v, &tmp, NULL, &err); + visit_type_UserDefAlternate(v, &tmp, NULL, &err); g_assert(err == NULL); - g_assert_cmpint(tmp->kind, ==, USER_DEF_ANON_UNION_KIND_I); + g_assert_cmpint(tmp->kind, ==, USER_DEF_ALTERNATE_KIND_I); g_assert_cmpint(tmp->i, ==, 42); - qapi_free_UserDefAnonUnion(tmp); + qapi_free_UserDefAlternate(tmp); } static void test_native_list_integer_helper(TestInputVisitorData *data, @@ -670,55 +658,56 @@ int main(int argc, char **argv) input_visitor_test_add("/visitor/input/number", &in_visitor_data, test_visitor_in_number); input_visitor_test_add("/visitor/input/string", - &in_visitor_data, test_visitor_in_string); + &in_visitor_data, test_visitor_in_string); 
input_visitor_test_add("/visitor/input/enum", - &in_visitor_data, test_visitor_in_enum); + &in_visitor_data, test_visitor_in_enum); input_visitor_test_add("/visitor/input/struct", - &in_visitor_data, test_visitor_in_struct); + &in_visitor_data, test_visitor_in_struct); input_visitor_test_add("/visitor/input/struct-nested", - &in_visitor_data, test_visitor_in_struct_nested); + &in_visitor_data, test_visitor_in_struct_nested); input_visitor_test_add("/visitor/input/list", - &in_visitor_data, test_visitor_in_list); - input_visitor_test_add("/visitor/input/union", - &in_visitor_data, test_visitor_in_union); + &in_visitor_data, test_visitor_in_list); input_visitor_test_add("/visitor/input/union-flat", - &in_visitor_data, test_visitor_in_union_flat); - input_visitor_test_add("/visitor/input/union-anon", - &in_visitor_data, test_visitor_in_union_anon); + &in_visitor_data, test_visitor_in_union_flat); + input_visitor_test_add("/visitor/input/alternate", + &in_visitor_data, test_visitor_in_alternate); input_visitor_test_add("/visitor/input/errors", - &in_visitor_data, test_visitor_in_errors); + &in_visitor_data, test_visitor_in_errors); input_visitor_test_add("/visitor/input/native_list/int", - &in_visitor_data, - test_visitor_in_native_list_int); + &in_visitor_data, + test_visitor_in_native_list_int); input_visitor_test_add("/visitor/input/native_list/int8", - &in_visitor_data, - test_visitor_in_native_list_int8); + &in_visitor_data, + test_visitor_in_native_list_int8); input_visitor_test_add("/visitor/input/native_list/int16", - &in_visitor_data, - test_visitor_in_native_list_int16); + &in_visitor_data, + test_visitor_in_native_list_int16); input_visitor_test_add("/visitor/input/native_list/int32", - &in_visitor_data, - test_visitor_in_native_list_int32); + &in_visitor_data, + test_visitor_in_native_list_int32); input_visitor_test_add("/visitor/input/native_list/int64", - &in_visitor_data, - test_visitor_in_native_list_int64); + &in_visitor_data, + test_visitor_in_native_list_int64); input_visitor_test_add("/visitor/input/native_list/uint8", - &in_visitor_data, - test_visitor_in_native_list_uint8); + &in_visitor_data, + test_visitor_in_native_list_uint8); input_visitor_test_add("/visitor/input/native_list/uint16", - &in_visitor_data, - test_visitor_in_native_list_uint16); + &in_visitor_data, + test_visitor_in_native_list_uint16); input_visitor_test_add("/visitor/input/native_list/uint32", - &in_visitor_data, - test_visitor_in_native_list_uint32); + &in_visitor_data, + test_visitor_in_native_list_uint32); input_visitor_test_add("/visitor/input/native_list/uint64", - &in_visitor_data, test_visitor_in_native_list_uint64); + &in_visitor_data, + test_visitor_in_native_list_uint64); input_visitor_test_add("/visitor/input/native_list/bool", - &in_visitor_data, test_visitor_in_native_list_bool); + &in_visitor_data, test_visitor_in_native_list_bool); input_visitor_test_add("/visitor/input/native_list/str", - &in_visitor_data, test_visitor_in_native_list_string); + &in_visitor_data, + test_visitor_in_native_list_string); input_visitor_test_add("/visitor/input/native_list/number", - &in_visitor_data, test_visitor_in_native_list_number); + &in_visitor_data, + test_visitor_in_native_list_number); g_test_run(); diff --git a/tests/test-qmp-output-visitor.c b/tests/test-qmp-output-visitor.c index 74020de5e7..f8c9367e48 100644 --- a/tests/test-qmp-output-visitor.c +++ b/tests/test-qmp-output-visitor.c @@ -1,7 +1,7 @@ /* * QMP Output Visitor unit-tests. * - * Copyright (C) 2011 Red Hat Inc. 
+ * Copyright (C) 2011, 2015 Red Hat Inc. * * Authors: * Luiz Capitulino <lcapitulino@redhat.com> @@ -234,7 +234,7 @@ static void test_visitor_out_struct_nested(TestOutputVisitorData *data, { int64_t value = 42; Error *err = NULL; - UserDefNested *ud2; + UserDefTwo *ud2; QObject *obj; QDict *qdict, *dict1, *dict2, *dict3, *userdef; const char *string = "user def string"; @@ -244,21 +244,25 @@ static void test_visitor_out_struct_nested(TestOutputVisitorData *data, ud2 = g_malloc0(sizeof(*ud2)); ud2->string0 = g_strdup(strings[0]); - ud2->dict1.string1 = g_strdup(strings[1]); - ud2->dict1.dict2.userdef1 = g_malloc0(sizeof(UserDefOne)); - ud2->dict1.dict2.userdef1->string = g_strdup(string); - ud2->dict1.dict2.userdef1->base = g_new0(UserDefZero, 1); - ud2->dict1.dict2.userdef1->base->integer = value; - ud2->dict1.dict2.string2 = g_strdup(strings[2]); - - ud2->dict1.has_dict3 = true; - ud2->dict1.dict3.userdef2 = g_malloc0(sizeof(UserDefOne)); - ud2->dict1.dict3.userdef2->string = g_strdup(string); - ud2->dict1.dict3.userdef2->base = g_new0(UserDefZero, 1); - ud2->dict1.dict3.userdef2->base->integer = value; - ud2->dict1.dict3.string3 = g_strdup(strings[3]); - - visit_type_UserDefNested(data->ov, &ud2, "unused", &err); + ud2->dict1 = g_malloc0(sizeof(*ud2->dict1)); + ud2->dict1->string1 = g_strdup(strings[1]); + + ud2->dict1->dict2 = g_malloc0(sizeof(*ud2->dict1->dict2)); + ud2->dict1->dict2->userdef = g_new0(UserDefOne, 1); + ud2->dict1->dict2->userdef->string = g_strdup(string); + ud2->dict1->dict2->userdef->base = g_new0(UserDefZero, 1); + ud2->dict1->dict2->userdef->base->integer = value; + ud2->dict1->dict2->string = g_strdup(strings[2]); + + ud2->dict1->dict3 = g_malloc0(sizeof(*ud2->dict1->dict3)); + ud2->dict1->has_dict3 = true; + ud2->dict1->dict3->userdef = g_new0(UserDefOne, 1); + ud2->dict1->dict3->userdef->string = g_strdup(string); + ud2->dict1->dict3->userdef->base = g_new0(UserDefZero, 1); + ud2->dict1->dict3->userdef->base->integer = value; + ud2->dict1->dict3->string = g_strdup(strings[3]); + + visit_type_UserDefTwo(data->ov, &ud2, "unused", &err); g_assert(!err); obj = qmp_output_get_qobject(data->qov); @@ -275,22 +279,22 @@ static void test_visitor_out_struct_nested(TestOutputVisitorData *data, dict2 = qdict_get_qdict(dict1, "dict2"); g_assert_cmpint(qdict_size(dict2), ==, 2); - g_assert_cmpstr(qdict_get_str(dict2, "string2"), ==, strings[2]); - userdef = qdict_get_qdict(dict2, "userdef1"); + g_assert_cmpstr(qdict_get_str(dict2, "string"), ==, strings[2]); + userdef = qdict_get_qdict(dict2, "userdef"); g_assert_cmpint(qdict_size(userdef), ==, 2); g_assert_cmpint(qdict_get_int(userdef, "integer"), ==, value); g_assert_cmpstr(qdict_get_str(userdef, "string"), ==, string); dict3 = qdict_get_qdict(dict1, "dict3"); g_assert_cmpint(qdict_size(dict3), ==, 2); - g_assert_cmpstr(qdict_get_str(dict3, "string3"), ==, strings[3]); - userdef = qdict_get_qdict(dict3, "userdef2"); + g_assert_cmpstr(qdict_get_str(dict3, "string"), ==, strings[3]); + userdef = qdict_get_qdict(dict3, "userdef"); g_assert_cmpint(qdict_size(userdef), ==, 2); g_assert_cmpint(qdict_get_int(userdef, "integer"), ==, value); g_assert_cmpstr(qdict_get_str(userdef, "string"), ==, string); QDECREF(qdict); - qapi_free_UserDefNested(ud2); + qapi_free_UserDefTwo(ud2); } static void test_visitor_out_struct_errors(TestOutputVisitorData *data, @@ -398,7 +402,7 @@ static void test_visitor_out_list(TestOutputVisitorData *data, static void test_visitor_out_list_qapi_free(TestOutputVisitorData *data, const void *unused) { - 
UserDefNestedList *p, *head = NULL; + UserDefTwoList *p, *head = NULL; const char string[] = "foo bar"; int i, max_count = 1024; @@ -407,53 +411,21 @@ static void test_visitor_out_list_qapi_free(TestOutputVisitorData *data, p->value = g_malloc0(sizeof(*p->value)); p->value->string0 = g_strdup(string); - p->value->dict1.string1 = g_strdup(string); - p->value->dict1.dict2.userdef1 = g_malloc0(sizeof(UserDefOne)); - p->value->dict1.dict2.userdef1->string = g_strdup(string); - p->value->dict1.dict2.userdef1->base = g_new0(UserDefZero, 1); - p->value->dict1.dict2.userdef1->base->integer = 42; - p->value->dict1.dict2.string2 = g_strdup(string); - p->value->dict1.has_dict3 = false; + p->value->dict1 = g_new0(UserDefTwoDict, 1); + p->value->dict1->string1 = g_strdup(string); + p->value->dict1->dict2 = g_new0(UserDefTwoDictDict, 1); + p->value->dict1->dict2->userdef = g_new0(UserDefOne, 1); + p->value->dict1->dict2->userdef->string = g_strdup(string); + p->value->dict1->dict2->userdef->base = g_new0(UserDefZero, 1); + p->value->dict1->dict2->userdef->base->integer = 42; + p->value->dict1->dict2->string = g_strdup(string); + p->value->dict1->has_dict3 = false; p->next = head; head = p; } - qapi_free_UserDefNestedList(head); -} - -static void test_visitor_out_union(TestOutputVisitorData *data, - const void *unused) -{ - QObject *arg, *qvalue; - QDict *qdict, *value; - - Error *err = NULL; - - UserDefUnion *tmp = g_malloc0(sizeof(UserDefUnion)); - tmp->kind = USER_DEF_UNION_KIND_A; - tmp->integer = 41; - tmp->a = g_malloc0(sizeof(UserDefA)); - tmp->a->boolean = true; - - visit_type_UserDefUnion(data->ov, &tmp, NULL, &err); - g_assert(err == NULL); - arg = qmp_output_get_qobject(data->qov); - - g_assert(qobject_type(arg) == QTYPE_QDICT); - qdict = qobject_to_qdict(arg); - - g_assert_cmpstr(qdict_get_str(qdict, "type"), ==, "a"); - g_assert_cmpint(qdict_get_int(qdict, "integer"), ==, 41); - - qvalue = qdict_get(qdict, "data"); - g_assert(data != NULL); - g_assert(qobject_type(qvalue) == QTYPE_QDICT); - value = qobject_to_qdict(qvalue); - g_assert_cmpint(qdict_get_bool(value, "boolean"), ==, true); - - qapi_free_UserDefUnion(tmp); - QDECREF(qdict); + qapi_free_UserDefTwoList(head); } static void test_visitor_out_union_flat(TestOutputVisitorData *data, @@ -487,24 +459,24 @@ static void test_visitor_out_union_flat(TestOutputVisitorData *data, QDECREF(qdict); } -static void test_visitor_out_union_anon(TestOutputVisitorData *data, - const void *unused) +static void test_visitor_out_alternate(TestOutputVisitorData *data, + const void *unused) { QObject *arg; Error *err = NULL; - UserDefAnonUnion *tmp = g_malloc0(sizeof(UserDefAnonUnion)); - tmp->kind = USER_DEF_ANON_UNION_KIND_I; + UserDefAlternate *tmp = g_malloc0(sizeof(UserDefAlternate)); + tmp->kind = USER_DEF_ALTERNATE_KIND_I; tmp->i = 42; - visit_type_UserDefAnonUnion(data->ov, &tmp, NULL, &err); + visit_type_UserDefAlternate(data->ov, &tmp, NULL, &err); g_assert(err == NULL); arg = qmp_output_get_qobject(data->qov); g_assert(qobject_type(arg) == QTYPE_QINT); g_assert_cmpint(qint_get_int(qobject_to_qint(arg)), ==, 42); - qapi_free_UserDefAnonUnion(tmp); + qapi_free_UserDefAlternate(tmp); } static void test_visitor_out_empty(TestOutputVisitorData *data, @@ -862,38 +834,48 @@ int main(int argc, char **argv) &out_visitor_data, test_visitor_out_list); output_visitor_test_add("/visitor/output/list-qapi-free", &out_visitor_data, test_visitor_out_list_qapi_free); - output_visitor_test_add("/visitor/output/union", - &out_visitor_data, test_visitor_out_union); 
output_visitor_test_add("/visitor/output/union-flat", &out_visitor_data, test_visitor_out_union_flat); - output_visitor_test_add("/visitor/output/union-anon", - &out_visitor_data, test_visitor_out_union_anon); + output_visitor_test_add("/visitor/output/alternate", + &out_visitor_data, test_visitor_out_alternate); output_visitor_test_add("/visitor/output/empty", &out_visitor_data, test_visitor_out_empty); output_visitor_test_add("/visitor/output/native_list/int", - &out_visitor_data, test_visitor_out_native_list_int); + &out_visitor_data, + test_visitor_out_native_list_int); output_visitor_test_add("/visitor/output/native_list/int8", - &out_visitor_data, test_visitor_out_native_list_int8); + &out_visitor_data, + test_visitor_out_native_list_int8); output_visitor_test_add("/visitor/output/native_list/int16", - &out_visitor_data, test_visitor_out_native_list_int16); + &out_visitor_data, + test_visitor_out_native_list_int16); output_visitor_test_add("/visitor/output/native_list/int32", - &out_visitor_data, test_visitor_out_native_list_int32); + &out_visitor_data, + test_visitor_out_native_list_int32); output_visitor_test_add("/visitor/output/native_list/int64", - &out_visitor_data, test_visitor_out_native_list_int64); + &out_visitor_data, + test_visitor_out_native_list_int64); output_visitor_test_add("/visitor/output/native_list/uint8", - &out_visitor_data, test_visitor_out_native_list_uint8); + &out_visitor_data, + test_visitor_out_native_list_uint8); output_visitor_test_add("/visitor/output/native_list/uint16", - &out_visitor_data, test_visitor_out_native_list_uint16); + &out_visitor_data, + test_visitor_out_native_list_uint16); output_visitor_test_add("/visitor/output/native_list/uint32", - &out_visitor_data, test_visitor_out_native_list_uint32); + &out_visitor_data, + test_visitor_out_native_list_uint32); output_visitor_test_add("/visitor/output/native_list/uint64", - &out_visitor_data, test_visitor_out_native_list_uint64); + &out_visitor_data, + test_visitor_out_native_list_uint64); output_visitor_test_add("/visitor/output/native_list/bool", - &out_visitor_data, test_visitor_out_native_list_bool); + &out_visitor_data, + test_visitor_out_native_list_bool); output_visitor_test_add("/visitor/output/native_list/string", - &out_visitor_data, test_visitor_out_native_list_str); + &out_visitor_data, + test_visitor_out_native_list_str); output_visitor_test_add("/visitor/output/native_list/number", - &out_visitor_data, test_visitor_out_native_list_number); + &out_visitor_data, + test_visitor_out_native_list_number); g_test_run(); diff --git a/tests/test-visitor-serialization.c b/tests/test-visitor-serialization.c index 7ad1886397..fa86cae88a 100644 --- a/tests/test-visitor-serialization.c +++ b/tests/test-visitor-serialization.c @@ -1,6 +1,7 @@ /* * Unit-tests for visitor-based serialization * + * Copyright (C) 2014-2015 Red Hat, Inc. * Copyright IBM, Corp. 
2012 * * Authors: @@ -249,57 +250,62 @@ static void visit_struct(Visitor *v, void **native, Error **errp) visit_type_TestStruct(v, (TestStruct **)native, NULL, errp); } -static UserDefNested *nested_struct_create(void) +static UserDefTwo *nested_struct_create(void) { - UserDefNested *udnp = g_malloc0(sizeof(*udnp)); + UserDefTwo *udnp = g_malloc0(sizeof(*udnp)); udnp->string0 = strdup("test_string0"); - udnp->dict1.string1 = strdup("test_string1"); - udnp->dict1.dict2.userdef1 = g_malloc0(sizeof(UserDefOne)); - udnp->dict1.dict2.userdef1->base = g_new0(UserDefZero, 1); - udnp->dict1.dict2.userdef1->base->integer = 42; - udnp->dict1.dict2.userdef1->string = strdup("test_string"); - udnp->dict1.dict2.string2 = strdup("test_string2"); - udnp->dict1.has_dict3 = true; - udnp->dict1.dict3.userdef2 = g_malloc0(sizeof(UserDefOne)); - udnp->dict1.dict3.userdef2->base = g_new0(UserDefZero, 1); - udnp->dict1.dict3.userdef2->base->integer = 43; - udnp->dict1.dict3.userdef2->string = strdup("test_string"); - udnp->dict1.dict3.string3 = strdup("test_string3"); + udnp->dict1 = g_malloc0(sizeof(*udnp->dict1)); + udnp->dict1->string1 = strdup("test_string1"); + udnp->dict1->dict2 = g_malloc0(sizeof(*udnp->dict1->dict2)); + udnp->dict1->dict2->userdef = g_new0(UserDefOne, 1); + udnp->dict1->dict2->userdef->base = g_new0(UserDefZero, 1); + udnp->dict1->dict2->userdef->base->integer = 42; + udnp->dict1->dict2->userdef->string = strdup("test_string"); + udnp->dict1->dict2->string = strdup("test_string2"); + udnp->dict1->dict3 = g_malloc0(sizeof(*udnp->dict1->dict3)); + udnp->dict1->has_dict3 = true; + udnp->dict1->dict3->userdef = g_new0(UserDefOne, 1); + udnp->dict1->dict3->userdef->base = g_new0(UserDefZero, 1); + udnp->dict1->dict3->userdef->base->integer = 43; + udnp->dict1->dict3->userdef->string = strdup("test_string"); + udnp->dict1->dict3->string = strdup("test_string3"); return udnp; } -static void nested_struct_compare(UserDefNested *udnp1, UserDefNested *udnp2) +static void nested_struct_compare(UserDefTwo *udnp1, UserDefTwo *udnp2) { g_assert(udnp1); g_assert(udnp2); g_assert_cmpstr(udnp1->string0, ==, udnp2->string0); - g_assert_cmpstr(udnp1->dict1.string1, ==, udnp2->dict1.string1); - g_assert_cmpint(udnp1->dict1.dict2.userdef1->base->integer, ==, - udnp2->dict1.dict2.userdef1->base->integer); - g_assert_cmpstr(udnp1->dict1.dict2.userdef1->string, ==, - udnp2->dict1.dict2.userdef1->string); - g_assert_cmpstr(udnp1->dict1.dict2.string2, ==, udnp2->dict1.dict2.string2); - g_assert(udnp1->dict1.has_dict3 == udnp2->dict1.has_dict3); - g_assert_cmpint(udnp1->dict1.dict3.userdef2->base->integer, ==, - udnp2->dict1.dict3.userdef2->base->integer); - g_assert_cmpstr(udnp1->dict1.dict3.userdef2->string, ==, - udnp2->dict1.dict3.userdef2->string); - g_assert_cmpstr(udnp1->dict1.dict3.string3, ==, udnp2->dict1.dict3.string3); + g_assert_cmpstr(udnp1->dict1->string1, ==, udnp2->dict1->string1); + g_assert_cmpint(udnp1->dict1->dict2->userdef->base->integer, ==, + udnp2->dict1->dict2->userdef->base->integer); + g_assert_cmpstr(udnp1->dict1->dict2->userdef->string, ==, + udnp2->dict1->dict2->userdef->string); + g_assert_cmpstr(udnp1->dict1->dict2->string, ==, + udnp2->dict1->dict2->string); + g_assert(udnp1->dict1->has_dict3 == udnp2->dict1->has_dict3); + g_assert_cmpint(udnp1->dict1->dict3->userdef->base->integer, ==, + udnp2->dict1->dict3->userdef->base->integer); + g_assert_cmpstr(udnp1->dict1->dict3->userdef->string, ==, + udnp2->dict1->dict3->userdef->string); + g_assert_cmpstr(udnp1->dict1->dict3->string, 
==, + udnp2->dict1->dict3->string); } -static void nested_struct_cleanup(UserDefNested *udnp) +static void nested_struct_cleanup(UserDefTwo *udnp) { - qapi_free_UserDefNested(udnp); + qapi_free_UserDefTwo(udnp); } static void visit_nested_struct(Visitor *v, void **native, Error **errp) { - visit_type_UserDefNested(v, (UserDefNested **)native, NULL, errp); + visit_type_UserDefTwo(v, (UserDefTwo **)native, NULL, errp); } static void visit_nested_struct_list(Visitor *v, void **native, Error **errp) { - visit_type_UserDefNestedList(v, (UserDefNestedList **)native, NULL, errp); + visit_type_UserDefTwoList(v, (UserDefTwoList **)native, NULL, errp); } /* test cases */ @@ -715,13 +721,14 @@ static void test_nested_struct(gconstpointer opaque) { TestArgs *args = (TestArgs *) opaque; const SerializeOps *ops = args->ops; - UserDefNested *udnp = nested_struct_create(); - UserDefNested *udnp_copy = NULL; + UserDefTwo *udnp = nested_struct_create(); + UserDefTwo *udnp_copy = NULL; Error *err = NULL; void *serialize_data; - + ops->serialize(udnp, &serialize_data, visit_nested_struct, &err); - ops->deserialize((void **)&udnp_copy, serialize_data, visit_nested_struct, &err); + ops->deserialize((void **)&udnp_copy, serialize_data, visit_nested_struct, + &err); g_assert(err == NULL); nested_struct_compare(udnp, udnp_copy); @@ -737,18 +744,18 @@ static void test_nested_struct_list(gconstpointer opaque) { TestArgs *args = (TestArgs *) opaque; const SerializeOps *ops = args->ops; - UserDefNestedList *listp = NULL, *tmp, *tmp_copy, *listp_copy = NULL; + UserDefTwoList *listp = NULL, *tmp, *tmp_copy, *listp_copy = NULL; Error *err = NULL; void *serialize_data; int i = 0; for (i = 0; i < 8; i++) { - tmp = g_malloc0(sizeof(UserDefNestedList)); + tmp = g_new0(UserDefTwoList, 1); tmp->value = nested_struct_create(); tmp->next = listp; listp = tmp; } - + ops->serialize(listp, &serialize_data, visit_nested_struct_list, &err); ops->deserialize((void **)&listp_copy, serialize_data, visit_nested_struct_list, &err); @@ -764,8 +771,8 @@ static void test_nested_struct_list(gconstpointer opaque) listp_copy = listp_copy->next; } - qapi_free_UserDefNestedList(tmp); - qapi_free_UserDefNestedList(tmp_copy); + qapi_free_UserDefTwoList(tmp); + qapi_free_UserDefTwoList(tmp_copy); ops->cleanup(serialize_data); g_free(args); diff --git a/thread-pool.c b/thread-pool.c index e2cac8e4ff..ac909f4986 100644 --- a/thread-pool.c +++ b/thread-pool.c @@ -170,12 +170,12 @@ restart: if (elem->state != THREAD_DONE) { continue; } - if (elem->state == THREAD_DONE) { - trace_thread_pool_complete(pool, elem, elem->common.opaque, - elem->ret); - } - if (elem->state == THREAD_DONE && elem->common.cb) { - QLIST_REMOVE(elem, all); + + trace_thread_pool_complete(pool, elem, elem->common.opaque, + elem->ret); + QLIST_REMOVE(elem, all); + + if (elem->common.cb) { /* Read state before ret. 
*/ smp_rmb(); @@ -188,8 +188,6 @@ restart: qemu_aio_unref(elem); goto restart; } else { - /* remove the request */ - QLIST_REMOVE(elem, all); qemu_aio_unref(elem); } } diff --git a/translate-all.c b/translate-all.c index 11763c6c3a..536008f52d 100644 --- a/translate-all.c +++ b/translate-all.c @@ -59,6 +59,7 @@ #include "exec/cputlb.h" #include "translate-all.h" +#include "qemu/bitmap.h" #include "qemu/timer.h" //#define DEBUG_TB_INVALIDATE @@ -79,7 +80,7 @@ typedef struct PageDesc { /* in order to optimize self modifying code, we count the number of lookups we do to a given page to use a bitmap */ unsigned int code_write_count; - uint8_t *code_bitmap; + unsigned long *code_bitmap; #if defined(CONFIG_USER_ONLY) unsigned long flags; #endif @@ -389,18 +390,6 @@ static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc) void **lp; int i; -#if defined(CONFIG_USER_ONLY) - /* We can't use g_malloc because it may recurse into a locked mutex. */ -# define ALLOC(P, SIZE) \ - do { \ - P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE, \ - MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); \ - } while (0) -#else -# define ALLOC(P, SIZE) \ - do { P = g_malloc0(SIZE); } while (0) -#endif - /* Level 1. Always allocated. */ lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1)); @@ -412,7 +401,7 @@ static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc) if (!alloc) { return NULL; } - ALLOC(p, sizeof(void *) * V_L2_SIZE); + p = g_new0(void *, V_L2_SIZE); *lp = p; } @@ -424,12 +413,10 @@ static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc) if (!alloc) { return NULL; } - ALLOC(pd, sizeof(PageDesc) * V_L2_SIZE); + pd = g_new0(PageDesc, V_L2_SIZE); *lp = pd; } -#undef ALLOC - return pd + (index & (V_L2_SIZE - 1)); } @@ -978,39 +965,12 @@ void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr) tcg_ctx.tb_ctx.tb_phys_invalidate_count++; } -static inline void set_bits(uint8_t *tab, int start, int len) -{ - int end, mask, end1; - - end = start + len; - tab += start >> 3; - mask = 0xff << (start & 7); - if ((start & ~7) == (end & ~7)) { - if (start < end) { - mask &= ~(0xff << (end & 7)); - *tab |= mask; - } - } else { - *tab++ |= mask; - start = (start + 8) & ~7; - end1 = end & ~7; - while (start < end1) { - *tab++ = 0xff; - start += 8; - } - if (start < end) { - mask = ~(0xff << (end & 7)); - *tab |= mask; - } - } -} - static void build_page_bitmap(PageDesc *p) { int n, tb_start, tb_end; TranslationBlock *tb; - p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8); + p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE); tb = p->first_tb; while (tb != NULL) { @@ -1029,7 +989,7 @@ static void build_page_bitmap(PageDesc *p) tb_start = 0; tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK); } - set_bits(p->code_bitmap, tb_start, tb_end - tb_start); + bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start); tb = tb->page_next[n]; } } @@ -1219,7 +1179,6 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end, void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len) { PageDesc *p; - int offset, b; #if 0 if (1) { @@ -1235,8 +1194,11 @@ void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len) return; } if (p->code_bitmap) { - offset = start & ~TARGET_PAGE_MASK; - b = p->code_bitmap[offset >> 3] >> (offset & 7); + unsigned int nr; + unsigned long b; + + nr = start & ~TARGET_PAGE_MASK; + b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1)); if (b & ((1 << len) - 1)) { goto do_invalidate; } @@ -1454,14 +1416,17 @@ void tb_invalidate_phys_addr(AddressSpace *as, 
hwaddr addr) MemoryRegion *mr; hwaddr l = 1; + rcu_read_lock(); mr = address_space_translate(as, addr, &addr, &l, false); if (!(memory_region_is_ram(mr) || memory_region_is_romd(mr))) { + rcu_read_unlock(); return; } ram_addr = (memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK) + addr; tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0); + rcu_read_unlock(); } #endif /* !defined(CONFIG_USER_ONLY) */ diff --git a/ui/Makefile.objs b/ui/Makefile.objs index 13b5cfbe41..029a42a688 100644 --- a/ui/Makefile.objs +++ b/ui/Makefile.objs @@ -21,7 +21,20 @@ sdl.mo-objs := sdl.o sdl_zoom.o endif ifeq ($(CONFIG_SDLABI),2.0) sdl.mo-objs := sdl2.o sdl2-input.o sdl2-2d.o +ifeq ($(CONFIG_OPENGL),y) +sdl.mo-objs += sdl2-gl.o +endif endif sdl.mo-cflags := $(SDL_CFLAGS) +ifeq ($(CONFIG_OPENGL),y) +common-obj-y += shader.o +common-obj-y += console-gl.o +endif + gtk.o-cflags := $(GTK_CFLAGS) $(VTE_CFLAGS) +shader.o-cflags += $(OPENGL_CFLAGS) +console-gl.o-cflags += $(OPENGL_CFLAGS) + +shader.o-libs += $(OPENGL_LIBS) +console-gl.o-libs += $(OPENGL_LIBS) diff --git a/ui/console-gl.c b/ui/console-gl.c new file mode 100644 index 0000000000..cb45cf8a29 --- /dev/null +++ b/ui/console-gl.c @@ -0,0 +1,168 @@ +/* + * QEMU graphical console -- opengl helper bits + * + * Copyright (c) 2014 Red Hat + * + * Authors: + * Gerd Hoffmann <kraxel@redhat.com> + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ +#include "qemu-common.h" +#include "ui/console.h" +#include "ui/shader.h" + +#include "shader/texture-blit-vert.h" +#include "shader/texture-blit-frag.h" + +struct ConsoleGLState { + GLint texture_blit_prog; +}; + +/* ---------------------------------------------------------------------- */ + +ConsoleGLState *console_gl_init_context(void) +{ + ConsoleGLState *gls = g_new0(ConsoleGLState, 1); + + gls->texture_blit_prog = qemu_gl_create_compile_link_program + (texture_blit_vert_src, texture_blit_frag_src); + if (!gls->texture_blit_prog) { + exit(1); + } + + return gls; +} + +void console_gl_fini_context(ConsoleGLState *gls) +{ + if (!gls) { + return; + } + g_free(gls); +} + +bool console_gl_check_format(DisplayChangeListener *dcl, + pixman_format_code_t format) +{ + switch (format) { + case PIXMAN_BE_b8g8r8x8: + case PIXMAN_BE_b8g8r8a8: + case PIXMAN_r5g6b5: + return true; + default: + return false; + } +} + +void surface_gl_create_texture(ConsoleGLState *gls, + DisplaySurface *surface) +{ + assert(gls); + assert(surface_stride(surface) % surface_bytes_per_pixel(surface) == 0); + + switch (surface->format) { + case PIXMAN_BE_b8g8r8x8: + case PIXMAN_BE_b8g8r8a8: + surface->glformat = GL_BGRA_EXT; + surface->gltype = GL_UNSIGNED_BYTE; + break; + case PIXMAN_r5g6b5: + surface->glformat = GL_RGB; + surface->gltype = GL_UNSIGNED_SHORT_5_6_5; + break; + default: + g_assert_not_reached(); + } + + glGenTextures(1, &surface->texture); + glEnable(GL_TEXTURE_2D); + glBindTexture(GL_TEXTURE_2D, surface->texture); + glPixelStorei(GL_UNPACK_ROW_LENGTH_EXT, + surface_stride(surface) / surface_bytes_per_pixel(surface)); + glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, + surface_width(surface), + surface_height(surface), + 0, surface->glformat, surface->gltype, + surface_data(surface)); + + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); +} + +void surface_gl_update_texture(ConsoleGLState *gls, + DisplaySurface *surface, + int x, int y, int w, int h) +{ + uint8_t *data = (void *)surface_data(surface); + + assert(gls); + + glPixelStorei(GL_UNPACK_ROW_LENGTH_EXT, + surface_stride(surface) / surface_bytes_per_pixel(surface)); + glTexSubImage2D(GL_TEXTURE_2D, 0, + x, y, w, h, + surface->glformat, surface->gltype, + data + surface_stride(surface) * y + + surface_bytes_per_pixel(surface) * x); +} + +void surface_gl_render_texture(ConsoleGLState *gls, + DisplaySurface *surface) +{ + assert(gls); + + glClearColor(0.1f, 0.1f, 0.1f, 0.0f); + glClear(GL_COLOR_BUFFER_BIT); + + qemu_gl_run_texture_blit(gls->texture_blit_prog); +} + +void surface_gl_destroy_texture(ConsoleGLState *gls, + DisplaySurface *surface) +{ + if (!surface || !surface->texture) { + return; + } + glDeleteTextures(1, &surface->texture); + surface->texture = 0; +} + +void surface_gl_setup_viewport(ConsoleGLState *gls, + DisplaySurface *surface, + int ww, int wh) +{ + int gw, gh, stripe; + float sw, sh; + + assert(gls); + + gw = surface_width(surface); + gh = surface_height(surface); + + sw = (float)ww/gw; + sh = (float)wh/gh; + if (sw < sh) { + stripe = wh - wh*sw/sh; + glViewport(0, stripe / 2, ww, wh - stripe); + } else { + stripe = ww - ww*sh/sw; + glViewport(stripe / 2, 0, ww - stripe, wh); + } +} diff --git a/ui/console.c b/ui/console.c index b15ca87f0f..406c36b864 100644 --- a/ui/console.c +++ b/ui/console.c @@ -126,6 +126,7 @@ struct QemuConsole { Object *device; uint32_t head; QemuUIInfo ui_info; + QEMUTimer *ui_timer; const GraphicHwOps *hw_ops; void *hw; @@ 
-269,7 +270,7 @@ void graphic_hw_invalidate(QemuConsole *con) } } -static void ppm_save(const char *filename, struct DisplaySurface *ds, +static void ppm_save(const char *filename, DisplaySurface *ds, Error **errp) { int width = pixman_image_get_width(ds->image); @@ -1383,14 +1384,33 @@ void unregister_displaychangelistener(DisplayChangeListener *dcl) gui_setup_refresh(ds); } +static void dpy_set_ui_info_timer(void *opaque) +{ + QemuConsole *con = opaque; + + con->hw_ops->ui_info(con->hw, con->head, &con->ui_info); +} + +bool dpy_ui_info_supported(QemuConsole *con) +{ + return con->hw_ops->ui_info != NULL; +} + int dpy_set_ui_info(QemuConsole *con, QemuUIInfo *info) { assert(con != NULL); con->ui_info = *info; - if (con->hw_ops->ui_info) { - return con->hw_ops->ui_info(con->hw, con->head, info); + if (!dpy_ui_info_supported(con)) { + return -1; } - return -1; + + /* + * Typically we get a flood of these as the user resizes the window. + * Wait until the dust has settled (one second without updates), then + * go notify the guest. + */ + timer_mod(con->ui_timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 1000); + return 0; } void dpy_gfx_update(QemuConsole *con, int x, int y, int w, int h) @@ -1535,7 +1555,7 @@ void dpy_text_update(QemuConsole *con, int x, int y, int w, int h) void dpy_text_resize(QemuConsole *con, int w, int h) { DisplayState *s = con->ds; - struct DisplayChangeListener *dcl; + DisplayChangeListener *dcl; if (!qemu_console_is_visible(con)) { return; @@ -1724,6 +1744,7 @@ QemuConsole *graphic_console_init(DeviceState *dev, uint32_t head, ds = get_alloc_displaystate(); trace_console_gfx_new(); s = new_console(ds, GRAPHIC_CONSOLE, head); + s->ui_timer = timer_new_ms(QEMU_CLOCK_REALTIME, dpy_set_ui_info_timer, s); graphic_console_set_hwops(s, hw_ops, opaque); if (dev) { object_property_set_link(OBJECT(s), OBJECT(dev), "device", @@ -1788,6 +1809,21 @@ bool qemu_console_is_fixedsize(QemuConsole *con) return con && (con->console_type != TEXT_CONSOLE); } +char *qemu_console_get_label(QemuConsole *con) +{ + if (con->console_type == GRAPHIC_CONSOLE) { + if (con->device) { + return g_strdup(object_get_typename(con->device)); + } + return g_strdup("VGA"); + } else { + if (con->chr && con->chr->label) { + return g_strdup(con->chr->label); + } + return g_strdup_printf("vc%d", con->index); + } +} + int qemu_console_get_index(QemuConsole *con) { if (con == NULL) { @@ -34,24 +34,11 @@ #define GETTEXT_PACKAGE "qemu" #define LOCALEDIR "po" -#ifdef _WIN32 -# define _WIN32_WINNT 0x0601 /* needed to get definition of MAPVK_VK_TO_VSC */ -#endif - #include "qemu-common.h" -#ifdef CONFIG_PRAGMA_DIAGNOSTIC_AVAILABLE -/* Work around an -Wstrict-prototypes warning in GTK headers */ -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wstrict-prototypes" -#endif -#include <gtk/gtk.h> -#ifdef CONFIG_PRAGMA_DIAGNOSTIC_AVAILABLE -#pragma GCC diagnostic pop -#endif - +#include "ui/console.h" +#include "ui/gtk.h" -#include <gdk/gdkkeysyms.h> #include <glib/gi18n.h> #include <locale.h> #if defined(CONFIG_VTE) @@ -60,7 +47,6 @@ #include <math.h> #include "trace.h" -#include "ui/console.h" #include "ui/input.h" #include "sysemu/sysemu.h" #include "qmp-commands.h" @@ -68,10 +54,6 @@ #include "keymaps.h" #include "sysemu/char.h" #include "qom/object.h" -#ifdef GDK_WINDOWING_X11 -#include <gdk/gdkx.h> -#include <X11/XKBlib.h> -#endif #define MAX_VCS 10 #define VC_WINDOW_X_MIN 320 @@ -99,15 +81,6 @@ # define VTE_RESIZE_HACK 1 #endif -/* Compatibility define to let us build on both Gtk2 and Gtk3 */ -#if 
GTK_CHECK_VERSION(3, 0, 0) -static inline void gdk_drawable_get_size(GdkWindow *w, gint *ww, gint *wh) -{ - *ww = gdk_window_get_width(w); - *wh = gdk_window_get_height(w); -} -#endif - #if !GTK_CHECK_VERSION(2, 20, 0) #define gtk_widget_get_realized(widget) GTK_WIDGET_REALIZED(widget) #endif @@ -138,47 +111,6 @@ static const int modifier_keycode[] = { 0x2a, 0x36, 0x1d, 0x9d, 0x38, 0xb8, 0xdb, 0xdd, }; -typedef struct GtkDisplayState GtkDisplayState; - -typedef struct VirtualGfxConsole { - GtkWidget *drawing_area; - DisplayChangeListener dcl; - DisplaySurface *ds; - pixman_image_t *convert; - cairo_surface_t *surface; - double scale_x; - double scale_y; -} VirtualGfxConsole; - -#if defined(CONFIG_VTE) -typedef struct VirtualVteConsole { - GtkWidget *box; - GtkWidget *scrollbar; - GtkWidget *terminal; - CharDriverState *chr; -} VirtualVteConsole; -#endif - -typedef enum VirtualConsoleType { - GD_VC_GFX, - GD_VC_VTE, -} VirtualConsoleType; - -typedef struct VirtualConsole { - GtkDisplayState *s; - char *label; - GtkWidget *window; - GtkWidget *menu_item; - GtkWidget *tab_item; - VirtualConsoleType type; - union { - VirtualGfxConsole gfx; -#if defined(CONFIG_VTE) - VirtualVteConsole vte; -#endif - }; -} VirtualConsole; - struct GtkDisplayState { GtkWidget *window; @@ -230,6 +162,7 @@ struct GtkDisplayState { bool modifier_pressed[ARRAY_SIZE(modifier_keycode)]; bool has_evdev; + bool ignore_keys; }; static void gd_grab_pointer(VirtualConsole *vc); @@ -290,7 +223,8 @@ static void gd_update_cursor(VirtualConsole *vc) GtkDisplayState *s = vc->s; GdkWindow *window; - if (vc->type != GD_VC_GFX) { + if (vc->type != GD_VC_GFX || + !qemu_console_is_graphic(vc->gfx.dcl.con)) { return; } @@ -363,6 +297,9 @@ static void gd_update_geometry_hints(VirtualConsole *vc) GtkWindow *geo_window; if (vc->type == GD_VC_GFX) { + if (!vc->gfx.ds) { + return; + } if (s->free_scale) { geo.min_width = surface_width(vc->gfx.ds) * VC_SCALE_MIN; geo.min_height = surface_height(vc->gfx.ds) * VC_SCALE_MIN; @@ -427,7 +364,8 @@ static void gtk_release_modifiers(GtkDisplayState *s) VirtualConsole *vc = gd_vc_find_current(s); int i, keycode; - if (vc->type != GD_VC_GFX) { + if (vc->type != GD_VC_GFX || + !qemu_console_is_graphic(vc->gfx.dcl.con)) { return; } for (i = 0; i < ARRAY_SIZE(modifier_keycode); i++) { @@ -455,6 +393,7 @@ static void gd_update(DisplayChangeListener *dcl, int x, int y, int w, int h) { VirtualConsole *vc = container_of(dcl, VirtualConsole, gfx.dcl); + GdkWindow *win; int x1, x2, y1, y2; int mx, my; int fbw, fbh; @@ -481,8 +420,11 @@ static void gd_update(DisplayChangeListener *dcl, fbw = surface_width(vc->gfx.ds) * vc->gfx.scale_x; fbh = surface_height(vc->gfx.ds) * vc->gfx.scale_y; - gdk_drawable_get_size(gtk_widget_get_window(vc->gfx.drawing_area), - &ww, &wh); + win = gtk_widget_get_window(vc->gfx.drawing_area); + if (!win) { + return; + } + gdk_drawable_get_size(win, &ww, &wh); mx = my = 0; if (ww > fbw) { @@ -521,6 +463,8 @@ static void gd_mouse_set(DisplayChangeListener *dcl, gdk_device_warp(gdk_device_manager_get_client_pointer(mgr), gtk_widget_get_screen(vc->gfx.drawing_area), x_root, y_root); + vc->s->last_x = x; + vc->s->last_y = y; } #else static void gd_mouse_set(DisplayChangeListener *dcl, @@ -574,22 +518,28 @@ static void gd_switch(DisplayChangeListener *dcl, VirtualConsole *vc = container_of(dcl, VirtualConsole, gfx.dcl); bool resized = true; - trace_gd_switch(vc->label, surface_width(surface), surface_height(surface)); + trace_gd_switch(vc->label, + surface ? 
surface_width(surface) : 0, + surface ? surface_height(surface) : 0); if (vc->gfx.surface) { cairo_surface_destroy(vc->gfx.surface); + vc->gfx.surface = NULL; + } + if (vc->gfx.convert) { + pixman_image_unref(vc->gfx.convert); + vc->gfx.convert = NULL; } - if (vc->gfx.ds && + if (vc->gfx.ds && surface && surface_width(vc->gfx.ds) == surface_width(surface) && surface_height(vc->gfx.ds) == surface_height(surface)) { resized = false; } vc->gfx.ds = surface; - if (vc->gfx.convert) { - pixman_image_unref(vc->gfx.convert); - vc->gfx.convert = NULL; + if (!surface) { + return; } if (surface->format == PIXMAN_x8r8g8b8) { @@ -690,6 +640,9 @@ static gboolean gd_draw_event(GtkWidget *widget, cairo_t *cr, void *opaque) if (!gtk_widget_get_realized(widget)) { return FALSE; } + if (!vc->gfx.ds) { + return FALSE; + } fbw = surface_width(vc->gfx.ds); fbh = surface_height(vc->gfx.ds); @@ -771,6 +724,10 @@ static gboolean gd_motion_event(GtkWidget *widget, GdkEventMotion *motion, int fbh, fbw; int ww, wh; + if (!vc->gfx.ds) { + return TRUE; + } + fbw = surface_width(vc->gfx.ds) * vc->gfx.scale_x; fbh = surface_height(vc->gfx.ds) * vc->gfx.scale_y; @@ -945,6 +902,23 @@ static int gd_map_keycode(GtkDisplayState *s, GdkDisplay *dpy, int gdk_keycode) return qemu_keycode; } +static gboolean gd_text_key_down(GtkWidget *widget, + GdkEventKey *key, void *opaque) +{ + VirtualConsole *vc = opaque; + QemuConsole *con = vc->gfx.dcl.con; + + if (key->length) { + kbd_put_string_console(con, key->string, key->length); + } else { + int num = gd_map_keycode(vc->s, gtk_widget_get_display(widget), + key->hardware_keycode); + int qcode = qemu_input_key_number_to_qcode(num); + kbd_put_qcode_console(con, qcode); + } + return TRUE; +} + static gboolean gd_key_event(GtkWidget *widget, GdkEventKey *key, void *opaque) { VirtualConsole *vc = opaque; @@ -953,6 +927,11 @@ static gboolean gd_key_event(GtkWidget *widget, GdkEventKey *key, void *opaque) int qemu_keycode; int i; + if (s->ignore_keys) { + s->ignore_keys = (key->type == GDK_KEY_PRESS); + return TRUE; + } + if (key->keyval == GDK_KEY_Pause) { qemu_input_event_send_key_qcode(vc->gfx.dcl.con, Q_KEY_CODE_PAUSE, key->type == GDK_KEY_PRESS); @@ -1021,22 +1000,26 @@ static void gd_menu_switch_vc(GtkMenuItem *item, void *opaque) GtkDisplayState *s = opaque; VirtualConsole *vc = gd_vc_find_by_menu(s); GtkNotebook *nb = GTK_NOTEBOOK(s->notebook); - GtkWidget *child; gint page; gtk_release_modifiers(s); if (vc) { page = gtk_notebook_page_num(nb, vc->tab_item); gtk_notebook_set_current_page(nb, page); - child = gtk_notebook_get_nth_page(nb, page); - gtk_widget_grab_focus(child); + gtk_widget_grab_focus(vc->focus); } + s->ignore_keys = false; } static void gd_accel_switch_vc(void *opaque) { VirtualConsole *vc = opaque; + gtk_check_menu_item_set_active(GTK_CHECK_MENU_ITEM(vc->menu_item), TRUE); +#if !GTK_CHECK_VERSION(3, 0, 0) + /* GTK2 sends the accel key to the target console - ignore this until */ + vc->s->ignore_keys = true; +#endif } static void gd_menu_show_tabs(GtkMenuItem *item, void *opaque) @@ -1086,7 +1069,8 @@ static void gd_menu_untabify(GtkMenuItem *item, void *opaque) GtkDisplayState *s = opaque; VirtualConsole *vc = gd_vc_find_current(s); - if (vc->type == GD_VC_GFX) { + if (vc->type == GD_VC_GFX && + qemu_console_is_graphic(vc->gfx.dcl.con)) { gtk_check_menu_item_set_active(GTK_CHECK_MENU_ITEM(s->grab_item), FALSE); } @@ -1099,11 +1083,14 @@ static void gd_menu_untabify(GtkMenuItem *item, void *opaque) G_CALLBACK(gd_tab_window_close), vc); gtk_widget_show_all(vc->window); 
- GtkAccelGroup *ag = gtk_accel_group_new(); - gtk_window_add_accel_group(GTK_WINDOW(vc->window), ag); + if (qemu_console_is_graphic(vc->gfx.dcl.con)) { + GtkAccelGroup *ag = gtk_accel_group_new(); + gtk_window_add_accel_group(GTK_WINDOW(vc->window), ag); - GClosure *cb = g_cclosure_new_swap(G_CALLBACK(gd_win_grab), vc, NULL); - gtk_accel_group_connect(ag, GDK_KEY_g, HOTKEY_MODIFIERS, 0, cb); + GClosure *cb = g_cclosure_new_swap(G_CALLBACK(gd_win_grab), + vc, NULL); + gtk_accel_group_connect(ag, GDK_KEY_g, HOTKEY_MODIFIERS, 0, cb); + } gd_update_geometry_hints(vc); gd_update_caption(s); @@ -1120,8 +1107,10 @@ static void gd_menu_full_screen(GtkMenuItem *item, void *opaque) gtk_widget_hide(s->menu_bar); if (vc->type == GD_VC_GFX) { gtk_widget_set_size_request(vc->gfx.drawing_area, -1, -1); - gtk_check_menu_item_set_active(GTK_CHECK_MENU_ITEM(s->grab_item), - TRUE); + if (qemu_console_is_graphic(vc->gfx.dcl.con)) { + gtk_check_menu_item_set_active + (GTK_CHECK_MENU_ITEM(s->grab_item), TRUE); + } } gtk_window_fullscreen(GTK_WINDOW(s->window)); s->full_screen = TRUE; @@ -1370,7 +1359,8 @@ static void gd_change_page(GtkNotebook *nb, gpointer arg1, guint arg2, #endif gtk_check_menu_item_set_active(GTK_CHECK_MENU_ITEM(vc->menu_item), TRUE); - on_vga = (vc->type == GD_VC_GFX); + on_vga = (vc->type == GD_VC_GFX && + qemu_console_is_graphic(vc->gfx.dcl.con)); if (!on_vga) { gtk_check_menu_item_set_active(GTK_CHECK_MENU_ITEM(s->grab_item), FALSE); @@ -1421,6 +1411,19 @@ static gboolean gd_focus_out_event(GtkWidget *widget, return TRUE; } +static gboolean gd_configure(GtkWidget *widget, + GdkEventConfigure *cfg, gpointer opaque) +{ + VirtualConsole *vc = opaque; + QemuUIInfo info; + + memset(&info, 0, sizeof(info)); + info.width = cfg->width; + info.height = cfg->height; + dpy_set_ui_info(vc->gfx.dcl.con, &info); + return FALSE; +} + /** Virtual Console Callbacks **/ static GSList *gd_vc_menu_init(GtkDisplayState *s, VirtualConsole *vc, @@ -1542,6 +1545,7 @@ static GSList *gd_vc_vte_init(GtkDisplayState *s, VirtualConsole *vc, vc->type = GD_VC_VTE; vc->tab_item = box; + vc->focus = vc->vte.terminal; gtk_notebook_append_page(GTK_NOTEBOOK(s->notebook), vc->tab_item, gtk_label_new(vc->label)); @@ -1577,25 +1581,32 @@ static void gd_connect_vc_gfx_signals(VirtualConsole *vc) g_signal_connect(vc->gfx.drawing_area, "expose-event", G_CALLBACK(gd_expose_event), vc); #endif - g_signal_connect(vc->gfx.drawing_area, "event", - G_CALLBACK(gd_event), vc); - g_signal_connect(vc->gfx.drawing_area, "button-press-event", - G_CALLBACK(gd_button_event), vc); - g_signal_connect(vc->gfx.drawing_area, "button-release-event", - G_CALLBACK(gd_button_event), vc); - g_signal_connect(vc->gfx.drawing_area, "scroll-event", - G_CALLBACK(gd_scroll_event), vc); - g_signal_connect(vc->gfx.drawing_area, "key-press-event", - G_CALLBACK(gd_key_event), vc); - g_signal_connect(vc->gfx.drawing_area, "key-release-event", - G_CALLBACK(gd_key_event), vc); - - g_signal_connect(vc->gfx.drawing_area, "enter-notify-event", - G_CALLBACK(gd_enter_event), vc); - g_signal_connect(vc->gfx.drawing_area, "leave-notify-event", - G_CALLBACK(gd_leave_event), vc); - g_signal_connect(vc->gfx.drawing_area, "focus-out-event", - G_CALLBACK(gd_focus_out_event), vc); + if (qemu_console_is_graphic(vc->gfx.dcl.con)) { + g_signal_connect(vc->gfx.drawing_area, "event", + G_CALLBACK(gd_event), vc); + g_signal_connect(vc->gfx.drawing_area, "button-press-event", + G_CALLBACK(gd_button_event), vc); + g_signal_connect(vc->gfx.drawing_area, "button-release-event", + 
G_CALLBACK(gd_button_event), vc); + g_signal_connect(vc->gfx.drawing_area, "scroll-event", + G_CALLBACK(gd_scroll_event), vc); + g_signal_connect(vc->gfx.drawing_area, "key-press-event", + G_CALLBACK(gd_key_event), vc); + g_signal_connect(vc->gfx.drawing_area, "key-release-event", + G_CALLBACK(gd_key_event), vc); + + g_signal_connect(vc->gfx.drawing_area, "enter-notify-event", + G_CALLBACK(gd_enter_event), vc); + g_signal_connect(vc->gfx.drawing_area, "leave-notify-event", + G_CALLBACK(gd_leave_event), vc); + g_signal_connect(vc->gfx.drawing_area, "focus-out-event", + G_CALLBACK(gd_focus_out_event), vc); + g_signal_connect(vc->gfx.drawing_area, "configure-event", + G_CALLBACK(gd_configure), vc); + } else { + g_signal_connect(vc->gfx.drawing_area, "key-press-event", + G_CALLBACK(gd_text_key_down), vc); + } } static void gd_connect_signals(GtkDisplayState *s) @@ -1679,15 +1690,7 @@ static GSList *gd_vc_gfx_init(GtkDisplayState *s, VirtualConsole *vc, QemuConsole *con, int idx, GSList *group, GtkWidget *view_menu) { - Object *obj; - - obj = object_property_get_link(OBJECT(con), "device", NULL); - if (obj) { - vc->label = g_strdup_printf("%s", object_get_typename(obj)); - } else { - vc->label = g_strdup_printf("VGA"); - } - + vc->label = qemu_console_get_label(con); vc->s = s; vc->gfx.scale_x = 1.0; vc->gfx.scale_y = 1.0; @@ -1706,16 +1709,21 @@ static GSList *gd_vc_gfx_init(GtkDisplayState *s, VirtualConsole *vc, vc->type = GD_VC_GFX; vc->tab_item = vc->gfx.drawing_area; + vc->focus = vc->gfx.drawing_area; gtk_notebook_append_page(GTK_NOTEBOOK(s->notebook), vc->tab_item, gtk_label_new(vc->label)); - gd_connect_vc_gfx_signals(vc); - - group = gd_vc_menu_init(s, vc, idx, group, view_menu); vc->gfx.dcl.ops = &dcl_ops; vc->gfx.dcl.con = con; register_displaychangelistener(&vc->gfx.dcl); + gd_connect_vc_gfx_signals(vc); + group = gd_vc_menu_init(s, vc, idx, group, view_menu); + + if (dpy_ui_info_supported(vc->gfx.dcl.con)) { + gtk_menu_item_activate(GTK_MENU_ITEM(s->zoom_fit_item)); + } + return group; } @@ -1787,7 +1795,7 @@ static GtkWidget *gd_create_menu_view(GtkDisplayState *s) /* gfx */ for (vc = 0;; vc++) { con = qemu_console_lookup_by_index(vc); - if (!con || !qemu_console_is_graphic(con)) { + if (!con) { break; } group = gd_vc_gfx_init(s, &s->vc[vc], con, diff --git a/ui/input-legacy.c b/ui/input-legacy.c index 2d4ca19740..3e9bb380e5 100644 --- a/ui/input-legacy.c +++ b/ui/input-legacy.c @@ -57,8 +57,6 @@ struct QEMUPutLEDEntry { static QTAILQ_HEAD(, QEMUPutLEDEntry) led_handlers = QTAILQ_HEAD_INITIALIZER(led_handlers); -static QTAILQ_HEAD(, QEMUPutMouseEntry) mouse_handlers = - QTAILQ_HEAD_INITIALIZER(mouse_handlers); int index_from_key(const char *key) { @@ -908,6 +908,16 @@ static const DisplayChangeListenerOps dcl_ops = { .dpy_cursor_define = sdl_mouse_define, }; +void sdl_display_early_init(int opengl) +{ + if (opengl == 1 /* on */) { + fprintf(stderr, + "SDL1 display code has no opengl support.\n" + "Please recompile qemu with SDL2, using\n" + "./configure --enable-sdl --with-sdlabi=2.0\n"); + } +} + void sdl_display_init(DisplayState *ds, int full_screen, int no_frame) { int flags; diff --git a/ui/sdl2-2d.c b/ui/sdl2-2d.c index f907c21895..d0b340f956 100644 --- a/ui/sdl2-2d.c +++ b/ui/sdl2-2d.c @@ -23,12 +23,6 @@ */ /* Ported SDL 1.2 code to 2.0 by Dave Airlie. */ -/* Avoid compiler warning because macro is redefined in SDL_syswm.h. 
*/ -#undef WIN32_LEAN_AND_MEAN - -#include <SDL.h> -#include <SDL_syswm.h> - #include "qemu-common.h" #include "ui/console.h" #include "ui/input.h" @@ -42,6 +36,8 @@ void sdl2_2d_update(DisplayChangeListener *dcl, DisplaySurface *surf = qemu_console_surface(dcl->con); SDL_Rect rect; + assert(!scon->opengl); + if (!surf) { return; } @@ -67,6 +63,8 @@ void sdl2_2d_switch(DisplayChangeListener *dcl, DisplaySurface *old_surface = scon->surface; int format = 0; + assert(!scon->opengl); + scon->surface = new_surface; if (scon->texture) { @@ -91,10 +89,21 @@ void sdl2_2d_switch(DisplayChangeListener *dcl, surface_width(new_surface), surface_height(new_surface)); - if (surface_bits_per_pixel(scon->surface) == 16) { + switch (surface_format(scon->surface)) { + case PIXMAN_x1r5g5b5: + format = SDL_PIXELFORMAT_ARGB1555; + break; + case PIXMAN_r5g6b5: format = SDL_PIXELFORMAT_RGB565; - } else if (surface_bits_per_pixel(scon->surface) == 32) { + break; + case PIXMAN_x8r8g8b8: format = SDL_PIXELFORMAT_ARGB8888; + break; + case PIXMAN_r8g8b8x8: + format = SDL_PIXELFORMAT_RGBA8888; + break; + default: + g_assert_not_reached(); } scon->texture = SDL_CreateTexture(scon->real_renderer, format, SDL_TEXTUREACCESS_STREAMING, @@ -107,12 +116,15 @@ void sdl2_2d_refresh(DisplayChangeListener *dcl) { struct sdl2_console *scon = container_of(dcl, struct sdl2_console, dcl); + assert(!scon->opengl); graphic_hw_update(dcl->con); sdl2_poll_events(scon); } void sdl2_2d_redraw(struct sdl2_console *scon) { + assert(!scon->opengl); + if (!scon->surface) { return; } diff --git a/ui/sdl2-gl.c b/ui/sdl2-gl.c new file mode 100644 index 0000000000..b604c0671e --- /dev/null +++ b/ui/sdl2-gl.c @@ -0,0 +1,112 @@ +/* + * QEMU SDL display driver -- opengl support + * + * Copyright (c) 2014 Red Hat + * + * Authors: + * Gerd Hoffmann <kraxel@redhat.com> + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu-common.h" +#include "ui/console.h" +#include "ui/input.h" +#include "ui/sdl2.h" +#include "sysemu/sysemu.h" + +static void sdl2_gl_render_surface(struct sdl2_console *scon) +{ + int ww, wh; + + SDL_GL_MakeCurrent(scon->real_window, scon->winctx); + + SDL_GetWindowSize(scon->real_window, &ww, &wh); + surface_gl_setup_viewport(scon->gls, scon->surface, ww, wh); + + surface_gl_render_texture(scon->gls, scon->surface); + SDL_GL_SwapWindow(scon->real_window); +} + +void sdl2_gl_update(DisplayChangeListener *dcl, + int x, int y, int w, int h) +{ + struct sdl2_console *scon = container_of(dcl, struct sdl2_console, dcl); + + assert(scon->opengl); + + SDL_GL_MakeCurrent(scon->real_window, scon->winctx); + surface_gl_update_texture(scon->gls, scon->surface, x, y, w, h); + scon->updates++; +} + +void sdl2_gl_switch(DisplayChangeListener *dcl, + DisplaySurface *new_surface) +{ + struct sdl2_console *scon = container_of(dcl, struct sdl2_console, dcl); + DisplaySurface *old_surface = scon->surface; + + assert(scon->opengl); + + SDL_GL_MakeCurrent(scon->real_window, scon->winctx); + surface_gl_destroy_texture(scon->gls, scon->surface); + + scon->surface = new_surface; + + if (!new_surface) { + console_gl_fini_context(scon->gls); + scon->gls = NULL; + sdl2_window_destroy(scon); + return; + } + + if (!scon->real_window) { + sdl2_window_create(scon); + scon->gls = console_gl_init_context(); + } else if (old_surface && + ((surface_width(old_surface) != surface_width(new_surface)) || + (surface_height(old_surface) != surface_height(new_surface)))) { + sdl2_window_resize(scon); + } + + surface_gl_create_texture(scon->gls, scon->surface); +} + +void sdl2_gl_refresh(DisplayChangeListener *dcl) +{ + struct sdl2_console *scon = container_of(dcl, struct sdl2_console, dcl); + + assert(scon->opengl); + + graphic_hw_update(dcl->con); + if (scon->updates && scon->surface) { + scon->updates = 0; + sdl2_gl_render_surface(scon); + } + sdl2_poll_events(scon); +} + +void sdl2_gl_redraw(struct sdl2_console *scon) +{ + assert(scon->opengl); + + if (scon->surface) { + sdl2_gl_render_surface(scon); + } +} diff --git a/ui/sdl2-input.c b/ui/sdl2-input.c index a1973fc2e0..ac5dc9476b 100644 --- a/ui/sdl2-input.c +++ b/ui/sdl2-input.c @@ -23,12 +23,6 @@ */ /* Ported SDL 1.2 code to 2.0 by Dave Airlie. */ -/* Avoid compiler warning because macro is redefined in SDL_syswm.h. */ -#undef WIN32_LEAN_AND_MEAN - -#include <SDL.h> -#include <SDL_syswm.h> - #include "qemu-common.h" #include "ui/console.h" #include "ui/input.h" @@ -23,12 +23,6 @@ */ /* Ported SDL 1.2 code to 2.0 by Dave Airlie. */ -/* Avoid compiler warning because macro is redefined in SDL_syswm.h. 
*/ -#undef WIN32_LEAN_AND_MEAN - -#include <SDL.h> -#include <SDL_syswm.h> - #include "qemu-common.h" #include "ui/console.h" #include "ui/input.h" @@ -92,6 +86,9 @@ void sdl2_window_create(struct sdl2_console *scon) surface_height(scon->surface), flags); scon->real_renderer = SDL_CreateRenderer(scon->real_window, -1, 0); + if (scon->opengl) { + scon->winctx = SDL_GL_GetCurrentContext(); + } sdl_update_caption(scon); } @@ -118,6 +115,17 @@ void sdl2_window_resize(struct sdl2_console *scon) surface_height(scon->surface)); } +static void sdl2_redraw(struct sdl2_console *scon) +{ + if (scon->opengl) { +#ifdef CONFIG_OPENGL + sdl2_gl_redraw(scon); +#endif + } else { + sdl2_2d_redraw(scon); + } +} + static void sdl_update_caption(struct sdl2_console *scon) { char win_title[1024]; @@ -316,7 +324,7 @@ static void toggle_full_screen(struct sdl2_console *scon) } SDL_SetWindowFullscreen(scon->real_window, 0); } - sdl2_2d_redraw(scon); + sdl2_redraw(scon); } static void handle_keydown(SDL_Event *ev) @@ -364,8 +372,10 @@ static void handle_keydown(SDL_Event *ev) case SDL_SCANCODE_U: sdl2_window_destroy(scon); sdl2_window_create(scon); - /* re-create texture */ - sdl2_2d_switch(&scon->dcl, scon->surface); + if (!scon->opengl) { + /* re-create scon->texture */ + sdl2_2d_switch(&scon->dcl, scon->surface); + } gui_keysym = 1; break; #if 0 @@ -384,7 +394,7 @@ static void handle_keydown(SDL_Event *ev) fprintf(stderr, "%s: scale to %dx%d\n", __func__, width, height); sdl_scale(scon, width, height); - sdl2_2d_redraw(scon); + sdl2_redraw(scon); gui_keysym = 1; } #endif @@ -520,10 +530,10 @@ static void handle_windowevent(SDL_Event *ev) info.height = ev->window.data2; dpy_set_ui_info(scon->dcl.con, &info); } - sdl2_2d_redraw(scon); + sdl2_redraw(scon); break; case SDL_WINDOWEVENT_EXPOSED: - sdl2_2d_redraw(scon); + sdl2_redraw(scon); break; case SDL_WINDOWEVENT_FOCUS_GAINED: case SDL_WINDOWEVENT_ENTER: @@ -677,6 +687,35 @@ static const DisplayChangeListenerOps dcl_2d_ops = { .dpy_cursor_define = sdl_mouse_define, }; +#ifdef CONFIG_OPENGL +static const DisplayChangeListenerOps dcl_gl_ops = { + .dpy_name = "sdl2-gl", + .dpy_gfx_update = sdl2_gl_update, + .dpy_gfx_switch = sdl2_gl_switch, + .dpy_gfx_check_format = console_gl_check_format, + .dpy_refresh = sdl2_gl_refresh, + .dpy_mouse_set = sdl_mouse_warp, + .dpy_cursor_define = sdl_mouse_define, +}; +#endif + +void sdl_display_early_init(int opengl) +{ + switch (opengl) { + case -1: /* default */ + case 0: /* off */ + break; + case 1: /* on */ +#ifdef CONFIG_OPENGL + display_opengl = 1; +#endif + break; + default: + g_assert_not_reached(); + break; + } +} + void sdl_display_init(DisplayState *ds, int full_screen, int no_frame) { int flags; @@ -722,10 +761,16 @@ void sdl_display_init(DisplayState *ds, int full_screen, int no_frame) if (!qemu_console_is_graphic(con)) { sdl2_console[i].hidden = true; } + sdl2_console[i].idx = i; +#ifdef CONFIG_OPENGL + sdl2_console[i].opengl = display_opengl; + sdl2_console[i].dcl.ops = display_opengl ? &dcl_gl_ops : &dcl_2d_ops; +#else + sdl2_console[i].opengl = 0; sdl2_console[i].dcl.ops = &dcl_2d_ops; +#endif sdl2_console[i].dcl.con = con; register_displaychangelistener(&sdl2_console[i].dcl); - sdl2_console[i].idx = i; } /* Load a 32x32x4 image. White pixels are transparent. 
*/ diff --git a/ui/shader.c b/ui/shader.c new file mode 100644 index 0000000000..52a4632930 --- /dev/null +++ b/ui/shader.c @@ -0,0 +1,114 @@ +/* + * QEMU opengl shader helper functions + * + * Copyright (c) 2014 Red Hat + * + * Authors: + * Gerd Hoffmann <kraxel@redhat.com> + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#include "qemu-common.h" +#include "ui/shader.h" + +/* ---------------------------------------------------------------------- */ + +void qemu_gl_run_texture_blit(GLint texture_blit_prog) +{ + GLfloat in_position[] = { + -1, -1, + 1, -1, + -1, 1, + 1, 1, + }; + GLint l_position; + + glUseProgram(texture_blit_prog); + l_position = glGetAttribLocation(texture_blit_prog, "in_position"); + glVertexAttribPointer(l_position, 2, GL_FLOAT, GL_FALSE, 0, in_position); + glEnableVertexAttribArray(l_position); + glDrawArrays(GL_TRIANGLE_STRIP, l_position, 4); +} + +/* ---------------------------------------------------------------------- */ + +GLuint qemu_gl_create_compile_shader(GLenum type, const GLchar *src) +{ + GLuint shader; + GLint status, length; + char *errmsg; + + shader = glCreateShader(type); + glShaderSource(shader, 1, &src, 0); + glCompileShader(shader); + + glGetShaderiv(shader, GL_COMPILE_STATUS, &status); + if (!status) { + glGetShaderiv(shader, GL_INFO_LOG_LENGTH, &length); + errmsg = malloc(length); + glGetShaderInfoLog(shader, length, &length, errmsg); + fprintf(stderr, "%s: compile %s error\n%s\n", __func__, + (type == GL_VERTEX_SHADER) ? 
"vertex" : "fragment", + errmsg); + free(errmsg); + return 0; + } + return shader; +} + +GLuint qemu_gl_create_link_program(GLuint vert, GLuint frag) +{ + GLuint program; + GLint status, length; + char *errmsg; + + program = glCreateProgram(); + glAttachShader(program, vert); + glAttachShader(program, frag); + glLinkProgram(program); + + glGetProgramiv(program, GL_LINK_STATUS, &status); + if (!status) { + glGetProgramiv(program, GL_INFO_LOG_LENGTH, &length); + errmsg = malloc(length); + glGetProgramInfoLog(program, length, &length, errmsg); + fprintf(stderr, "%s: link program: %s\n", __func__, errmsg); + free(errmsg); + return 0; + } + return program; +} + +GLuint qemu_gl_create_compile_link_program(const GLchar *vert_src, + const GLchar *frag_src) +{ + GLuint vert_shader, frag_shader, program; + + vert_shader = qemu_gl_create_compile_shader(GL_VERTEX_SHADER, vert_src); + frag_shader = qemu_gl_create_compile_shader(GL_FRAGMENT_SHADER, frag_src); + if (!vert_shader || !frag_shader) { + return 0; + } + + program = qemu_gl_create_link_program(vert_shader, frag_shader); + glDeleteShader(vert_shader); + glDeleteShader(frag_shader); + + return program; +} diff --git a/ui/shader/texture-blit.frag b/ui/shader/texture-blit.frag new file mode 100644 index 0000000000..bfa202c22b --- /dev/null +++ b/ui/shader/texture-blit.frag @@ -0,0 +1,10 @@ + +#version 300 es + +uniform sampler2D image; +in mediump vec2 ex_tex_coord; +out mediump vec4 out_frag_color; + +void main(void) { + out_frag_color = texture(image, ex_tex_coord); +} diff --git a/ui/shader/texture-blit.vert b/ui/shader/texture-blit.vert new file mode 100644 index 0000000000..6fe2744d68 --- /dev/null +++ b/ui/shader/texture-blit.vert @@ -0,0 +1,10 @@ + +#version 300 es + +in vec2 in_position; +out vec2 ex_tex_coord; + +void main(void) { + gl_Position = vec4(in_position, 0.0, 1.0); + ex_tex_coord = vec2(1.0 + in_position.x, 1.0 - in_position.y) * 0.5; +} diff --git a/ui/spice-core.c b/ui/spice-core.c index c8f7f183c6..f00e0742b4 100644 --- a/ui/spice-core.c +++ b/ui/spice-core.c @@ -273,14 +273,6 @@ static SpiceCoreInterface core_interface = { .channel_event = channel_event, }; -typedef struct SpiceMigration { - SpiceMigrateInstance sin; - struct { - MonitorCompletion *cb; - void *opaque; - } connect_complete; -} SpiceMigration; - static void migrate_connect_complete_cb(SpiceMigrateInstance *sin); static void migrate_end_complete_cb(SpiceMigrateInstance *sin); @@ -293,15 +285,11 @@ static const SpiceMigrateInterface migrate_interface = { .migrate_end_complete = migrate_end_complete_cb, }; -static SpiceMigration spice_migrate; +static SpiceMigrateInstance spice_migrate; static void migrate_connect_complete_cb(SpiceMigrateInstance *sin) { - SpiceMigration *sm = container_of(sin, SpiceMigration, sin); - if (sm->connect_complete.cb) { - sm->connect_complete.cb(sm->connect_complete.opaque, NULL); - } - sm->connect_complete.cb = NULL; + /* nothing, but libspice-server expects this cb being present. 
*/ } static void migrate_end_complete_cb(SpiceMigrateInstance *sin) @@ -585,13 +573,10 @@ static void migration_state_notifier(Notifier *notifier, void *data) } int qemu_spice_migrate_info(const char *hostname, int port, int tls_port, - const char *subject, - MonitorCompletion *cb, void *opaque) + const char *subject) { int ret; - spice_migrate.connect_complete.cb = cb; - spice_migrate.connect_complete.opaque = opaque; ret = spice_server_migrate_connect(spice_server, hostname, port, tls_port, subject); spice_have_target_host = true; @@ -812,9 +797,8 @@ void qemu_spice_init(void) migration_state.notify = migration_state_notifier; add_migration_state_change_notifier(&migration_state); - spice_migrate.sin.base.sif = &migrate_interface.base; - spice_migrate.connect_complete.cb = NULL; - qemu_spice_add_interface(&spice_migrate.sin.base); + spice_migrate.base.sif = &migrate_interface.base; + qemu_spice_add_interface(&spice_migrate.base); qemu_spice_input_init(); qemu_spice_audio_init(); diff --git a/ui/spice-display.c b/ui/spice-display.c index 16441852e4..9c63132054 100644 --- a/ui/spice-display.c +++ b/ui/spice-display.c @@ -178,7 +178,7 @@ static void qemu_spice_create_one_update(SimpleSpiceDisplay *ssd, image->bitmap.palette = 0; image->bitmap.format = SPICE_BITMAP_FMT_32BIT; - dest = pixman_image_create_bits(PIXMAN_x8r8g8b8, bw, bh, + dest = pixman_image_create_bits(PIXMAN_LE_x8r8g8b8, bw, bh, (void *)update->bitmap, bw * 4); pixman_image_composite(PIXMAN_OP_SRC, ssd->surface, NULL, ssd->mirror, rect->left, rect->top, 0, 0, @@ -260,7 +260,8 @@ static void qemu_spice_create_update(SimpleSpiceDisplay *ssd) static SimpleSpiceCursor* qemu_spice_create_cursor_update(SimpleSpiceDisplay *ssd, - QEMUCursor *c) + QEMUCursor *c, + int on) { size_t size = c ? c->width * c->height * 4 : 0; SimpleSpiceCursor *update; @@ -275,8 +276,8 @@ qemu_spice_create_cursor_update(SimpleSpiceDisplay *ssd, if (c) { ccmd->type = QXL_CURSOR_SET; - ccmd->u.set.position.x = ssd->ptr_x; - ccmd->u.set.position.y = ssd->ptr_y; + ccmd->u.set.position.x = ssd->ptr_x + ssd->hot_x; + ccmd->u.set.position.y = ssd->ptr_y + ssd->hot_y; ccmd->u.set.visible = true; ccmd->u.set.shape = (uintptr_t)cursor; cursor->header.unique = ssd->unique++; @@ -288,10 +289,12 @@ qemu_spice_create_cursor_update(SimpleSpiceDisplay *ssd, cursor->data_size = size; cursor->chunk.data_size = size; memcpy(cursor->chunk.data, c->data, size); + } else if (!on) { + ccmd->type = QXL_CURSOR_HIDE; } else { ccmd->type = QXL_CURSOR_MOVE; - ccmd->u.position.x = ssd->ptr_x; - ccmd->u.position.y = ssd->ptr_y; + ccmd->u.position.x = ssd->ptr_x + ssd->hot_x; + ccmd->u.position.y = ssd->ptr_y + ssd->hot_y; } ccmd->release_info.id = (uintptr_t)(&update->ext); @@ -536,7 +539,7 @@ static void interface_get_init_info(QXLInstance *sin, QXLDevInitInfo *info) info->n_surfaces = ssd->num_surfaces; } -static int interface_get_command(QXLInstance *sin, struct QXLCommandExt *ext) +static int interface_get_command(QXLInstance *sin, QXLCommandExt *ext) { SimpleSpiceDisplay *ssd = container_of(sin, SimpleSpiceDisplay, qxl); SimpleSpiceUpdate *update; @@ -563,7 +566,7 @@ static int interface_req_cmd_notification(QXLInstance *sin) } static void interface_release_resource(QXLInstance *sin, - struct QXLReleaseInfoExt rext) + QXLReleaseInfoExt rext) { SimpleSpiceDisplay *ssd = container_of(sin, SimpleSpiceDisplay, qxl); SimpleSpiceUpdate *update; @@ -586,7 +589,7 @@ static void interface_release_resource(QXLInstance *sin, } } -static int interface_get_cursor_command(QXLInstance *sin, 
struct QXLCommandExt *ext) +static int interface_get_cursor_command(QXLInstance *sin, QXLCommandExt *ext) { SimpleSpiceDisplay *ssd = container_of(sin, SimpleSpiceDisplay, qxl); int ret; @@ -715,7 +718,7 @@ static void display_update(DisplayChangeListener *dcl, } static void display_switch(DisplayChangeListener *dcl, - struct DisplaySurface *surface) + DisplaySurface *surface) { SimpleSpiceDisplay *ssd = container_of(dcl, SimpleSpiceDisplay, dcl); qemu_spice_display_switch(ssd, surface); @@ -734,11 +737,11 @@ static void display_mouse_set(DisplayChangeListener *dcl, qemu_mutex_lock(&ssd->lock); ssd->ptr_x = x; - ssd->ptr_y = x; + ssd->ptr_y = y; if (ssd->ptr_move) { g_free(ssd->ptr_move); } - ssd->ptr_move = qemu_spice_create_cursor_update(ssd, NULL); + ssd->ptr_move = qemu_spice_create_cursor_update(ssd, NULL, on); qemu_mutex_unlock(&ssd->lock); } @@ -748,6 +751,8 @@ static void display_mouse_define(DisplayChangeListener *dcl, SimpleSpiceDisplay *ssd = container_of(dcl, SimpleSpiceDisplay, dcl); qemu_mutex_lock(&ssd->lock); + ssd->hot_x = c->hot_x; + ssd->hot_y = c->hot_y; if (ssd->ptr_move) { g_free(ssd->ptr_move); ssd->ptr_move = NULL; @@ -755,7 +760,7 @@ static void display_mouse_define(DisplayChangeListener *dcl, if (ssd->ptr_define) { g_free(ssd->ptr_define); } - ssd->ptr_define = qemu_spice_create_cursor_update(ssd, c); + ssd->ptr_define = qemu_spice_create_cursor_update(ssd, c, 0); qemu_mutex_unlock(&ssd->lock); } diff --git a/ui/vnc-auth-vencrypt.c b/ui/vnc-auth-vencrypt.c index a420ccbd1d..03ea48a69c 100644 --- a/ui/vnc-auth-vencrypt.c +++ b/ui/vnc-auth-vencrypt.c @@ -65,7 +65,8 @@ static void start_auth_vencrypt_subauth(VncState *vs) static void vnc_tls_handshake_io(void *opaque); -static int vnc_start_vencrypt_handshake(struct VncState *vs) { +static int vnc_start_vencrypt_handshake(VncState *vs) +{ int ret; if ((ret = gnutls_handshake(vs->tls.session)) < 0) { @@ -100,8 +101,9 @@ static int vnc_start_vencrypt_handshake(struct VncState *vs) { return 0; } -static void vnc_tls_handshake_io(void *opaque) { - struct VncState *vs = (struct VncState *)opaque; +static void vnc_tls_handshake_io(void *opaque) +{ + VncState *vs = (VncState *)opaque; VNC_DEBUG("Handshake IO continue\n"); vnc_start_vencrypt_handshake(vs); diff --git a/ui/vnc-tls.c b/ui/vnc-tls.c index eddd39b08e..028fc4db1f 100644 --- a/ui/vnc-tls.c +++ b/ui/vnc-tls.c @@ -68,7 +68,7 @@ static int vnc_tls_initialize(void) static ssize_t vnc_tls_push(gnutls_transport_ptr_t transport, const void *data, size_t len) { - struct VncState *vs = (struct VncState *)transport; + VncState *vs = (VncState *)transport; int ret; retry: @@ -85,7 +85,7 @@ static ssize_t vnc_tls_push(gnutls_transport_ptr_t transport, static ssize_t vnc_tls_pull(gnutls_transport_ptr_t transport, void *data, size_t len) { - struct VncState *vs = (struct VncState *)transport; + VncState *vs = (VncState *)transport; int ret; retry: @@ -170,7 +170,7 @@ static gnutls_certificate_credentials_t vnc_tls_initialize_x509_cred(VncDisplay } -int vnc_tls_validate_certificate(struct VncState *vs) +int vnc_tls_validate_certificate(VncState *vs) { int ret; unsigned int status; @@ -332,7 +332,7 @@ static int vnc_set_gnutls_priority(gnutls_session_t s, int x509) #endif -int vnc_tls_client_setup(struct VncState *vs, +int vnc_tls_client_setup(VncState *vs, int needX509Creds) { VNC_DEBUG("Do TLS setup\n"); if (vnc_tls_initialize() < 0) { @@ -410,7 +410,7 @@ int vnc_tls_client_setup(struct VncState *vs, } -void vnc_tls_client_cleanup(struct VncState *vs) +void 
vnc_tls_client_cleanup(VncState *vs) { if (vs->tls.session) { gnutls_deinit(vs->tls.session); diff --git a/ui/vnc-ws.c b/ui/vnc-ws.c index 62eb97fe76..38a1b8b646 100644 --- a/ui/vnc-ws.c +++ b/ui/vnc-ws.c @@ -24,7 +24,7 @@ #ifdef CONFIG_VNC_TLS #include "qemu/sockets.h" -static int vncws_start_tls_handshake(struct VncState *vs) +static int vncws_start_tls_handshake(VncState *vs) { int ret = gnutls_handshake(vs->tls.session); @@ -63,7 +63,7 @@ static int vncws_start_tls_handshake(struct VncState *vs) void vncws_tls_handshake_io(void *opaque) { - struct VncState *vs = (struct VncState *)opaque; + VncState *vs = (VncState *)opaque; if (!vs->tls.session) { VNC_DEBUG("TLS Websocket setup\n"); @@ -1046,7 +1046,7 @@ static void vnc_dpy_cursor_define(DisplayChangeListener *dcl, } } -static int find_and_clear_dirty_height(struct VncState *vs, +static int find_and_clear_dirty_height(VncState *vs, int y, int last_x, int x, int height) { int h; diff --git a/util/compatfd.c b/util/compatfd.c index 341ada638f..e8571502be 100644 --- a/util/compatfd.c +++ b/util/compatfd.c @@ -108,22 +108,3 @@ int qemu_signalfd(const sigset_t *mask) return qemu_signalfd_compat(mask); } - -bool qemu_signalfd_available(void) -{ -#ifdef CONFIG_SIGNALFD - sigset_t mask; - int fd; - bool ok; - sigemptyset(&mask); - errno = 0; - fd = syscall(SYS_signalfd, -1, &mask, _NSIG / 8); - ok = (errno != ENOSYS); - if (fd >= 0) { - close(fd); - } - return ok; -#else - return false; -#endif -} diff --git a/util/hbitmap.c b/util/hbitmap.c index ab139717f5..a10c7aeeda 100644 --- a/util/hbitmap.c +++ b/util/hbitmap.c @@ -90,6 +90,9 @@ struct HBitmap { * bitmap will still allocate HBITMAP_LEVELS arrays. */ unsigned long *levels[HBITMAP_LEVELS]; + + /* The length of each levels[] array. */ + uint64_t sizes[HBITMAP_LEVELS]; }; /* Advance hbi to the next nonzero word and return it. hbi->pos @@ -384,6 +387,7 @@ HBitmap *hbitmap_alloc(uint64_t size, int granularity) hb->granularity = granularity; for (i = HBITMAP_LEVELS; i-- > 0; ) { size = MAX((size + BITS_PER_LONG - 1) >> BITS_PER_LEVEL, 1); + hb->sizes[i] = size; hb->levels[i] = g_new0(unsigned long, size); } @@ -395,3 +399,84 @@ HBitmap *hbitmap_alloc(uint64_t size, int granularity) hb->levels[0][0] |= 1UL << (BITS_PER_LONG - 1); return hb; } + +void hbitmap_truncate(HBitmap *hb, uint64_t size) +{ + bool shrink; + unsigned i; + uint64_t num_elements = size; + uint64_t old; + + /* Size comes in as logical elements, adjust for granularity. */ + size = (size + (1ULL << hb->granularity) - 1) >> hb->granularity; + assert(size <= ((uint64_t)1 << HBITMAP_LOG_MAX_SIZE)); + shrink = size < hb->size; + + /* bit sizes are identical; nothing to do. */ + if (size == hb->size) { + return; + } + + /* If we're losing bits, let's clear those bits before we invalidate all of + * our invariants. This helps keep the bitcount consistent, and will prevent + * us from carrying around garbage bits beyond the end of the map. + */ + if (shrink) { + /* Don't clear partial granularity groups; + * start at the first full one. 
*/ + uint64_t start = QEMU_ALIGN_UP(num_elements, 1 << hb->granularity); + uint64_t fix_count = (hb->size << hb->granularity) - start; + + assert(fix_count); + hbitmap_reset(hb, start, fix_count); + } + + hb->size = size; + for (i = HBITMAP_LEVELS; i-- > 0; ) { + size = MAX(BITS_TO_LONGS(size), 1); + if (hb->sizes[i] == size) { + break; + } + old = hb->sizes[i]; + hb->sizes[i] = size; + hb->levels[i] = g_realloc(hb->levels[i], size * sizeof(unsigned long)); + if (!shrink) { + memset(&hb->levels[i][old], 0x00, + (size - old) * sizeof(*hb->levels[i])); + } + } +} + + +/** + * Given HBitmaps A and B, let A := A (BITOR) B. + * Bitmap B will not be modified. + * + * @return true if the merge was successful, + * false if it was not attempted. + */ +bool hbitmap_merge(HBitmap *a, const HBitmap *b) +{ + int i; + uint64_t j; + + if ((a->size != b->size) || (a->granularity != b->granularity)) { + return false; + } + + if (hbitmap_count(b) == 0) { + return true; + } + + /* This merge is O(size), as BITS_PER_LONG and HBITMAP_LEVELS are constant. + * It may be possible to improve running times for sparsely populated maps + * by using hbitmap_iter_next, but this is suboptimal for dense maps. + */ + for (i = HBITMAP_LEVELS - 1; i >= 0; i--) { + for (j = 0; j < a->sizes[i]; j++) { + a->levels[i][j] |= b->levels[i][j]; + } + } + + return true; +} diff --git a/util/osdep.c b/util/osdep.c index b2bd1542c5..f938b69466 100644 --- a/util/osdep.c +++ b/util/osdep.c @@ -310,72 +310,6 @@ int qemu_accept(int s, struct sockaddr *addr, socklen_t *addrlen) return ret; } -/* - * A variant of send(2) which handles partial write. - * - * Return the number of bytes transferred, which is only - * smaller than `count' if there is an error. - * - * This function won't work with non-blocking fd's. - * Any of the possibilities with non-bloking fd's is bad: - * - return a short write (then name is wrong) - * - busy wait adding (errno == EAGAIN) to the loop - */ -ssize_t qemu_send_full(int fd, const void *buf, size_t count, int flags) -{ - ssize_t ret = 0; - ssize_t total = 0; - - while (count) { - ret = send(fd, buf, count, flags); - if (ret < 0) { - if (errno == EINTR) { - continue; - } - break; - } - - count -= ret; - buf += ret; - total += ret; - } - - return total; -} - -/* - * A variant of recv(2) which handles partial write. - * - * Return the number of bytes transferred, which is only - * smaller than `count' if there is an error. - * - * This function won't work with non-blocking fd's. 
- * Any of the possibilities with non-bloking fd's is bad: - * - return a short write (then name is wrong) - * - busy wait adding (errno == EAGAIN) to the loop - */ -ssize_t qemu_recv_full(int fd, void *buf, size_t count, int flags) -{ - ssize_t ret = 0; - ssize_t total = 0; - - while (count) { - ret = qemu_recv(fd, buf, count, flags); - if (ret <= 0) { - if (ret < 0 && errno == EINTR) { - continue; - } - break; - } - - count -= ret; - buf += ret; - total += ret; - } - - return total; -} - void qemu_set_version(const char *version) { qemu_version = version; diff --git a/util/qemu-config.c b/util/qemu-config.c index 2d32ce7e91..30d6dcf526 100644 --- a/util/qemu-config.c +++ b/util/qemu-config.c @@ -3,10 +3,8 @@ #include "qemu/option.h" #include "qemu/config-file.h" #include "qapi/qmp/qerror.h" -#include "hw/qdev.h" #include "qapi/error.h" #include "qmp-commands.h" -#include "hw/i386/pc.h" static QemuOptsList *vm_config_groups[32]; static QemuOptsList *drive_config_groups[4]; @@ -413,7 +411,9 @@ int qemu_config_parse(FILE *fp, QemuOptsList **lists, const char *fname) opts = qemu_opts_create(list, NULL, 0, &error_abort); continue; } - if (sscanf(line, " %63s = \"%1023[^\"]\"", arg, value) == 2) { + value[0] = '\0'; + if (sscanf(line, " %63s = \"%1023[^\"]\"", arg, value) == 2 || + sscanf(line, " %63s = \"\"", arg) == 1) { /* arg = value */ if (opts == NULL) { error_report("no group defined"); @@ -130,6 +130,7 @@ static int data_dir_idx; const char *bios_name = NULL; enum vga_retrace_method vga_retrace_method = VGA_RETRACE_DUMB; DisplayType display_type = DT_DEFAULT; +int request_opengl = -1; int display_opengl; static int display_remote; const char* keyboard_layout = NULL; @@ -1990,6 +1991,15 @@ static DisplayType select_display(const char *p) } else { goto invalid_sdl_args; } + } else if (strstart(opts, ",gl=", &nextopt)) { + opts = nextopt; + if (strstart(opts, "on", &nextopt)) { + request_opengl = 1; + } else if (strstart(opts, "off", &nextopt)) { + request_opengl = 0; + } else { + goto invalid_sdl_args; + } } else { invalid_sdl_args: fprintf(stderr, "Invalid SDL option string: %s\n", p); @@ -4005,6 +4015,19 @@ int main(int argc, char **argv, char **envp) early_gtk_display_init(); } #endif +#if defined(CONFIG_SDL) + if (display_type == DT_SDL) { + sdl_display_early_init(request_opengl); + } +#endif + if (request_opengl == 1 && display_opengl == 0) { +#if defined(CONFIG_OPENGL) + fprintf(stderr, "OpenGL is not supported by the display.\n"); +#else + fprintf(stderr, "QEMU was built without opengl support.\n"); +#endif + exit(1); + } socket_init(); |
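The vl.c hunk above adds a gl=on|off sub-option to -display sdl and turns an unsatisfiable OpenGL request into a hard error: sdl_display_early_init() receives the requested value, and if gl=on was asked for but display_opengl stayed 0, QEMU exits with a message that depends on whether the binary was built with OpenGL at all. In practice the option is spelled, for example, -display sdl,gl=on (assuming an SDL build with OpenGL support).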
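A little further back, the util/qemu-config.c hunk works around an sscanf() quirk: the %[^"] conversion never matches an empty sequence, so a config line of the form key = "" used to fail the single-pattern parse and fall through to the parse-error path. A standalone sketch of the same two-pattern approach (the function and buffer names here are made up for illustration):

/* Sketch only: shows why two sscanf() patterns are needed.  The
 * %[^"] conversion fails on an empty string, so a line like
 * key = "" must be matched by a second, value-less pattern. */
#include <stdio.h>

static int parse_assignment(const char *line, char *arg, char *value)
{
    value[0] = '\0';
    if (sscanf(line, " %63s = \"%1023[^\"]\"", arg, value) == 2 ||
        sscanf(line, " %63s = \"\"", arg) == 1) {
        return 1;               /* arg holds the key; value may be empty */
    }
    return 0;                   /* would hit the "parse error" path */
}

int main(void)
{
    char arg[64], value[1024];

    parse_assignment("  name = \"guest01\"", arg, value);
    printf("%s -> '%s'\n", arg, value);     /* name -> 'guest01' */

    parse_assignment("  desc = \"\"", arg, value);
    printf("%s -> '%s'\n", arg, value);     /* desc -> '' */

    return 0;
}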
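The util/hbitmap.c changes above record the length of each levels[] array in a new sizes[] field so that hbitmap_truncate() can grow or shrink every level in place and hbitmap_merge() can OR two bitmaps level by level. A small usage sketch, assuming the pre-existing public API from include/qemu/hbitmap.h (hbitmap_alloc, hbitmap_set, hbitmap_count, hbitmap_free):

/* Sketch only: exercises the semantics of the new hbitmap_truncate()
 * and hbitmap_merge() helpers using the existing HBitmap API. */
#include <assert.h>
#include <stdbool.h>
#include "qemu/hbitmap.h"

static void hbitmap_example(void)
{
    /* Two bitmaps tracking 1024 elements, granularity 0 (one bit each). */
    HBitmap *a = hbitmap_alloc(1024, 0);
    HBitmap *b = hbitmap_alloc(1024, 0);

    hbitmap_set(a, 0, 16);      /* elements [0, 16) dirty in a */
    hbitmap_set(b, 512, 8);     /* elements [512, 520) dirty in b */

    /* Merge requires matching size and granularity; a becomes a | b. */
    bool merged = hbitmap_merge(a, b);
    assert(merged);
    assert(hbitmap_count(a) == 24);

    /* Growing zero-fills the new tail and keeps existing bits... */
    hbitmap_truncate(a, 2048);
    assert(hbitmap_count(a) == 24);

    /* ...while shrinking first clears every bit past the new end. */
    hbitmap_truncate(a, 256);
    assert(hbitmap_count(a) == 16);

    hbitmap_free(a);
    hbitmap_free(b);
}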
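Earlier in the series, ui/shader.c introduces three small helpers: qemu_gl_create_compile_shader() compiles one stage, qemu_gl_create_link_program() links two stages, and qemu_gl_create_compile_link_program() wraps both; qemu_gl_run_texture_blit() then draws a full-screen quad with whatever program it is given. A minimal caller sketch, assuming an already-current OpenGL ES 3.0 context, a GL header such as <epoxy/gl.h> (an assumption here), and the texture-blit sources inlined as string literals purely for illustration:

/* Sketch only: assumes a current GL ES 3.0 context and that the
 * texture to blit is already bound on the active texture unit. */
#include <stdbool.h>
#include <epoxy/gl.h>
#include "ui/shader.h"

static const GLchar *texture_blit_vert_src =
    "#version 300 es\n"
    "in vec2 in_position;\n"
    "out vec2 ex_tex_coord;\n"
    "void main(void) {\n"
    "    gl_Position = vec4(in_position, 0.0, 1.0);\n"
    "    ex_tex_coord = vec2(1.0 + in_position.x, 1.0 - in_position.y) * 0.5;\n"
    "}\n";

static const GLchar *texture_blit_frag_src =
    "#version 300 es\n"
    "uniform sampler2D image;\n"
    "in mediump vec2 ex_tex_coord;\n"
    "out mediump vec4 out_frag_color;\n"
    "void main(void) {\n"
    "    out_frag_color = texture(image, ex_tex_coord);\n"
    "}\n";

static GLuint texture_blit_prog;

/* Compile and link once, e.g. at display init time; the helpers print
 * compile/link errors themselves and return 0 on failure. */
static bool texture_blit_init(void)
{
    texture_blit_prog = qemu_gl_create_compile_link_program(
        texture_blit_vert_src, texture_blit_frag_src);
    return texture_blit_prog != 0;
}

/* Blit the currently bound texture to the viewport as a full-screen quad. */
static void texture_blit_draw(void)
{
    qemu_gl_run_texture_blit(texture_blit_prog);
}

Since the "image" sampler uniform defaults to 0, binding the source texture on GL_TEXTURE0 before the blit is enough.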