317 files changed, 3759 insertions, 3355 deletions
diff --git a/.gitlab-ci.d/cirrus.yml b/.gitlab-ci.d/cirrus.yml index 634a73a742..785b163aa6 100644 --- a/.gitlab-ci.d/cirrus.yml +++ b/.gitlab-ci.d/cirrus.yml @@ -50,7 +50,7 @@ x64-freebsd-12-build: NAME: freebsd-12 CIRRUS_VM_INSTANCE_TYPE: freebsd_instance CIRRUS_VM_IMAGE_SELECTOR: image_family - CIRRUS_VM_IMAGE_NAME: freebsd-12-3 + CIRRUS_VM_IMAGE_NAME: freebsd-12-4 CIRRUS_VM_CPUS: 8 CIRRUS_VM_RAM: 8G UPDATE_COMMAND: pkg update diff --git a/.gitlab-ci.d/crossbuilds.yml b/.gitlab-ci.d/crossbuilds.yml index c4cd96433d..8dbbb8f881 100644 --- a/.gitlab-ci.d/crossbuilds.yml +++ b/.gitlab-ci.d/crossbuilds.yml @@ -112,6 +112,14 @@ cross-ppc64el-user: variables: IMAGE: debian-ppc64el-cross +cross-ppc64el-kvm-only: + extends: .cross_accel_build_job + needs: + job: ppc64el-debian-cross-container + variables: + IMAGE: debian-ppc64el-cross + EXTRA_CONFIGURE_OPTS: --disable-tcg --without-default-devices + # The riscv64 cross-builds currently use a 'sid' container to get # compilers and libraries. Until something more stable is found we # allow_failure so as not to block CI. diff --git a/.gitlab-ci.d/windows.yml b/.gitlab-ci.d/windows.yml index a3e7a37022..9b5c4bcd8a 100644 --- a/.gitlab-ci.d/windows.yml +++ b/.gitlab-ci.d/windows.yml @@ -41,11 +41,15 @@ msys2-64bit: mingw-w64-x86_64-gcc mingw-w64-x86_64-glib2 mingw-w64-x86_64-gnutls + mingw-w64-x86_64-gtk3 + mingw-w64-x86_64-libgcrypt + mingw-w64-x86_64-libjpeg-turbo mingw-w64-x86_64-libnfs mingw-w64-x86_64-libpng mingw-w64-x86_64-libssh mingw-w64-x86_64-libtasn1 mingw-w64-x86_64-libusb + mingw-w64-x86_64-lzo2 mingw-w64-x86_64-nettle mingw-w64-x86_64-ninja mingw-w64-x86_64-pixman @@ -57,12 +61,21 @@ msys2-64bit: mingw-w64-x86_64-usbredir mingw-w64-x86_64-zstd " - $env:CHERE_INVOKING = 'yes' # Preserve the current working directory - - $env:MSYSTEM = 'MINGW64' # Start a 64 bit Mingw environment + - $env:MSYSTEM = 'MINGW64' # Start a 64-bit MinGW environment - $env:MSYS = 'winsymlinks:native' # Enable native Windows symlink - - .\msys64\usr\bin\bash -lc './configure --target-list=x86_64-softmmu - --enable-capstone --without-default-devices' - - .\msys64\usr\bin\bash -lc 'make' - - .\msys64\usr\bin\bash -lc 'make check || { cat build/meson-logs/testlog.txt; exit 1; } ;' + - mkdir output + - cd output + # Note: do not remove "--without-default-devices"! + # commit 9f8e6cad65a6 ("gitlab-ci: Speed up the msys2-64bit job by using --without-default-devices" + # changed to compile QEMU with the --without-default-devices switch + # for the msys2 64-bit job, due to the build could not complete within + # the project timeout. + - ..\msys64\usr\bin\bash -lc '../configure --target-list=x86_64-softmmu + --without-default-devices' + - ..\msys64\usr\bin\bash -lc 'make' + # qTests don't run successfully with "--without-default-devices", + # so let's exclude the qtests from CI for now. 
+ - ..\msys64\usr\bin\bash -lc 'make check MTESTARGS=\"--no-suite qtest\" || { cat meson-logs/testlog.txt; exit 1; } ;' msys2-32bit: extends: .shared_msys2_builder @@ -79,21 +92,27 @@ msys2-32bit: mingw-w64-i686-gtk3 mingw-w64-i686-libgcrypt mingw-w64-i686-libjpeg-turbo + mingw-w64-i686-libnfs + mingw-w64-i686-libpng mingw-w64-i686-libssh mingw-w64-i686-libtasn1 mingw-w64-i686-libusb mingw-w64-i686-lzo2 + mingw-w64-i686-nettle mingw-w64-i686-ninja mingw-w64-i686-pixman mingw-w64-i686-pkgconf mingw-w64-i686-python + mingw-w64-i686-SDL2 + mingw-w64-i686-SDL2_image mingw-w64-i686-snappy - mingw-w64-i686-usbredir " + mingw-w64-i686-usbredir + mingw-w64-i686-zstd " - $env:CHERE_INVOKING = 'yes' # Preserve the current working directory - - $env:MSYSTEM = 'MINGW32' # Start a 32-bit MinG environment + - $env:MSYSTEM = 'MINGW32' # Start a 32-bit MinGW environment - $env:MSYS = 'winsymlinks:native' # Enable native Windows symlink - mkdir output - cd output - - ..\msys64\usr\bin\bash -lc "../configure --target-list=ppc64-softmmu" + - ..\msys64\usr\bin\bash -lc '../configure --target-list=ppc64-softmmu' - ..\msys64\usr\bin\bash -lc 'make' - ..\msys64\usr\bin\bash -lc 'make check || { cat meson-logs/testlog.txt; exit 1; } ;' diff --git a/.gitlab/issue_templates/bug.md b/.gitlab/issue_templates/bug.md index e910f7b1c2..53a79f5828 100644 --- a/.gitlab/issue_templates/bug.md +++ b/.gitlab/issue_templates/bug.md @@ -18,11 +18,11 @@ https://www.qemu.org/contribute/security-process/ --> ## Host environment - - Operating system: (Windows 10 21H1, Fedora 34, etc.) - - OS/kernel version: (For POSIX hosts, use `uname -a`) - - Architecture: (x86, ARM, s390x, etc.) - - QEMU flavor: (qemu-system-x86_64, qemu-aarch64, qemu-img, etc.) - - QEMU version: (e.g. `qemu-system-x86_64 --version`) + - Operating system: <!-- Windows 10 21H1, Fedora 37, etc. --> + - OS/kernel version: <!-- For POSIX hosts, use `uname -a` --> + - Architecture: <!-- x86, ARM, s390x, etc. --> + - QEMU flavor: <!-- qemu-system-x86_64, qemu-aarch64, qemu-img, etc. --> + - QEMU version: <!-- e.g. `qemu-system-x86_64 --version` --> - QEMU command line: <!-- Give the smallest, complete command line that exhibits the problem. @@ -35,9 +35,9 @@ https://www.qemu.org/contribute/security-process/ ``` ## Emulated/Virtualized environment - - Operating system: (Windows 10 21H1, Fedora 34, etc.) - - OS/kernel version: (For POSIX guests, use `uname -a`.) - - Architecture: (x86, ARM, s390x, etc.) + - Operating system: <!-- Windows 10 21H1, Fedora 37, etc. --> + - OS/kernel version: <!-- For POSIX guests, use `uname -a`. --> + - Architecture: <!-- x86, ARM, s390x, etc. 
--> ## Description of problem diff --git a/MAINTAINERS b/MAINTAINERS index 6966490c94..3bd433b65a 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -78,6 +78,7 @@ M: Laurent Vivier <laurent@vivier.eu> S: Maintained L: qemu-trivial@nongnu.org K: ^Subject:.*(?i)trivial +F: docs/devel/trivial-patches.rst T: git git://git.corpit.ru/qemu.git trivial-patches T: git https://github.com/vivier/qemu.git trivial-patches @@ -129,6 +130,7 @@ F: util/cacheinfo.c F: util/cacheflush.c F: scripts/decodetree.py F: docs/devel/decodetree.rst +F: docs/devel/tcg* F: include/exec/cpu*.h F: include/exec/exec-all.h F: include/exec/helper*.h @@ -254,6 +256,7 @@ F: tests/docker/dockerfiles/debian-nios2-cross.d/build-toolchain.sh OpenRISC TCG CPUs M: Stafford Horne <shorne@gmail.com> S: Odd Fixes +F: docs/system/openrisc/cpu-features.rst F: target/openrisc/ F: hw/openrisc/ F: tests/tcg/openrisc/ @@ -332,6 +335,7 @@ F: target/i386/tcg/ F: tests/tcg/i386/ F: tests/tcg/x86_64/ F: hw/i386/ +F: docs/system/i386/cpu.rst F: docs/system/cpu-models-x86* T: git https://gitlab.com/ehabkost/qemu.git x86-next @@ -873,6 +877,7 @@ M: Peter Maydell <peter.maydell@linaro.org> R: Jean-Christophe Dubois <jcd@tribudubois.net> L: qemu-arm@nongnu.org S: Odd Fixes +F: docs/system/arm/sabrelite.rst F: hw/arm/sabrelite.c F: hw/arm/fsl-imx6.c F: hw/misc/imx6_*.c @@ -1273,6 +1278,7 @@ OpenRISC Machines or1k-sim M: Jia Liu <proljc@gmail.com> S: Maintained +F: docs/system/openrisc/or1k-sim.rst F: hw/openrisc/openrisc_sim.c PowerPC Machines @@ -2016,6 +2022,7 @@ F: hw/virtio/trace-events F: qapi/virtio.json F: net/vhost-user.c F: include/hw/virtio/ +F: docs/devel/virtio* virtio-balloon M: Michael S. Tsirkin <mst@redhat.com> @@ -2108,7 +2115,7 @@ F: tests/qtest/virtio-rng-test.c vhost-user-rng M: Mathieu Poirier <mathieu.poirier@linaro.org> S: Supported -F: docs/tools/vhost-user-rng.rst +F: docs/system/devices/vhost-user-rng.rst F: hw/virtio/vhost-user-rng.c F: hw/virtio/vhost-user-rng-pci.c F: include/hw/virtio/vhost-user-rng.h @@ -2146,7 +2153,7 @@ S: Supported F: hw/nvme/* F: include/block/nvme.h F: tests/qtest/nvme-test.c -F: docs/system/nvme.rst +F: docs/system/devices/nvme.rst T: git git://git.infradead.org/qemu-nvme.git nvme-next megasas @@ -2696,6 +2703,7 @@ GDB stub M: Alex Bennée <alex.bennee@linaro.org> R: Philippe Mathieu-Daudé <philmd@linaro.org> S: Maintained +F: docs/system/gdb.rst F: gdbstub/* F: include/exec/gdbstub.h F: gdb-xml/ @@ -2753,6 +2761,7 @@ F: ui/ F: include/ui/ F: qapi/ui.json F: util/drm.c +F: docs/devel/ui.rst Cocoa graphics M: Peter Maydell <peter.maydell@linaro.org> @@ -2930,6 +2939,7 @@ M: Paolo Bonzini <pbonzini@redhat.com> R: Daniel P. 
Berrange <berrange@redhat.com> R: Eduardo Habkost <eduardo@habkost.net> S: Supported +F: docs/devel/qom.rst F: docs/qdev-device-use.txt F: hw/core/qdev* F: hw/core/bus.c @@ -2978,6 +2988,7 @@ F: softmmu/qtest.c F: accel/qtest/ F: tests/qtest/ F: docs/devel/qgraph.rst +F: docs/devel/qtest.rst X: tests/qtest/bios-tables-test* Device Fuzzing @@ -3044,6 +3055,7 @@ F: include/sysemu/tpm* F: qapi/tpm.json F: backends/tpm/ F: tests/qtest/*tpm* +F: docs/specs/tpm.rst T: git https://github.com/stefanberger/qemu-tpm.git tpm-next Checkpatch @@ -3195,7 +3207,8 @@ F: replay/* F: block/blkreplay.c F: net/filter-replay.c F: include/sysemu/replay.h -F: docs/replay.txt +F: docs/devel/replay.rst +F: docs/system/replay.rst F: stubs/replay.c F: tests/avocado/replay_kernel.py F: tests/avocado/replay_linux.py @@ -3719,6 +3732,7 @@ F: tests/docker/ F: tests/vm/ F: tests/lcitool/ F: scripts/archive-source.sh +F: docs/devel/testing.rst W: https://gitlab.com/qemu-project/qemu/pipelines W: https://travis-ci.org/qemu/qemu diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c index f99b0becd8..e86c33e0e6 100644 --- a/accel/kvm/kvm-all.c +++ b/accel/kvm/kvm-all.c @@ -3586,7 +3586,6 @@ static void kvm_set_dirty_ring_size(Object *obj, Visitor *v, Error **errp) { KVMState *s = KVM_STATE(obj); - Error *error = NULL; uint32_t value; if (s->fd != -1) { @@ -3594,9 +3593,7 @@ static void kvm_set_dirty_ring_size(Object *obj, Visitor *v, return; } - visit_type_uint32(v, name, &value, &error); - if (error) { - error_propagate(errp, error); + if (!visit_type_uint32(v, name, &value, errp)) { return; } if (value & (value - 1)) { diff --git a/audio/alsaaudio.c b/audio/alsaaudio.c index 7a2a94cd42..714bfb6453 100644 --- a/audio/alsaaudio.c +++ b/audio/alsaaudio.c @@ -449,7 +449,7 @@ static int alsa_open(bool in, struct alsa_params_req *req, snd_pcm_hw_params_t *hw_params; int err; unsigned int freq, nchannels; - const char *pcm_name = apdo->has_dev ? apdo->dev : "default"; + const char *pcm_name = apdo->dev ?: "default"; snd_pcm_uframes_t obt_buffer_size; const char *typ = in ? 
"ADC" : "DAC"; snd_pcm_format_t obtfmt; diff --git a/audio/audio.c b/audio/audio.c index 065602ce1b..d849a94a81 100644 --- a/audio/audio.c +++ b/audio/audio.c @@ -2035,15 +2035,13 @@ void audio_create_pdos(Audiodev *dev) switch (dev->driver) { #define CASE(DRIVER, driver, pdo_name) \ case AUDIODEV_DRIVER_##DRIVER: \ - if (!dev->u.driver.has_in) { \ + if (!dev->u.driver.in) { \ dev->u.driver.in = g_malloc0( \ sizeof(Audiodev##pdo_name##PerDirectionOptions)); \ - dev->u.driver.has_in = true; \ } \ - if (!dev->u.driver.has_out) { \ + if (!dev->u.driver.out) { \ dev->u.driver.out = g_malloc0( \ sizeof(Audiodev##pdo_name##PerDirectionOptions)); \ - dev->u.driver.has_out = true; \ } \ break diff --git a/audio/audio_legacy.c b/audio/audio_legacy.c index 595949f52c..18a89ffffb 100644 --- a/audio/audio_legacy.c +++ b/audio/audio_legacy.c @@ -62,15 +62,12 @@ static void get_int(const char *env, uint32_t *dst, bool *has_dst) } } -static void get_str(const char *env, char **dst, bool *has_dst) +static void get_str(const char *env, char **dst) { const char *val = getenv(env); if (val) { - if (*has_dst) { - g_free(*dst); - } + g_free(*dst); *dst = g_strdup(val); - *has_dst = true; } } @@ -169,7 +166,7 @@ static void handle_alsa_per_direction( get_bool(buf, &apdo->try_poll, &apdo->has_try_poll); strcpy(buf + len, "DEV"); - get_str(buf, &apdo->dev, &apdo->has_dev); + get_str(buf, &apdo->dev); strcpy(buf + len, "SIZE_IN_USEC"); get_bool(buf, &size_in_usecs, &dummy); @@ -235,7 +232,7 @@ static void handle_oss_per_direction( const char *dev_env) { get_bool(try_poll_env, &opdo->try_poll, &opdo->has_try_poll); - get_str(dev_env, &opdo->dev, &opdo->has_dev); + get_str(dev_env, &opdo->dev); get_bytes_to_usecs("QEMU_OSS_FRAGSIZE", &opdo->buffer_length, &opdo->has_buffer_length, @@ -261,7 +258,7 @@ static void handle_oss(Audiodev *dev) static void handle_pa_per_direction( AudiodevPaPerDirectionOptions *ppdo, const char *env) { - get_str(env, &ppdo->name, &ppdo->has_name); + get_str(env, &ppdo->name); } static void handle_pa(Audiodev *dev) @@ -278,7 +275,7 @@ static void handle_pa(Audiodev *dev) &dev->u.pa.out->has_buffer_length, qapi_AudiodevPaPerDirectionOptions_base(dev->u.pa.out)); - get_str("QEMU_PA_SERVER", &dev->u.pa.server, &dev->u.pa.has_server); + get_str("QEMU_PA_SERVER", &dev->u.pa.server); } /* SDL */ @@ -299,7 +296,7 @@ static void handle_wav(Audiodev *dev) &dev->u.wav.out->has_format); get_int("QEMU_WAV_DAC_FIXED_CHANNELS", &dev->u.wav.out->channels, &dev->u.wav.out->has_channels); - get_str("QEMU_WAV_PATH", &dev->u.wav.path, &dev->u.wav.has_path); + get_str("QEMU_WAV_PATH", &dev->u.wav.path); } /* general */ diff --git a/audio/ossaudio.c b/audio/ossaudio.c index 8e075edb70..e8d732b612 100644 --- a/audio/ossaudio.c +++ b/audio/ossaudio.c @@ -252,7 +252,7 @@ static int oss_open(int in, struct oss_params *req, audsettings *as, audio_buf_info abinfo; int fmt, freq, nchannels; int setfragment = 1; - const char *dspname = opdo->has_dev ? opdo->dev : "/dev/dsp"; + const char *dspname = opdo->dev ?: "/dev/dsp"; const char *typ = in ? "ADC" : "DAC"; #ifdef USE_DSP_POLICY int policy = oopts->has_dsp_policy ? oopts->dsp_policy : 5; @@ -745,10 +745,8 @@ static void *oss_audio_init(Audiodev *dev) oss_init_per_direction(oopts->in); oss_init_per_direction(oopts->out); - if (access(oopts->in->has_dev ? oopts->in->dev : "/dev/dsp", - R_OK | W_OK) < 0 || - access(oopts->out->has_dev ? 
oopts->out->dev : "/dev/dsp", - R_OK | W_OK) < 0) { + if (access(oopts->in->dev ?: "/dev/dsp", R_OK | W_OK) < 0 || + access(oopts->out->dev ?: "/dev/dsp", R_OK | W_OK) < 0) { return NULL; } return dev; diff --git a/audio/paaudio.c b/audio/paaudio.c index e91116f239..529b39daac 100644 --- a/audio/paaudio.c +++ b/audio/paaudio.c @@ -536,9 +536,9 @@ static int qpa_init_out(HWVoiceOut *hw, struct audsettings *as, pa->stream = qpa_simple_new ( c, - ppdo->has_stream_name ? ppdo->stream_name : g->dev->id, + ppdo->stream_name ?: g->dev->id, PA_STREAM_PLAYBACK, - ppdo->has_name ? ppdo->name : NULL, + ppdo->name, &ss, &ba, /* buffering attributes */ &error @@ -585,9 +585,9 @@ static int qpa_init_in(HWVoiceIn *hw, struct audsettings *as, void *drv_opaque) pa->stream = qpa_simple_new ( c, - ppdo->has_stream_name ? ppdo->stream_name : g->dev->id, + ppdo->stream_name ?: g->dev->id, PA_STREAM_RECORD, - ppdo->has_name ? ppdo->name : NULL, + ppdo->name, &ss, &ba, /* buffering attributes */ &error @@ -827,7 +827,7 @@ static void *qpa_audio_init(Audiodev *dev) assert(dev->driver == AUDIODEV_DRIVER_PA); - if (!popts->has_server) { + if (!popts->server) { char pidfile[64]; char *runtime; struct stat st; @@ -850,7 +850,7 @@ static void *qpa_audio_init(Audiodev *dev) } g = g_new0(paaudio, 1); - server = popts->has_server ? popts->server : NULL; + server = popts->server; g->dev = dev; diff --git a/audio/sndioaudio.c b/audio/sndioaudio.c index 7c45276d36..632b0e3825 100644 --- a/audio/sndioaudio.c +++ b/audio/sndioaudio.c @@ -333,7 +333,7 @@ static int sndio_init(SndioVoice *self, unsigned int nch; int i, nfds; - dev_name = opts->has_dev ? opts->dev : SIO_DEVANY; + dev_name = opts->dev ?: SIO_DEVANY; latency = opts->has_latency ? opts->latency : SNDIO_LATENCY_US; /* open the device in non-blocking mode */ diff --git a/audio/wavaudio.c b/audio/wavaudio.c index 3e1d84db83..6445a2cb90 100644 --- a/audio/wavaudio.c +++ b/audio/wavaudio.c @@ -78,7 +78,7 @@ static int wav_init_out(HWVoiceOut *hw, struct audsettings *as, Audiodev *dev = drv_opaque; AudiodevWavOptions *wopts = &dev->u.wav; struct audsettings wav_as = audiodev_to_audsettings(dev->u.wav.out); - const char *wav_path = wopts->has_path ? 
wopts->path : "qemu.wav"; + const char *wav_path = wopts->path ?: "qemu.wav"; stereo = wav_as.nchannels == 2; switch (wav_as.fmt) { diff --git a/backends/tpm/tpm_passthrough.c b/backends/tpm/tpm_passthrough.c index 5a2f74db1b..179697a3a9 100644 --- a/backends/tpm/tpm_passthrough.c +++ b/backends/tpm/tpm_passthrough.c @@ -259,12 +259,10 @@ tpm_passthrough_handle_device_opts(TPMPassthruState *tpm_pt, QemuOpts *opts) value = qemu_opt_get(opts, "cancel-path"); if (value) { tpm_pt->options->cancel_path = g_strdup(value); - tpm_pt->options->has_cancel_path = true; } value = qemu_opt_get(opts, "path"); if (value) { - tpm_pt->options->has_path = true; tpm_pt->options->path = g_strdup(value); } @@ -93,8 +93,6 @@ static bool bdrv_recurse_has_child(BlockDriverState *bs, static void bdrv_replace_child_noperm(BdrvChild *child, BlockDriverState *new_bs); static void bdrv_remove_child(BdrvChild *child, Transaction *tran); -static void bdrv_remove_filter_or_cow_child(BlockDriverState *bs, - Transaction *tran); static int bdrv_reopen_prepare(BDRVReopenState *reopen_state, BlockReopenQueue *queue, @@ -528,65 +526,24 @@ typedef struct CreateCo { Error *err; } CreateCo; -static void coroutine_fn bdrv_create_co_entry(void *opaque) -{ - Error *local_err = NULL; - int ret; - - CreateCo *cco = opaque; - assert(cco->drv); - GLOBAL_STATE_CODE(); - - ret = cco->drv->bdrv_co_create_opts(cco->drv, - cco->filename, cco->opts, &local_err); - error_propagate(&cco->err, local_err); - cco->ret = ret; -} - -int bdrv_create(BlockDriver *drv, const char* filename, - QemuOpts *opts, Error **errp) +int coroutine_fn bdrv_co_create(BlockDriver *drv, const char *filename, + QemuOpts *opts, Error **errp) { int ret; - GLOBAL_STATE_CODE(); - - Coroutine *co; - CreateCo cco = { - .drv = drv, - .filename = g_strdup(filename), - .opts = opts, - .ret = NOT_DONE, - .err = NULL, - }; + ERRP_GUARD(); if (!drv->bdrv_co_create_opts) { - error_setg(errp, "Driver '%s' does not support image creation", drv->format_name); - ret = -ENOTSUP; - goto out; - } - - if (qemu_in_coroutine()) { - /* Fast-path if already in coroutine context */ - bdrv_create_co_entry(&cco); - } else { - co = qemu_coroutine_create(bdrv_create_co_entry, &cco); - qemu_coroutine_enter(co); - while (cco.ret == NOT_DONE) { - aio_poll(qemu_get_aio_context(), true); - } + error_setg(errp, "Driver '%s' does not support image creation", + drv->format_name); + return -ENOTSUP; } - ret = cco.ret; - if (ret < 0) { - if (cco.err) { - error_propagate(errp, cco.err); - } else { - error_setg_errno(errp, -ret, "Could not create image"); - } + ret = drv->bdrv_co_create_opts(drv, filename, opts, errp); + if (ret < 0 && !*errp) { + error_setg_errno(errp, -ret, "Could not create image"); } -out: - g_free(cco.filename); return ret; } @@ -725,7 +682,8 @@ out: return ret; } -int bdrv_create_file(const char *filename, QemuOpts *opts, Error **errp) +int coroutine_fn bdrv_co_create_file(const char *filename, QemuOpts *opts, + Error **errp) { QemuOpts *protocol_opts; BlockDriver *drv; @@ -766,7 +724,7 @@ int bdrv_create_file(const char *filename, QemuOpts *opts, Error **errp) goto out; } - ret = bdrv_create(drv, filename, protocol_opts, errp); + ret = bdrv_co_create(drv, filename, protocol_opts, errp); out: qemu_opts_del(protocol_opts); qobject_unref(qdict); @@ -1228,20 +1186,19 @@ static char *bdrv_child_get_parent_desc(BdrvChild *c) static void bdrv_child_cb_drained_begin(BdrvChild *child) { BlockDriverState *bs = child->opaque; - bdrv_do_drained_begin_quiesce(bs, NULL, false); + 
bdrv_do_drained_begin_quiesce(bs, NULL); } static bool bdrv_child_cb_drained_poll(BdrvChild *child) { BlockDriverState *bs = child->opaque; - return bdrv_drain_poll(bs, false, NULL, false); + return bdrv_drain_poll(bs, NULL, false); } -static void bdrv_child_cb_drained_end(BdrvChild *child, - int *drained_end_counter) +static void bdrv_child_cb_drained_end(BdrvChild *child) { BlockDriverState *bs = child->opaque; - bdrv_drained_end_no_poll(bs, drained_end_counter); + bdrv_drained_end(bs); } static int bdrv_child_cb_inactivate(BdrvChild *child) @@ -1445,11 +1402,11 @@ static void bdrv_inherited_options(BdrvChildRole role, bool parent_is_format, *child_flags = flags; } -static void bdrv_child_cb_attach(BdrvChild *child) +static void GRAPH_WRLOCK bdrv_child_cb_attach(BdrvChild *child) { BlockDriverState *bs = child->opaque; - assert_bdrv_graph_writable(bs); + assert_bdrv_graph_writable(); QLIST_INSERT_HEAD(&bs->children, child, next); if (bs->drv->is_filter || (child->role & BDRV_CHILD_FILTERED)) { /* @@ -1485,11 +1442,9 @@ static void bdrv_child_cb_attach(BdrvChild *child) assert(!bs->file); bs->file = child; } - - bdrv_apply_subtree_drain(child, bs); } -static void bdrv_child_cb_detach(BdrvChild *child) +static void GRAPH_WRLOCK bdrv_child_cb_detach(BdrvChild *child) { BlockDriverState *bs = child->opaque; @@ -1497,9 +1452,7 @@ static void bdrv_child_cb_detach(BdrvChild *child) bdrv_backing_detach(child); } - bdrv_unapply_subtree_drain(child, bs); - - assert_bdrv_graph_writable(bs); + assert_bdrv_graph_writable(); QLIST_REMOVE(child, next); if (child == bs->backing) { assert(child != bs->file); @@ -1715,8 +1668,8 @@ static int bdrv_open_driver(BlockDriverState *bs, BlockDriver *drv, assert(is_power_of_2(bs->bl.request_alignment)); for (i = 0; i < bs->quiesce_counter; i++) { - if (drv->bdrv_co_drain_begin) { - drv->bdrv_co_drain_begin(bs); + if (drv->bdrv_drain_begin) { + drv->bdrv_drain_begin(bs); } } @@ -2414,6 +2367,20 @@ static void bdrv_replace_child_abort(void *opaque) GLOBAL_STATE_CODE(); /* old_bs reference is transparently moved from @s to @s->child */ + if (!s->child->bs) { + /* + * The parents were undrained when removing old_bs from the child. New + * requests can't have been made, though, because the child was empty. + * + * TODO Make bdrv_replace_child_noperm() transactionable to avoid + * undraining the parent in the first place. Once this is done, having + * new_bs drained when calling bdrv_replace_child_tran() is not a + * requirement any more. + */ + bdrv_parent_drained_begin_single(s->child); + assert(!bdrv_parent_drained_poll_single(s->child)); + } + assert(s->child->quiesced_parent); bdrv_replace_child_noperm(s->child, s->old_bs); bdrv_unref(new_bs); } @@ -2429,12 +2396,19 @@ static TransactionActionDrv bdrv_replace_child_drv = { * * Note: real unref of old_bs is done only on commit. * + * Both @child->bs and @new_bs (if non-NULL) must be drained. @new_bs must be + * kept drained until the transaction is completed. + * * The function doesn't update permissions, caller is responsible for this. 
*/ static void bdrv_replace_child_tran(BdrvChild *child, BlockDriverState *new_bs, Transaction *tran) { BdrvReplaceChildState *s = g_new(BdrvReplaceChildState, 1); + + assert(child->quiesced_parent); + assert(!new_bs || new_bs->quiesce_counter); + *s = (BdrvReplaceChildState) { .child = child, .old_bs = child->bs, @@ -2523,8 +2497,12 @@ static int bdrv_node_refresh_perm(BlockDriverState *bs, BlockReopenQueue *q, return 0; } -static int bdrv_list_refresh_perms(GSList *list, BlockReopenQueue *q, - Transaction *tran, Error **errp) +/* + * @list is a product of bdrv_topological_dfs() (may be called several times) - + * a topologically sorted subgraph. + */ +static int bdrv_do_refresh_perms(GSList *list, BlockReopenQueue *q, + Transaction *tran, Error **errp) { int ret; BlockDriverState *bs; @@ -2546,6 +2524,24 @@ static int bdrv_list_refresh_perms(GSList *list, BlockReopenQueue *q, return 0; } +/* + * @list is any list of nodes. List is completed by all subtrees and + * topologically sorted. It's not a problem if some node occurs in the @list + * several times. + */ +static int bdrv_list_refresh_perms(GSList *list, BlockReopenQueue *q, + Transaction *tran, Error **errp) +{ + g_autoptr(GHashTable) found = g_hash_table_new(NULL, NULL); + g_autoptr(GSList) refresh_list = NULL; + + for ( ; list; list = list->next) { + refresh_list = bdrv_topological_dfs(refresh_list, found, list->data); + } + + return bdrv_do_refresh_perms(refresh_list, q, tran, errp); +} + void bdrv_get_cumulative_perm(BlockDriverState *bs, uint64_t *perm, uint64_t *shared_perm) { @@ -2593,15 +2589,24 @@ char *bdrv_perm_names(uint64_t perm) } -static int bdrv_refresh_perms(BlockDriverState *bs, Error **errp) +/* @tran is allowed to be NULL. In this case no rollback is possible */ +static int bdrv_refresh_perms(BlockDriverState *bs, Transaction *tran, + Error **errp) { int ret; - Transaction *tran = tran_new(); + Transaction *local_tran = NULL; g_autoptr(GSList) list = bdrv_topological_dfs(NULL, NULL, bs); GLOBAL_STATE_CODE(); - ret = bdrv_list_refresh_perms(list, NULL, tran, errp); - tran_finalize(tran, ret); + if (!tran) { + tran = local_tran = tran_new(); + } + + ret = bdrv_do_refresh_perms(list, NULL, tran, errp); + + if (local_tran) { + tran_finalize(local_tran, ret); + } return ret; } @@ -2617,7 +2622,7 @@ int bdrv_child_try_set_perm(BdrvChild *c, uint64_t perm, uint64_t shared, bdrv_child_set_perm(c, perm, shared, tran); - ret = bdrv_refresh_perms(c->bs, &local_err); + ret = bdrv_refresh_perms(c->bs, tran, &local_err); tran_finalize(tran, ret); @@ -2826,14 +2831,41 @@ uint64_t bdrv_qapi_perm_to_blk_perm(BlockPermission qapi_perm) return permissions[qapi_perm]; } +/* + * Replaces the node that a BdrvChild points to without updating permissions. + * + * If @new_bs is non-NULL, the parent of @child must already be drained through + * @child. + */ static void bdrv_replace_child_noperm(BdrvChild *child, BlockDriverState *new_bs) { BlockDriverState *old_bs = child->bs; int new_bs_quiesce_counter; - int drain_saldo; assert(!child->frozen); + + /* + * If we want to change the BdrvChild to point to a drained node as its new + * child->bs, we need to make sure that its new parent is drained, too. 
In + * other words, either child->quiesce_parent must already be true or we must + * be able to set it and keep the parent's quiesce_counter consistent with + * that, but without polling or starting new requests (this function + * guarantees that it doesn't poll, and starting new requests would be + * against the invariants of drain sections). + * + * To keep things simple, we pick the first option (child->quiesce_parent + * must already be true). We also generalise the rule a bit to make it + * easier to verify in callers and more likely to be covered in test cases: + * The parent must be quiesced through this child even if new_bs isn't + * currently drained. + * + * The only exception is for callers that always pass new_bs == NULL. In + * this case, we obviously never need to consider the case of a drained + * new_bs, so we can keep the callers simpler by allowing them not to drain + * the parent. + */ + assert(!new_bs || child->quiesced_parent); assert(old_bs != new_bs); GLOBAL_STATE_CODE(); @@ -2841,59 +2873,33 @@ static void bdrv_replace_child_noperm(BdrvChild *child, assert(bdrv_get_aio_context(old_bs) == bdrv_get_aio_context(new_bs)); } - new_bs_quiesce_counter = (new_bs ? new_bs->quiesce_counter : 0); - drain_saldo = new_bs_quiesce_counter - child->parent_quiesce_counter; - - /* - * If the new child node is drained but the old one was not, flush - * all outstanding requests to the old child node. - */ - while (drain_saldo > 0 && child->klass->drained_begin) { - bdrv_parent_drained_begin_single(child, true); - drain_saldo--; - } - + /* TODO Pull this up into the callers to avoid polling here */ + bdrv_graph_wrlock(); if (old_bs) { - /* Detach first so that the recursive drain sections coming from @child - * are already gone and we only end the drain sections that came from - * elsewhere. */ if (child->klass->detach) { child->klass->detach(child); } - assert_bdrv_graph_writable(old_bs); QLIST_REMOVE(child, next_parent); } child->bs = new_bs; if (new_bs) { - assert_bdrv_graph_writable(new_bs); QLIST_INSERT_HEAD(&new_bs->parents, child, next_parent); - - /* - * Detaching the old node may have led to the new node's - * quiesce_counter having been decreased. Not a problem, we - * just need to recognize this here and then invoke - * drained_end appropriately more often. - */ - assert(new_bs->quiesce_counter <= new_bs_quiesce_counter); - drain_saldo += new_bs->quiesce_counter - new_bs_quiesce_counter; - - /* Attach only after starting new drained sections, so that recursive - * drain sections coming from @child don't get an extra .drained_begin - * callback. */ if (child->klass->attach) { child->klass->attach(child); } } + bdrv_graph_wrunlock(); /* - * If the old child node was drained but the new one is not, allow - * requests to come in only after the new node has been attached. + * If the parent was drained through this BdrvChild previously, but new_bs + * is not drained, allow requests to come in only after the new node has + * been attached. */ - while (drain_saldo < 0 && child->klass->drained_end) { + new_bs_quiesce_counter = (new_bs ? new_bs->quiesce_counter : 0); + if (!new_bs_quiesce_counter && child->quiesced_parent) { bdrv_parent_drained_end_single(child); - drain_saldo++; } } @@ -3026,6 +3032,24 @@ static BdrvChild *bdrv_attach_child_common(BlockDriverState *child_bs, } bdrv_ref(child_bs); + /* + * Let every new BdrvChild start with a drained parent. Inserting the child + * in the graph with bdrv_replace_child_noperm() will undrain it if + * @child_bs is not drained. 
+ * + * The child was only just created and is not yet visible in global state + * until bdrv_replace_child_noperm() inserts it into the graph, so nobody + * could have sent requests and polling is not necessary. + * + * Note that this means that the parent isn't fully drained yet, we only + * stop new requests from coming in. This is fine, we don't care about the + * old requests here, they are not for this child. If another place enters a + * drain section for the same parent, but wants it to be fully quiesced, it + * will not run most of the the code in .drained_begin() again (which is not + * a problem, we already did this), but it will still poll until the parent + * is fully quiesced, so it will not be negatively affected either. + */ + bdrv_parent_drained_begin_single(new_child); bdrv_replace_child_noperm(new_child, child_bs); BdrvAttachChildCommonState *s = g_new(BdrvAttachChildCommonState, 1); @@ -3070,30 +3094,6 @@ static BdrvChild *bdrv_attach_child_noperm(BlockDriverState *parent_bs, tran, errp); } -static void bdrv_detach_child(BdrvChild *child) -{ - BlockDriverState *old_bs = child->bs; - - GLOBAL_STATE_CODE(); - bdrv_replace_child_noperm(child, NULL); - bdrv_child_free(child); - - if (old_bs) { - /* - * Update permissions for old node. We're just taking a parent away, so - * we're loosening restrictions. Errors of permission update are not - * fatal in this case, ignore them. - */ - bdrv_refresh_perms(old_bs, NULL); - - /* - * When the parent requiring a non-default AioContext is removed, the - * node moves back to the main AioContext - */ - bdrv_try_change_aio_context(old_bs, qemu_get_aio_context(), NULL, NULL); - } -} - /* * This function steals the reference to child_bs from the caller. * That reference is later dropped by bdrv_root_unref_child(). @@ -3125,7 +3125,7 @@ BdrvChild *bdrv_root_attach_child(BlockDriverState *child_bs, goto out; } - ret = bdrv_refresh_perms(child_bs, errp); + ret = bdrv_refresh_perms(child_bs, tran, errp); out: tran_finalize(tran, ret); @@ -3166,7 +3166,7 @@ BdrvChild *bdrv_attach_child(BlockDriverState *parent_bs, goto out; } - ret = bdrv_refresh_perms(parent_bs, errp); + ret = bdrv_refresh_perms(parent_bs, tran, errp); if (ret < 0) { goto out; } @@ -3182,12 +3182,28 @@ out: /* Callers must ensure that child->frozen is false. */ void bdrv_root_unref_child(BdrvChild *child) { - BlockDriverState *child_bs; + BlockDriverState *child_bs = child->bs; GLOBAL_STATE_CODE(); + bdrv_replace_child_noperm(child, NULL); + bdrv_child_free(child); + + if (child_bs) { + /* + * Update permissions for old node. We're just taking a parent away, so + * we're loosening restrictions. Errors of permission update are not + * fatal in this case, ignore them. 
+ */ + bdrv_refresh_perms(child_bs, NULL, NULL); + + /* + * When the parent requiring a non-default AioContext is removed, the + * node moves back to the main AioContext + */ + bdrv_try_change_aio_context(child_bs, qemu_get_aio_context(), NULL, + NULL); + } - child_bs = child->bs; - bdrv_detach_child(child); bdrv_unref(child_bs); } @@ -3406,24 +3422,35 @@ static int bdrv_set_backing_noperm(BlockDriverState *bs, return bdrv_set_file_or_backing_noperm(bs, backing_hd, true, tran, errp); } -int bdrv_set_backing_hd(BlockDriverState *bs, BlockDriverState *backing_hd, - Error **errp) +int bdrv_set_backing_hd_drained(BlockDriverState *bs, + BlockDriverState *backing_hd, + Error **errp) { int ret; Transaction *tran = tran_new(); GLOBAL_STATE_CODE(); - bdrv_drained_begin(bs); + assert(bs->quiesce_counter > 0); ret = bdrv_set_backing_noperm(bs, backing_hd, tran, errp); if (ret < 0) { goto out; } - ret = bdrv_refresh_perms(bs, errp); + ret = bdrv_refresh_perms(bs, tran, errp); out: tran_finalize(tran, ret); + return ret; +} + +int bdrv_set_backing_hd(BlockDriverState *bs, BlockDriverState *backing_hd, + Error **errp) +{ + int ret; + GLOBAL_STATE_CODE(); + bdrv_drained_begin(bs); + ret = bdrv_set_backing_hd_drained(bs, backing_hd, errp); bdrv_drained_end(bs); return ret; @@ -4153,7 +4180,9 @@ static bool bdrv_recurse_has_child(BlockDriverState *bs, * returns a pointer to bs_queue, which is either the newly allocated * bs_queue, or the existing bs_queue being used. * - * bs must be drained between bdrv_reopen_queue() and bdrv_reopen_multiple(). + * bs is drained here and undrained by bdrv_reopen_queue_free(). + * + * To be called with bs->aio_context locked. */ static BlockReopenQueue *bdrv_reopen_queue_child(BlockReopenQueue *bs_queue, BlockDriverState *bs, @@ -4173,12 +4202,10 @@ static BlockReopenQueue *bdrv_reopen_queue_child(BlockReopenQueue *bs_queue, int flags; QemuOpts *opts; - /* Make sure that the caller remembered to use a drained section. This is - * important to avoid graph changes between the recursive queuing here and - * bdrv_reopen_multiple(). 
*/ - assert(bs->quiesce_counter > 0); GLOBAL_STATE_CODE(); + bdrv_drained_begin(bs); + if (bs_queue == NULL) { bs_queue = g_new0(BlockReopenQueue, 1); QTAILQ_INIT(bs_queue); @@ -4312,6 +4339,7 @@ static BlockReopenQueue *bdrv_reopen_queue_child(BlockReopenQueue *bs_queue, return bs_queue; } +/* To be called with bs->aio_context locked */ BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue, BlockDriverState *bs, QDict *options, bool keep_old_opts) @@ -4328,6 +4356,12 @@ void bdrv_reopen_queue_free(BlockReopenQueue *bs_queue) if (bs_queue) { BlockReopenQueueEntry *bs_entry, *next; QTAILQ_FOREACH_SAFE(bs_entry, bs_queue, entry, next) { + AioContext *ctx = bdrv_get_aio_context(bs_entry->state.bs); + + aio_context_acquire(ctx); + bdrv_drained_end(bs_entry->state.bs); + aio_context_release(ctx); + qobject_unref(bs_entry->state.explicit_options); qobject_unref(bs_entry->state.options); g_free(bs_entry); @@ -4361,7 +4395,6 @@ int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp) BlockReopenQueueEntry *bs_entry, *next; AioContext *ctx; Transaction *tran = tran_new(); - g_autoptr(GHashTable) found = NULL; g_autoptr(GSList) refresh_list = NULL; assert(qemu_get_current_aio_context() == qemu_get_aio_context()); @@ -4391,18 +4424,15 @@ int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp) bs_entry->prepared = true; } - found = g_hash_table_new(NULL, NULL); QTAILQ_FOREACH(bs_entry, bs_queue, entry) { BDRVReopenState *state = &bs_entry->state; - refresh_list = bdrv_topological_dfs(refresh_list, found, state->bs); + refresh_list = g_slist_prepend(refresh_list, state->bs); if (state->old_backing_bs) { - refresh_list = bdrv_topological_dfs(refresh_list, found, - state->old_backing_bs); + refresh_list = g_slist_prepend(refresh_list, state->old_backing_bs); } if (state->old_file_bs) { - refresh_list = bdrv_topological_dfs(refresh_list, found, - state->old_file_bs); + refresh_list = g_slist_prepend(refresh_list, state->old_file_bs); } } @@ -4475,18 +4505,16 @@ int bdrv_reopen(BlockDriverState *bs, QDict *opts, bool keep_old_opts, GLOBAL_STATE_CODE(); - bdrv_subtree_drained_begin(bs); + queue = bdrv_reopen_queue(NULL, bs, opts, keep_old_opts); + if (ctx != qemu_get_aio_context()) { aio_context_release(ctx); } - - queue = bdrv_reopen_queue(NULL, bs, opts, keep_old_opts); ret = bdrv_reopen_multiple(queue, errp); if (ctx != qemu_get_aio_context()) { aio_context_acquire(ctx); } - bdrv_subtree_drained_end(bs); return ret; } @@ -5067,23 +5095,24 @@ static void bdrv_remove_child(BdrvChild *child, Transaction *tran) } if (child->bs) { + BlockDriverState *bs = child->bs; + bdrv_drained_begin(bs); bdrv_replace_child_tran(child, NULL, tran); + bdrv_drained_end(bs); } tran_add(tran, &bdrv_remove_child_drv, child); } -/* - * A function to remove backing-chain child of @bs if exists: cow child for - * format nodes (always .backing) and filter child for filters (may be .file or - * .backing) - */ -static void bdrv_remove_filter_or_cow_child(BlockDriverState *bs, - Transaction *tran) +static void undrain_on_clean_cb(void *opaque) { - bdrv_remove_child(bdrv_filter_or_cow_child(bs), tran); + bdrv_drained_end(opaque); } +static TransactionActionDrv undrain_on_clean = { + .clean = undrain_on_clean_cb, +}; + static int bdrv_replace_node_noperm(BlockDriverState *from, BlockDriverState *to, bool auto_skip, Transaction *tran, @@ -5093,6 +5122,11 @@ static int bdrv_replace_node_noperm(BlockDriverState *from, GLOBAL_STATE_CODE(); + bdrv_drained_begin(from); + bdrv_drained_begin(to); + tran_add(tran, 
&undrain_on_clean, from); + tran_add(tran, &undrain_on_clean, to); + QLIST_FOREACH_SAFE(c, &from->parents, next_parent, next) { assert(c->bs == from); if (!should_update_child(c, to)) { @@ -5130,7 +5164,6 @@ static int bdrv_replace_node_common(BlockDriverState *from, Error **errp) { Transaction *tran = tran_new(); - g_autoptr(GHashTable) found = NULL; g_autoptr(GSList) refresh_list = NULL; BlockDriverState *to_cow_parent = NULL; int ret; @@ -5168,13 +5201,11 @@ static int bdrv_replace_node_common(BlockDriverState *from, } if (detach_subchain) { - bdrv_remove_filter_or_cow_child(to_cow_parent, tran); + bdrv_remove_child(bdrv_filter_or_cow_child(to_cow_parent), tran); } - found = g_hash_table_new(NULL, NULL); - - refresh_list = bdrv_topological_dfs(refresh_list, found, to); - refresh_list = bdrv_topological_dfs(refresh_list, found, from); + refresh_list = g_slist_prepend(refresh_list, to); + refresh_list = g_slist_prepend(refresh_list, from); ret = bdrv_list_refresh_perms(refresh_list, NULL, tran, errp); if (ret < 0) { @@ -5244,7 +5275,7 @@ int bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top, goto out; } - ret = bdrv_refresh_perms(bs_new, errp); + ret = bdrv_refresh_perms(bs_new, tran, errp); out: tran_finalize(tran, ret); @@ -5259,7 +5290,6 @@ int bdrv_replace_child_bs(BdrvChild *child, BlockDriverState *new_bs, { int ret; Transaction *tran = tran_new(); - g_autoptr(GHashTable) found = NULL; g_autoptr(GSList) refresh_list = NULL; BlockDriverState *old_bs = child->bs; @@ -5271,9 +5301,8 @@ int bdrv_replace_child_bs(BdrvChild *child, BlockDriverState *new_bs, bdrv_replace_child_tran(child, new_bs, tran); - found = g_hash_table_new(NULL, NULL); - refresh_list = bdrv_topological_dfs(refresh_list, found, old_bs); - refresh_list = bdrv_topological_dfs(refresh_list, found, new_bs); + refresh_list = g_slist_prepend(refresh_list, old_bs); + refresh_list = g_slist_prepend(refresh_list, new_bs); ret = bdrv_list_refresh_perms(refresh_list, NULL, tran, errp); @@ -5373,6 +5402,7 @@ int coroutine_fn bdrv_co_check(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix) { IO_CODE(); + assert_bdrv_graph_readable(); if (bs->drv == NULL) { return -ENOMEDIUM; } @@ -5595,7 +5625,7 @@ int bdrv_drop_intermediate(BlockDriverState *top, BlockDriverState *base, GLOBAL_STATE_CODE(); bdrv_ref(top); - bdrv_subtree_drained_begin(top); + bdrv_drained_begin(base); if (!top->drv || !base->drv) { goto exit; @@ -5668,7 +5698,7 @@ int bdrv_drop_intermediate(BlockDriverState *top, BlockDriverState *base, ret = 0; exit: - bdrv_subtree_drained_end(top); + bdrv_drained_end(base); bdrv_unref(top); return ret; } @@ -6544,7 +6574,7 @@ int bdrv_activate(BlockDriverState *bs, Error **errp) */ if (bs->open_flags & BDRV_O_INACTIVE) { bs->open_flags &= ~BDRV_O_INACTIVE; - ret = bdrv_refresh_perms(bs, errp); + ret = bdrv_refresh_perms(bs, NULL, errp); if (ret < 0) { bs->open_flags |= BDRV_O_INACTIVE; return ret; @@ -6588,6 +6618,7 @@ int coroutine_fn bdrv_co_invalidate_cache(BlockDriverState *bs, Error **errp) IO_CODE(); assert(!(bs->open_flags & BDRV_O_INACTIVE)); + assert_bdrv_graph_readable(); if (bs->drv->bdrv_co_invalidate_cache) { bs->drv->bdrv_co_invalidate_cache(bs, &local_err); @@ -6689,7 +6720,7 @@ static int bdrv_inactivate_recurse(BlockDriverState *bs) * We only tried to loosen restrictions, so errors are not fatal, ignore * them. 
*/ - bdrv_refresh_perms(bs, NULL); + bdrv_refresh_perms(bs, NULL, NULL); /* Recursively inactivate children */ QLIST_FOREACH(child, &bs->children, next) { @@ -6894,6 +6925,10 @@ bool bdrv_op_blocker_is_empty(BlockDriverState *bs) return true; } +/* + * Must not be called while holding the lock of an AioContext other than the + * current one. + */ void bdrv_img_create(const char *filename, const char *fmt, const char *base_filename, const char *base_fmt, char *options, uint64_t img_size, int flags, bool quiet, @@ -7181,7 +7216,6 @@ static void bdrv_detach_aio_context(BlockDriverState *bs) if (bs->quiesce_counter) { aio_enable_external(bs->aio_context); } - assert_bdrv_graph_writable(bs); bs->aio_context = NULL; } @@ -7195,7 +7229,6 @@ static void bdrv_attach_aio_context(BlockDriverState *bs, aio_disable_external(new_context); } - assert_bdrv_graph_writable(bs); bs->aio_context = new_context; if (bs->drv && bs->drv->bdrv_attach_aio_context) { @@ -7276,7 +7309,6 @@ static void bdrv_set_aio_context_commit(void *opaque) BlockDriverState *bs = (BlockDriverState *) state->bs; AioContext *new_context = state->new_ctx; AioContext *old_context = bdrv_get_aio_context(bs); - assert_bdrv_graph_writable(bs); /* * Take the old AioContex when detaching it from bs. diff --git a/block/blkdebug.c b/block/blkdebug.c index 4265ca125e..ca65b043f0 100644 --- a/block/blkdebug.c +++ b/block/blkdebug.c @@ -297,9 +297,7 @@ static int read_config(BDRVBlkdebugState *s, const char *filename, } } - qemu_config_parse_qdict(options, config_groups, &local_err); - if (local_err) { - error_propagate(errp, local_err); + if (!qemu_config_parse_qdict(options, config_groups, errp)) { ret = -EINVAL; goto fail; } diff --git a/block/block-backend.c b/block/block-backend.c index d98a96ff37..ba7bf1d6bc 100644 --- a/block/block-backend.c +++ b/block/block-backend.c @@ -129,7 +129,7 @@ static void blk_root_inherit_options(BdrvChildRole role, bool parent_is_format, } static void blk_root_drained_begin(BdrvChild *child); static bool blk_root_drained_poll(BdrvChild *child); -static void blk_root_drained_end(BdrvChild *child, int *drained_end_counter); +static void blk_root_drained_end(BdrvChild *child); static void blk_root_change_media(BdrvChild *child, bool load); static void blk_root_resize(BdrvChild *child); @@ -1424,6 +1424,27 @@ int coroutine_fn blk_co_pwritev(BlockBackend *blk, int64_t offset, return blk_co_pwritev_part(blk, offset, bytes, qiov, 0, flags); } +int coroutine_fn blk_co_block_status_above(BlockBackend *blk, + BlockDriverState *base, + int64_t offset, int64_t bytes, + int64_t *pnum, int64_t *map, + BlockDriverState **file) +{ + IO_CODE(); + return bdrv_co_block_status_above(blk_bs(blk), base, offset, bytes, pnum, + map, file); +} + +int coroutine_fn blk_co_is_allocated_above(BlockBackend *blk, + BlockDriverState *base, + bool include_base, int64_t offset, + int64_t bytes, int64_t *pnum) +{ + IO_CODE(); + return bdrv_co_is_allocated_above(blk_bs(blk), base, include_base, offset, + bytes, pnum); +} + typedef struct BlkRwCo { BlockBackend *blk; int64_t offset; @@ -1860,7 +1881,7 @@ static void send_qmp_error_event(BlockBackend *blk, BlockDriverState *bs = blk_bs(blk); optype = is_read ? IO_OPERATION_TYPE_READ : IO_OPERATION_TYPE_WRITE; - qapi_event_send_block_io_error(blk_name(blk), !!bs, + qapi_event_send_block_io_error(blk_name(blk), bs ? 
bdrv_get_node_name(bs) : NULL, optype, action, blk_iostatus_is_enabled(blk), error == ENOSPC, strerror(error)); @@ -2556,7 +2577,7 @@ static bool blk_root_drained_poll(BdrvChild *child) return busy || !!blk->in_flight; } -static void blk_root_drained_end(BdrvChild *child, int *drained_end_counter) +static void blk_root_drained_end(BdrvChild *child) { BlockBackend *blk = child->opaque; assert(blk->quiesce_counter); diff --git a/block/block-copy.c b/block/block-copy.c index bb947afdda..5e59d6262f 100644 --- a/block/block-copy.c +++ b/block/block-copy.c @@ -577,8 +577,9 @@ static coroutine_fn int block_copy_task_entry(AioTask *task) return ret; } -static int block_copy_block_status(BlockCopyState *s, int64_t offset, - int64_t bytes, int64_t *pnum) +static coroutine_fn int block_copy_block_status(BlockCopyState *s, + int64_t offset, + int64_t bytes, int64_t *pnum) { int64_t num; BlockDriverState *base; @@ -590,8 +591,8 @@ static int block_copy_block_status(BlockCopyState *s, int64_t offset, base = NULL; } - ret = bdrv_block_status_above(s->source->bs, base, offset, bytes, &num, - NULL, NULL); + ret = bdrv_co_block_status_above(s->source->bs, base, offset, bytes, &num, + NULL, NULL); if (ret < 0 || num < s->cluster_size) { /* * On error or if failed to obtain large enough chunk just fallback to @@ -613,8 +614,9 @@ static int block_copy_block_status(BlockCopyState *s, int64_t offset, * Check if the cluster starting at offset is allocated or not. * return via pnum the number of contiguous clusters sharing this allocation. */ -static int block_copy_is_cluster_allocated(BlockCopyState *s, int64_t offset, - int64_t *pnum) +static int coroutine_fn block_copy_is_cluster_allocated(BlockCopyState *s, + int64_t offset, + int64_t *pnum) { BlockDriverState *bs = s->source->bs; int64_t count, total_count = 0; @@ -624,7 +626,7 @@ static int block_copy_is_cluster_allocated(BlockCopyState *s, int64_t offset, assert(QEMU_IS_ALIGNED(offset, s->cluster_size)); while (true) { - ret = bdrv_is_allocated(bs, offset, bytes, &count); + ret = bdrv_co_is_allocated(bs, offset, bytes, &count); if (ret < 0) { return ret; } @@ -669,8 +671,9 @@ void block_copy_reset(BlockCopyState *s, int64_t offset, int64_t bytes) * @return 0 when the cluster at @offset was unallocated, * 1 otherwise, and -ret on error. 
*/ -int64_t block_copy_reset_unallocated(BlockCopyState *s, - int64_t offset, int64_t *count) +int64_t coroutine_fn block_copy_reset_unallocated(BlockCopyState *s, + int64_t offset, + int64_t *count) { int ret; int64_t clusters, bytes; diff --git a/block/block-gen.h b/block/block-gen.h index f80cf4897d..89b7daaa1f 100644 --- a/block/block-gen.h +++ b/block/block-gen.h @@ -30,20 +30,17 @@ /* Base structure for argument packing structures */ typedef struct BdrvPollCo { - BlockDriverState *bs; + AioContext *ctx; bool in_progress; - int ret; Coroutine *co; /* Keep pointer here for debugging */ } BdrvPollCo; -static inline int bdrv_poll_co(BdrvPollCo *s) +static inline void bdrv_poll_co(BdrvPollCo *s) { assert(!qemu_in_coroutine()); - bdrv_coroutine_enter(s->bs, s->co); - BDRV_POLL_WHILE(s->bs, s->in_progress); - - return s->ret; + aio_co_enter(s->ctx, s->co); + AIO_WAIT_WHILE(s->ctx, s->in_progress); } #endif /* BLOCK_BLOCK_GEN_H */ diff --git a/block/commit.c b/block/commit.c index 0029b31944..b346341767 100644 --- a/block/commit.c +++ b/block/commit.c @@ -155,8 +155,8 @@ static int coroutine_fn commit_run(Job *job, Error **errp) break; } /* Copy if allocated above the base */ - ret = bdrv_is_allocated_above(blk_bs(s->top), s->base_overlay, true, - offset, COMMIT_BUFFER_SIZE, &n); + ret = blk_co_is_allocated_above(s->top, s->base_overlay, true, + offset, COMMIT_BUFFER_SIZE, &n); copy = (ret > 0); trace_commit_one_iteration(s, offset, n, ret); if (copy) { diff --git a/block/copy-before-write.c b/block/copy-before-write.c index 4abaa7339e..70c4ba7432 100644 --- a/block/copy-before-write.c +++ b/block/copy-before-write.c @@ -432,7 +432,7 @@ static int cbw_open(BlockDriverState *bs, QDict *options, int flags, return -EINVAL; } - if (opts->has_bitmap) { + if (opts->bitmap) { bitmap = block_dirty_bitmap_lookup(opts->bitmap->node, opts->bitmap->name, NULL, errp); if (!bitmap) { @@ -522,7 +522,6 @@ BlockDriverState *bdrv_cbw_append(BlockDriverState *source, BlockCopyState **bcs, Error **errp) { - ERRP_GUARD(); BDRVCopyBeforeWriteState *state; BlockDriverState *top; QDict *opts; diff --git a/block/coroutines.h b/block/coroutines.h index 3a2bad564f..2a1e0b3c9d 100644 --- a/block/coroutines.h +++ b/block/coroutines.h @@ -37,9 +37,11 @@ * the I/O API. */ -int coroutine_fn bdrv_co_check(BlockDriverState *bs, - BdrvCheckResult *res, BdrvCheckMode fix); -int coroutine_fn bdrv_co_invalidate_cache(BlockDriverState *bs, Error **errp); +int coroutine_fn GRAPH_RDLOCK +bdrv_co_check(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix); + +int coroutine_fn GRAPH_RDLOCK +bdrv_co_invalidate_cache(BlockDriverState *bs, Error **errp); int coroutine_fn bdrv_co_common_block_status_above(BlockDriverState *bs, @@ -53,10 +55,11 @@ bdrv_co_common_block_status_above(BlockDriverState *bs, BlockDriverState **file, int *depth); -int coroutine_fn bdrv_co_readv_vmstate(BlockDriverState *bs, - QEMUIOVector *qiov, int64_t pos); -int coroutine_fn bdrv_co_writev_vmstate(BlockDriverState *bs, - QEMUIOVector *qiov, int64_t pos); +int coroutine_fn GRAPH_RDLOCK +bdrv_co_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos); + +int coroutine_fn GRAPH_RDLOCK +bdrv_co_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos); int coroutine_fn nbd_co_do_establish_connection(BlockDriverState *bs, bool blocking, @@ -71,7 +74,7 @@ nbd_co_do_establish_connection(BlockDriverState *bs, bool blocking, * the "I/O or GS" API. 
*/ -int generated_co_wrapper +int co_wrapper_mixed_bdrv_rdlock bdrv_common_block_status_above(BlockDriverState *bs, BlockDriverState *base, bool include_base, @@ -82,7 +85,7 @@ bdrv_common_block_status_above(BlockDriverState *bs, int64_t *map, BlockDriverState **file, int *depth); -int generated_co_wrapper +int co_wrapper_mixed nbd_do_establish_connection(BlockDriverState *bs, bool blocking, Error **errp); #endif /* BLOCK_COROUTINES_H */ diff --git a/block/crypto.c b/block/crypto.c index 2fb8add458..bbeb9f437c 100644 --- a/block/crypto.c +++ b/block/crypto.c @@ -703,7 +703,7 @@ static int coroutine_fn block_crypto_co_create_opts_luks(BlockDriver *drv, } /* Create protocol layer */ - ret = bdrv_create_file(filename, opts, errp); + ret = bdrv_co_create_file(filename, opts, errp); if (ret < 0) { goto fail; } diff --git a/block/dirty-bitmap.c b/block/dirty-bitmap.c index bf3dc0512a..956feeb2ae 100644 --- a/block/dirty-bitmap.c +++ b/block/dirty-bitmap.c @@ -388,7 +388,7 @@ void bdrv_release_named_dirty_bitmaps(BlockDriverState *bs) * not fail. * This function doesn't release corresponding BdrvDirtyBitmap. */ -static int coroutine_fn +int coroutine_fn bdrv_co_remove_persistent_dirty_bitmap(BlockDriverState *bs, const char *name, Error **errp) { @@ -399,45 +399,6 @@ bdrv_co_remove_persistent_dirty_bitmap(BlockDriverState *bs, const char *name, return 0; } -typedef struct BdrvRemovePersistentDirtyBitmapCo { - BlockDriverState *bs; - const char *name; - Error **errp; - int ret; -} BdrvRemovePersistentDirtyBitmapCo; - -static void coroutine_fn -bdrv_co_remove_persistent_dirty_bitmap_entry(void *opaque) -{ - BdrvRemovePersistentDirtyBitmapCo *s = opaque; - - s->ret = bdrv_co_remove_persistent_dirty_bitmap(s->bs, s->name, s->errp); - aio_wait_kick(); -} - -int bdrv_remove_persistent_dirty_bitmap(BlockDriverState *bs, const char *name, - Error **errp) -{ - if (qemu_in_coroutine()) { - return bdrv_co_remove_persistent_dirty_bitmap(bs, name, errp); - } else { - Coroutine *co; - BdrvRemovePersistentDirtyBitmapCo s = { - .bs = bs, - .name = name, - .errp = errp, - .ret = -EINPROGRESS, - }; - - co = qemu_coroutine_create(bdrv_co_remove_persistent_dirty_bitmap_entry, - &s); - bdrv_coroutine_enter(bs, co); - BDRV_POLL_WHILE(bs, s.ret == -EINPROGRESS); - - return s.ret; - } -} - bool bdrv_supports_persistent_dirty_bitmap(BlockDriverState *bs) { @@ -447,7 +408,7 @@ bdrv_supports_persistent_dirty_bitmap(BlockDriverState *bs) return false; } -static bool coroutine_fn +bool coroutine_fn bdrv_co_can_store_new_dirty_bitmap(BlockDriverState *bs, const char *name, uint32_t granularity, Error **errp) { @@ -470,51 +431,6 @@ bdrv_co_can_store_new_dirty_bitmap(BlockDriverState *bs, const char *name, return drv->bdrv_co_can_store_new_dirty_bitmap(bs, name, granularity, errp); } -typedef struct BdrvCanStoreNewDirtyBitmapCo { - BlockDriverState *bs; - const char *name; - uint32_t granularity; - Error **errp; - bool ret; - - bool in_progress; -} BdrvCanStoreNewDirtyBitmapCo; - -static void coroutine_fn bdrv_co_can_store_new_dirty_bitmap_entry(void *opaque) -{ - BdrvCanStoreNewDirtyBitmapCo *s = opaque; - - s->ret = bdrv_co_can_store_new_dirty_bitmap(s->bs, s->name, s->granularity, - s->errp); - s->in_progress = false; - aio_wait_kick(); -} - -bool bdrv_can_store_new_dirty_bitmap(BlockDriverState *bs, const char *name, - uint32_t granularity, Error **errp) -{ - IO_CODE(); - if (qemu_in_coroutine()) { - return bdrv_co_can_store_new_dirty_bitmap(bs, name, granularity, errp); - } else { - Coroutine *co; - 
BdrvCanStoreNewDirtyBitmapCo s = { - .bs = bs, - .name = name, - .granularity = granularity, - .errp = errp, - .in_progress = true, - }; - - co = qemu_coroutine_create(bdrv_co_can_store_new_dirty_bitmap_entry, - &s); - bdrv_coroutine_enter(bs, co); - BDRV_POLL_WHILE(bs, s.in_progress); - - return s.ret; - } -} - void bdrv_disable_dirty_bitmap(BdrvDirtyBitmap *bitmap) { bdrv_dirty_bitmaps_lock(bitmap->bs); @@ -541,7 +457,6 @@ BlockDirtyInfoList *bdrv_query_dirty_bitmaps(BlockDriverState *bs) info->count = bdrv_get_dirty_count(bm); info->granularity = bdrv_dirty_bitmap_granularity(bm); - info->has_name = !!bm->name; info->name = g_strdup(bm->name); info->recording = bdrv_dirty_bitmap_recording(bm); info->busy = bdrv_dirty_bitmap_busy(bm); diff --git a/block/export/export.c b/block/export/export.c index 7cc0c25c1c..28a91c9c42 100644 --- a/block/export/export.c +++ b/block/export/export.c @@ -114,7 +114,7 @@ BlockExport *blk_exp_add(BlockExportOptions *export, Error **errp) ctx = bdrv_get_aio_context(bs); aio_context_acquire(ctx); - if (export->has_iothread) { + if (export->iothread) { IOThread *iothread; AioContext *new_ctx; Error **set_context_errp; diff --git a/block/export/vduse-blk.c b/block/export/vduse-blk.c index f101c24c3f..350d6fdaf0 100644 --- a/block/export/vduse-blk.c +++ b/block/export/vduse-blk.c @@ -265,8 +265,7 @@ static int vduse_blk_exp_create(BlockExport *exp, BlockExportOptions *opts, } vblk_exp->num_queues = num_queues; vblk_exp->handler.blk = exp->blk; - vblk_exp->handler.serial = g_strdup(vblk_opts->has_serial ? - vblk_opts->serial : ""); + vblk_exp->handler.serial = g_strdup(vblk_opts->serial ?: ""); vblk_exp->handler.logical_block_size = logical_block_size; vblk_exp->handler.writable = opts->writable; diff --git a/block/gluster.c b/block/gluster.c index 7c90f7ba4b..7efc296399 100644 --- a/block/gluster.c +++ b/block/gluster.c @@ -830,7 +830,6 @@ static int qemu_gluster_open(BlockDriverState *bs, QDict *options, s->logfile = g_strdup(logfile ? logfile : GLUSTER_LOGFILE_DEFAULT); gconf->logfile = g_strdup(s->logfile); - gconf->has_logfile = true; s->glfs = qemu_gluster_init(gconf, filename, options, errp); if (!s->glfs) { @@ -917,7 +916,6 @@ static int qemu_gluster_reopen_prepare(BDRVReopenState *state, gconf->debug = s->debug; gconf->has_debug = true; gconf->logfile = g_strdup(s->logfile); - gconf->has_logfile = true; /* * If 'state->bs->exact_filename' is empty, 'state->options' should contain @@ -1162,7 +1160,6 @@ static int coroutine_fn qemu_gluster_co_create_opts(BlockDriver *drv, if (!gconf->logfile) { gconf->logfile = g_strdup(GLUSTER_LOGFILE_DEFAULT); } - gconf->has_logfile = true; ret = qemu_gluster_parse(gconf, filename, NULL, errp); if (ret < 0) { diff --git a/block/graph-lock.c b/block/graph-lock.c new file mode 100644 index 0000000000..454c31e691 --- /dev/null +++ b/block/graph-lock.c @@ -0,0 +1,275 @@ +/* + * Graph lock: rwlock to protect block layer graph manipulations (add/remove + * edges and nodes) + * + * Copyright (c) 2022 Red Hat + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/main-loop.h"
+#include "block/graph-lock.h"
+#include "block/block.h"
+#include "block/block_int.h"
+
+/* Dummy lock object to use for Thread Safety Analysis (TSA) */
+BdrvGraphLock graph_lock;
+
+/* Protects the list of aiocontext and orphaned_reader_count */
+static QemuMutex aio_context_list_lock;
+
+/* Written and read with atomic operations. */
+static int has_writer;
+
+/*
+ * A reader coroutine could move from one AioContext to another.
+ * If this happens, there is no problem from the point of view of
+ * counters. The problem is that the total count becomes
+ * unbalanced if one of the two AioContexts gets deleted.
+ * The count of readers must remain correct, so the AioContext's
+ * balance is transferred to this global variable.
+ * Protected by aio_context_list_lock.
+ */
+static uint32_t orphaned_reader_count;
+
+/* Queue of readers waiting for the writer to finish */
+static CoQueue reader_queue;
+
+struct BdrvGraphRWlock {
+    /* How many readers are currently reading the graph. */
+    uint32_t reader_count;
+
+    /*
+     * List of BdrvGraphRWlock kept in graph-lock.c
+     * Protected by aio_context_list_lock
+     */
+    QTAILQ_ENTRY(BdrvGraphRWlock) next_aio;
+};
+
+/*
+ * List of BdrvGraphRWlock. This list ensures that each BdrvGraphRWlock
+ * can safely modify only its own counter, avoiding reads/writes of the
+ * others and thus improving performance by avoiding cacheline bounces.
+ */
+static QTAILQ_HEAD(, BdrvGraphRWlock) aio_context_list =
+    QTAILQ_HEAD_INITIALIZER(aio_context_list);
+
+static void __attribute__((__constructor__)) bdrv_init_graph_lock(void)
+{
+    qemu_mutex_init(&aio_context_list_lock);
+    qemu_co_queue_init(&reader_queue);
+}
+
+void register_aiocontext(AioContext *ctx)
+{
+    ctx->bdrv_graph = g_new0(BdrvGraphRWlock, 1);
+    QEMU_LOCK_GUARD(&aio_context_list_lock);
+    assert(ctx->bdrv_graph->reader_count == 0);
+    QTAILQ_INSERT_TAIL(&aio_context_list, ctx->bdrv_graph, next_aio);
+}
+
+void unregister_aiocontext(AioContext *ctx)
+{
+    QEMU_LOCK_GUARD(&aio_context_list_lock);
+    orphaned_reader_count += ctx->bdrv_graph->reader_count;
+    QTAILQ_REMOVE(&aio_context_list, ctx->bdrv_graph, next_aio);
+    g_free(ctx->bdrv_graph);
+}
+
+static uint32_t reader_count(void)
+{
+    BdrvGraphRWlock *brdv_graph;
+    uint32_t rd;
+
+    QEMU_LOCK_GUARD(&aio_context_list_lock);
+
+    /* rd can temporarily be negative, but the total will *always* be >= 0 */
+    rd = orphaned_reader_count;
+    QTAILQ_FOREACH(brdv_graph, &aio_context_list, next_aio) {
+        rd += qatomic_read(&brdv_graph->reader_count);
+    }
+
+    /* shouldn't overflow unless there are 2^31 readers */
+    assert((int32_t)rd >= 0);
+    return rd;
+}
+
+void bdrv_graph_wrlock(void)
+{
+    GLOBAL_STATE_CODE();
+    assert(!qatomic_read(&has_writer));
+
+    /* Make sure that constantly arriving new I/O doesn't cause starvation */
+    bdrv_drain_all_begin_nopoll();
+
+    /*
+     * reader_count == 0: this means readers will read has_writer as 1
+     * reader_count >= 1: we don't know if readers read has_writer == 0 or 1,
+     *                    but we need to wait.
+     * Wait by allowing other coroutines (and possible readers) to continue.
+     */
+    do {
+        /*
+         * has_writer must be 0 while polling, otherwise we get a deadlock if
+         * any callback involved during AIO_WAIT_WHILE() tries to acquire the
+         * reader lock.
+         */
+        qatomic_set(&has_writer, 0);
+        AIO_WAIT_WHILE(qemu_get_aio_context(), reader_count() >= 1);
+        qatomic_set(&has_writer, 1);
+
+        /*
+         * We want to only check reader_count() after has_writer = 1 is visible
+         * to other threads. That way no more readers can sneak in after we've
+         * determined reader_count() == 0.
+         */
+        smp_mb();
+    } while (reader_count() >= 1);
+
+    bdrv_drain_all_end();
+}
+
+void bdrv_graph_wrunlock(void)
+{
+    GLOBAL_STATE_CODE();
+    QEMU_LOCK_GUARD(&aio_context_list_lock);
+    assert(qatomic_read(&has_writer));
+
+    /*
+     * No need for memory barriers, this works in tandem with
+     * the slow path of rdlock() and both take the lock.
+     */
+    qatomic_store_release(&has_writer, 0);
+
+    /* Wake up all coroutines that are waiting to read the graph */
+    qemu_co_enter_all(&reader_queue, &aio_context_list_lock);
+}
+
+void coroutine_fn bdrv_graph_co_rdlock(void)
+{
+    BdrvGraphRWlock *bdrv_graph;
+    bdrv_graph = qemu_get_current_aio_context()->bdrv_graph;
+
+    /* Do not lock if in main thread */
+    if (qemu_in_main_thread()) {
+        return;
+    }
+
+    for (;;) {
+        qatomic_set(&bdrv_graph->reader_count,
+                    bdrv_graph->reader_count + 1);
+        /* make sure writer sees reader_count before we check has_writer */
+        smp_mb();
+
+        /*
+         * has_writer == 0: this means writer will read reader_count as >= 1
+         * has_writer == 1: we don't know if writer read reader_count == 0
+         *                  or > 0, but we need to wait anyway because
+         *                  it will write.
+         */
+        if (!qatomic_read(&has_writer)) {
+            break;
+        }
+
+        /*
+         * Synchronize access with reader_count() in bdrv_graph_wrlock().
+         * Case 1:
+         * If this critical section gets executed first, reader_count will
+         * decrease and the reader will go to sleep.
+         * Then the writer will read reader_count that does not take into
+         * account this reader, and if there's no other reader it will
+         * enter the write section.
+         * Case 2:
+         * If reader_count() critical section gets executed first,
+         * then writer will read reader_count >= 1.
+         * It will wait in AIO_WAIT_WHILE(), but once it releases the lock
+         * we will enter this critical section and call aio_wait_kick().
+         */
+        WITH_QEMU_LOCK_GUARD(&aio_context_list_lock) {
+            /*
+             * Additional check when we use the above lock to synchronize
+             * with bdrv_graph_wrunlock().
+             * Case 1:
+             * If this gets executed first, has_writer is still 1, so we reduce
+             * reader_count and go to sleep.
+             * Then the writer will set has_writer to 0 and wake up all readers,
+             * us included.
+             * Case 2:
+             * If bdrv_graph_wrunlock() critical section gets executed first,
+             * then it will set has_writer to 0 and wake up all other readers.
+             * Then we execute this critical section, and therefore must check
+             * again for has_writer, otherwise we sleep without any writer
+             * actually running.
+             */
+            if (!qatomic_read(&has_writer)) {
+                return;
+            }
+
+            /* slow path where reader sleeps */
+            bdrv_graph->reader_count--;
+            aio_wait_kick();
+            qemu_co_queue_wait(&reader_queue, &aio_context_list_lock);
+        }
+    }
+}
+
+void coroutine_fn bdrv_graph_co_rdunlock(void)
+{
+    BdrvGraphRWlock *bdrv_graph;
+    bdrv_graph = qemu_get_current_aio_context()->bdrv_graph;
+
+    /* Do not lock if in main thread */
+    if (qemu_in_main_thread()) {
+        return;
+    }
+
+    qatomic_store_release(&bdrv_graph->reader_count,
+                          bdrv_graph->reader_count - 1);
+    /* make sure writer sees reader_count before we check has_writer */
+    smp_mb();
+
+    /*
+     * has_writer == 0: this means the writer will read the decreased
+     *                  reader_count
+     * has_writer == 1: we don't know if the writer read reader_count old or
+     *                  new.
Therefore, kick again so on next iteration + * writer will for sure read the updated value. + */ + if (qatomic_read(&has_writer)) { + aio_wait_kick(); + } +} + +void bdrv_graph_rdlock_main_loop(void) +{ + GLOBAL_STATE_CODE(); + assert(!qemu_in_coroutine()); +} + +void bdrv_graph_rdunlock_main_loop(void) +{ + GLOBAL_STATE_CODE(); + assert(!qemu_in_coroutine()); +} + +void assert_bdrv_graph_readable(void) +{ + assert(qemu_in_main_thread() || reader_count()); +} + +void assert_bdrv_graph_writable(void) +{ + assert(qemu_in_main_thread()); + assert(qatomic_read(&has_writer)); +} diff --git a/block/io.c b/block/io.c index b9424024f9..d87788dfbb 100644 --- a/block/io.c +++ b/block/io.c @@ -45,53 +45,43 @@ static void bdrv_parent_cb_resize(BlockDriverState *bs); static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int64_t bytes, BdrvRequestFlags flags); -static void bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore, - bool ignore_bds_parents) +static void bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore) { BdrvChild *c, *next; QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) { - if (c == ignore || (ignore_bds_parents && c->klass->parent_is_bds)) { + if (c == ignore) { continue; } - bdrv_parent_drained_begin_single(c, false); - } -} - -static void bdrv_parent_drained_end_single_no_poll(BdrvChild *c, - int *drained_end_counter) -{ - assert(c->parent_quiesce_counter > 0); - c->parent_quiesce_counter--; - if (c->klass->drained_end) { - c->klass->drained_end(c, drained_end_counter); + bdrv_parent_drained_begin_single(c); } } void bdrv_parent_drained_end_single(BdrvChild *c) { - int drained_end_counter = 0; - AioContext *ctx = bdrv_child_get_parent_aio_context(c); IO_OR_GS_CODE(); - bdrv_parent_drained_end_single_no_poll(c, &drained_end_counter); - AIO_WAIT_WHILE(ctx, qatomic_read(&drained_end_counter) > 0); + + assert(c->quiesced_parent); + c->quiesced_parent = false; + + if (c->klass->drained_end) { + c->klass->drained_end(c); + } } -static void bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore, - bool ignore_bds_parents, - int *drained_end_counter) +static void bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore) { BdrvChild *c; QLIST_FOREACH(c, &bs->parents, next_parent) { - if (c == ignore || (ignore_bds_parents && c->klass->parent_is_bds)) { + if (c == ignore) { continue; } - bdrv_parent_drained_end_single_no_poll(c, drained_end_counter); + bdrv_parent_drained_end_single(c); } } -static bool bdrv_parent_drained_poll_single(BdrvChild *c) +bool bdrv_parent_drained_poll_single(BdrvChild *c) { if (c->klass->drained_poll) { return c->klass->drained_poll(c); @@ -115,17 +105,16 @@ static bool bdrv_parent_drained_poll(BlockDriverState *bs, BdrvChild *ignore, return busy; } -void bdrv_parent_drained_begin_single(BdrvChild *c, bool poll) +void bdrv_parent_drained_begin_single(BdrvChild *c) { - AioContext *ctx = bdrv_child_get_parent_aio_context(c); IO_OR_GS_CODE(); - c->parent_quiesce_counter++; + + assert(!c->quiesced_parent); + c->quiesced_parent = true; + if (c->klass->drained_begin) { c->klass->drained_begin(c); } - if (poll) { - AIO_WAIT_WHILE(ctx, bdrv_parent_drained_poll_single(c)); - } } static void bdrv_merge_limits(BlockLimits *dst, const BlockLimits *src) @@ -245,69 +234,14 @@ typedef struct { BlockDriverState *bs; bool done; bool begin; - bool recursive; bool poll; BdrvChild *parent; - bool ignore_bds_parents; - int *drained_end_counter; } BdrvCoDrainData; -static void coroutine_fn 
bdrv_drain_invoke_entry(void *opaque) -{ - BdrvCoDrainData *data = opaque; - BlockDriverState *bs = data->bs; - - if (data->begin) { - bs->drv->bdrv_co_drain_begin(bs); - } else { - bs->drv->bdrv_co_drain_end(bs); - } - - /* Set data->done and decrement drained_end_counter before bdrv_wakeup() */ - qatomic_mb_set(&data->done, true); - if (!data->begin) { - qatomic_dec(data->drained_end_counter); - } - bdrv_dec_in_flight(bs); - - g_free(data); -} - -/* Recursively call BlockDriver.bdrv_co_drain_begin/end callbacks */ -static void bdrv_drain_invoke(BlockDriverState *bs, bool begin, - int *drained_end_counter) -{ - BdrvCoDrainData *data; - - if (!bs->drv || (begin && !bs->drv->bdrv_co_drain_begin) || - (!begin && !bs->drv->bdrv_co_drain_end)) { - return; - } - - data = g_new(BdrvCoDrainData, 1); - *data = (BdrvCoDrainData) { - .bs = bs, - .done = false, - .begin = begin, - .drained_end_counter = drained_end_counter, - }; - - if (!begin) { - qatomic_inc(drained_end_counter); - } - - /* Make sure the driver callback completes during the polling phase for - * drain_begin. */ - bdrv_inc_in_flight(bs); - data->co = qemu_coroutine_create(bdrv_drain_invoke_entry, data); - aio_co_schedule(bdrv_get_aio_context(bs), data->co); -} - /* Returns true if BDRV_POLL_WHILE() should go into a blocking aio_poll() */ -bool bdrv_drain_poll(BlockDriverState *bs, bool recursive, - BdrvChild *ignore_parent, bool ignore_bds_parents) +bool bdrv_drain_poll(BlockDriverState *bs, BdrvChild *ignore_parent, + bool ignore_bds_parents) { - BdrvChild *child, *next; IO_OR_GS_CODE(); if (bdrv_parent_drained_poll(bs, ignore_parent, ignore_bds_parents)) { @@ -318,30 +252,18 @@ bool bdrv_drain_poll(BlockDriverState *bs, bool recursive, return true; } - if (recursive) { - assert(!ignore_bds_parents); - QLIST_FOREACH_SAFE(child, &bs->children, next, next) { - if (bdrv_drain_poll(child->bs, recursive, child, false)) { - return true; - } - } - } - return false; } -static bool bdrv_drain_poll_top_level(BlockDriverState *bs, bool recursive, +static bool bdrv_drain_poll_top_level(BlockDriverState *bs, BdrvChild *ignore_parent) { - return bdrv_drain_poll(bs, recursive, ignore_parent, false); + return bdrv_drain_poll(bs, ignore_parent, false); } -static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive, - BdrvChild *parent, bool ignore_bds_parents, +static void bdrv_do_drained_begin(BlockDriverState *bs, BdrvChild *parent, bool poll); -static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive, - BdrvChild *parent, bool ignore_bds_parents, - int *drained_end_counter); +static void bdrv_do_drained_end(BlockDriverState *bs, BdrvChild *parent); static void bdrv_co_drain_bh_cb(void *opaque) { @@ -354,14 +276,10 @@ static void bdrv_co_drain_bh_cb(void *opaque) aio_context_acquire(ctx); bdrv_dec_in_flight(bs); if (data->begin) { - assert(!data->drained_end_counter); - bdrv_do_drained_begin(bs, data->recursive, data->parent, - data->ignore_bds_parents, data->poll); + bdrv_do_drained_begin(bs, data->parent, data->poll); } else { assert(!data->poll); - bdrv_do_drained_end(bs, data->recursive, data->parent, - data->ignore_bds_parents, - data->drained_end_counter); + bdrv_do_drained_end(bs, data->parent); } aio_context_release(ctx); } else { @@ -374,11 +292,9 @@ static void bdrv_co_drain_bh_cb(void *opaque) } static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs, - bool begin, bool recursive, + bool begin, BdrvChild *parent, - bool ignore_bds_parents, - bool poll, - int *drained_end_counter) + bool poll) { 
BdrvCoDrainData data; Coroutine *self = qemu_coroutine_self(); @@ -394,11 +310,8 @@ static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs, .bs = bs, .done = false, .begin = begin, - .recursive = recursive, .parent = parent, - .ignore_bds_parents = ignore_bds_parents, .poll = poll, - .drained_end_counter = drained_end_counter, }; if (bs) { @@ -429,41 +342,22 @@ static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs, } } -void bdrv_do_drained_begin_quiesce(BlockDriverState *bs, - BdrvChild *parent, bool ignore_bds_parents) -{ - IO_OR_GS_CODE(); - assert(!qemu_in_coroutine()); - - /* Stop things in parent-to-child order */ - if (qatomic_fetch_inc(&bs->quiesce_counter) == 0) { - aio_disable_external(bdrv_get_aio_context(bs)); - } - - bdrv_parent_drained_begin(bs, parent, ignore_bds_parents); - bdrv_drain_invoke(bs, true, NULL); -} - -static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive, - BdrvChild *parent, bool ignore_bds_parents, +static void bdrv_do_drained_begin(BlockDriverState *bs, BdrvChild *parent, bool poll) { - BdrvChild *child, *next; + IO_OR_GS_CODE(); if (qemu_in_coroutine()) { - bdrv_co_yield_to_drain(bs, true, recursive, parent, ignore_bds_parents, - poll, NULL); + bdrv_co_yield_to_drain(bs, true, parent, poll); return; } - bdrv_do_drained_begin_quiesce(bs, parent, ignore_bds_parents); - - if (recursive) { - assert(!ignore_bds_parents); - bs->recursive_quiesce_counter++; - QLIST_FOREACH_SAFE(child, &bs->children, next, next) { - bdrv_do_drained_begin(child->bs, true, child, ignore_bds_parents, - false); + /* Stop things in parent-to-child order */ + if (qatomic_fetch_inc(&bs->quiesce_counter) == 0) { + aio_disable_external(bdrv_get_aio_context(bs)); + bdrv_parent_drained_begin(bs, parent); + if (bs->drv && bs->drv->bdrv_drain_begin) { + bs->drv->bdrv_drain_begin(bs); } } @@ -477,117 +371,50 @@ static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive, * nodes. */ if (poll) { - assert(!ignore_bds_parents); - BDRV_POLL_WHILE(bs, bdrv_drain_poll_top_level(bs, recursive, parent)); + BDRV_POLL_WHILE(bs, bdrv_drain_poll_top_level(bs, parent)); } } -void bdrv_drained_begin(BlockDriverState *bs) +void bdrv_do_drained_begin_quiesce(BlockDriverState *bs, BdrvChild *parent) { - IO_OR_GS_CODE(); - bdrv_do_drained_begin(bs, false, NULL, false, true); + bdrv_do_drained_begin(bs, parent, false); } -void bdrv_subtree_drained_begin(BlockDriverState *bs) +void bdrv_drained_begin(BlockDriverState *bs) { IO_OR_GS_CODE(); - bdrv_do_drained_begin(bs, true, NULL, false, true); + bdrv_do_drained_begin(bs, NULL, true); } /** * This function does not poll, nor must any of its recursively called - * functions. The *drained_end_counter pointee will be incremented - * once for every background operation scheduled, and decremented once - * the operation settles. Therefore, the pointer must remain valid - * until the pointee reaches 0. That implies that whoever sets up the - * pointee has to poll until it is 0. - * - * We use atomic operations to access *drained_end_counter, because - * (1) when called from bdrv_set_aio_context_ignore(), the subgraph of - * @bs may contain nodes in different AioContexts, - * (2) bdrv_drain_all_end() uses the same counter for all nodes, - * regardless of which AioContext they are in. + * functions. 
*/ -static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive, - BdrvChild *parent, bool ignore_bds_parents, - int *drained_end_counter) +static void bdrv_do_drained_end(BlockDriverState *bs, BdrvChild *parent) { - BdrvChild *child; int old_quiesce_counter; - assert(drained_end_counter != NULL); - if (qemu_in_coroutine()) { - bdrv_co_yield_to_drain(bs, false, recursive, parent, ignore_bds_parents, - false, drained_end_counter); + bdrv_co_yield_to_drain(bs, false, parent, false); return; } assert(bs->quiesce_counter > 0); /* Re-enable things in child-to-parent order */ - bdrv_drain_invoke(bs, false, drained_end_counter); - bdrv_parent_drained_end(bs, parent, ignore_bds_parents, - drained_end_counter); - old_quiesce_counter = qatomic_fetch_dec(&bs->quiesce_counter); if (old_quiesce_counter == 1) { - aio_enable_external(bdrv_get_aio_context(bs)); - } - - if (recursive) { - assert(!ignore_bds_parents); - bs->recursive_quiesce_counter--; - QLIST_FOREACH(child, &bs->children, next) { - bdrv_do_drained_end(child->bs, true, child, ignore_bds_parents, - drained_end_counter); + if (bs->drv && bs->drv->bdrv_drain_end) { + bs->drv->bdrv_drain_end(bs); } + bdrv_parent_drained_end(bs, parent); + aio_enable_external(bdrv_get_aio_context(bs)); } } void bdrv_drained_end(BlockDriverState *bs) { - int drained_end_counter = 0; - IO_OR_GS_CODE(); - bdrv_do_drained_end(bs, false, NULL, false, &drained_end_counter); - BDRV_POLL_WHILE(bs, qatomic_read(&drained_end_counter) > 0); -} - -void bdrv_drained_end_no_poll(BlockDriverState *bs, int *drained_end_counter) -{ - IO_CODE(); - bdrv_do_drained_end(bs, false, NULL, false, drained_end_counter); -} - -void bdrv_subtree_drained_end(BlockDriverState *bs) -{ - int drained_end_counter = 0; - IO_OR_GS_CODE(); - bdrv_do_drained_end(bs, true, NULL, false, &drained_end_counter); - BDRV_POLL_WHILE(bs, qatomic_read(&drained_end_counter) > 0); -} - -void bdrv_apply_subtree_drain(BdrvChild *child, BlockDriverState *new_parent) -{ - int i; - IO_OR_GS_CODE(); - - for (i = 0; i < new_parent->recursive_quiesce_counter; i++) { - bdrv_do_drained_begin(child->bs, true, child, false, true); - } -} - -void bdrv_unapply_subtree_drain(BdrvChild *child, BlockDriverState *old_parent) -{ - int drained_end_counter = 0; - int i; IO_OR_GS_CODE(); - - for (i = 0; i < old_parent->recursive_quiesce_counter; i++) { - bdrv_do_drained_end(child->bs, true, child, false, - &drained_end_counter); - } - - BDRV_POLL_WHILE(child->bs, qatomic_read(&drained_end_counter) > 0); + bdrv_do_drained_end(bs, NULL); } void bdrv_drain(BlockDriverState *bs) @@ -620,7 +447,7 @@ static bool bdrv_drain_all_poll(void) while ((bs = bdrv_next_all_states(bs))) { AioContext *aio_context = bdrv_get_aio_context(bs); aio_context_acquire(aio_context); - result |= bdrv_drain_poll(bs, false, NULL, true); + result |= bdrv_drain_poll(bs, NULL, true); aio_context_release(aio_context); } @@ -639,16 +466,11 @@ static bool bdrv_drain_all_poll(void) * NOTE: no new block jobs or BlockDriverStates can be created between * the bdrv_drain_all_begin() and bdrv_drain_all_end() calls. 
*/ -void bdrv_drain_all_begin(void) +void bdrv_drain_all_begin_nopoll(void) { BlockDriverState *bs = NULL; GLOBAL_STATE_CODE(); - if (qemu_in_coroutine()) { - bdrv_co_yield_to_drain(NULL, true, false, NULL, true, true, NULL); - return; - } - /* * bdrv queue is managed by record/replay, * waiting for finishing the I/O requests may @@ -670,9 +492,21 @@ void bdrv_drain_all_begin(void) AioContext *aio_context = bdrv_get_aio_context(bs); aio_context_acquire(aio_context); - bdrv_do_drained_begin(bs, false, NULL, true, false); + bdrv_do_drained_begin(bs, NULL, false); aio_context_release(aio_context); } +} + +void bdrv_drain_all_begin(void) +{ + BlockDriverState *bs = NULL; + + if (qemu_in_coroutine()) { + bdrv_co_yield_to_drain(NULL, true, NULL, true); + return; + } + + bdrv_drain_all_begin_nopoll(); /* Now poll the in-flight requests */ AIO_WAIT_WHILE(NULL, bdrv_drain_all_poll()); @@ -684,22 +518,19 @@ void bdrv_drain_all_begin(void) void bdrv_drain_all_end_quiesce(BlockDriverState *bs) { - int drained_end_counter = 0; GLOBAL_STATE_CODE(); g_assert(bs->quiesce_counter > 0); g_assert(!bs->refcnt); while (bs->quiesce_counter) { - bdrv_do_drained_end(bs, false, NULL, true, &drained_end_counter); + bdrv_do_drained_end(bs, NULL); } - BDRV_POLL_WHILE(bs, qatomic_read(&drained_end_counter) > 0); } void bdrv_drain_all_end(void) { BlockDriverState *bs = NULL; - int drained_end_counter = 0; GLOBAL_STATE_CODE(); /* @@ -715,13 +546,11 @@ void bdrv_drain_all_end(void) AioContext *aio_context = bdrv_get_aio_context(bs); aio_context_acquire(aio_context); - bdrv_do_drained_end(bs, false, NULL, true, &drained_end_counter); + bdrv_do_drained_end(bs, NULL); aio_context_release(aio_context); } assert(qemu_get_current_aio_context() == qemu_get_aio_context()); - AIO_WAIT_WHILE(NULL, qatomic_read(&drained_end_counter) > 0); - assert(bdrv_drain_all_count > 0); bdrv_drain_all_count--; } @@ -2711,6 +2540,17 @@ bdrv_co_common_block_status_above(BlockDriverState *bs, return ret; } +int coroutine_fn bdrv_co_block_status_above(BlockDriverState *bs, + BlockDriverState *base, + int64_t offset, int64_t bytes, + int64_t *pnum, int64_t *map, + BlockDriverState **file) +{ + IO_CODE(); + return bdrv_co_common_block_status_above(bs, base, false, true, offset, + bytes, pnum, map, file, NULL); +} + int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base, int64_t offset, int64_t bytes, int64_t *pnum, int64_t *map, BlockDriverState **file) @@ -2756,6 +2596,22 @@ int coroutine_fn bdrv_co_is_zero_fast(BlockDriverState *bs, int64_t offset, return (pnum == bytes) && (ret & BDRV_BLOCK_ZERO); } +int coroutine_fn bdrv_co_is_allocated(BlockDriverState *bs, int64_t offset, + int64_t bytes, int64_t *pnum) +{ + int ret; + int64_t dummy; + IO_CODE(); + + ret = bdrv_co_common_block_status_above(bs, bs, true, false, offset, + bytes, pnum ? 
pnum : &dummy, NULL, + NULL, NULL); + if (ret < 0) { + return ret; + } + return !!(ret & BDRV_BLOCK_ALLOCATED); +} + int bdrv_is_allocated(BlockDriverState *bs, int64_t offset, int64_t bytes, int64_t *pnum) { @@ -2772,6 +2628,29 @@ int bdrv_is_allocated(BlockDriverState *bs, int64_t offset, int64_t bytes, return !!(ret & BDRV_BLOCK_ALLOCATED); } +/* See bdrv_is_allocated_above for documentation */ +int coroutine_fn bdrv_co_is_allocated_above(BlockDriverState *top, + BlockDriverState *base, + bool include_base, int64_t offset, + int64_t bytes, int64_t *pnum) +{ + int depth; + int ret; + IO_CODE(); + + ret = bdrv_co_common_block_status_above(top, base, include_base, false, + offset, bytes, pnum, NULL, NULL, + &depth); + if (ret < 0) { + return ret; + } + + if (ret & BDRV_BLOCK_ALLOCATED) { + return depth; + } + return 0; +} + /* * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP] * @@ -2795,10 +2674,12 @@ int bdrv_is_allocated_above(BlockDriverState *top, int64_t bytes, int64_t *pnum) { int depth; - int ret = bdrv_common_block_status_above(top, base, include_base, false, - offset, bytes, pnum, NULL, NULL, - &depth); + int ret; IO_CODE(); + + ret = bdrv_common_block_status_above(top, base, include_base, false, + offset, bytes, pnum, NULL, NULL, + &depth); if (ret < 0) { return ret; } @@ -2816,6 +2697,7 @@ bdrv_co_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos) BlockDriverState *child_bs = bdrv_primary_bs(bs); int ret; IO_CODE(); + assert_bdrv_graph_readable(); ret = bdrv_check_qiov_request(pos, qiov->size, qiov, 0, NULL); if (ret < 0) { @@ -2848,6 +2730,7 @@ bdrv_co_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos) BlockDriverState *child_bs = bdrv_primary_bs(bs); int ret; IO_CODE(); + assert_bdrv_graph_readable(); ret = bdrv_check_qiov_request(pos, qiov->size, qiov, 0, NULL); if (ret < 0) { diff --git a/block/meson.build b/block/meson.build index b7c68b83a3..90011a2805 100644 --- a/block/meson.build +++ b/block/meson.build @@ -10,6 +10,7 @@ block_ss.add(files( 'blkverify.c', 'block-backend.c', 'block-copy.c', + 'graph-lock.c', 'commit.c', 'copy-on-read.c', 'preallocate.c', @@ -137,6 +138,7 @@ block_gen_c = custom_target('block-gen.c', output: 'block-gen.c', input: files( '../include/block/block-io.h', + '../include/block/dirty-bitmap.h', '../include/block/block-global-state.h', '../include/sysemu/block-backend-io.h', 'coroutines.h' diff --git a/block/monitor/block-hmp-cmds.c b/block/monitor/block-hmp-cmds.c index b6135e9bfe..0ff7c84039 100644 --- a/block/monitor/block-hmp-cmds.c +++ b/block/monitor/block-hmp-cmds.c @@ -241,7 +241,6 @@ void hmp_drive_mirror(Monitor *mon, const QDict *qdict) DriveMirror mirror = { .device = (char *)qdict_get_str(qdict, "device"), .target = (char *)filename, - .has_format = !!format, .format = (char *)format, .sync = full ? MIRROR_SYNC_MODE_FULL : MIRROR_SYNC_MODE_TOP, .has_mode = true, @@ -270,7 +269,6 @@ void hmp_drive_backup(Monitor *mon, const QDict *qdict) DriveBackup backup = { .device = (char *)device, .target = (char *)filename, - .has_format = !!format, .format = (char *)format, .sync = full ? MIRROR_SYNC_MODE_FULL : MIRROR_SYNC_MODE_TOP, .has_mode = true, @@ -360,9 +358,7 @@ void hmp_snapshot_blkdev(Monitor *mon, const QDict *qdict) } mode = reuse ? 
NEW_IMAGE_MODE_EXISTING : NEW_IMAGE_MODE_ABSOLUTE_PATHS; - qmp_blockdev_snapshot_sync(true, device, false, NULL, - filename, false, NULL, - !!format, format, + qmp_blockdev_snapshot_sync(device, NULL, filename, NULL, format, true, mode, &err); end: hmp_handle_error(mon, err); @@ -385,8 +381,7 @@ void hmp_snapshot_delete_blkdev_internal(Monitor *mon, const QDict *qdict) const char *id = qdict_get_try_str(qdict, "id"); Error *err = NULL; - qmp_blockdev_snapshot_delete_internal_sync(device, !!id, id, - true, name, &err); + qmp_blockdev_snapshot_delete_internal_sync(device, id, name, &err); hmp_handle_error(mon, err); } @@ -427,7 +422,7 @@ void hmp_nbd_server_start(Monitor *mon, const QDict *qdict) block_list = qmp_query_block(NULL); for (info = block_list; info; info = info->next) { - if (!info->value->has_inserted) { + if (!info->value->inserted) { continue; } @@ -460,7 +455,6 @@ void hmp_nbd_server_add(Monitor *mon, const QDict *qdict) NbdServerAddOptions export = { .device = (char *) device, - .has_name = !!name, .name = (char *) name, .has_writable = true, .writable = writable, @@ -495,7 +489,7 @@ void coroutine_fn hmp_block_resize(Monitor *mon, const QDict *qdict) int64_t size = qdict_get_int(qdict, "size"); Error *err = NULL; - qmp_block_resize(true, device, false, NULL, size, &err); + qmp_block_resize(device, NULL, size, &err); hmp_handle_error(mon, err); } @@ -506,11 +500,10 @@ void hmp_block_stream(Monitor *mon, const QDict *qdict) const char *base = qdict_get_try_str(qdict, "base"); int64_t speed = qdict_get_try_int(qdict, "speed", 0); - qmp_block_stream(true, device, device, base != NULL, base, false, NULL, - false, NULL, false, NULL, - qdict_haskey(qdict, "speed"), speed, true, - BLOCKDEV_ON_ERROR_REPORT, false, NULL, false, false, false, - false, &error); + qmp_block_stream(device, device, base, NULL, NULL, NULL, + qdict_haskey(qdict, "speed"), speed, + true, BLOCKDEV_ON_ERROR_REPORT, NULL, + false, false, false, false, &error); hmp_handle_error(mon, error); } @@ -534,10 +527,8 @@ void hmp_block_set_io_throttle(Monitor *mon, const QDict *qdict) * version has only one, so we must decide which one to pass. */ if (blk_by_name(device)) { - throttle.has_device = true; throttle.device = device; } else { - throttle.has_id = true; throttle.id = device; } @@ -551,7 +542,7 @@ void hmp_eject(Monitor *mon, const QDict *qdict) const char *device = qdict_get_str(qdict, "device"); Error *err = NULL; - qmp_eject(true, device, false, NULL, true, force, &err); + qmp_eject(device, NULL, true, force, &err); hmp_handle_error(mon, err); } @@ -635,18 +626,18 @@ static void print_block_info(Monitor *mon, BlockInfo *info, { ImageInfo *image_info; - assert(!info || !info->has_inserted || info->inserted == inserted); + assert(!info || !info->inserted || info->inserted == inserted); if (info && *info->device) { monitor_puts(mon, info->device); - if (inserted && inserted->has_node_name) { + if (inserted && inserted->node_name) { monitor_printf(mon, " (%s)", inserted->node_name); } } else { assert(info || inserted); monitor_puts(mon, - inserted && inserted->has_node_name ? inserted->node_name - : info && info->has_qdev ? info->qdev + inserted && inserted->node_name ? inserted->node_name + : info && info->qdev ? 
info->qdev : "<anonymous>"); } @@ -661,7 +652,7 @@ static void print_block_info(Monitor *mon, BlockInfo *info, } if (info) { - if (info->has_qdev) { + if (info->qdev) { monitor_printf(mon, " Attached to: %s\n", info->qdev); } if (info->has_io_status && info->io_status != BLOCK_DEVICE_IO_STATUS_OK) { @@ -686,7 +677,7 @@ static void print_block_info(Monitor *mon, BlockInfo *info, inserted->cache->direct ? ", direct" : "", inserted->cache->no_flush ? ", ignore flushes" : ""); - if (inserted->has_backing_file) { + if (inserted->backing_file) { monitor_printf(mon, " Backing file: %s " "(chain depth: %" PRId64 ")\n", @@ -735,7 +726,7 @@ static void print_block_info(Monitor *mon, BlockInfo *info, image_info = inserted->image; while (1) { bdrv_image_info_dump(image_info); - if (image_info->has_backing_image) { + if (image_info->backing_image) { image_info = image_info->backing_image; } else { break; @@ -769,8 +760,7 @@ void hmp_info_block(Monitor *mon, const QDict *qdict) monitor_printf(mon, "\n"); } - print_block_info(mon, info->value, info->value->has_inserted - ? info->value->inserted : NULL, + print_block_info(mon, info->value, info->value->inserted, verbose); printed = true; } @@ -784,7 +774,7 @@ void hmp_info_block(Monitor *mon, const QDict *qdict) /* Print node information */ blockdev_list = qmp_query_named_block_nodes(false, false, NULL); for (blockdev = blockdev_list; blockdev; blockdev = blockdev->next) { - assert(blockdev->value->has_node_name); + assert(blockdev->value->node_name); if (device && strcmp(device, blockdev->value->node_name)) { continue; } @@ -805,7 +795,7 @@ void hmp_info_blockstats(Monitor *mon, const QDict *qdict) stats_list = qmp_query_blockstats(false, false, NULL); for (stats = stats_list; stats; stats = stats->next) { - if (!stats->value->has_device) { + if (!stats->value->device) { continue; } diff --git a/block/parallels.c b/block/parallels.c index fa08c1104b..bbea2f2221 100644 --- a/block/parallels.c +++ b/block/parallels.c @@ -646,7 +646,7 @@ static int coroutine_fn parallels_co_create_opts(BlockDriver *drv, } /* Create and open the file (protocol layer) */ - ret = bdrv_create_file(filename, opts, errp); + ret = bdrv_co_create_file(filename, opts, errp); if (ret < 0) { goto done; } diff --git a/block/qapi-sysemu.c b/block/qapi-sysemu.c index 680c7ee342..0c7a1423de 100644 --- a/block/qapi-sysemu.c +++ b/block/qapi-sysemu.c @@ -116,8 +116,8 @@ static int do_open_tray(const char *blk_name, const char *qdev_id, return 0; } -void qmp_blockdev_open_tray(bool has_device, const char *device, - bool has_id, const char *id, +void qmp_blockdev_open_tray(const char *device, + const char *id, bool has_force, bool force, Error **errp) { @@ -127,9 +127,7 @@ void qmp_blockdev_open_tray(bool has_device, const char *device, if (!has_force) { force = false; } - rc = do_open_tray(has_device ? device : NULL, - has_id ? id : NULL, - force, &local_err); + rc = do_open_tray(device, id, force, &local_err); if (rc && rc != -ENOSYS && rc != -EINPROGRESS) { error_propagate(errp, local_err); return; @@ -137,16 +135,13 @@ void qmp_blockdev_open_tray(bool has_device, const char *device, error_free(local_err); } -void qmp_blockdev_close_tray(bool has_device, const char *device, - bool has_id, const char *id, +void qmp_blockdev_close_tray(const char *device, + const char *id, Error **errp) { BlockBackend *blk; Error *local_err = NULL; - device = has_device ? device : NULL; - id = has_id ? 
id : NULL; - blk = qmp_get_blk(device, id, errp); if (!blk) { return; @@ -173,17 +168,14 @@ void qmp_blockdev_close_tray(bool has_device, const char *device, } } -static void blockdev_remove_medium(bool has_device, const char *device, - bool has_id, const char *id, Error **errp) +static void blockdev_remove_medium(const char *device, const char *id, + Error **errp) { BlockBackend *blk; BlockDriverState *bs; AioContext *aio_context; bool has_attached_device; - device = has_device ? device : NULL; - id = has_id ? id : NULL; - blk = qmp_get_blk(device, id, errp); if (!blk) { return; @@ -232,7 +224,7 @@ out: void qmp_blockdev_remove_medium(const char *id, Error **errp) { - blockdev_remove_medium(false, NULL, true, id, errp); + blockdev_remove_medium(NULL, id, errp); } static void qmp_blockdev_insert_anon_medium(BlockBackend *blk, @@ -280,16 +272,13 @@ static void qmp_blockdev_insert_anon_medium(BlockBackend *blk, } } -static void blockdev_insert_medium(bool has_device, const char *device, - bool has_id, const char *id, +static void blockdev_insert_medium(const char *device, const char *id, const char *node_name, Error **errp) { BlockBackend *blk; BlockDriverState *bs; - blk = qmp_get_blk(has_device ? device : NULL, - has_id ? id : NULL, - errp); + blk = qmp_get_blk(device, id, errp); if (!blk) { return; } @@ -311,13 +300,13 @@ static void blockdev_insert_medium(bool has_device, const char *device, void qmp_blockdev_insert_medium(const char *id, const char *node_name, Error **errp) { - blockdev_insert_medium(false, NULL, true, id, node_name, errp); + blockdev_insert_medium(NULL, id, node_name, errp); } -void qmp_blockdev_change_medium(bool has_device, const char *device, - bool has_id, const char *id, +void qmp_blockdev_change_medium(const char *device, + const char *id, const char *filename, - bool has_format, const char *format, + const char *format, bool has_force, bool force, bool has_read_only, BlockdevChangeReadOnlyMode read_only, @@ -331,9 +320,7 @@ void qmp_blockdev_change_medium(bool has_device, const char *device, QDict *options = NULL; Error *err = NULL; - blk = qmp_get_blk(has_device ? device : NULL, - has_id ? id : NULL, - errp); + blk = qmp_get_blk(device, id, errp); if (!blk) { goto fail; } @@ -370,7 +357,7 @@ void qmp_blockdev_change_medium(bool has_device, const char *device, detect_zeroes = blk_get_detect_zeroes_from_root_state(blk); qdict_put_str(options, "detect-zeroes", detect_zeroes ? "on" : "off"); - if (has_format) { + if (format) { qdict_put_str(options, "driver", format); } @@ -379,9 +366,7 @@ void qmp_blockdev_change_medium(bool has_device, const char *device, goto fail; } - rc = do_open_tray(has_device ? device : NULL, - has_id ? 
id : NULL, - force, &err); + rc = do_open_tray(device, id, force, &err); if (rc && rc != -ENOSYS) { error_propagate(errp, err); goto fail; @@ -389,7 +374,7 @@ void qmp_blockdev_change_medium(bool has_device, const char *device, error_free(err); err = NULL; - blockdev_remove_medium(has_device, device, has_id, id, &err); + blockdev_remove_medium(device, id, &err); if (err) { error_propagate(errp, err); goto fail; @@ -401,7 +386,7 @@ void qmp_blockdev_change_medium(bool has_device, const char *device, goto fail; } - qmp_blockdev_close_tray(has_device, device, has_id, id, errp); + qmp_blockdev_close_tray(device, id, errp); fail: /* If the medium has been inserted, the device has its own reference, so @@ -410,8 +395,7 @@ fail: bdrv_unref(medium_bs); } -void qmp_eject(bool has_device, const char *device, - bool has_id, const char *id, +void qmp_eject(const char *device, const char *id, bool has_force, bool force, Error **errp) { Error *local_err = NULL; @@ -421,16 +405,14 @@ void qmp_eject(bool has_device, const char *device, force = false; } - rc = do_open_tray(has_device ? device : NULL, - has_id ? id : NULL, - force, &local_err); + rc = do_open_tray(device, id, force, &local_err); if (rc && rc != -ENOSYS) { error_propagate(errp, local_err); return; } error_free(local_err); - blockdev_remove_medium(has_device, device, has_id, id, errp); + blockdev_remove_medium(device, id, errp); } /* throttling disk I/O limits */ @@ -441,9 +423,7 @@ void qmp_block_set_io_throttle(BlockIOThrottle *arg, Error **errp) BlockBackend *blk; AioContext *aio_context; - blk = qmp_get_blk(arg->has_device ? arg->device : NULL, - arg->has_id ? arg->id : NULL, - errp); + blk = qmp_get_blk(arg->device, arg->id, errp); if (!blk) { return; } @@ -516,11 +496,8 @@ void qmp_block_set_io_throttle(BlockIOThrottle *arg, Error **errp) /* Enable I/O limits if they're not enabled yet, otherwise * just update the throttling group. */ if (!blk_get_public(blk)->throttle_group_member.throttle_state) { - blk_io_limits_enable(blk, - arg->has_group ? arg->group : - arg->has_device ? 
arg->device : - arg->id); - } else if (arg->has_group) { + blk_io_limits_enable(blk, arg->group ?: arg->device ?: arg->id); + } else if (arg->group) { blk_io_limits_update_group(blk, arg->group); } /* Set the new throttling configuration */ diff --git a/block/qapi.c b/block/qapi.c index cf557e3aea..fea808425b 100644 --- a/block/qapi.c +++ b/block/qapi.c @@ -71,13 +71,11 @@ BlockDeviceInfo *bdrv_block_device_info(BlockBackend *blk, }; if (bs->node_name[0]) { - info->has_node_name = true; info->node_name = g_strdup(bs->node_name); } backing = bdrv_cow_bs(bs); if (backing) { - info->has_backing_file = true; info->backing_file = g_strdup(backing->filename); } @@ -139,7 +137,6 @@ BlockDeviceInfo *bdrv_block_device_info(BlockBackend *blk, info->has_iops_size = cfg.op_size; info->iops_size = cfg.op_size; - info->has_group = true; info->group = g_strdup(throttle_group_get_name(&blkp->throttle_group_member)); } @@ -170,7 +167,6 @@ BlockDeviceInfo *bdrv_block_device_info(BlockBackend *blk, */ info->backing_file_depth++; bs0 = bdrv_filter_or_cow_bs(bs0); - (*p_image_info)->has_backing_image = true; p_image_info = &((*p_image_info)->backing_image); } else { break; @@ -301,26 +297,21 @@ void bdrv_query_image_info(BlockDriverState *bs, qapi_free_ImageInfo(info); goto out; } - info->has_format_specific = info->format_specific != NULL; - backing_filename = bs->backing_file; if (backing_filename[0] != '\0') { char *backing_filename2; info->backing_filename = g_strdup(backing_filename); - info->has_backing_filename = true; backing_filename2 = bdrv_get_full_backing_filename(bs, NULL); /* Always report the full_backing_filename if present, even if it's the * same as backing_filename. That they are same is useful info. */ if (backing_filename2) { info->full_backing_filename = g_strdup(backing_filename2); - info->has_full_backing_filename = true; } if (bs->backing_format[0]) { info->backing_filename_format = g_strdup(bs->backing_format); - info->has_backing_filename_format = true; } g_free(backing_filename2); } @@ -367,7 +358,6 @@ static void bdrv_query_info(BlockBackend *blk, BlockInfo **p_info, qdev = blk_get_attached_dev_id(blk); if (qdev && *qdev) { - info->has_qdev = true; info->qdev = qdev; } else { g_free(qdev); @@ -384,7 +374,6 @@ static void bdrv_query_info(BlockBackend *blk, BlockInfo **p_info, } if (bs && bs->drv) { - info->has_inserted = true; info->inserted = bdrv_block_device_info(blk, bs, false, errp); if (info->inserted == NULL) { goto err; @@ -411,23 +400,26 @@ static uint64List *uint64_list(uint64_t *list, int size) return out_list; } -static void bdrv_latency_histogram_stats(BlockLatencyHistogram *hist, - bool *not_null, - BlockLatencyHistogramInfo **info) +static BlockLatencyHistogramInfo * +bdrv_latency_histogram_stats(BlockLatencyHistogram *hist) { - *not_null = hist->bins != NULL; - if (*not_null) { - *info = g_new0(BlockLatencyHistogramInfo, 1); + BlockLatencyHistogramInfo *info; - (*info)->boundaries = uint64_list(hist->boundaries, hist->nbins - 1); - (*info)->bins = uint64_list(hist->bins, hist->nbins); + if (!hist->bins) { + return NULL; } + + info = g_new0(BlockLatencyHistogramInfo, 1); + info->boundaries = uint64_list(hist->boundaries, hist->nbins - 1); + info->bins = uint64_list(hist->bins, hist->nbins); + return info; } static void bdrv_query_blk_stats(BlockDeviceStats *ds, BlockBackend *blk) { BlockAcctStats *stats = blk_get_stats(blk); BlockAcctTimedStats *ts = NULL; + BlockLatencyHistogram *hgram; ds->rd_bytes = stats->nr_bytes[BLOCK_ACCT_READ]; ds->wr_bytes = 
stats->nr_bytes[BLOCK_ACCT_WRITE]; @@ -493,15 +485,13 @@ static void bdrv_query_blk_stats(BlockDeviceStats *ds, BlockBackend *blk) QAPI_LIST_PREPEND(ds->timed_stats, dev_stats); } - bdrv_latency_histogram_stats(&stats->latency_histogram[BLOCK_ACCT_READ], - &ds->has_rd_latency_histogram, - &ds->rd_latency_histogram); - bdrv_latency_histogram_stats(&stats->latency_histogram[BLOCK_ACCT_WRITE], - &ds->has_wr_latency_histogram, - &ds->wr_latency_histogram); - bdrv_latency_histogram_stats(&stats->latency_histogram[BLOCK_ACCT_FLUSH], - &ds->has_flush_latency_histogram, - &ds->flush_latency_histogram); + hgram = stats->latency_histogram; + ds->rd_latency_histogram + = bdrv_latency_histogram_stats(&hgram[BLOCK_ACCT_READ]); + ds->wr_latency_histogram + = bdrv_latency_histogram_stats(&hgram[BLOCK_ACCT_WRITE]); + ds->flush_latency_histogram + = bdrv_latency_histogram_stats(&hgram[BLOCK_ACCT_FLUSH]); } static BlockStats *bdrv_query_bds_stats(BlockDriverState *bs, @@ -526,16 +516,12 @@ static BlockStats *bdrv_query_bds_stats(BlockDriverState *bs, } if (bdrv_get_node_name(bs)[0]) { - s->has_node_name = true; s->node_name = g_strdup(bdrv_get_node_name(bs)); } s->stats->wr_highest_offset = stat64_get(&bs->wr_highest_offset); s->driver_specific = bdrv_get_specific_stats(bs); - if (s->driver_specific) { - s->has_driver_specific = true; - } parent_child = bdrv_primary_child(bs); if (!parent_child || @@ -564,7 +550,6 @@ static BlockStats *bdrv_query_bds_stats(BlockDriverState *bs, } } if (parent_child) { - s->has_parent = true; s->parent = bdrv_query_bds_stats(parent_child->bs, blk_level); } @@ -575,7 +560,6 @@ static BlockStats *bdrv_query_bds_stats(BlockDriverState *bs, * compatibility to when we put bs0->backing here, which might * be either) */ - s->has_backing = true; s->backing = bdrv_query_bds_stats(filter_or_cow_bs, blk_level); } @@ -640,12 +624,10 @@ BlockStatsList *qmp_query_blockstats(bool has_query_nodes, aio_context_acquire(ctx); s = bdrv_query_bds_stats(blk_bs(blk), true); - s->has_device = true; s->device = g_strdup(blk_name(blk)); qdev = blk_get_attached_dev_id(blk); if (qdev && *qdev) { - s->has_qdev = true; s->qdev = qdev; } else { g_free(qdev); @@ -822,16 +804,16 @@ void bdrv_image_info_dump(ImageInfo *info) qemu_printf("cleanly shut down: no\n"); } - if (info->has_backing_filename) { + if (info->backing_filename) { qemu_printf("backing file: %s", info->backing_filename); - if (!info->has_full_backing_filename) { + if (!info->full_backing_filename) { qemu_printf(" (cannot determine actual path)"); } else if (strcmp(info->backing_filename, info->full_backing_filename) != 0) { qemu_printf(" (actual path: %s)", info->full_backing_filename); } qemu_printf("\n"); - if (info->has_backing_filename_format) { + if (info->backing_filename_format) { qemu_printf("backing file format: %s\n", info->backing_filename_format); } @@ -865,7 +847,7 @@ void bdrv_image_info_dump(ImageInfo *info) } } - if (info->has_format_specific) { + if (info->format_specific) { qemu_printf("Format specific information:\n"); bdrv_image_info_specific_dump(info->format_specific); } diff --git a/block/qcow.c b/block/qcow.c index daa38839ab..5d99f00411 100644 --- a/block/qcow.c +++ b/block/qcow.c @@ -825,7 +825,7 @@ static int coroutine_fn qcow_co_create(BlockdevCreateOptions *opts, return -EINVAL; } - if (qcow_opts->has_encrypt && + if (qcow_opts->encrypt && qcow_opts->encrypt->format != Q_CRYPTO_BLOCK_FORMAT_QCOW) { error_setg(errp, "Unsupported encryption format"); @@ -853,7 +853,7 @@ static int coroutine_fn 
qcow_co_create(BlockdevCreateOptions *opts, header.size = cpu_to_be64(total_size); header_size = sizeof(header); backing_filename_len = 0; - if (qcow_opts->has_backing_file) { + if (qcow_opts->backing_file) { if (strcmp(qcow_opts->backing_file, "fat:")) { header.backing_file_offset = cpu_to_be64(header_size); backing_filename_len = strlen(qcow_opts->backing_file); @@ -861,7 +861,7 @@ static int coroutine_fn qcow_co_create(BlockdevCreateOptions *opts, header_size += backing_filename_len; } else { /* special backing file for vvfat */ - qcow_opts->has_backing_file = false; + qcow_opts->backing_file = NULL; } header.cluster_bits = 9; /* 512 byte cluster to avoid copying unmodified sectors */ @@ -876,7 +876,7 @@ static int coroutine_fn qcow_co_create(BlockdevCreateOptions *opts, header.l1_table_offset = cpu_to_be64(header_size); - if (qcow_opts->has_encrypt) { + if (qcow_opts->encrypt) { header.crypt_method = cpu_to_be32(QCOW_CRYPT_AES); crypto = qcrypto_block_create(qcow_opts->encrypt, "encrypt.", @@ -895,7 +895,7 @@ static int coroutine_fn qcow_co_create(BlockdevCreateOptions *opts, goto exit; } - if (qcow_opts->has_backing_file) { + if (qcow_opts->backing_file) { ret = blk_co_pwrite(qcow_blk, sizeof(header), backing_filename_len, qcow_opts->backing_file, 0); if (ret < 0) { @@ -973,7 +973,7 @@ static int coroutine_fn qcow_co_create_opts(BlockDriver *drv, } /* Create and open the file (protocol layer) */ - ret = bdrv_create_file(filename, opts, errp); + ret = bdrv_co_create_file(filename, opts, errp); if (ret < 0) { goto fail; } diff --git a/block/qcow2.c b/block/qcow2.c index 4d6666d3ff..bafbd077b9 100644 --- a/block/qcow2.c +++ b/block/qcow2.c @@ -3508,7 +3508,7 @@ qcow2_co_create(BlockdevCreateOptions *create_options, Error **errp) if (!qcow2_opts->has_preallocation) { qcow2_opts->preallocation = PREALLOC_MODE_OFF; } - if (qcow2_opts->has_backing_file && + if (qcow2_opts->backing_file && qcow2_opts->preallocation != PREALLOC_MODE_OFF && !qcow2_opts->extended_l2) { @@ -3517,7 +3517,7 @@ qcow2_co_create(BlockdevCreateOptions *create_options, Error **errp) ret = -EINVAL; goto out; } - if (qcow2_opts->has_backing_fmt && !qcow2_opts->has_backing_file) { + if (qcow2_opts->has_backing_fmt && !qcow2_opts->backing_file) { error_setg(errp, "Backing format cannot be used without backing file"); ret = -EINVAL; goto out; @@ -3558,7 +3558,7 @@ qcow2_co_create(BlockdevCreateOptions *create_options, Error **errp) ret = -EINVAL; goto out; } - if (qcow2_opts->data_file_raw && qcow2_opts->has_backing_file) { + if (qcow2_opts->data_file_raw && qcow2_opts->backing_file) { error_setg(errp, "Backing file and data-file-raw cannot be used at " "the same time"); ret = -EINVAL; @@ -3584,7 +3584,7 @@ qcow2_co_create(BlockdevCreateOptions *create_options, Error **errp) * backing file when specifying data_file_raw is an error * anyway. */ - assert(!qcow2_opts->has_backing_file); + assert(!qcow2_opts->backing_file); } if (qcow2_opts->data_file) { @@ -3752,7 +3752,7 @@ qcow2_co_create(BlockdevCreateOptions *create_options, Error **errp) } /* Want a backing file? There you go. */ - if (qcow2_opts->has_backing_file) { + if (qcow2_opts->backing_file) { const char *backing_format = NULL; if (qcow2_opts->has_backing_fmt) { @@ -3770,7 +3770,7 @@ qcow2_co_create(BlockdevCreateOptions *create_options, Error **errp) } /* Want encryption? There you go. 
*/ - if (qcow2_opts->has_encrypt) { + if (qcow2_opts->encrypt) { ret = qcow2_set_up_encryption(blk_bs(blk), qcow2_opts->encrypt, errp); if (ret < 0) { goto out; @@ -3871,7 +3871,7 @@ static int coroutine_fn qcow2_co_create_opts(BlockDriver *drv, } /* Create and open the file (protocol layer) */ - ret = bdrv_create_file(filename, opts, errp); + ret = bdrv_co_create_file(filename, opts, errp); if (ret < 0) { goto finish; } @@ -3886,7 +3886,7 @@ static int coroutine_fn qcow2_co_create_opts(BlockDriver *drv, /* Create and open an external data file (protocol layer) */ val = qdict_get_try_str(qdict, BLOCK_OPT_DATA_FILE); if (val) { - ret = bdrv_create_file(val, opts, errp); + ret = bdrv_co_create_file(val, opts, errp); if (ret < 0) { goto finish; } @@ -5195,7 +5195,6 @@ static ImageInfoSpecific *qcow2_get_specific_info(BlockDriverState *bs, .refcount_bits = s->refcount_bits, .has_bitmaps = !!bitmaps, .bitmaps = bitmaps, - .has_data_file = !!s->image_data_file, .data_file = g_strdup(s->image_data_file), .has_data_file_raw = has_data_file(bs), .data_file_raw = data_file_is_raw(bs), @@ -5226,7 +5225,6 @@ static ImageInfoSpecific *qcow2_get_specific_info(BlockDriverState *bs, memset(&encrypt_info->u, 0, sizeof(encrypt_info->u)); qapi_free_QCryptoBlockInfo(encrypt_info); - spec_info->u.qcow2.data->has_encrypt = true; spec_info->u.qcow2.data->encrypt = qencrypt; } @@ -5846,7 +5844,7 @@ static int coroutine_fn qcow2_co_amend(BlockDriverState *bs, BDRVQcow2State *s = bs->opaque; int ret = 0; - if (qopts->has_encrypt) { + if (qopts->encrypt) { if (!s->crypto) { error_setg(errp, "image is not encrypted, can't amend"); return -EOPNOTSUPP; @@ -5911,7 +5909,7 @@ void qcow2_signal_corruption(BlockDriverState *bs, bool fatal, int64_t offset, node_name = bdrv_get_node_name(bs); qapi_event_send_block_image_corrupted(bdrv_get_device_name(bs), - *node_name != '\0', node_name, + *node_name ? node_name : NULL, message, offset >= 0, offset, size >= 0, size, fatal); diff --git a/block/qed.c b/block/qed.c index 2f36ad342c..faa606618e 100644 --- a/block/qed.c +++ b/block/qed.c @@ -262,7 +262,7 @@ static bool coroutine_fn qed_plug_allocating_write_reqs(BDRVQEDState *s) assert(!s->allocating_write_reqs_plugged); if (s->allocating_acb != NULL) { /* Another allocating write came concurrently. This cannot happen - * from bdrv_qed_co_drain_begin, but it can happen when the timer runs. + * from bdrv_qed_drain_begin, but it can happen when the timer runs. 
*/ qemu_co_mutex_unlock(&s->table_lock); return false; @@ -282,9 +282,8 @@ static void coroutine_fn qed_unplug_allocating_write_reqs(BDRVQEDState *s) qemu_co_mutex_unlock(&s->table_lock); } -static void coroutine_fn qed_need_check_timer_entry(void *opaque) +static void coroutine_fn qed_need_check_timer(BDRVQEDState *s) { - BDRVQEDState *s = opaque; int ret; trace_qed_need_check_timer_cb(s); @@ -310,9 +309,20 @@ static void coroutine_fn qed_need_check_timer_entry(void *opaque) (void) ret; } +static void coroutine_fn qed_need_check_timer_entry(void *opaque) +{ + BDRVQEDState *s = opaque; + + qed_need_check_timer(opaque); + bdrv_dec_in_flight(s->bs); +} + static void qed_need_check_timer_cb(void *opaque) { + BDRVQEDState *s = opaque; Coroutine *co = qemu_coroutine_create(qed_need_check_timer_entry, opaque); + + bdrv_inc_in_flight(s->bs); qemu_coroutine_enter(co); } @@ -355,7 +365,7 @@ static void bdrv_qed_attach_aio_context(BlockDriverState *bs, } } -static void coroutine_fn bdrv_qed_co_drain_begin(BlockDriverState *bs) +static void bdrv_qed_drain_begin(BlockDriverState *bs) { BDRVQEDState *s = bs->opaque; @@ -363,8 +373,12 @@ static void coroutine_fn bdrv_qed_co_drain_begin(BlockDriverState *bs) * header is flushed. */ if (s->need_check_timer && timer_pending(s->need_check_timer)) { + Coroutine *co; + qed_cancel_need_check_timer(s); - qed_need_check_timer_entry(s); + co = qemu_coroutine_create(qed_need_check_timer_entry, s); + bdrv_inc_in_flight(bs); + aio_co_enter(bdrv_get_aio_context(bs), co); } } @@ -698,7 +712,7 @@ static int coroutine_fn bdrv_qed_co_create(BlockdevCreateOptions *opts, goto out; } - if (qed_opts->has_backing_file) { + if (qed_opts->backing_file) { header.features |= QED_F_BACKING_FILE; header.backing_filename_offset = sizeof(le_header); header.backing_filename_size = strlen(qed_opts->backing_file); @@ -764,7 +778,7 @@ static int coroutine_fn bdrv_qed_co_create_opts(BlockDriver *drv, } /* Create and open the file (protocol layer) */ - ret = bdrv_create_file(filename, opts, errp); + ret = bdrv_co_create_file(filename, opts, errp); if (ret < 0) { goto fail; } @@ -1647,7 +1661,7 @@ static BlockDriver bdrv_qed = { .bdrv_co_check = bdrv_qed_co_check, .bdrv_detach_aio_context = bdrv_qed_detach_aio_context, .bdrv_attach_aio_context = bdrv_qed_attach_aio_context, - .bdrv_co_drain_begin = bdrv_qed_co_drain_begin, + .bdrv_drain_begin = bdrv_qed_drain_begin, }; static void bdrv_qed_init(void) diff --git a/block/quorum.c b/block/quorum.c index f9e6539ceb..7f21c03f1f 100644 --- a/block/quorum.c +++ b/block/quorum.c @@ -202,7 +202,7 @@ static void quorum_report_bad(QuorumOpType type, uint64_t offset, msg = strerror(-ret); } - qapi_event_send_quorum_report_bad(type, !!msg, msg, node_name, start_sector, + qapi_event_send_quorum_report_bad(type, msg, node_name, start_sector, end_sector - start_sector); } diff --git a/block/raw-format.c b/block/raw-format.c index a68014ef0b..28905b09ee 100644 --- a/block/raw-format.c +++ b/block/raw-format.c @@ -433,7 +433,7 @@ static int coroutine_fn raw_co_create_opts(BlockDriver *drv, QemuOpts *opts, Error **errp) { - return bdrv_create_file(filename, opts, errp); + return bdrv_co_create_file(filename, opts, errp); } static int raw_open(BlockDriverState *bs, QDict *options, int flags, diff --git a/block/rbd.c b/block/rbd.c index f826410f40..3aa6aae0e0 100644 --- a/block/rbd.c +++ b/block/rbd.c @@ -536,13 +536,13 @@ static int qemu_rbd_do_create(BlockdevCreateOptions *options, int ret; assert(options->driver == BLOCKDEV_DRIVER_RBD); - if 
(opts->location->has_snapshot) { + if (opts->location->snapshot) { error_setg(errp, "Can't use snapshot name for image creation"); return -EINVAL; } #ifndef LIBRBD_SUPPORTS_ENCRYPTION - if (opts->has_encrypt) { + if (opts->encrypt) { error_setg(errp, "RBD library does not support image encryption"); return -ENOTSUP; } @@ -574,7 +574,7 @@ static int qemu_rbd_do_create(BlockdevCreateOptions *options, } #ifdef LIBRBD_SUPPORTS_ENCRYPTION - if (opts->has_encrypt) { + if (opts->encrypt) { rbd_image_t image; ret = rbd_open(io_ctx, opts->location->image, &image, NULL); @@ -686,7 +686,6 @@ static int coroutine_fn qemu_rbd_co_create_opts(BlockDriver *drv, goto exit; } rbd_opts->encrypt = encrypt; - rbd_opts->has_encrypt = !!encrypt; /* * Caution: while qdict_get_try_str() is fine, getting non-string @@ -697,11 +696,8 @@ static int coroutine_fn qemu_rbd_co_create_opts(BlockDriver *drv, loc = rbd_opts->location; loc->pool = g_strdup(qdict_get_try_str(options, "pool")); loc->conf = g_strdup(qdict_get_try_str(options, "conf")); - loc->has_conf = !!loc->conf; loc->user = g_strdup(qdict_get_try_str(options, "user")); - loc->has_user = !!loc->user; loc->q_namespace = g_strdup(qdict_get_try_str(options, "namespace")); - loc->has_q_namespace = !!loc->q_namespace; loc->image = g_strdup(qdict_get_try_str(options, "image")); keypairs = qdict_get_try_str(options, "=keyvalue-pairs"); @@ -767,7 +763,6 @@ static int qemu_rbd_connect(rados_t *cluster, rados_ioctx_t *io_ctx, return -EINVAL; } opts->key_secret = g_strdup(secretid); - opts->has_key_secret = true; } mon_host = qemu_rbd_mon_host(opts, &local_err); @@ -785,7 +780,7 @@ static int qemu_rbd_connect(rados_t *cluster, rados_ioctx_t *io_ctx, /* try default location when conf=NULL, but ignore failure */ r = rados_conf_read_file(*cluster, opts->conf); - if (opts->has_conf && r < 0) { + if (opts->conf && r < 0) { error_setg_errno(errp, -r, "error reading conf file %s", opts->conf); goto failed_shutdown; } @@ -833,7 +828,7 @@ static int qemu_rbd_connect(rados_t *cluster, rados_ioctx_t *io_ctx, } #ifdef HAVE_RBD_NAMESPACE_EXISTS - if (opts->has_q_namespace && strlen(opts->q_namespace) > 0) { + if (opts->q_namespace && strlen(opts->q_namespace) > 0) { bool exists; r = rbd_namespace_exists(*io_ctx, opts->q_namespace, &exists); @@ -991,7 +986,7 @@ static int qemu_rbd_open(BlockDriverState *bs, QDict *options, int flags, goto failed_open; } - if (opts->has_encrypt) { + if (opts->encrypt) { #ifdef LIBRBD_SUPPORTS_ENCRYPTION r = qemu_rbd_encryption_load(s->image, opts->encrypt, errp); if (r < 0) { diff --git a/block/replication.c b/block/replication.c index f1eed25e43..c62f48a874 100644 --- a/block/replication.c +++ b/block/replication.c @@ -374,9 +374,6 @@ static void reopen_backing_file(BlockDriverState *bs, bool writable, s->orig_secondary_read_only = bdrv_is_read_only(secondary_disk->bs); } - bdrv_subtree_drained_begin(hidden_disk->bs); - bdrv_subtree_drained_begin(secondary_disk->bs); - if (s->orig_hidden_read_only) { QDict *opts = qdict_new(); qdict_put_bool(opts, BDRV_OPT_READ_ONLY, !writable); @@ -401,9 +398,6 @@ static void reopen_backing_file(BlockDriverState *bs, bool writable, aio_context_acquire(ctx); } } - - bdrv_subtree_drained_end(hidden_disk->bs); - bdrv_subtree_drained_end(secondary_disk->bs); } static void backup_job_cleanup(BlockDriverState *bs) diff --git a/block/ssh.c b/block/ssh.c index 04726d4ecb..8508710f2f 100644 --- a/block/ssh.c +++ b/block/ssh.c @@ -643,7 +643,7 @@ static int connect_to_ssh(BDRVSSHState *s, BlockdevOptionsSsh *opts, unsigned 
int port = 0; int new_sock = -1; - if (opts->has_user) { + if (opts->user) { s->user = g_strdup(opts->user); } else { s->user = g_strdup(g_get_user_name()); diff --git a/block/stream.c b/block/stream.c index 694709bd25..8744ad103f 100644 --- a/block/stream.c +++ b/block/stream.c @@ -64,13 +64,16 @@ static int stream_prepare(Job *job) bdrv_cor_filter_drop(s->cor_filter_bs); s->cor_filter_bs = NULL; - bdrv_subtree_drained_begin(s->above_base); + /* + * bdrv_set_backing_hd() requires that unfiltered_bs is drained. Drain + * already here and use bdrv_set_backing_hd_drained() instead because + * the polling during drained_begin() might change the graph, and if we do + * this only later, we may end up working with the wrong base node (or it + * might even have gone away by the time we want to use it). + */ + bdrv_drained_begin(unfiltered_bs); base = bdrv_filter_or_cow_bs(s->above_base); - if (base) { - bdrv_ref(base); - } - unfiltered_base = bdrv_skip_filters(base); if (bdrv_cow_child(unfiltered_bs)) { @@ -82,7 +85,13 @@ static int stream_prepare(Job *job) } } - bdrv_set_backing_hd(unfiltered_bs, base, &local_err); + bdrv_set_backing_hd_drained(unfiltered_bs, base, &local_err); + + /* + * This call will do I/O, so the graph can change again from here on. + * We have already completed the graph change, so we are not in danger + * of operating on the wrong node any more if this happens. + */ ret = bdrv_change_backing_file(unfiltered_bs, base_id, base_fmt, false); if (local_err) { error_report_err(local_err); @@ -92,10 +101,7 @@ static int stream_prepare(Job *job) } out: - if (base) { - bdrv_unref(base); - } - bdrv_subtree_drained_end(s->above_base); + bdrv_drained_end(unfiltered_bs); return ret; } diff --git a/block/throttle.c b/block/throttle.c index 131eba3ab4..88851c84f4 100644 --- a/block/throttle.c +++ b/block/throttle.c @@ -214,7 +214,7 @@ static void throttle_reopen_abort(BDRVReopenState *reopen_state) reopen_state->opaque = NULL; } -static void coroutine_fn throttle_co_drain_begin(BlockDriverState *bs) +static void throttle_drain_begin(BlockDriverState *bs) { ThrottleGroupMember *tgm = bs->opaque; if (qatomic_fetch_inc(&tgm->io_limits_disabled) == 0) { @@ -222,7 +222,7 @@ static void coroutine_fn throttle_co_drain_begin(BlockDriverState *bs) } } -static void coroutine_fn throttle_co_drain_end(BlockDriverState *bs) +static void throttle_drain_end(BlockDriverState *bs) { ThrottleGroupMember *tgm = bs->opaque; assert(tgm->io_limits_disabled); @@ -261,8 +261,8 @@ static BlockDriver bdrv_throttle = { .bdrv_reopen_commit = throttle_reopen_commit, .bdrv_reopen_abort = throttle_reopen_abort, - .bdrv_co_drain_begin = throttle_co_drain_begin, - .bdrv_co_drain_end = throttle_co_drain_end, + .bdrv_drain_begin = throttle_drain_begin, + .bdrv_drain_end = throttle_drain_end, .is_filter = true, .strong_runtime_opts = throttle_strong_runtime_opts, diff --git a/block/vdi.c b/block/vdi.c index c0c111c4b9..479bcfe820 100644 --- a/block/vdi.c +++ b/block/vdi.c @@ -934,7 +934,7 @@ static int coroutine_fn vdi_co_create_opts(BlockDriver *drv, qdict = qemu_opts_to_qdict_filtered(opts, NULL, &vdi_create_opts, true); /* Create and open the file (protocol layer) */ - ret = bdrv_create_file(filename, opts, errp); + ret = bdrv_co_create_file(filename, opts, errp); if (ret < 0) { goto done; } diff --git a/block/vhdx.c b/block/vhdx.c index bad9ca691b..4c929800fe 100644 --- a/block/vhdx.c +++ b/block/vhdx.c @@ -2084,7 +2084,7 @@ static int coroutine_fn vhdx_co_create_opts(BlockDriver *drv, } /* Create and open the file 
(protocol layer) */ - ret = bdrv_create_file(filename, opts, errp); + ret = bdrv_co_create_file(filename, opts, errp); if (ret < 0) { goto fail; } diff --git a/block/vmdk.c b/block/vmdk.c index 26376352b9..8894dac2d4 100644 --- a/block/vmdk.c +++ b/block/vmdk.c @@ -2285,15 +2285,16 @@ exit: return ret; } -static int vmdk_create_extent(const char *filename, int64_t filesize, - bool flat, bool compress, bool zeroed_grain, - BlockBackend **pbb, - QemuOpts *opts, Error **errp) +static int coroutine_fn vmdk_create_extent(const char *filename, + int64_t filesize, bool flat, + bool compress, bool zeroed_grain, + BlockBackend **pbb, + QemuOpts *opts, Error **errp) { int ret; BlockBackend *blk = NULL; - ret = bdrv_create_file(filename, opts, errp); + ret = bdrv_co_create_file(filename, opts, errp); if (ret < 0) { goto exit; } @@ -2366,14 +2367,14 @@ static int filename_decompose(const char *filename, char *path, char *prefix, * non-split format. * idx >= 1: get the n-th extent if in a split subformat */ -typedef BlockBackend *(*vmdk_create_extent_fn)(int64_t size, - int idx, - bool flat, - bool split, - bool compress, - bool zeroed_grain, - void *opaque, - Error **errp); +typedef BlockBackend * coroutine_fn (*vmdk_create_extent_fn)(int64_t size, + int idx, + bool flat, + bool split, + bool compress, + bool zeroed_grain, + void *opaque, + Error **errp); static void vmdk_desc_add_extent(GString *desc, const char *extent_line_fmt, @@ -2616,7 +2617,7 @@ typedef struct { QemuOpts *opts; } VMDKCreateOptsData; -static BlockBackend *vmdk_co_create_opts_cb(int64_t size, int idx, +static BlockBackend * coroutine_fn vmdk_co_create_opts_cb(int64_t size, int idx, bool flat, bool split, bool compress, bool zeroed_grain, void *opaque, Error **errp) @@ -2768,10 +2769,11 @@ exit: return ret; } -static BlockBackend *vmdk_co_create_cb(int64_t size, int idx, - bool flat, bool split, bool compress, - bool zeroed_grain, void *opaque, - Error **errp) +static BlockBackend * coroutine_fn vmdk_co_create_cb(int64_t size, int idx, + bool flat, bool split, + bool compress, + bool zeroed_grain, + void *opaque, Error **errp) { int ret; BlockDriverState *bs; @@ -2821,7 +2823,6 @@ static BlockBackend *vmdk_co_create_cb(int64_t size, int idx, static int coroutine_fn vmdk_co_create(BlockdevCreateOptions *create_options, Error **errp) { - int ret; BlockdevCreateOptionsVmdk *opts; opts = &create_options->u.vmdk; @@ -2829,24 +2830,19 @@ static int coroutine_fn vmdk_co_create(BlockdevCreateOptions *create_options, /* Validate options */ if (!QEMU_IS_ALIGNED(opts->size, BDRV_SECTOR_SIZE)) { error_setg(errp, "Image size must be a multiple of 512 bytes"); - ret = -EINVAL; - goto out; + return -EINVAL; } - ret = vmdk_co_do_create(opts->size, - opts->subformat, - opts->adapter_type, - opts->backing_file, - opts->hwversion, - opts->toolsversion, - false, - opts->zeroed_grain, - vmdk_co_create_cb, - opts, errp); - return ret; - -out: - return ret; + return vmdk_co_do_create(opts->size, + opts->subformat, + opts->adapter_type, + opts->backing_file, + opts->hwversion, + opts->toolsversion, + false, + opts->zeroed_grain, + vmdk_co_create_cb, + opts, errp); } static void vmdk_close(BlockDriverState *bs) diff --git a/block/vpc.c b/block/vpc.c index 95841f259a..6ee95dcb96 100644 --- a/block/vpc.c +++ b/block/vpc.c @@ -1111,7 +1111,7 @@ static int coroutine_fn vpc_co_create_opts(BlockDriver *drv, } /* Create and open the file (protocol layer) */ - ret = bdrv_create_file(filename, opts, errp); + ret = bdrv_co_create_file(filename, opts, errp); if (ret 
< 0) { goto fail; } diff --git a/blockdev-nbd.c b/blockdev-nbd.c index 012256bb02..213012435f 100644 --- a/blockdev-nbd.c +++ b/blockdev-nbd.c @@ -173,8 +173,8 @@ void nbd_server_start_options(NbdServerOptions *arg, Error **errp) } void qmp_nbd_server_start(SocketAddressLegacy *addr, - bool has_tls_creds, const char *tls_creds, - bool has_tls_authz, const char *tls_authz, + const char *tls_creds, + const char *tls_authz, bool has_max_connections, uint32_t max_connections, Error **errp) { @@ -200,8 +200,7 @@ void qmp_nbd_server_add(NbdServerAddOptions *arg, Error **errp) * block-export-add would default to the node-name, but we may have to use * the device name as a default here for compatibility. */ - if (!arg->has_name) { - arg->has_name = true; + if (!arg->name) { arg->name = g_strdup(arg->device); } @@ -215,7 +214,7 @@ void qmp_nbd_server_add(NbdServerAddOptions *arg, Error **errp) }; QAPI_CLONE_MEMBERS(BlockExportOptionsNbdBase, &export_opts->u.nbd, qapi_NbdServerAddOptions_base(arg)); - if (arg->has_bitmap) { + if (arg->bitmap) { BlockDirtyBitmapOrStr *el = g_new(BlockDirtyBitmapOrStr, 1); *el = (BlockDirtyBitmapOrStr) { diff --git a/blockdev.c b/blockdev.c index 3f1dec6242..ebf952cd21 100644 --- a/blockdev.c +++ b/blockdev.c @@ -1048,26 +1048,20 @@ static void blockdev_do_action(TransactionAction *action, Error **errp) list.value = action; list.next = NULL; - qmp_transaction(&list, false, NULL, errp); + qmp_transaction(&list, NULL, errp); } -void qmp_blockdev_snapshot_sync(bool has_device, const char *device, - bool has_node_name, const char *node_name, +void qmp_blockdev_snapshot_sync(const char *device, const char *node_name, const char *snapshot_file, - bool has_snapshot_node_name, const char *snapshot_node_name, - bool has_format, const char *format, + const char *format, bool has_mode, NewImageMode mode, Error **errp) { BlockdevSnapshotSync snapshot = { - .has_device = has_device, .device = (char *) device, - .has_node_name = has_node_name, .node_name = (char *) node_name, .snapshot_file = (char *) snapshot_file, - .has_snapshot_node_name = has_snapshot_node_name, .snapshot_node_name = (char *) snapshot_node_name, - .has_format = has_format, .format = (char *) format, .has_mode = has_mode, .mode = mode, @@ -1109,9 +1103,7 @@ void qmp_blockdev_snapshot_internal_sync(const char *device, } SnapshotInfo *qmp_blockdev_snapshot_delete_internal_sync(const char *device, - bool has_id, const char *id, - bool has_name, const char *name, Error **errp) { @@ -1129,14 +1121,6 @@ SnapshotInfo *qmp_blockdev_snapshot_delete_internal_sync(const char *device, aio_context = bdrv_get_aio_context(bs); aio_context_acquire(aio_context); - if (!has_id) { - id = NULL; - } - - if (!has_name) { - name = NULL; - } - if (!id && !name) { error_setg(errp, "Name or id must be provided"); goto out_aio_context; @@ -1450,8 +1434,8 @@ static void external_snapshot_prepare(BlkActionState *common, case TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_SYNC: { BlockdevSnapshotSync *s = action->u.blockdev_snapshot_sync.data; - device = s->has_device ? s->device : NULL; - node_name = s->has_node_name ? s->node_name : NULL; + device = s->device; + node_name = s->node_name; new_image_file = s->snapshot_file; snapshot_ref = NULL; } @@ -1495,10 +1479,9 @@ static void external_snapshot_prepare(BlkActionState *common, if (action->type == TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_SYNC) { BlockdevSnapshotSync *s = action->u.blockdev_snapshot_sync.data; - const char *format = s->has_format ? 
s->format : "qcow2"; + const char *format = s->format ?: "qcow2"; enum NewImageMode mode; - const char *snapshot_node_name = - s->has_snapshot_node_name ? s->snapshot_node_name : NULL; + const char *snapshot_node_name = s->snapshot_node_name; if (node_name && !snapshot_node_name) { error_setg(errp, "New overlay node-name missing"); @@ -1524,10 +1507,14 @@ static void external_snapshot_prepare(BlkActionState *common, goto out; } bdrv_refresh_filename(state->old_bs); + + aio_context_release(aio_context); bdrv_img_create(new_image_file, format, state->old_bs->filename, state->old_bs->drv->format_name, NULL, size, flags, false, &local_err); + aio_context_acquire(aio_context); + if (local_err) { error_propagate(errp, local_err); goto out; @@ -1686,6 +1673,7 @@ static void drive_backup_prepare(BlkActionState *common, Error **errp) BlockDriverState *source = NULL; AioContext *aio_context; AioContext *old_context; + const char *format; QDict *options; Error *local_err = NULL; int flags; @@ -1717,9 +1705,9 @@ static void drive_backup_prepare(BlkActionState *common, Error **errp) /* Paired with .clean() */ bdrv_drained_begin(bs); - if (!backup->has_format) { - backup->format = backup->mode == NEW_IMAGE_MODE_EXISTING ? - NULL : (char *) bs->drv->format_name; + format = backup->format; + if (!format && backup->mode != NEW_IMAGE_MODE_EXISTING) { + format = bs->drv->format_name; } /* Early check to avoid creating target */ @@ -1758,19 +1746,19 @@ static void drive_backup_prepare(BlkActionState *common, Error **errp) } if (backup->mode != NEW_IMAGE_MODE_EXISTING) { - assert(backup->format); + assert(format); if (source) { /* Implicit filters should not appear in the filename */ BlockDriverState *explicit_backing = bdrv_skip_implicit_filters(source); bdrv_refresh_filename(explicit_backing); - bdrv_img_create(backup->target, backup->format, + bdrv_img_create(backup->target, format, explicit_backing->filename, explicit_backing->drv->format_name, NULL, size, flags, false, &local_err); } else { - bdrv_img_create(backup->target, backup->format, NULL, NULL, NULL, + bdrv_img_create(backup->target, format, NULL, NULL, NULL, size, flags, false, &local_err); } } @@ -1783,8 +1771,8 @@ static void drive_backup_prepare(BlkActionState *common, Error **errp) options = qdict_new(); qdict_put_str(options, "discard", "unmap"); qdict_put_str(options, "detect-zeroes", "unmap"); - if (backup->format) { - qdict_put_str(options, "driver", backup->format); + if (format) { + qdict_put_str(options, "driver", format); } target_bs = bdrv_open(backup->target, NULL, options, flags, errp); @@ -2305,11 +2293,11 @@ static TransactionProperties *get_transaction_properties( * Always run under BQL. */ void qmp_transaction(TransactionActionList *dev_list, - bool has_props, struct TransactionProperties *props, Error **errp) { TransactionActionList *dev_entry = dev_list; + bool has_props = !!props; JobTxn *block_job_txn = NULL; BlkActionState *state, *next; Error *local_err = NULL; @@ -2411,8 +2399,7 @@ BlockDirtyBitmapSha256 *qmp_x_debug_block_dirty_bitmap_sha256(const char *node, return ret; } -void coroutine_fn qmp_block_resize(bool has_device, const char *device, - bool has_node_name, const char *node_name, +void coroutine_fn qmp_block_resize(const char *device, const char *node_name, int64_t size, Error **errp) { Error *local_err = NULL; @@ -2420,9 +2407,7 @@ void coroutine_fn qmp_block_resize(bool has_device, const char *device, BlockDriverState *bs; AioContext *old_ctx; - bs = bdrv_lookup_bs(has_device ? device : NULL, - has_node_name ? 
node_name : NULL, - &local_err); + bs = bdrv_lookup_bs(device, node_name, &local_err); if (local_err) { error_propagate(errp, local_err); return; @@ -2457,14 +2442,14 @@ void coroutine_fn qmp_block_resize(bool has_device, const char *device, bdrv_co_unlock(bs); } -void qmp_block_stream(bool has_job_id, const char *job_id, const char *device, - bool has_base, const char *base, - bool has_base_node, const char *base_node, - bool has_backing_file, const char *backing_file, - bool has_bottom, const char *bottom, +void qmp_block_stream(const char *job_id, const char *device, + const char *base, + const char *base_node, + const char *backing_file, + const char *bottom, bool has_speed, int64_t speed, bool has_on_error, BlockdevOnError on_error, - bool has_filter_node_name, const char *filter_node_name, + const char *filter_node_name, bool has_auto_finalize, bool auto_finalize, bool has_auto_dismiss, bool auto_dismiss, Error **errp) @@ -2476,19 +2461,19 @@ void qmp_block_stream(bool has_job_id, const char *job_id, const char *device, Error *local_err = NULL; int job_flags = JOB_DEFAULT; - if (has_base && has_base_node) { + if (base && base_node) { error_setg(errp, "'base' and 'base-node' cannot be specified " "at the same time"); return; } - if (has_base && has_bottom) { + if (base && bottom) { error_setg(errp, "'base' and 'bottom' cannot be specified " "at the same time"); return; } - if (has_bottom && has_base_node) { + if (bottom && base_node) { error_setg(errp, "'bottom' and 'base-node' cannot be specified " "at the same time"); return; @@ -2506,7 +2491,7 @@ void qmp_block_stream(bool has_job_id, const char *job_id, const char *device, aio_context = bdrv_get_aio_context(bs); aio_context_acquire(aio_context); - if (has_base) { + if (base) { base_bs = bdrv_find_backing_image(bs, base); if (base_bs == NULL) { error_setg(errp, "Can't find '%s' in the backing chain", base); @@ -2515,7 +2500,7 @@ void qmp_block_stream(bool has_job_id, const char *job_id, const char *device, assert(bdrv_get_aio_context(base_bs) == aio_context); } - if (has_base_node) { + if (base_node) { base_bs = bdrv_lookup_bs(NULL, base_node, errp); if (!base_bs) { goto out; @@ -2529,7 +2514,7 @@ void qmp_block_stream(bool has_job_id, const char *job_id, const char *device, bdrv_refresh_filename(base_bs); } - if (has_bottom) { + if (bottom) { bottom_bs = bdrv_lookup_bs(NULL, bottom, errp); if (!bottom_bs) { goto out; @@ -2554,7 +2539,7 @@ void qmp_block_stream(bool has_job_id, const char *job_id, const char *device, /* * Check for op blockers in the whole chain between bs and base (or bottom) */ - iter_end = has_bottom ? bdrv_filter_or_cow_bs(bottom_bs) : base_bs; + iter_end = bottom ? bdrv_filter_or_cow_bs(bottom_bs) : base_bs; for (iter = bs; iter && iter != iter_end; iter = bdrv_filter_or_cow_bs(iter)) { @@ -2565,7 +2550,7 @@ void qmp_block_stream(bool has_job_id, const char *job_id, const char *device, /* if we are streaming the entire chain, the result will have no backing * file, and specifying one is therefore an error */ - if (base_bs == NULL && has_backing_file) { + if (!base_bs && backing_file) { error_setg(errp, "backing file specified, but streaming the " "entire chain"); goto out; @@ -2578,7 +2563,7 @@ void qmp_block_stream(bool has_job_id, const char *job_id, const char *device, job_flags |= JOB_MANUAL_DISMISS; } - stream_start(has_job_id ? job_id : NULL, bs, base_bs, backing_file, + stream_start(job_id, bs, base_bs, backing_file, bottom_bs, job_flags, has_speed ? 
speed : 0, on_error, filter_node_name, &local_err); if (local_err) { @@ -2592,15 +2577,15 @@ out: aio_context_release(aio_context); } -void qmp_block_commit(bool has_job_id, const char *job_id, const char *device, - bool has_base_node, const char *base_node, - bool has_base, const char *base, - bool has_top_node, const char *top_node, - bool has_top, const char *top, - bool has_backing_file, const char *backing_file, +void qmp_block_commit(const char *job_id, const char *device, + const char *base_node, + const char *base, + const char *top_node, + const char *top, + const char *backing_file, bool has_speed, int64_t speed, bool has_on_error, BlockdevOnError on_error, - bool has_filter_node_name, const char *filter_node_name, + const char *filter_node_name, bool has_auto_finalize, bool auto_finalize, bool has_auto_dismiss, bool auto_dismiss, Error **errp) @@ -2619,9 +2604,6 @@ void qmp_block_commit(bool has_job_id, const char *job_id, const char *device, if (!has_on_error) { on_error = BLOCKDEV_ON_ERROR_REPORT; } - if (!has_filter_node_name) { - filter_node_name = NULL; - } if (has_auto_finalize && !auto_finalize) { job_flags |= JOB_MANUAL_FINALIZE; } @@ -2657,10 +2639,10 @@ void qmp_block_commit(bool has_job_id, const char *job_id, const char *device, /* default top_bs is the active layer */ top_bs = bs; - if (has_top_node && has_top) { + if (top_node && top) { error_setg(errp, "'top-node' and 'top' are mutually exclusive"); goto out; - } else if (has_top_node) { + } else if (top_node) { top_bs = bdrv_lookup_bs(NULL, top_node, errp); if (top_bs == NULL) { goto out; @@ -2670,7 +2652,7 @@ void qmp_block_commit(bool has_job_id, const char *job_id, const char *device, top_node); goto out; } - } else if (has_top && top) { + } else if (top) { /* This strcmp() is just a shortcut, there is no need to * refresh @bs's filename. If it mismatches, * bdrv_find_backing_image() will do the refresh and may still @@ -2687,10 +2669,10 @@ void qmp_block_commit(bool has_job_id, const char *job_id, const char *device, assert(bdrv_get_aio_context(top_bs) == aio_context); - if (has_base_node && has_base) { + if (base_node && base) { error_setg(errp, "'base-node' and 'base' are mutually exclusive"); goto out; - } else if (has_base_node) { + } else if (base_node) { base_bs = bdrv_lookup_bs(NULL, base_node, errp); if (base_bs == NULL) { goto out; @@ -2700,7 +2682,7 @@ void qmp_block_commit(bool has_job_id, const char *job_id, const char *device, base_node); goto out; } - } else if (has_base && base) { + } else if (base) { base_bs = bdrv_find_backing_image(top_bs, base); if (base_bs == NULL) { error_setg(errp, "Can't find '%s' in the backing chain", base); @@ -2742,7 +2724,7 @@ void qmp_block_commit(bool has_job_id, const char *job_id, const char *device, if (top_perm & BLK_PERM_WRITE || bdrv_skip_filters(top_bs) == bdrv_skip_filters(bs)) { - if (has_backing_file) { + if (backing_file) { if (bdrv_skip_filters(top_bs) == bdrv_skip_filters(bs)) { error_setg(errp, "'backing-file' specified," " but 'top' is the active layer"); @@ -2752,7 +2734,7 @@ void qmp_block_commit(bool has_job_id, const char *job_id, const char *device, } goto out; } - if (!has_job_id) { + if (!job_id) { /* * Emulate here what block_job_create() does, because it * is possible that @bs != @top_bs (the block job should @@ -2768,8 +2750,8 @@ void qmp_block_commit(bool has_job_id, const char *job_id, const char *device, if (bdrv_op_is_blocked(overlay_bs, BLOCK_OP_TYPE_COMMIT_TARGET, errp)) { goto out; } - commit_start(has_job_id ? 
job_id : NULL, bs, base_bs, top_bs, job_flags, - speed, on_error, has_backing_file ? backing_file : NULL, + commit_start(job_id, bs, base_bs, top_bs, job_flags, + speed, on_error, backing_file, filter_node_name, &local_err); } if (local_err != NULL) { @@ -2802,9 +2784,6 @@ static BlockJob *do_backup_common(BackupCommon *backup, if (!backup->has_on_target_error) { backup->on_target_error = BLOCKDEV_ON_ERROR_REPORT; } - if (!backup->has_job_id) { - backup->job_id = NULL; - } if (!backup->has_auto_finalize) { backup->auto_finalize = true; } @@ -2830,7 +2809,7 @@ static BlockJob *do_backup_common(BackupCommon *backup, if ((backup->sync == MIRROR_SYNC_MODE_BITMAP) || (backup->sync == MIRROR_SYNC_MODE_INCREMENTAL)) { /* done before desugaring 'incremental' to print the right message */ - if (!backup->has_bitmap) { + if (!backup->bitmap) { error_setg(errp, "must provide a valid bitmap name for " "'%s' sync mode", MirrorSyncMode_str(backup->sync)); return NULL; @@ -2851,7 +2830,7 @@ static BlockJob *do_backup_common(BackupCommon *backup, backup->bitmap_mode = BITMAP_SYNC_MODE_ON_SUCCESS; } - if (backup->has_bitmap) { + if (backup->bitmap) { bmap = bdrv_find_dirty_bitmap(bs, backup->bitmap); if (!bmap) { error_setg(errp, "Bitmap '%s' could not be found", backup->bitmap); @@ -2884,7 +2863,7 @@ static BlockJob *do_backup_common(BackupCommon *backup, } } - if (!backup->has_bitmap && backup->has_bitmap_mode) { + if (!backup->bitmap && backup->has_bitmap_mode) { error_setg(errp, "Cannot specify bitmap sync mode without a bitmap"); return NULL; } @@ -2944,7 +2923,7 @@ void qmp_blockdev_backup(BlockdevBackup *backup, Error **errp) **/ static void blockdev_mirror_common(const char *job_id, BlockDriverState *bs, BlockDriverState *target, - bool has_replaces, const char *replaces, + const char *replaces, enum MirrorSyncMode sync, BlockMirrorBackingMode backing_mode, bool zero_target, @@ -2956,7 +2935,6 @@ static void blockdev_mirror_common(const char *job_id, BlockDriverState *bs, bool has_on_target_error, BlockdevOnError on_target_error, bool has_unmap, bool unmap, - bool has_filter_node_name, const char *filter_node_name, bool has_copy_mode, MirrorCopyMode copy_mode, bool has_auto_finalize, bool auto_finalize, @@ -2984,9 +2962,6 @@ static void blockdev_mirror_common(const char *job_id, BlockDriverState *bs, if (!has_unmap) { unmap = true; } - if (!has_filter_node_name) { - filter_node_name = NULL; - } if (!has_copy_mode) { copy_mode = MIRROR_COPY_MODE_BACKGROUND; } @@ -3019,16 +2994,15 @@ static void blockdev_mirror_common(const char *job_id, BlockDriverState *bs, sync = MIRROR_SYNC_MODE_FULL; } - if (!has_replaces) { + if (!replaces) { /* We want to mirror from @bs, but keep implicit filters on top */ unfiltered_bs = bdrv_skip_implicit_filters(bs); if (unfiltered_bs != bs) { replaces = unfiltered_bs->node_name; - has_replaces = true; } } - if (has_replaces) { + if (replaces) { BlockDriverState *to_replace_bs; AioContext *replace_aio_context; int64_t bs_size, replace_size; @@ -3065,7 +3039,7 @@ static void blockdev_mirror_common(const char *job_id, BlockDriverState *bs, * and will allow to check whether the node still exist at mirror completion */ mirror_start(job_id, bs, target, - has_replaces ? 
replaces : NULL, job_flags, + replaces, job_flags, speed, granularity, buf_size, sync, backing_mode, zero_target, on_source_error, on_target_error, unmap, filter_node_name, copy_mode, errp); @@ -3103,7 +3077,7 @@ void qmp_drive_mirror(DriveMirror *arg, Error **errp) arg->mode = NEW_IMAGE_MODE_ABSOLUTE_PATHS; } - if (!arg->has_format) { + if (!arg->format) { format = (arg->mode == NEW_IMAGE_MODE_EXISTING ? NULL : bs->drv->format_name); } @@ -3123,8 +3097,8 @@ void qmp_drive_mirror(DriveMirror *arg, Error **errp) goto out; } - if (arg->has_replaces) { - if (!arg->has_node_name) { + if (arg->replaces) { + if (!arg->node_name) { error_setg(errp, "a node-name must be provided when replacing a" " named node of the graph"); goto out; @@ -3174,7 +3148,7 @@ void qmp_drive_mirror(DriveMirror *arg, Error **errp) } options = qdict_new(); - if (arg->has_node_name) { + if (arg->node_name) { qdict_put_str(options, "node-name", arg->node_name); } if (format) { @@ -3209,8 +3183,8 @@ void qmp_drive_mirror(DriveMirror *arg, Error **errp) aio_context_release(old_context); aio_context_acquire(aio_context); - blockdev_mirror_common(arg->has_job_id ? arg->job_id : NULL, bs, target_bs, - arg->has_replaces, arg->replaces, arg->sync, + blockdev_mirror_common(arg->job_id, bs, target_bs, + arg->replaces, arg->sync, backing_mode, zero_target, arg->has_speed, arg->speed, arg->has_granularity, arg->granularity, @@ -3218,7 +3192,7 @@ void qmp_drive_mirror(DriveMirror *arg, Error **errp) arg->has_on_source_error, arg->on_source_error, arg->has_on_target_error, arg->on_target_error, arg->has_unmap, arg->unmap, - false, NULL, + NULL, arg->has_copy_mode, arg->copy_mode, arg->has_auto_finalize, arg->auto_finalize, arg->has_auto_dismiss, arg->auto_dismiss, @@ -3228,9 +3202,9 @@ out: aio_context_release(aio_context); } -void qmp_blockdev_mirror(bool has_job_id, const char *job_id, +void qmp_blockdev_mirror(const char *job_id, const char *device, const char *target, - bool has_replaces, const char *replaces, + const char *replaces, MirrorSyncMode sync, bool has_speed, int64_t speed, bool has_granularity, uint32_t granularity, @@ -3239,7 +3213,6 @@ void qmp_blockdev_mirror(bool has_job_id, const char *job_id, BlockdevOnError on_source_error, bool has_on_target_error, BlockdevOnError on_target_error, - bool has_filter_node_name, const char *filter_node_name, bool has_copy_mode, MirrorCopyMode copy_mode, bool has_auto_finalize, bool auto_finalize, @@ -3280,15 +3253,14 @@ void qmp_blockdev_mirror(bool has_job_id, const char *job_id, goto out; } - blockdev_mirror_common(has_job_id ? 
job_id : NULL, bs, target_bs, - has_replaces, replaces, sync, backing_mode, + blockdev_mirror_common(job_id, bs, target_bs, + replaces, sync, backing_mode, zero_target, has_speed, speed, has_granularity, granularity, has_buf_size, buf_size, has_on_source_error, on_source_error, has_on_target_error, on_target_error, - true, true, - has_filter_node_name, filter_node_name, + true, true, filter_node_name, has_copy_mode, copy_mode, has_auto_finalize, auto_finalize, has_auto_dismiss, auto_dismiss, @@ -3547,8 +3519,6 @@ fail: void qmp_blockdev_reopen(BlockdevOptionsList *reopen_list, Error **errp) { BlockReopenQueue *queue = NULL; - GSList *drained = NULL; - GSList *p; /* Add each one of the BDS that we want to reopen to the queue */ for (; reopen_list != NULL; reopen_list = reopen_list->next) { @@ -3560,7 +3530,7 @@ void qmp_blockdev_reopen(BlockdevOptionsList *reopen_list, Error **errp) QDict *qdict; /* Check for the selected node name */ - if (!options->has_node_name) { + if (!options->node_name) { error_setg(errp, "node-name not specified"); goto fail; } @@ -3585,9 +3555,7 @@ void qmp_blockdev_reopen(BlockdevOptionsList *reopen_list, Error **errp) ctx = bdrv_get_aio_context(bs); aio_context_acquire(ctx); - bdrv_subtree_drained_begin(bs); queue = bdrv_reopen_queue(queue, bs, qdict, false); - drained = g_slist_prepend(drained, bs); aio_context_release(ctx); } @@ -3598,15 +3566,6 @@ void qmp_blockdev_reopen(BlockdevOptionsList *reopen_list, Error **errp) fail: bdrv_reopen_queue_free(queue); - for (p = drained; p; p = p->next) { - BlockDriverState *bs = p->data; - AioContext *ctx = bdrv_get_aio_context(bs); - - aio_context_acquire(ctx); - bdrv_subtree_drained_end(bs); - aio_context_release(ctx); - } - g_slist_free(drained); } void qmp_blockdev_del(const char *node_name, Error **errp) @@ -3665,8 +3624,7 @@ static BdrvChild *bdrv_find_child(BlockDriverState *parent_bs, return NULL; } -void qmp_x_blockdev_change(const char *parent, bool has_child, - const char *child, bool has_node, +void qmp_x_blockdev_change(const char *parent, const char *child, const char *node, Error **errp) { BlockDriverState *parent_bs, *new_bs = NULL; @@ -3677,8 +3635,8 @@ void qmp_x_blockdev_change(const char *parent, bool has_child, return; } - if (has_child == has_node) { - if (has_child) { + if (!child == !node) { + if (child) { error_setg(errp, "The parameters child and node are in conflict"); } else { error_setg(errp, "Either child or node must be specified"); @@ -3686,7 +3644,7 @@ void qmp_x_blockdev_change(const char *parent, bool has_child, return; } - if (has_child) { + if (child) { p_child = bdrv_find_child(parent_bs, child); if (!p_child) { error_setg(errp, "Node '%s' does not have child '%s'", @@ -3696,7 +3654,7 @@ void qmp_x_blockdev_change(const char *parent, bool has_child, bdrv_del_child(parent_bs, p_child, errp); } - if (has_node) { + if (node) { new_bs = bdrv_find_node(node); if (!new_bs) { error_setg(errp, "Node '%s' not found", node); diff --git a/blockjob.c b/blockjob.c index f51d4e18f3..b7daf2a9f6 100644 --- a/blockjob.c +++ b/blockjob.c @@ -120,7 +120,7 @@ static bool child_job_drained_poll(BdrvChild *c) } } -static void child_job_drained_end(BdrvChild *c, int *drained_end_counter) +static void child_job_drained_end(BdrvChild *c) { BlockJob *job = c->opaque; job_resume(&job->job); @@ -354,7 +354,6 @@ BlockJobInfo *block_job_query_locked(BlockJob *job, Error **errp) info->auto_finalize = job->job.auto_finalize; info->auto_dismiss = job->job.auto_dismiss; if (job->job.ret) { - info->has_error = true; 
info->error = job->job.err ? g_strdup(error_get_pretty(job->job.err)) : g_strdup(strerror(-job->job.ret)); @@ -414,7 +413,6 @@ static void block_job_event_completed_locked(Notifier *n, void *opaque) progress_total, progress_current, job->speed, - !!msg, msg); } diff --git a/bsd-user/elfload.c b/bsd-user/elfload.c index f8edb22f2a..fbcdc94b96 100644 --- a/bsd-user/elfload.c +++ b/bsd-user/elfload.c @@ -156,7 +156,7 @@ static abi_ulong copy_elf_strings(int argc, char **argv, void **page, --p; --tmp; --len; if (--offset < 0) { offset = p % TARGET_PAGE_SIZE; - pag = (char *)page[p / TARGET_PAGE_SIZE]; + pag = page[p / TARGET_PAGE_SIZE]; if (!pag) { pag = g_try_malloc0(TARGET_PAGE_SIZE); page[p / TARGET_PAGE_SIZE] = pag; diff --git a/chardev/char-file.c b/chardev/char-file.c index 2fd80707e5..3a7b9caf6f 100644 --- a/chardev/char-file.c +++ b/chardev/char-file.c @@ -45,7 +45,7 @@ static void qmp_chardev_open_file(Chardev *chr, DWORD accessmode; DWORD flags; - if (file->has_in) { + if (file->in) { error_setg(errp, "input file not supported"); return; } @@ -83,7 +83,7 @@ static void qmp_chardev_open_file(Chardev *chr, return; } - if (file->has_in) { + if (file->in) { flags = O_RDONLY; in = qmp_chardev_open_file_source(file->in, flags, errp); if (in < 0) { diff --git a/chardev/char-socket.c b/chardev/char-socket.c index 879564aa8a..29ffe5075e 100644 --- a/chardev/char-socket.c +++ b/chardev/char-socket.c @@ -1251,7 +1251,7 @@ static bool qmp_chardev_validate_socket(ChardevSocket *sock, "'fd' address type"); return false; } - if (sock->has_tls_creds && + if (sock->tls_creds && !(sock->has_server && sock->server)) { error_setg(errp, "'tls_creds' option is incompatible with " @@ -1261,7 +1261,7 @@ static bool qmp_chardev_validate_socket(ChardevSocket *sock, break; case SOCKET_ADDRESS_TYPE_UNIX: - if (sock->has_tls_creds) { + if (sock->tls_creds) { error_setg(errp, "'tls_creds' option is incompatible with " "'unix' address type"); @@ -1273,7 +1273,7 @@ static bool qmp_chardev_validate_socket(ChardevSocket *sock, break; case SOCKET_ADDRESS_TYPE_VSOCK: - if (sock->has_tls_creds) { + if (sock->tls_creds) { error_setg(errp, "'tls_creds' option is incompatible with " "'vsock' address type"); @@ -1284,7 +1284,7 @@ static bool qmp_chardev_validate_socket(ChardevSocket *sock, break; } - if (sock->has_tls_authz && !sock->has_tls_creds) { + if (sock->tls_authz && !sock->tls_creds) { error_setg(errp, "'tls_authz' option requires 'tls_creds' option"); return false; } @@ -1465,9 +1465,7 @@ static void qemu_chr_parse_socket(QemuOpts *opts, ChardevBackend *backend, sock->wait = qemu_opt_get_bool(opts, "wait", true); sock->has_reconnect = qemu_opt_find(opts, "reconnect"); sock->reconnect = qemu_opt_get_number(opts, "reconnect", 0); - sock->has_tls_creds = qemu_opt_get(opts, "tls-creds"); sock->tls_creds = g_strdup(qemu_opt_get(opts, "tls-creds")); - sock->has_tls_authz = qemu_opt_get(opts, "tls-authz"); sock->tls_authz = g_strdup(qemu_opt_get(opts, "tls-authz")); addr = g_new0(SocketAddressLegacy, 1); diff --git a/chardev/char-udp.c b/chardev/char-udp.c index 6756e69924..3d9a2d5e77 100644 --- a/chardev/char-udp.c +++ b/chardev/char-udp.c @@ -178,7 +178,6 @@ static void qemu_chr_parse_udp(QemuOpts *opts, ChardevBackend *backend, udp->remote = addr; if (has_local) { - udp->has_local = true; addr = g_new0(SocketAddressLegacy, 1); addr->type = SOCKET_ADDRESS_TYPE_INET; addr->u.inet.data = g_new(InetSocketAddress, 1); diff --git a/chardev/char.c b/chardev/char.c index b005df3ccf..4c5de16402 100644 --- a/chardev/char.c +++ 
b/chardev/char.c @@ -240,7 +240,7 @@ static void qemu_char_open(Chardev *chr, ChardevBackend *backend, /* Any ChardevCommon member would work */ ChardevCommon *common = backend ? backend->u.null.data : NULL; - if (common && common->has_logfile) { + if (common && common->logfile) { int flags = O_WRONLY; if (common->has_logappend && common->logappend) { @@ -496,9 +496,7 @@ void qemu_chr_parse_common(QemuOpts *opts, ChardevCommon *backend) { const char *logfile = qemu_opt_get(opts, "logfile"); - backend->has_logfile = logfile != NULL; backend->logfile = g_strdup(logfile); - backend->has_logappend = true; backend->logappend = qemu_opt_get_bool(opts, "logappend", false); } @@ -1057,7 +1055,6 @@ ChardevReturn *qmp_chardev_add(const char *id, ChardevBackend *backend, ret = g_new0(ChardevReturn, 1); if (CHARDEV_IS_PTY(chr)) { ret->pty = g_strdup(chr->filename + 4); - ret->has_pty = true; } return ret; @@ -1160,7 +1157,6 @@ ChardevReturn *qmp_chardev_change(const char *id, ChardevBackend *backend, ret = g_new0(ChardevReturn, 1); if (CHARDEV_IS_PTY(chr_new)) { ret->pty = g_strdup(chr_new->filename + 4); - ret->has_pty = true; } return ret; diff --git a/contrib/plugins/cache.c b/contrib/plugins/cache.c index ac1510aaa1..2e25184a7f 100644 --- a/contrib/plugins/cache.c +++ b/contrib/plugins/cache.c @@ -405,7 +405,7 @@ static void vcpu_mem_access(unsigned int vcpu_index, qemu_plugin_meminfo_t info, g_mutex_lock(&l1_dcache_locks[cache_idx]); hit_in_l1 = access_cache(l1_dcaches[cache_idx], effective_addr); if (!hit_in_l1) { - insn = (InsnData *) userdata; + insn = userdata; __atomic_fetch_add(&insn->l1_dmisses, 1, __ATOMIC_SEQ_CST); l1_dcaches[cache_idx]->misses++; } @@ -419,7 +419,7 @@ static void vcpu_mem_access(unsigned int vcpu_index, qemu_plugin_meminfo_t info, g_mutex_lock(&l2_ucache_locks[cache_idx]); if (!access_cache(l2_ucaches[cache_idx], effective_addr)) { - insn = (InsnData *) userdata; + insn = userdata; __atomic_fetch_add(&insn->l2_misses, 1, __ATOMIC_SEQ_CST); l2_ucaches[cache_idx]->misses++; } @@ -440,7 +440,7 @@ static void vcpu_insn_exec(unsigned int vcpu_index, void *userdata) g_mutex_lock(&l1_icache_locks[cache_idx]); hit_in_l1 = access_cache(l1_icaches[cache_idx], insn_addr); if (!hit_in_l1) { - insn = (InsnData *) userdata; + insn = userdata; __atomic_fetch_add(&insn->l1_imisses, 1, __ATOMIC_SEQ_CST); l1_icaches[cache_idx]->misses++; } @@ -454,7 +454,7 @@ static void vcpu_insn_exec(unsigned int vcpu_index, void *userdata) g_mutex_lock(&l2_ucache_locks[cache_idx]); if (!access_cache(l2_ucaches[cache_idx], insn_addr)) { - insn = (InsnData *) userdata; + insn = userdata; __atomic_fetch_add(&insn->l2_misses, 1, __ATOMIC_SEQ_CST); l2_ucaches[cache_idx]->misses++; } diff --git a/contrib/vhost-user-blk/vhost-user-blk.c b/contrib/vhost-user-blk/vhost-user-blk.c index d6932a2645..aa99877fcd 100644 --- a/contrib/vhost-user-blk/vhost-user-blk.c +++ b/contrib/vhost-user-blk/vhost-user-blk.c @@ -193,7 +193,7 @@ vub_discard_write_zeroes(VubReq *req, struct iovec *iov, uint32_t iovcnt, #if defined(__linux__) && defined(BLKDISCARD) && defined(BLKZEROOUT) VubDev *vdev_blk = req->vdev_blk; - desc = (struct virtio_blk_discard_write_zeroes *)buf; + desc = buf; uint64_t range[2] = { le64toh(desc->sector) << 9, le32toh(desc->num_sectors) << 9 }; if (type == VIRTIO_BLK_T_DISCARD) { diff --git a/crypto/block-luks.c b/crypto/block-luks.c index df2b4105d6..ff9e3945d1 100644 --- a/crypto/block-luks.c +++ b/crypto/block-luks.c @@ -1597,13 +1597,13 @@ qcrypto_block_luks_amend_add_keyslot(QCryptoBlock *block, 
g_autofree char *new_password = NULL; g_autofree uint8_t *master_key = NULL; - char *secret = opts_luks->has_secret ? opts_luks->secret : luks->secret; + char *secret = opts_luks->secret ?: luks->secret; - if (!opts_luks->has_new_secret) { + if (!opts_luks->new_secret) { error_setg(errp, "'new-secret' is required to activate a keyslot"); return -1; } - if (opts_luks->has_old_secret) { + if (opts_luks->old_secret) { error_setg(errp, "'old-secret' must not be given when activating keyslots"); return -1; @@ -1677,7 +1677,7 @@ qcrypto_block_luks_amend_erase_keyslots(QCryptoBlock *block, g_autofree uint8_t *tmpkey = NULL; g_autofree char *old_password = NULL; - if (opts_luks->has_new_secret) { + if (opts_luks->new_secret) { error_setg(errp, "'new-secret' must not be given when erasing keyslots"); return -1; @@ -1687,14 +1687,14 @@ qcrypto_block_luks_amend_erase_keyslots(QCryptoBlock *block, "'iter-time' must not be given when erasing keyslots"); return -1; } - if (opts_luks->has_secret) { + if (opts_luks->secret) { error_setg(errp, "'secret' must not be given when erasing keyslots"); return -1; } /* Load the old password if given */ - if (opts_luks->has_old_secret) { + if (opts_luks->old_secret) { old_password = qcrypto_secret_lookup_as_utf8(opts_luks->old_secret, errp); if (!old_password) { @@ -1719,7 +1719,7 @@ qcrypto_block_luks_amend_erase_keyslots(QCryptoBlock *block, return -1; } - if (opts_luks->has_old_secret) { + if (opts_luks->old_secret) { int rv = qcrypto_block_luks_load_key(block, keyslot, old_password, @@ -1761,7 +1761,7 @@ qcrypto_block_luks_amend_erase_keyslots(QCryptoBlock *block, } /* Erase all keyslots that match the given old password */ - } else if (opts_luks->has_old_secret) { + } else if (opts_luks->old_secret) { unsigned long slots_to_erase_bitmap = 0; size_t i; diff --git a/docs/devel/block-coroutine-wrapper.rst b/docs/devel/block-coroutine-wrapper.rst index 412851986b..6dd2cdcab3 100644 --- a/docs/devel/block-coroutine-wrapper.rst +++ b/docs/devel/block-coroutine-wrapper.rst @@ -26,12 +26,12 @@ called ``bdrv_foo(<same args>)``. In this case the script can help. To trigger the generation: 1. You need ``bdrv_foo`` declaration somewhere (for example, in - ``block/coroutines.h``) with the ``generated_co_wrapper`` mark, + ``block/coroutines.h``) with the ``co_wrapper`` mark, like this: .. code-block:: c - int generated_co_wrapper bdrv_foo(<some args>); + int co_wrapper bdrv_foo(<some args>); 2. You need to feed this declaration to block-coroutine-wrapper script. For this, add the .h (or .c) file with the declaration to the @@ -46,7 +46,7 @@ Links 1. The script location is ``scripts/block-coroutine-wrapper.py``. -2. Generic place for private ``generated_co_wrapper`` declarations is +2. Generic place for private ``co_wrapper`` declarations is ``block/coroutines.h``, for public declarations: ``include/block/block.h`` diff --git a/docs/devel/qapi-code-gen.rst b/docs/devel/qapi-code-gen.rst index cd9b544376..5edc49aa74 100644 --- a/docs/devel/qapi-code-gen.rst +++ b/docs/devel/qapi-code-gen.rst @@ -1357,7 +1357,7 @@ qmp_my_command(); everything else is produced by the generator. 
:: $ cat example-schema.json { 'struct': 'UserDefOne', - 'data': { 'integer': 'int', '*string': 'str' } } + 'data': { 'integer': 'int', '*string': 'str', '*flag': 'bool' } } { 'command': 'my-command', 'data': { 'arg1': ['UserDefOne'] }, @@ -1410,8 +1410,9 @@ Example:: struct UserDefOne { int64_t integer; - bool has_string; char *string; + bool has_flag; + bool flag; }; void qapi_free_UserDefOne(UserDefOne *obj); @@ -1523,14 +1524,21 @@ Example:: bool visit_type_UserDefOne_members(Visitor *v, UserDefOne *obj, Error **errp) { + bool has_string = !!obj->string; + if (!visit_type_int(v, "integer", &obj->integer, errp)) { return false; } - if (visit_optional(v, "string", &obj->has_string)) { + if (visit_optional(v, "string", &has_string)) { if (!visit_type_str(v, "string", &obj->string, errp)) { return false; } } + if (visit_optional(v, "flag", &obj->has_flag)) { + if (!visit_type_bool(v, "flag", &obj->flag, errp)) { + return false; + } + } return true; } @@ -1664,7 +1672,6 @@ Example:: $ cat qapi-generated/example-qapi-commands.c [Uninteresting stuff omitted...] - static void qmp_marshal_output_UserDefOne(UserDefOne *ret_in, QObject **ret_out, Error **errp) { @@ -1748,7 +1755,7 @@ Example:: QTAILQ_INIT(cmds); qmp_register_command(cmds, "my-command", - qmp_marshal_my_command, QCO_NO_OPTIONS); + qmp_marshal_my_command, 0, 0); } [Uninteresting stuff omitted...] @@ -1917,6 +1924,12 @@ Example:: { "type", QLIT_QSTR("str"), }, {} })), + QLIT_QDICT(((QLitDictEntry[]) { + { "default", QLIT_QNULL, }, + { "name", QLIT_QSTR("flag"), }, + { "type", QLIT_QSTR("bool"), }, + {} + })), {} })), }, { "meta-type", QLIT_QSTR("object"), }, @@ -1950,6 +1963,12 @@ Example:: { "name", QLIT_QSTR("str"), }, {} })), + QLIT_QDICT(((QLitDictEntry[]) { + { "json-type", QLIT_QSTR("boolean"), }, + { "meta-type", QLIT_QSTR("builtin"), }, + { "name", QLIT_QSTR("bool"), }, + {} + })), {} })); diff --git a/docs/devel/writing-monitor-commands.rst b/docs/devel/writing-monitor-commands.rst index 2fefedcd98..2c11e71665 100644 --- a/docs/devel/writing-monitor-commands.rst +++ b/docs/devel/writing-monitor-commands.rst @@ -166,9 +166,9 @@ and user defined types. Now, let's update our C implementation in monitor/qmp-cmds.c:: - void qmp_hello_world(bool has_message, const char *message, Error **errp) + void qmp_hello_world(const char *message, Error **errp) { - if (has_message) { + if (message) { printf("%s\n", message); } else { printf("Hello, world\n"); @@ -210,9 +210,9 @@ file. Basically, most errors are set by calling the error_setg() function. Let's say we don't accept the string "message" to contain the word "love". If it does contain it, we want the "hello-world" command to return an error:: - void qmp_hello_world(bool has_message, const char *message, Error **errp) + void qmp_hello_world(const char *message, Error **errp) { - if (has_message) { + if (message) { if (strstr(message, "love")) { error_setg(errp, "the word 'love' is not allowed"); return; @@ -467,9 +467,9 @@ There are a number of things to be noticed: allocated by the regular g_malloc0() function. Note that we chose to initialize the memory to zero. This is recommended for all QAPI types, as it helps avoiding bad surprises (specially with booleans) -4. Remember that "next_deadline" is optional? All optional members have a - 'has_TYPE_NAME' member that should be properly set by the implementation, - as shown above +4. Remember that "next_deadline" is optional? 
Non-pointer optional + members have a 'has_TYPE_NAME' member that should be properly set + by the implementation, as shown above 5. Even static strings, such as "alarm_timer->name", should be dynamically allocated by the implementation. This is so because the QAPI also generates a function to free its types and it cannot distinguish between dynamically diff --git a/docs/system/arm/emulation.rst b/docs/system/arm/emulation.rst index e3af79bb8c..b33d7c28dc 100644 --- a/docs/system/arm/emulation.rst +++ b/docs/system/arm/emulation.rst @@ -26,6 +26,7 @@ the following architecture extensions: - FEAT_DoubleFault (Double Fault Extension) - FEAT_E0PD (Preventing EL0 access to halves of address maps) - FEAT_ETS (Enhanced Translation Synchronization) +- FEAT_EVT (Enhanced Virtualization Traps) - FEAT_FCMA (Floating-point complex number instructions) - FEAT_FHM (Floating-point half-precision multiplication instructions) - FEAT_FP16 (Half-precision floating-point data processing) diff --git a/docs/system/arm/virt.rst b/docs/system/arm/virt.rst index 20442ea2c1..1cab33f02e 100644 --- a/docs/system/arm/virt.rst +++ b/docs/system/arm/virt.rst @@ -54,6 +54,7 @@ Supported guest CPU types: - ``cortex-a15`` (32-bit; the default) - ``cortex-a35`` (64-bit) - ``cortex-a53`` (64-bit) +- ``cortex-a55`` (64-bit) - ``cortex-a57`` (64-bit) - ``cortex-a72`` (64-bit) - ``cortex-a76`` (64-bit) @@ -94,6 +95,23 @@ highmem address space above 32 bits. The default is ``on`` for machine types later than ``virt-2.12``. +compact-highmem + Set ``on``/``off`` to enable/disable the compact layout for high memory regions. + The default is ``on`` for machine types later than ``virt-7.2``. + +highmem-redists + Set ``on``/``off`` to enable/disable the high memory region for GICv3 or + GICv4 redistributor. The default is ``on``. Setting this to ``off`` will + limit the maximum number of CPUs when GICv3 or GICv4 is used. + +highmem-ecam + Set ``on``/``off`` to enable/disable the high memory region for PCI ECAM. + The default is ``on`` for machine types later than ``virt-3.0``. + +highmem-mmio + Set ``on``/``off`` to enable/disable the high memory region for PCI MMIO. + The default is ``on``. + gic-version Specify the version of the Generic Interrupt Controller (GIC) to provide. Valid values are: diff --git a/dump/dump.c b/dump/dump.c index df117c847f..279b07f09b 100644 --- a/dump/dump.c +++ b/dump/dump.c @@ -357,7 +357,6 @@ static void write_elf32_notes(WriteCoreDumpFunction f, DumpState *s, static void write_elf_phdr_note(DumpState *s, Error **errp) { - ERRP_GUARD(); Elf32_Phdr phdr32; Elf64_Phdr phdr64; void *phdr; @@ -773,7 +772,6 @@ static void dump_iterate(DumpState *s, Error **errp) static void dump_end(DumpState *s, Error **errp) { int rc; - ERRP_GUARD(); if (s->elf_section_data_size) { s->elf_section_data = g_malloc0(s->elf_section_data_size); @@ -2044,8 +2042,8 @@ static void dump_process(DumpState *s, Error **errp) result = qmp_query_dump(NULL); /* should never fail */ assert(result); - qapi_event_send_dump_completed(result, !!*errp, (*errp ? - error_get_pretty(*errp) : NULL)); + qapi_event_send_dump_completed(result, + *errp ? 
error_get_pretty(*errp) : NULL); qapi_free_DumpQueryResult(result); dump_cleanup(s); diff --git a/hw/9pfs/9p-synth.c b/hw/9pfs/9p-synth.c index 1c5813e4dd..38d787f494 100644 --- a/hw/9pfs/9p-synth.c +++ b/hw/9pfs/9p-synth.c @@ -72,7 +72,6 @@ static V9fsSynthNode *v9fs_add_dir_node(V9fsSynthNode *parent, int mode, int qemu_v9fs_synth_mkdir(V9fsSynthNode *parent, int mode, const char *name, V9fsSynthNode **result) { - int ret; V9fsSynthNode *node, *tmp; if (!synth_fs) { @@ -87,8 +86,7 @@ int qemu_v9fs_synth_mkdir(V9fsSynthNode *parent, int mode, QEMU_LOCK_GUARD(&synth_mutex); QLIST_FOREACH(tmp, &parent->child, sibling) { if (!strcmp(tmp->name, name)) { - ret = EEXIST; - return ret; + return EEXIST; } } /* Add the name */ @@ -98,15 +96,13 @@ int qemu_v9fs_synth_mkdir(V9fsSynthNode *parent, int mode, v9fs_add_dir_node(node, node->attr->mode, ".", node->attr, node->attr->inode); *result = node; - ret = 0; - return ret; + return 0; } int qemu_v9fs_synth_add_file(V9fsSynthNode *parent, int mode, const char *name, v9fs_synth_read read, v9fs_synth_write write, void *arg) { - int ret; V9fsSynthNode *node, *tmp; if (!synth_fs) { @@ -122,8 +118,7 @@ int qemu_v9fs_synth_add_file(V9fsSynthNode *parent, int mode, QEMU_LOCK_GUARD(&synth_mutex); QLIST_FOREACH(tmp, &parent->child, sibling) { if (!strcmp(tmp->name, name)) { - ret = EEXIST; - return ret; + return EEXIST; } } /* Add file type and remove write bits */ @@ -138,8 +133,7 @@ int qemu_v9fs_synth_add_file(V9fsSynthNode *parent, int mode, node->private = arg; pstrcpy(node->name, sizeof(node->name), name); QLIST_INSERT_HEAD_RCU(&parent->child, node, sibling); - ret = 0; - return ret; + return 0; } static void synth_fill_statbuf(V9fsSynthNode *node, struct stat *stbuf) diff --git a/hw/acpi/core.c b/hw/acpi/core.c index 3e811bf03c..6da275c599 100644 --- a/hw/acpi/core.c +++ b/hw/acpi/core.c @@ -185,7 +185,7 @@ static void acpi_table_install(const char unsigned *blob, size_t bloblen, changed_fields = 0; ext_hdr->_length = cpu_to_le16(acpi_payload_size); - if (hdrs->has_sig) { + if (hdrs->sig) { strncpy(ext_hdr->sig, hdrs->sig, sizeof ext_hdr->sig); ++changed_fields; } @@ -204,11 +204,11 @@ static void acpi_table_install(const char unsigned *blob, size_t bloblen, ext_hdr->checksum = 0; - if (hdrs->has_oem_id) { + if (hdrs->oem_id) { strncpy(ext_hdr->oem_id, hdrs->oem_id, sizeof ext_hdr->oem_id); ++changed_fields; } - if (hdrs->has_oem_table_id) { + if (hdrs->oem_table_id) { strncpy(ext_hdr->oem_table_id, hdrs->oem_table_id, sizeof ext_hdr->oem_table_id); ++changed_fields; @@ -217,7 +217,7 @@ static void acpi_table_install(const char unsigned *blob, size_t bloblen, ext_hdr->oem_revision = cpu_to_le32(hdrs->oem_rev); ++changed_fields; } - if (hdrs->has_asl_compiler_id) { + if (hdrs->asl_compiler_id) { strncpy(ext_hdr->asl_compiler_id, hdrs->asl_compiler_id, sizeof ext_hdr->asl_compiler_id); ++changed_fields; @@ -255,12 +255,12 @@ void acpi_table_add(const QemuOpts *opts, Error **errp) if (!hdrs) { goto out; } - if (hdrs->has_file == hdrs->has_data) { + if (!hdrs->file == !hdrs->data) { error_setg(errp, "'-acpitable' requires one of 'data' or 'file'"); goto out; } - pathnames = g_strsplit(hdrs->has_file ? 
hdrs->file : hdrs->data, ":", 0); + pathnames = g_strsplit(hdrs->file ?: hdrs->data, ":", 0); if (pathnames == NULL || pathnames[0] == NULL) { error_setg(errp, "'-acpitable' requires at least one pathname"); goto out; @@ -297,7 +297,7 @@ void acpi_table_add(const QemuOpts *opts, Error **errp) close(fd); } - acpi_table_install(blob, bloblen, hdrs->has_file, hdrs, errp); + acpi_table_install(blob, bloblen, !!hdrs->file, hdrs, errp); out: g_free(blob); diff --git a/hw/acpi/cpu.c b/hw/acpi/cpu.c index 3646dbfe68..4e580959a2 100644 --- a/hw/acpi/cpu.c +++ b/hw/acpi/cpu.c @@ -35,7 +35,6 @@ static ACPIOSTInfo *acpi_cpu_device_status(int idx, AcpiCpuStatus *cdev) DeviceState *dev = DEVICE(cdev->cpu); if (dev->id) { info->device = g_strdup(dev->id); - info->has_device = true; } } return info; diff --git a/hw/acpi/memory_hotplug.c b/hw/acpi/memory_hotplug.c index 0a7e89a13e..d926f4f77d 100644 --- a/hw/acpi/memory_hotplug.c +++ b/hw/acpi/memory_hotplug.c @@ -44,7 +44,6 @@ static ACPIOSTInfo *acpi_memory_device_status(int slot, MemStatus *mdev) DeviceState *dev = DEVICE(mdev->dimm); if (dev->id) { info->device = g_strdup(dev->id); - info->has_device = true; } } return info; @@ -186,7 +185,7 @@ static void acpi_memory_hotplug_write(void *opaque, hwaddr addr, uint64_t data, */ qapi_event_send_mem_unplug_error(dev->id ? : "", error_get_pretty(local_err)); - qapi_event_send_device_unplug_guest_error(!!dev->id, dev->id, + qapi_event_send_device_unplug_guest_error(dev->id, dev->canonical_path); error_free(local_err); break; diff --git a/hw/arm/armsse.c b/hw/arm/armsse.c index aecdeb9815..0202bad787 100644 --- a/hw/arm/armsse.c +++ b/hw/arm/armsse.c @@ -900,6 +900,7 @@ static qemu_irq armsse_get_common_irq_in(ARMSSE *s, int irqno) static void armsse_realize(DeviceState *dev, Error **errp) { + ERRP_GUARD(); ARMSSE *s = ARM_SSE(dev); ARMSSEClass *asc = ARM_SSE_GET_CLASS(dev); const ARMSSEInfo *info = asc->info; @@ -914,8 +915,6 @@ static void armsse_realize(DeviceState *dev, Error **errp) DeviceState *dev_splitter; uint32_t addr_width_max; - ERRP_GUARD(); - if (!s->board_memory) { error_setg(errp, "memory property was not set"); return; diff --git a/hw/arm/boot.c b/hw/arm/boot.c index 15c2bf1867..3d7d11f782 100644 --- a/hw/arm/boot.c +++ b/hw/arm/boot.c @@ -656,15 +656,17 @@ int arm_load_dtb(hwaddr addr, const struct arm_boot_info *binfo, } if (binfo->initrd_size) { - rc = qemu_fdt_setprop_cell(fdt, "/chosen", "linux,initrd-start", - binfo->initrd_start); + rc = qemu_fdt_setprop_sized_cells(fdt, "/chosen", "linux,initrd-start", + acells, binfo->initrd_start); if (rc < 0) { fprintf(stderr, "couldn't set /chosen/linux,initrd-start\n"); goto fail; } - rc = qemu_fdt_setprop_cell(fdt, "/chosen", "linux,initrd-end", - binfo->initrd_start + binfo->initrd_size); + rc = qemu_fdt_setprop_sized_cells(fdt, "/chosen", "linux,initrd-end", + acells, + binfo->initrd_start + + binfo->initrd_size); if (rc < 0) { fprintf(stderr, "couldn't set /chosen/linux,initrd-end\n"); goto fail; diff --git a/hw/arm/smmu-common.c b/hw/arm/smmu-common.c index e09b9c13b7..220838525d 100644 --- a/hw/arm/smmu-common.c +++ b/hw/arm/smmu-common.c @@ -526,9 +526,9 @@ static void smmu_base_realize(DeviceState *dev, Error **errp) } } -static void smmu_base_reset(DeviceState *dev) +static void smmu_base_reset_hold(Object *obj) { - SMMUState *s = ARM_SMMU(dev); + SMMUState *s = ARM_SMMU(obj); g_hash_table_remove_all(s->configs); g_hash_table_remove_all(s->iotlb); @@ -543,12 +543,13 @@ static Property smmu_dev_properties[] = { static void 
smmu_base_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); SMMUBaseClass *sbc = ARM_SMMU_CLASS(klass); device_class_set_props(dc, smmu_dev_properties); device_class_set_parent_realize(dc, smmu_base_realize, &sbc->parent_realize); - dc->reset = smmu_base_reset; + rc->phases.hold = smmu_base_reset_hold; } static const TypeInfo smmu_base_info = { diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c index daa80e9c7b..955b89c8d5 100644 --- a/hw/arm/smmuv3.c +++ b/hw/arm/smmuv3.c @@ -1431,12 +1431,14 @@ static void smmu_init_irq(SMMUv3State *s, SysBusDevice *dev) } } -static void smmu_reset(DeviceState *dev) +static void smmu_reset_hold(Object *obj) { - SMMUv3State *s = ARM_SMMUV3(dev); + SMMUv3State *s = ARM_SMMUV3(obj); SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s); - c->parent_reset(dev); + if (c->parent_phases.hold) { + c->parent_phases.hold(obj); + } smmuv3_init_regs(s); } @@ -1520,10 +1522,12 @@ static void smmuv3_instance_init(Object *obj) static void smmuv3_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); SMMUv3Class *c = ARM_SMMUV3_CLASS(klass); dc->vmsd = &vmstate_smmuv3; - device_class_set_parent_reset(dc, smmu_reset, &c->parent_reset); + resettable_class_set_parent_phases(rc, NULL, smmu_reset_hold, NULL, + &c->parent_phases); c->parent_realize = dc->realize; dc->realize = smmu_realize; } diff --git a/hw/arm/virt.c b/hw/arm/virt.c index b871350856..04eb6c201d 100644 --- a/hw/arm/virt.c +++ b/hw/arm/virt.c @@ -174,6 +174,12 @@ static const MemMapEntry base_memmap[] = { * Note the extended_memmap is sized so that it eventually also includes the * base_memmap entries (VIRT_HIGH_GIC_REDIST2 index is greater than the last * index of base_memmap). + * + * The memory map for these Highmem IO Regions can be in legacy or compact + * layout, depending on 'compact-highmem' property. With legacy layout, the + * PA space for one specific region is always reserved, even if the region + * has been disabled or doesn't fit into the PA space. However, the PA space + * for the region won't be reserved in these circumstances with compact layout. */ static MemMapEntry extended_memmap[] = { /* Additional 64 MB redist region (can contain up to 512 redistributors) */ @@ -201,6 +207,7 @@ static const char *valid_cpus[] = { ARM_CPU_TYPE_NAME("cortex-a15"), ARM_CPU_TYPE_NAME("cortex-a35"), ARM_CPU_TYPE_NAME("cortex-a53"), + ARM_CPU_TYPE_NAME("cortex-a55"), ARM_CPU_TYPE_NAME("cortex-a57"), ARM_CPU_TYPE_NAME("cortex-a72"), ARM_CPU_TYPE_NAME("cortex-a76"), @@ -1608,9 +1615,11 @@ static void *machvirt_dtb(const struct arm_boot_info *binfo, int *fdt_size) static void virt_build_smbios(VirtMachineState *vms) { MachineClass *mc = MACHINE_GET_CLASS(vms); + MachineState *ms = MACHINE(vms); VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms); uint8_t *smbios_tables, *smbios_anchor; size_t smbios_tables_len, smbios_anchor_len; + struct smbios_phys_mem_area mem_array; const char *product = "QEMU Virtual Machine"; if (kvm_enabled()) { @@ -1621,7 +1630,11 @@ static void virt_build_smbios(VirtMachineState *vms) vmc->smbios_old_sys_ver ? 
"1.0" : mc->name, false, true, SMBIOS_ENTRY_POINT_TYPE_64); - smbios_get_tables(MACHINE(vms), NULL, 0, + /* build the array of physical mem area from base_memmap */ + mem_array.address = vms->memmap[VIRT_MEM].base; + mem_array.length = ms->ram_size; + + smbios_get_tables(ms, &mem_array, 1, &smbios_tables, &smbios_tables_len, &smbios_anchor, &smbios_anchor_len, &error_fatal); @@ -1690,6 +1703,58 @@ static uint64_t virt_cpu_mp_affinity(VirtMachineState *vms, int idx) return arm_cpu_mp_affinity(idx, clustersz); } +static inline bool *virt_get_high_memmap_enabled(VirtMachineState *vms, + int index) +{ + bool *enabled_array[] = { + &vms->highmem_redists, + &vms->highmem_ecam, + &vms->highmem_mmio, + }; + + assert(ARRAY_SIZE(extended_memmap) - VIRT_LOWMEMMAP_LAST == + ARRAY_SIZE(enabled_array)); + assert(index - VIRT_LOWMEMMAP_LAST < ARRAY_SIZE(enabled_array)); + + return enabled_array[index - VIRT_LOWMEMMAP_LAST]; +} + +static void virt_set_high_memmap(VirtMachineState *vms, + hwaddr base, int pa_bits) +{ + hwaddr region_base, region_size; + bool *region_enabled, fits; + int i; + + for (i = VIRT_LOWMEMMAP_LAST; i < ARRAY_SIZE(extended_memmap); i++) { + region_enabled = virt_get_high_memmap_enabled(vms, i); + region_base = ROUND_UP(base, extended_memmap[i].size); + region_size = extended_memmap[i].size; + + vms->memmap[i].base = region_base; + vms->memmap[i].size = region_size; + + /* + * Check each device to see if it fits in the PA space, + * moving highest_gpa as we go. For compatibility, move + * highest_gpa for disabled fitting devices as well, if + * the compact layout has been disabled. + * + * For each device that doesn't fit, disable it. + */ + fits = (region_base + region_size) <= BIT_ULL(pa_bits); + *region_enabled &= fits; + if (vms->highmem_compact && !*region_enabled) { + continue; + } + + base = region_base + region_size; + if (fits) { + vms->highest_gpa = base - 1; + } + } +} + static void virt_set_memmap(VirtMachineState *vms, int pa_bits) { MachineState *ms = MACHINE(vms); @@ -1745,39 +1810,7 @@ static void virt_set_memmap(VirtMachineState *vms, int pa_bits) /* We know for sure that at least the memory fits in the PA space */ vms->highest_gpa = memtop - 1; - for (i = VIRT_LOWMEMMAP_LAST; i < ARRAY_SIZE(extended_memmap); i++) { - hwaddr size = extended_memmap[i].size; - bool fits; - - base = ROUND_UP(base, size); - vms->memmap[i].base = base; - vms->memmap[i].size = size; - - /* - * Check each device to see if they fit in the PA space, - * moving highest_gpa as we go. - * - * For each device that doesn't fit, disable it. 
- */ - fits = (base + size) <= BIT_ULL(pa_bits); - if (fits) { - vms->highest_gpa = base + size - 1; - } - - switch (i) { - case VIRT_HIGH_GIC_REDIST2: - vms->highmem_redists &= fits; - break; - case VIRT_HIGH_PCIE_ECAM: - vms->highmem_ecam &= fits; - break; - case VIRT_HIGH_PCIE_MMIO: - vms->highmem_mmio &= fits; - break; - } - - base += size; - } + virt_set_high_memmap(vms, base, pa_bits); if (device_memory_size > 0) { ms->device_memory = g_malloc0(sizeof(*ms->device_memory)); @@ -2070,14 +2103,20 @@ static void machvirt_init(MachineState *machine) if (vms->gic_version == VIRT_GIC_VERSION_2) { virt_max_cpus = GIC_NCPU; } else { - virt_max_cpus = virt_redist_capacity(vms, VIRT_GIC_REDIST) + - virt_redist_capacity(vms, VIRT_HIGH_GIC_REDIST2); + virt_max_cpus = virt_redist_capacity(vms, VIRT_GIC_REDIST); + if (vms->highmem_redists) { + virt_max_cpus += virt_redist_capacity(vms, VIRT_HIGH_GIC_REDIST2); + } } if (max_cpus > virt_max_cpus) { error_report("Number of SMP CPUs requested (%d) exceeds max CPUs " "supported by machine 'mach-virt' (%d)", max_cpus, virt_max_cpus); + if (vms->gic_version != VIRT_GIC_VERSION_2 && !vms->highmem_redists) { + error_printf("Try 'highmem-redists=on' for more CPUs\n"); + } + exit(1); } @@ -2332,6 +2371,63 @@ static void virt_set_highmem(Object *obj, bool value, Error **errp) vms->highmem = value; } +static bool virt_get_compact_highmem(Object *obj, Error **errp) +{ + VirtMachineState *vms = VIRT_MACHINE(obj); + + return vms->highmem_compact; +} + +static void virt_set_compact_highmem(Object *obj, bool value, Error **errp) +{ + VirtMachineState *vms = VIRT_MACHINE(obj); + + vms->highmem_compact = value; +} + +static bool virt_get_highmem_redists(Object *obj, Error **errp) +{ + VirtMachineState *vms = VIRT_MACHINE(obj); + + return vms->highmem_redists; +} + +static void virt_set_highmem_redists(Object *obj, bool value, Error **errp) +{ + VirtMachineState *vms = VIRT_MACHINE(obj); + + vms->highmem_redists = value; +} + +static bool virt_get_highmem_ecam(Object *obj, Error **errp) +{ + VirtMachineState *vms = VIRT_MACHINE(obj); + + return vms->highmem_ecam; +} + +static void virt_set_highmem_ecam(Object *obj, bool value, Error **errp) +{ + VirtMachineState *vms = VIRT_MACHINE(obj); + + vms->highmem_ecam = value; +} + +static bool virt_get_highmem_mmio(Object *obj, Error **errp) +{ + VirtMachineState *vms = VIRT_MACHINE(obj); + + return vms->highmem_mmio; +} + +static void virt_set_highmem_mmio(Object *obj, bool value, Error **errp) +{ + VirtMachineState *vms = VIRT_MACHINE(obj); + + vms->highmem_mmio = value; +} + + static bool virt_get_its(Object *obj, Error **errp) { VirtMachineState *vms = VIRT_MACHINE(obj); @@ -2771,24 +2867,20 @@ static void virt_dimm_unplug_request(HotplugHandler *hotplug_dev, DeviceState *dev, Error **errp) { VirtMachineState *vms = VIRT_MACHINE(hotplug_dev); - Error *local_err = NULL; if (!vms->acpi_dev) { - error_setg(&local_err, + error_setg(errp, "memory hotplug is not enabled: missing acpi-ged device"); - goto out; + return; } if (object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM)) { - error_setg(&local_err, - "nvdimm device hot unplug is not supported yet."); - goto out; + error_setg(errp, "nvdimm device hot unplug is not supported yet."); + return; } hotplug_handler_unplug_request(HOTPLUG_HANDLER(vms->acpi_dev), dev, - &local_err); -out: - error_propagate(errp, local_err); + errp); } static void virt_dimm_unplug(HotplugHandler *hotplug_dev, @@ -2950,6 +3042,35 @@ static void virt_machine_class_init(ObjectClass *oc, void *data) "Set 
on/off to enable/disable using " "physical address space above 32 bits"); + object_class_property_add_bool(oc, "compact-highmem", + virt_get_compact_highmem, + virt_set_compact_highmem); + object_class_property_set_description(oc, "compact-highmem", + "Set on/off to enable/disable compact " + "layout for high memory regions"); + + object_class_property_add_bool(oc, "highmem-redists", + virt_get_highmem_redists, + virt_set_highmem_redists); + object_class_property_set_description(oc, "highmem-redists", + "Set on/off to enable/disable high " + "memory region for GICv3 or GICv4 " + "redistributor"); + + object_class_property_add_bool(oc, "highmem-ecam", + virt_get_highmem_ecam, + virt_set_highmem_ecam); + object_class_property_set_description(oc, "highmem-ecam", + "Set on/off to enable/disable high " + "memory region for PCI ECAM"); + + object_class_property_add_bool(oc, "highmem-mmio", + virt_get_highmem_mmio, + virt_set_highmem_mmio); + object_class_property_set_description(oc, "highmem-mmio", + "Set on/off to enable/disable high " + "memory region for PCI MMIO"); + object_class_property_add_str(oc, "gic-version", virt_get_gic_version, virt_set_gic_version); object_class_property_set_description(oc, "gic-version", @@ -3034,6 +3155,7 @@ static void virt_instance_init(Object *obj) /* High memory is enabled by default */ vms->highmem = true; + vms->highmem_compact = !vmc->no_highmem_compact; vms->gic_version = VIRT_GIC_VERSION_NOSEL; vms->highmem_ecam = !vmc->no_highmem_ecam; @@ -3103,8 +3225,12 @@ DEFINE_VIRT_MACHINE_AS_LATEST(7, 2) static void virt_machine_7_1_options(MachineClass *mc) { + VirtMachineClass *vmc = VIRT_MACHINE_CLASS(OBJECT_CLASS(mc)); + virt_machine_7_2_options(mc); compat_props_add(mc->compat_props, hw_compat_7_1, hw_compat_7_1_len); + /* Compact layout for high memory regions was introduced with 7.2 */ + vmc->no_highmem_compact = true; } DEFINE_VIRT_MACHINE(7, 1) diff --git a/hw/char/sifive_uart.c b/hw/char/sifive_uart.c index 1c75f792b3..f2684e57bc 100644 --- a/hw/char/sifive_uart.c +++ b/hw/char/sifive_uart.c @@ -274,7 +274,6 @@ SiFiveUARTState *sifive_uart_create(MemoryRegion *address_space, hwaddr base, { DeviceState *dev; SysBusDevice *s; - SiFiveUARTState *r; dev = qdev_new("riscv.sifive.uart"); s = SYS_BUS_DEVICE(dev); @@ -284,6 +283,5 @@ SiFiveUARTState *sifive_uart_create(MemoryRegion *address_space, hwaddr base, sysbus_mmio_get_region(s, 0)); sysbus_connect_irq(s, 0, irq); - r = SIFIVE_UART(dev); - return r; + return SIFIVE_UART(dev); } diff --git a/hw/core/cpu-common.c b/hw/core/cpu-common.c index f9fdd46b9d..78b5f350a0 100644 --- a/hw/core/cpu-common.c +++ b/hw/core/cpu-common.c @@ -116,9 +116,9 @@ void cpu_reset(CPUState *cpu) trace_guest_cpu_reset(cpu); } -static void cpu_common_reset(DeviceState *dev) +static void cpu_common_reset_hold(Object *obj) { - CPUState *cpu = CPU(dev); + CPUState *cpu = CPU(obj); CPUClass *cc = CPU_GET_CLASS(cpu); if (qemu_loglevel_mask(CPU_LOG_RESET)) { @@ -259,6 +259,7 @@ static int64_t cpu_common_get_arch_id(CPUState *cpu) static void cpu_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); CPUClass *k = CPU_CLASS(klass); k->parse_features = cpu_common_parse_features; @@ -269,7 +270,7 @@ static void cpu_class_init(ObjectClass *klass, void *data) set_bit(DEVICE_CATEGORY_CPU, dc->categories); dc->realize = cpu_common_realizefn; dc->unrealize = cpu_common_unrealizefn; - dc->reset = cpu_common_reset; + rc->phases.hold = cpu_common_reset_hold; 
cpu_class_init_props(dc); /* * Reason: CPUs still need special care by board code: wiring up diff --git a/hw/core/machine-hmp-cmds.c b/hw/core/machine-hmp-cmds.c index 5cb5eecbfc..a1a51e9778 100644 --- a/hw/core/machine-hmp-cmds.c +++ b/hw/core/machine-hmp-cmds.c @@ -62,7 +62,7 @@ void hmp_hotpluggable_cpus(Monitor *mon, const QDict *qdict) monitor_printf(mon, " type: \"%s\"\n", l->value->type); monitor_printf(mon, " vcpus_count: \"%" PRIu64 "\"\n", l->value->vcpus_count); - if (l->value->has_qom_path) { + if (l->value->qom_path) { monitor_printf(mon, " qom_path: \"%s\"\n", l->value->qom_path); } diff --git a/hw/core/machine-qmp-cmds.c b/hw/core/machine-qmp-cmds.c index 4f4ab30f8c..80d5e59651 100644 --- a/hw/core/machine-qmp-cmds.c +++ b/hw/core/machine-qmp-cmds.c @@ -55,8 +55,7 @@ CpuInfoFastList *qmp_query_cpus_fast(Error **errp) value->qom_path = object_get_canonical_path(OBJECT(cpu)); value->thread_id = cpu->thread_id; - value->has_props = !!mc->cpu_index_to_instance_props; - if (value->has_props) { + if (mc->cpu_index_to_instance_props) { CpuInstanceProperties *props; props = g_malloc0(sizeof(*props)); *props = mc->cpu_index_to_instance_props(ms, cpu->cpu_index); @@ -90,7 +89,6 @@ MachineInfoList *qmp_query_machines(Error **errp) } if (mc->alias) { - info->has_alias = true; info->alias = g_strdup(mc->alias); } @@ -101,11 +99,9 @@ MachineInfoList *qmp_query_machines(Error **errp) info->deprecated = !!mc->deprecation_reason; if (mc->default_cpu_type) { info->default_cpu_type = g_strdup(mc->default_cpu_type); - info->has_default_cpu_type = true; } if (mc->default_ram_id) { info->default_ram_id = g_strdup(mc->default_ram_id); - info->has_default_ram_id = true; } QAPI_LIST_PREPEND(mach_list, info); @@ -168,7 +164,6 @@ static int query_memdev(Object *obj, void *opaque) m = g_malloc0(sizeof(*m)); m->id = g_strdup(object_get_canonical_path_component(obj)); - m->has_id = !!m->id; m->size = object_property_get_uint(obj, "size", &error_abort); m->merge = object_property_get_bool(obj, "merge", &error_abort); @@ -227,7 +222,7 @@ HumanReadableText *qmp_x_query_numa(Error **errp) for (i = 0; i < nb_numa_nodes; i++) { g_string_append_printf(buf, "node %d cpus:", i); for (cpu = cpu_list; cpu; cpu = cpu->next) { - if (cpu->value->has_props && cpu->value->props->has_node_id && + if (cpu->value->props && cpu->value->props->has_node_id && cpu->value->props->node_id == i) { g_string_append_printf(buf, " %" PRIi64, cpu->value->cpu_index); } diff --git a/hw/core/machine.c b/hw/core/machine.c index 8d34caa31d..644b34cd24 100644 --- a/hw/core/machine.c +++ b/hw/core/machine.c @@ -554,12 +554,11 @@ static void machine_get_mem(Object *obj, Visitor *v, const char *name, static void machine_set_mem(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { + ERRP_GUARD(); MachineState *ms = MACHINE(obj); MachineClass *mc = MACHINE_GET_CLASS(obj); MemorySizeConfiguration *mem; - ERRP_GUARD(); - if (!visit_type_MemorySizeConfiguration(v, name, &mem, errp)) { return; } @@ -684,7 +683,6 @@ HotpluggableCPUList *machine_query_hotpluggable_cpus(MachineState *machine) cpu = machine->possible_cpus->cpus[i].cpu; if (cpu) { - cpu_item->has_qom_path = true; cpu_item->qom_path = object_get_canonical_path(cpu); } QAPI_LIST_PREPEND(head, cpu_item); @@ -873,8 +871,7 @@ static void machine_copy_boot_config(MachineState *ms, BootConfiguration *config machine_free_boot_config(ms); ms->boot_config = *config; - if (!config->has_order) { - ms->boot_config.has_order = true; + if (!config->order) { ms->boot_config.order = 
g_strdup(machine_class->default_boot_order); } } @@ -889,13 +886,13 @@ static void machine_set_boot(Object *obj, Visitor *v, const char *name, if (!visit_type_BootConfiguration(v, name, &config, errp)) { return; } - if (config->has_order) { + if (config->order) { validate_bootdevices(config->order, errp); if (*errp) { goto out_free; } } - if (config->has_once) { + if (config->once) { validate_bootdevices(config->once, errp); if (*errp) { goto out_free; @@ -1424,7 +1421,7 @@ void qdev_machine_creation_done(void) { cpu_synchronize_all_post_init(); - if (current_machine->boot_config.has_once) { + if (current_machine->boot_config.once) { qemu_boot_set(current_machine->boot_config.once, &error_fatal); qemu_register_reset(restore_boot_order, g_strdup(current_machine->boot_config.order)); } diff --git a/hw/core/numa.c b/hw/core/numa.c index ea24a5fa8c..d8d36b16d8 100644 --- a/hw/core/numa.c +++ b/hw/core/numa.c @@ -130,9 +130,9 @@ static void parse_numa_node(MachineState *ms, NumaNodeOptions *node, } } - have_memdevs = have_memdevs ? : node->has_memdev; - have_mem = have_mem ? : node->has_mem; - if ((node->has_mem && have_memdevs) || (node->has_memdev && have_mem)) { + have_memdevs = have_memdevs || node->memdev; + have_mem = have_mem || node->has_mem; + if ((node->has_mem && have_memdevs) || (node->memdev && have_mem)) { error_setg(errp, "numa configuration should use either mem= or memdev=," "mixing both is not allowed"); return; @@ -152,7 +152,7 @@ static void parse_numa_node(MachineState *ms, NumaNodeOptions *node, " use -numa node,memdev instead"); } } - if (node->has_memdev) { + if (node->memdev) { Object *o; o = object_resolve_path_type(node->memdev, TYPE_MEMORY_BACKEND, NULL); if (!o) { diff --git a/hw/core/qdev-clock.c b/hw/core/qdev-clock.c index 117f4c6ea4..82799577f3 100644 --- a/hw/core/qdev-clock.c +++ b/hw/core/qdev-clock.c @@ -134,7 +134,7 @@ void qdev_init_clocks(DeviceState *dev, const ClockPortInitArray clocks) Clock **clkp; /* offset cannot be inside the DeviceState part */ assert(elem->offset > sizeof(DeviceState)); - clkp = (Clock **)(((void *) dev) + elem->offset); + clkp = ((void *)dev) + elem->offset; if (elem->is_output) { *clkp = qdev_init_clock_out(dev, elem->name); } else { diff --git a/hw/core/qdev-properties-system.c b/hw/core/qdev-properties-system.c index a91f60567a..97a968f477 100644 --- a/hw/core/qdev-properties-system.c +++ b/hw/core/qdev-properties-system.c @@ -679,14 +679,11 @@ static void set_reserved_region(Object *obj, Visitor *v, const char *name, { Property *prop = opaque; ReservedRegion *rr = object_field_prop_ptr(obj, prop); - Error *local_err = NULL; const char *endptr; char *str; int ret; - visit_type_str(v, name, &str, &local_err); - if (local_err) { - error_propagate(errp, local_err); + if (!visit_type_str(v, name, &str, errp)) { return; } diff --git a/hw/core/qdev.c b/hw/core/qdev.c index 0145501904..d759c4602c 100644 --- a/hw/core/qdev.c +++ b/hw/core/qdev.c @@ -250,60 +250,6 @@ void qdev_set_legacy_instance_id(DeviceState *dev, int alias_id, dev->alias_required_for_version = required_for_version; } -static int qdev_prereset(DeviceState *dev, void *opaque) -{ - trace_qdev_reset_tree(dev, object_get_typename(OBJECT(dev))); - return 0; -} - -static int qbus_prereset(BusState *bus, void *opaque) -{ - trace_qbus_reset_tree(bus, object_get_typename(OBJECT(bus))); - return 0; -} - -static int qdev_reset_one(DeviceState *dev, void *opaque) -{ - device_legacy_reset(dev); - - return 0; -} - -static int qbus_reset_one(BusState *bus, void *opaque) -{ - 
BusClass *bc = BUS_GET_CLASS(bus); - trace_qbus_reset(bus, object_get_typename(OBJECT(bus))); - if (bc->reset) { - bc->reset(bus); - } - return 0; -} - -void qdev_reset_all(DeviceState *dev) -{ - trace_qdev_reset_all(dev, object_get_typename(OBJECT(dev))); - qdev_walk_children(dev, qdev_prereset, qbus_prereset, - qdev_reset_one, qbus_reset_one, NULL); -} - -void qdev_reset_all_fn(void *opaque) -{ - qdev_reset_all(DEVICE(opaque)); -} - -void qbus_reset_all(BusState *bus) -{ - trace_qbus_reset_all(bus, object_get_typename(OBJECT(bus))); - qbus_walk_children(bus, qdev_prereset, qbus_prereset, - qdev_reset_one, qbus_reset_one, NULL); -} - -void qbus_reset_all_fn(void *opaque) -{ - BusState *bus = opaque; - qbus_reset_all(bus); -} - void device_cold_reset(DeviceState *dev) { resettable_reset(OBJECT(dev), RESET_TYPE_COLD); @@ -493,8 +439,6 @@ void qdev_del_unplug_blocker(DeviceState *dev, Error *reason) bool qdev_unplug_blocked(DeviceState *dev, Error **errp) { - ERRP_GUARD(); - if (dev->unplug_blockers) { error_propagate(errp, error_copy(dev->unplug_blockers->data)); return true; @@ -757,7 +701,7 @@ static void device_finalize(Object *obj) if (dev->pending_deleted_event) { g_assert(dev->canonical_path); - qapi_event_send_device_deleted(!!dev->id, dev->id, dev->canonical_path); + qapi_event_send_device_deleted(dev->id, dev->canonical_path); g_free(dev->canonical_path); dev->canonical_path = NULL; } @@ -924,16 +868,6 @@ void device_class_set_parent_unrealize(DeviceClass *dc, dc->unrealize = dev_unrealize; } -void device_legacy_reset(DeviceState *dev) -{ - DeviceClass *klass = DEVICE_GET_CLASS(dev); - - trace_qdev_reset(dev, object_get_typename(OBJECT(dev))); - if (klass->reset) { - klass->reset(dev); - } -} - Object *qdev_get_machine(void) { static Object *dev; diff --git a/hw/core/trace-events b/hw/core/trace-events index 9b3ecce3b2..56da55bd71 100644 --- a/hw/core/trace-events +++ b/hw/core/trace-events @@ -2,12 +2,6 @@ loader_write_rom(const char *name, uint64_t gpa, uint64_t size, bool isrom) "%s: @0x%"PRIx64" size=0x%"PRIx64" ROM=%d" # qdev.c -qdev_reset(void *obj, const char *objtype) "obj=%p(%s)" -qdev_reset_all(void *obj, const char *objtype) "obj=%p(%s)" -qdev_reset_tree(void *obj, const char *objtype) "obj=%p(%s)" -qbus_reset(void *obj, const char *objtype) "obj=%p(%s)" -qbus_reset_all(void *obj, const char *objtype) "obj=%p(%s)" -qbus_reset_tree(void *obj, const char *objtype) "obj=%p(%s)" qdev_update_parent_bus(void *obj, const char *objtype, void *oldp, const char *oldptype, void *newp, const char *newptype) "obj=%p(%s) old_parent=%p(%s) new_parent=%p(%s)" # resettable.c diff --git a/hw/display/Kconfig b/hw/display/Kconfig index a1b159becd..7b3da68d1c 100644 --- a/hw/display/Kconfig +++ b/hw/display/Kconfig @@ -55,7 +55,7 @@ config VGA_MMIO config VMWARE_VGA bool - default y if PCI_DEVICES + default y if PCI_DEVICES && PC_PCI depends on PCI select VGA diff --git a/hw/display/virtio-vga.c b/hw/display/virtio-vga.c index 4dcb34c4a7..e6fb0aa876 100644 --- a/hw/display/virtio-vga.c +++ b/hw/display/virtio-vga.c @@ -165,13 +165,15 @@ static void virtio_vga_base_realize(VirtIOPCIProxy *vpci_dev, Error **errp) } } -static void virtio_vga_base_reset(DeviceState *dev) +static void virtio_vga_base_reset_hold(Object *obj) { - VirtIOVGABaseClass *klass = VIRTIO_VGA_BASE_GET_CLASS(dev); - VirtIOVGABase *vvga = VIRTIO_VGA_BASE(dev); + VirtIOVGABaseClass *klass = VIRTIO_VGA_BASE_GET_CLASS(obj); + VirtIOVGABase *vvga = VIRTIO_VGA_BASE(obj); /* reset virtio-gpu */ - klass->parent_reset(dev); + if 
(klass->parent_phases.hold) { + klass->parent_phases.hold(obj); + } /* reset vga */ vga_common_reset(&vvga->vga); @@ -203,13 +205,14 @@ static void virtio_vga_base_class_init(ObjectClass *klass, void *data) VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass); VirtIOVGABaseClass *v = VIRTIO_VGA_BASE_CLASS(klass); PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories); device_class_set_props(dc, virtio_vga_base_properties); dc->vmsd = &vmstate_virtio_vga_base; dc->hotpluggable = false; - device_class_set_parent_reset(dc, virtio_vga_base_reset, - &v->parent_reset); + resettable_class_set_parent_phases(rc, NULL, virtio_vga_base_reset_hold, + NULL, &v->parent_phases); k->realize = virtio_vga_base_realize; pcidev_k->romfile = "vgabios-virtio.bin"; diff --git a/hw/display/virtio-vga.h b/hw/display/virtio-vga.h index 977ad5edc2..0bd9db1cee 100644 --- a/hw/display/virtio-vga.h +++ b/hw/display/virtio-vga.h @@ -23,7 +23,7 @@ struct VirtIOVGABase { struct VirtIOVGABaseClass { VirtioPCIClass parent_class; - DeviceReset parent_reset; + ResettablePhases parent_phases; }; #endif /* VIRTIO_VGA_H */ diff --git a/hw/hyperv/vmbus.c b/hw/hyperv/vmbus.c index 30bc04e1c4..271289f902 100644 --- a/hw/hyperv/vmbus.c +++ b/hw/hyperv/vmbus.c @@ -1578,7 +1578,7 @@ static bool vmbus_initialized(VMBus *vmbus) static void vmbus_reset_all(VMBus *vmbus) { - qbus_reset_all(BUS(vmbus)); + bus_cold_reset(BUS(vmbus)); } static void post_msg(VMBus *vmbus, void *msgdata, uint32_t msglen) @@ -2035,7 +2035,7 @@ static void vdev_reset_on_close(VMBusDevice *vdev) } /* all channels closed -- reset device */ - qdev_reset_all(DEVICE(vdev)); + device_cold_reset(DEVICE(vdev)); } static void handle_close_channel(VMBus *vmbus, vmbus_message_close_channel *msg, @@ -2104,7 +2104,7 @@ static void process_message(VMBus *vmbus) goto out; } msgdata = hv_msg->payload; - msg = (struct vmbus_message_header *)msgdata; + msg = msgdata; trace_vmbus_process_incoming_message(msg->message_type); @@ -2404,7 +2404,6 @@ static const TypeInfo vmbus_dev_type_info = { static void vmbus_realize(BusState *bus, Error **errp) { int ret = 0; - Error *local_err = NULL; VMBus *vmbus = VMBUS(bus); qemu_mutex_init(&vmbus->rx_queue_lock); @@ -2415,13 +2414,13 @@ static void vmbus_realize(BusState *bus, Error **errp) ret = hyperv_set_msg_handler(VMBUS_MESSAGE_CONNECTION_ID, vmbus_recv_message, vmbus); if (ret != 0) { - error_setg(&local_err, "hyperv set message handler failed: %d", ret); + error_setg(errp, "hyperv set message handler failed: %d", ret); goto error_out; } ret = event_notifier_init(&vmbus->notifier, 0); if (ret != 0) { - error_setg(&local_err, "event notifier failed to init with %d", ret); + error_setg(errp, "event notifier failed to init with %d", ret); goto remove_msg_handler; } @@ -2429,7 +2428,7 @@ static void vmbus_realize(BusState *bus, Error **errp) ret = hyperv_set_event_flag_handler(VMBUS_EVENT_CONNECTION_ID, &vmbus->notifier); if (ret != 0) { - error_setg(&local_err, "hyperv set event handler failed with %d", ret); + error_setg(errp, "hyperv set event handler failed with %d", ret); goto clear_event_notifier; } @@ -2441,7 +2440,6 @@ remove_msg_handler: hyperv_set_msg_handler(VMBUS_MESSAGE_CONNECTION_ID, NULL, NULL); error_out: qemu_mutex_destroy(&vmbus->rx_queue_lock); - error_propagate(errp, local_err); } static void vmbus_unrealize(BusState *bus) diff --git a/hw/i386/pc.c b/hw/i386/pc.c index 546b703cb4..fa69b6f43e 100644 --- a/hw/i386/pc.c +++ b/hw/i386/pc.c 
@@ -1782,12 +1782,9 @@ static void pc_machine_set_max_fw_size(Object *obj, Visitor *v, Error **errp) { PCMachineState *pcms = PC_MACHINE(obj); - Error *error = NULL; uint64_t value; - visit_type_size(v, name, &value, &error); - if (error) { - error_propagate(errp, error); + if (!visit_type_size(v, name, &value, errp)) { return; } diff --git a/hw/i386/xen/xen_platform.c b/hw/i386/xen/xen_platform.c index a64265cca0..7db0d94ec2 100644 --- a/hw/i386/xen/xen_platform.c +++ b/hw/i386/xen/xen_platform.c @@ -177,7 +177,7 @@ static void pci_xen_ide_unplug(DeviceState *dev, bool aux) blk_unref(blk); } } - qdev_reset_all(dev); + device_cold_reset(dev); } static void unplug_disks(PCIBus *b, PCIDevice *d, void *opaque) diff --git a/hw/input/adb.c b/hw/input/adb.c index 84331b9fce..214ae6f42b 100644 --- a/hw/input/adb.c +++ b/hw/input/adb.c @@ -43,7 +43,7 @@ static const char *adb_commands[] = { static void adb_device_reset(ADBDevice *d) { - qdev_reset_all(DEVICE(d)); + device_cold_reset(DEVICE(d)); } static int do_adb_request(ADBBusState *s, uint8_t *obuf, const uint8_t *buf, diff --git a/hw/input/ps2.c b/hw/input/ps2.c index 05cf7111e3..3253ab6a92 100644 --- a/hw/input/ps2.c +++ b/hw/input/ps2.c @@ -1001,12 +1001,18 @@ void ps2_write_mouse(PS2MouseState *s, int val) } } -static void ps2_reset(DeviceState *dev) +static void ps2_reset_hold(Object *obj) { - PS2State *s = PS2_DEVICE(dev); + PS2State *s = PS2_DEVICE(obj); s->write_cmd = -1; ps2_reset_queue(s); +} + +static void ps2_reset_exit(Object *obj) +{ + PS2State *s = PS2_DEVICE(obj); + ps2_lower_irq(s); } @@ -1036,13 +1042,16 @@ static void ps2_common_post_load(PS2State *s) q->cwptr = ccount ? (q->rptr + ccount) & (PS2_BUFFER_SIZE - 1) : -1; } -static void ps2_kbd_reset(DeviceState *dev) +static void ps2_kbd_reset_hold(Object *obj) { - PS2DeviceClass *ps2dc = PS2_DEVICE_GET_CLASS(dev); - PS2KbdState *s = PS2_KBD_DEVICE(dev); + PS2DeviceClass *ps2dc = PS2_DEVICE_GET_CLASS(obj); + PS2KbdState *s = PS2_KBD_DEVICE(obj); trace_ps2_kbd_reset(s); - ps2dc->parent_reset(dev); + + if (ps2dc->parent_phases.hold) { + ps2dc->parent_phases.hold(obj); + } s->scan_enabled = 1; s->translate = 0; @@ -1050,13 +1059,16 @@ static void ps2_kbd_reset(DeviceState *dev) s->modifiers = 0; } -static void ps2_mouse_reset(DeviceState *dev) +static void ps2_mouse_reset_hold(Object *obj) { - PS2DeviceClass *ps2dc = PS2_DEVICE_GET_CLASS(dev); - PS2MouseState *s = PS2_MOUSE_DEVICE(dev); + PS2DeviceClass *ps2dc = PS2_DEVICE_GET_CLASS(obj); + PS2MouseState *s = PS2_MOUSE_DEVICE(obj); trace_ps2_mouse_reset(s); - ps2dc->parent_reset(dev); + + if (ps2dc->parent_phases.hold) { + ps2dc->parent_phases.hold(obj); + } s->mouse_status = 0; s->mouse_resolution = 0; @@ -1239,10 +1251,12 @@ static void ps2_mouse_realize(DeviceState *dev, Error **errp) static void ps2_kbd_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); PS2DeviceClass *ps2dc = PS2_DEVICE_CLASS(klass); dc->realize = ps2_kbd_realize; - device_class_set_parent_reset(dc, ps2_kbd_reset, &ps2dc->parent_reset); + resettable_class_set_parent_phases(rc, NULL, ps2_kbd_reset_hold, NULL, + &ps2dc->parent_phases); dc->vmsd = &vmstate_ps2_keyboard; } @@ -1256,11 +1270,12 @@ static const TypeInfo ps2_kbd_info = { static void ps2_mouse_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); PS2DeviceClass *ps2dc = PS2_DEVICE_CLASS(klass); dc->realize = ps2_mouse_realize; - 
device_class_set_parent_reset(dc, ps2_mouse_reset, - &ps2dc->parent_reset); + resettable_class_set_parent_phases(rc, NULL, ps2_mouse_reset_hold, NULL, + &ps2dc->parent_phases); dc->vmsd = &vmstate_ps2_mouse; } @@ -1281,8 +1296,10 @@ static void ps2_init(Object *obj) static void ps2_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); - dc->reset = ps2_reset; + rc->phases.hold = ps2_reset_hold; + rc->phases.exit = ps2_reset_exit; set_bit(DEVICE_CATEGORY_INPUT, dc->categories); } diff --git a/hw/intc/arm_gic_common.c b/hw/intc/arm_gic_common.c index 7b44d5625b..a379cea395 100644 --- a/hw/intc/arm_gic_common.c +++ b/hw/intc/arm_gic_common.c @@ -261,9 +261,9 @@ static inline void arm_gic_common_reset_irq_state(GICState *s, int first_cpu, } } -static void arm_gic_common_reset(DeviceState *dev) +static void arm_gic_common_reset_hold(Object *obj) { - GICState *s = ARM_GIC_COMMON(dev); + GICState *s = ARM_GIC_COMMON(obj); int i, j; int resetprio; @@ -364,9 +364,10 @@ static Property arm_gic_common_properties[] = { static void arm_gic_common_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); ARMLinuxBootIfClass *albifc = ARM_LINUX_BOOT_IF_CLASS(klass); - dc->reset = arm_gic_common_reset; + rc->phases.hold = arm_gic_common_reset_hold; dc->realize = arm_gic_common_realize; device_class_set_props(dc, arm_gic_common_properties); dc->vmsd = &vmstate_gic; diff --git a/hw/intc/arm_gic_kvm.c b/hw/intc/arm_gic_kvm.c index 7d2a13273a..1d588946bc 100644 --- a/hw/intc/arm_gic_kvm.c +++ b/hw/intc/arm_gic_kvm.c @@ -38,7 +38,7 @@ DECLARE_OBJ_CHECKERS(GICState, KVMARMGICClass, struct KVMARMGICClass { ARMGICCommonClass parent_class; DeviceRealize parent_realize; - void (*parent_reset)(DeviceState *dev); + ResettablePhases parent_phases; }; void kvm_arm_gic_set_irq(uint32_t num_irq, int irq, int level) @@ -473,12 +473,14 @@ static void kvm_arm_gic_get(GICState *s) } } -static void kvm_arm_gic_reset(DeviceState *dev) +static void kvm_arm_gic_reset_hold(Object *obj) { - GICState *s = ARM_GIC_COMMON(dev); + GICState *s = ARM_GIC_COMMON(obj); KVMARMGICClass *kgc = KVM_ARM_GIC_GET_CLASS(s); - kgc->parent_reset(dev); + if (kgc->parent_phases.hold) { + kgc->parent_phases.hold(obj); + } if (kvm_arm_gic_can_save_restore(s)) { kvm_arm_gic_put(s); @@ -593,6 +595,7 @@ static void kvm_arm_gic_realize(DeviceState *dev, Error **errp) static void kvm_arm_gic_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); ARMGICCommonClass *agcc = ARM_GIC_COMMON_CLASS(klass); KVMARMGICClass *kgc = KVM_ARM_GIC_CLASS(klass); @@ -600,7 +603,8 @@ static void kvm_arm_gic_class_init(ObjectClass *klass, void *data) agcc->post_load = kvm_arm_gic_put; device_class_set_parent_realize(dc, kvm_arm_gic_realize, &kgc->parent_realize); - device_class_set_parent_reset(dc, kvm_arm_gic_reset, &kgc->parent_reset); + resettable_class_set_parent_phases(rc, NULL, kvm_arm_gic_reset_hold, NULL, + &kgc->parent_phases); } static const TypeInfo kvm_arm_gic_info = { diff --git a/hw/intc/arm_gicv3_common.c b/hw/intc/arm_gicv3_common.c index 351843db4a..642a8243ed 100644 --- a/hw/intc/arm_gicv3_common.c +++ b/hw/intc/arm_gicv3_common.c @@ -450,9 +450,9 @@ static void arm_gicv3_finalize(Object *obj) g_free(s->redist_region_count); } -static void arm_gicv3_common_reset(DeviceState *dev) +static void arm_gicv3_common_reset_hold(Object 
*obj) { - GICv3State *s = ARM_GICV3_COMMON(dev); + GICv3State *s = ARM_GICV3_COMMON(obj); int i; for (i = 0; i < s->num_cpu; i++) { @@ -578,9 +578,10 @@ static Property arm_gicv3_common_properties[] = { static void arm_gicv3_common_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); ARMLinuxBootIfClass *albifc = ARM_LINUX_BOOT_IF_CLASS(klass); - dc->reset = arm_gicv3_common_reset; + rc->phases.hold = arm_gicv3_common_reset_hold; dc->realize = arm_gicv3_common_realize; device_class_set_props(dc, arm_gicv3_common_properties); dc->vmsd = &vmstate_gicv3; diff --git a/hw/intc/arm_gicv3_dist.c b/hw/intc/arm_gicv3_dist.c index eea0368118..d599fefcbc 100644 --- a/hw/intc/arm_gicv3_dist.c +++ b/hw/intc/arm_gicv3_dist.c @@ -390,9 +390,9 @@ static bool gicd_readl(GICv3State *s, hwaddr offset, * MBIS == 0 (message-based SPIs not supported) * SecurityExtn == 1 if security extns supported * CPUNumber == 0 since for us ARE is always 1 - * ITLinesNumber == (num external irqs / 32) - 1 + * ITLinesNumber == (((max SPI IntID + 1) / 32) - 1) */ - int itlinesnumber = ((s->num_irq - GIC_INTERNAL) / 32) - 1; + int itlinesnumber = (s->num_irq / 32) - 1; /* * SecurityExtn must be RAZ if GICD_CTLR.DS == 1, and * "security extensions not supported" always implies DS == 1, diff --git a/hw/intc/arm_gicv3_its.c b/hw/intc/arm_gicv3_its.c index 2ff21ed6bb..57c79da5c5 100644 --- a/hw/intc/arm_gicv3_its.c +++ b/hw/intc/arm_gicv3_its.c @@ -27,7 +27,7 @@ DECLARE_OBJ_CHECKERS(GICv3ITSState, GICv3ITSClass, struct GICv3ITSClass { GICv3ITSCommonClass parent_class; - void (*parent_reset)(DeviceState *dev); + ResettablePhases parent_phases; }; /* @@ -1953,12 +1953,14 @@ static void gicv3_arm_its_realize(DeviceState *dev, Error **errp) } } -static void gicv3_its_reset(DeviceState *dev) +static void gicv3_its_reset_hold(Object *obj) { - GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev); + GICv3ITSState *s = ARM_GICV3_ITS_COMMON(obj); GICv3ITSClass *c = ARM_GICV3_ITS_GET_CLASS(s); - c->parent_reset(dev); + if (c->parent_phases.hold) { + c->parent_phases.hold(obj); + } /* Quiescent bit reset to 1 */ s->ctlr = FIELD_DP32(s->ctlr, GITS_CTLR, QUIESCENT, 1); @@ -2012,12 +2014,14 @@ static Property gicv3_its_props[] = { static void gicv3_its_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); GICv3ITSClass *ic = ARM_GICV3_ITS_CLASS(klass); GICv3ITSCommonClass *icc = ARM_GICV3_ITS_COMMON_CLASS(klass); dc->realize = gicv3_arm_its_realize; device_class_set_props(dc, gicv3_its_props); - device_class_set_parent_reset(dc, gicv3_its_reset, &ic->parent_reset); + resettable_class_set_parent_phases(rc, NULL, gicv3_its_reset_hold, NULL, + &ic->parent_phases); icc->post_load = gicv3_its_post_load; } diff --git a/hw/intc/arm_gicv3_its_common.c b/hw/intc/arm_gicv3_its_common.c index 90b85f1e25..d7532a7a89 100644 --- a/hw/intc/arm_gicv3_its_common.c +++ b/hw/intc/arm_gicv3_its_common.c @@ -122,9 +122,9 @@ void gicv3_its_init_mmio(GICv3ITSState *s, const MemoryRegionOps *ops, msi_nonbroken = true; } -static void gicv3_its_common_reset(DeviceState *dev) +static void gicv3_its_common_reset_hold(Object *obj) { - GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev); + GICv3ITSState *s = ARM_GICV3_ITS_COMMON(obj); s->ctlr = 0; s->cbaser = 0; @@ -137,8 +137,9 @@ static void gicv3_its_common_reset(DeviceState *dev) static void gicv3_its_common_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = 
DEVICE_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); - dc->reset = gicv3_its_common_reset; + rc->phases.hold = gicv3_its_common_reset_hold; dc->vmsd = &vmstate_its; } diff --git a/hw/intc/arm_gicv3_its_kvm.c b/hw/intc/arm_gicv3_its_kvm.c index 529c7bd494..7eda9fb86e 100644 --- a/hw/intc/arm_gicv3_its_kvm.c +++ b/hw/intc/arm_gicv3_its_kvm.c @@ -37,7 +37,7 @@ DECLARE_OBJ_CHECKERS(GICv3ITSState, KVMARMITSClass, struct KVMARMITSClass { GICv3ITSCommonClass parent_class; - void (*parent_reset)(DeviceState *dev); + ResettablePhases parent_phases; }; @@ -197,13 +197,15 @@ static void kvm_arm_its_post_load(GICv3ITSState *s) GITS_CTLR, &s->ctlr, true, &error_abort); } -static void kvm_arm_its_reset(DeviceState *dev) +static void kvm_arm_its_reset_hold(Object *obj) { - GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev); + GICv3ITSState *s = ARM_GICV3_ITS_COMMON(obj); KVMARMITSClass *c = KVM_ARM_ITS_GET_CLASS(s); int i; - c->parent_reset(dev); + if (c->parent_phases.hold) { + c->parent_phases.hold(obj); + } if (kvm_device_check_attr(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CTRL, KVM_DEV_ARM_ITS_CTRL_RESET)) { @@ -241,12 +243,14 @@ static Property kvm_arm_its_props[] = { static void kvm_arm_its_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); GICv3ITSCommonClass *icc = ARM_GICV3_ITS_COMMON_CLASS(klass); KVMARMITSClass *ic = KVM_ARM_ITS_CLASS(klass); dc->realize = kvm_arm_its_realize; device_class_set_props(dc, kvm_arm_its_props); - device_class_set_parent_reset(dc, kvm_arm_its_reset, &ic->parent_reset); + resettable_class_set_parent_phases(rc, NULL, kvm_arm_its_reset_hold, NULL, + &ic->parent_phases); icc->send_msi = kvm_its_send_msi; icc->pre_save = kvm_arm_its_pre_save; icc->post_load = kvm_arm_its_post_load; diff --git a/hw/intc/arm_gicv3_kvm.c b/hw/intc/arm_gicv3_kvm.c index 3ca643ecba..72ad916d3d 100644 --- a/hw/intc/arm_gicv3_kvm.c +++ b/hw/intc/arm_gicv3_kvm.c @@ -77,7 +77,7 @@ DECLARE_OBJ_CHECKERS(GICv3State, KVMARMGICv3Class, struct KVMARMGICv3Class { ARMGICv3CommonClass parent_class; DeviceRealize parent_realize; - void (*parent_reset)(DeviceState *dev); + ResettablePhases parent_phases; }; static void kvm_arm_gicv3_set_irq(void *opaque, int irq, int level) @@ -703,14 +703,16 @@ static void arm_gicv3_icc_reset(CPUARMState *env, const ARMCPRegInfo *ri) c->icc_ctlr_el1[GICV3_S] = c->icc_ctlr_el1[GICV3_NS]; } -static void kvm_arm_gicv3_reset(DeviceState *dev) +static void kvm_arm_gicv3_reset_hold(Object *obj) { - GICv3State *s = ARM_GICV3_COMMON(dev); + GICv3State *s = ARM_GICV3_COMMON(obj); KVMARMGICv3Class *kgc = KVM_ARM_GICV3_GET_CLASS(s); DPRINTF("Reset\n"); - kgc->parent_reset(dev); + if (kgc->parent_phases.hold) { + kgc->parent_phases.hold(obj); + } if (s->migration_blocker) { DPRINTF("Cannot put kernel gic state, no kernel interface\n"); @@ -890,6 +892,7 @@ static void kvm_arm_gicv3_realize(DeviceState *dev, Error **errp) static void kvm_arm_gicv3_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); ARMGICv3CommonClass *agcc = ARM_GICV3_COMMON_CLASS(klass); KVMARMGICv3Class *kgc = KVM_ARM_GICV3_CLASS(klass); @@ -897,7 +900,8 @@ static void kvm_arm_gicv3_class_init(ObjectClass *klass, void *data) agcc->post_load = kvm_arm_gicv3_put; device_class_set_parent_realize(dc, kvm_arm_gicv3_realize, &kgc->parent_realize); - device_class_set_parent_reset(dc, kvm_arm_gicv3_reset, &kgc->parent_reset); + 
resettable_class_set_parent_phases(rc, NULL, kvm_arm_gicv3_reset_hold, NULL, + &kgc->parent_phases); } static const TypeInfo kvm_arm_gicv3_info = { diff --git a/hw/intc/xics.c b/hw/intc/xics.c index dcd021af66..c7f8abd71e 100644 --- a/hw/intc/xics.c +++ b/hw/intc/xics.c @@ -564,9 +564,9 @@ static void ics_reset_irq(ICSIRQState *irq) irq->saved_priority = 0xff; } -static void ics_reset(DeviceState *dev) +static void ics_reset_hold(Object *obj) { - ICSState *ics = ICS(dev); + ICSState *ics = ICS(obj); g_autofree uint8_t *flags = g_malloc(ics->nr_irqs); int i; @@ -584,7 +584,7 @@ static void ics_reset(DeviceState *dev) if (kvm_irqchip_in_kernel()) { Error *local_err = NULL; - ics_set_kvm_state(ICS(dev), &local_err); + ics_set_kvm_state(ics, &local_err); if (local_err) { error_report_err(local_err); } @@ -593,7 +593,7 @@ static void ics_reset(DeviceState *dev) static void ics_reset_handler(void *dev) { - ics_reset(dev); + device_cold_reset(dev); } static void ics_realize(DeviceState *dev, Error **errp) @@ -688,16 +688,17 @@ static Property ics_properties[] = { static void ics_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); dc->realize = ics_realize; device_class_set_props(dc, ics_properties); - dc->reset = ics_reset; dc->vmsd = &vmstate_ics; /* * Reason: part of XICS interrupt controller, needs to be wired up, * e.g. by spapr_irq_init(). */ dc->user_creatable = false; + rc->phases.hold = ics_reset_hold; } static const TypeInfo ics_info = { diff --git a/hw/mem/pc-dimm.c b/hw/mem/pc-dimm.c index f27e1a11ba..50ef83215c 100644 --- a/hw/mem/pc-dimm.c +++ b/hw/mem/pc-dimm.c @@ -252,7 +252,6 @@ static void pc_dimm_md_fill_device_info(const MemoryDeviceState *md, const DeviceState *dev = DEVICE(md); if (dev->id) { - di->has_id = true; di->id = g_strdup(dev->id); } di->hotplugged = dev->hotplugged; diff --git a/hw/misc/imx6_src.c b/hw/misc/imx6_src.c index 7b0e968804..a9c64d06eb 100644 --- a/hw/misc/imx6_src.c +++ b/hw/misc/imx6_src.c @@ -15,7 +15,7 @@ #include "qemu/log.h" #include "qemu/main-loop.h" #include "qemu/module.h" -#include "arm-powerctl.h" +#include "target/arm/arm-powerctl.h" #include "hw/core/cpu.h" #ifndef DEBUG_IMX6_SRC diff --git a/hw/misc/iotkit-sysctl.c b/hw/misc/iotkit-sysctl.c index 7147e2f84e..e664215ee6 100644 --- a/hw/misc/iotkit-sysctl.c +++ b/hw/misc/iotkit-sysctl.c @@ -30,7 +30,6 @@ #include "hw/qdev-properties.h" #include "hw/arm/armsse-version.h" #include "target/arm/arm-powerctl.h" -#include "target/arm/cpu.h" REG32(SECDBGSTAT, 0x0) REG32(SECDBGSET, 0x4) diff --git a/hw/misc/mac_via.c b/hw/misc/mac_via.c index f42c12755a..076d18e5fd 100644 --- a/hw/misc/mac_via.c +++ b/hw/misc/mac_via.c @@ -975,14 +975,16 @@ static int via1_post_load(void *opaque, int version_id) } /* VIA 1 */ -static void mos6522_q800_via1_reset(DeviceState *dev) +static void mos6522_q800_via1_reset_hold(Object *obj) { - MOS6522Q800VIA1State *v1s = MOS6522_Q800_VIA1(dev); + MOS6522Q800VIA1State *v1s = MOS6522_Q800_VIA1(obj); MOS6522State *ms = MOS6522(v1s); MOS6522DeviceClass *mdc = MOS6522_GET_CLASS(ms); ADBBusState *adb_bus = &v1s->adb_bus; - mdc->parent_reset(dev); + if (mdc->parent_phases.hold) { + mdc->parent_phases.hold(obj); + } ms->timers[0].frequency = VIA_TIMER_FREQ; ms->timers[1].frequency = VIA_TIMER_FREQ; @@ -1097,11 +1099,12 @@ static Property mos6522_q800_via1_properties[] = { static void mos6522_q800_via1_class_init(ObjectClass *oc, void *data) { DeviceClass *dc = DEVICE_CLASS(oc); + ResettableClass *rc = 
RESETTABLE_CLASS(oc); MOS6522DeviceClass *mdc = MOS6522_CLASS(oc); dc->realize = mos6522_q800_via1_realize; - device_class_set_parent_reset(dc, mos6522_q800_via1_reset, - &mdc->parent_reset); + resettable_class_set_parent_phases(rc, NULL, mos6522_q800_via1_reset_hold, + NULL, &mdc->parent_phases); dc->vmsd = &vmstate_q800_via1; device_class_set_props(dc, mos6522_q800_via1_properties); } @@ -1123,12 +1126,14 @@ static void mos6522_q800_via2_portB_write(MOS6522State *s) } } -static void mos6522_q800_via2_reset(DeviceState *dev) +static void mos6522_q800_via2_reset_hold(Object *obj) { - MOS6522State *ms = MOS6522(dev); + MOS6522State *ms = MOS6522(obj); MOS6522DeviceClass *mdc = MOS6522_GET_CLASS(ms); - mdc->parent_reset(dev); + if (mdc->parent_phases.hold) { + mdc->parent_phases.hold(obj); + } ms->timers[0].frequency = VIA_TIMER_FREQ; ms->timers[1].frequency = VIA_TIMER_FREQ; @@ -1183,10 +1188,11 @@ static const VMStateDescription vmstate_q800_via2 = { static void mos6522_q800_via2_class_init(ObjectClass *oc, void *data) { DeviceClass *dc = DEVICE_CLASS(oc); + ResettableClass *rc = RESETTABLE_CLASS(oc); MOS6522DeviceClass *mdc = MOS6522_CLASS(oc); - device_class_set_parent_reset(dc, mos6522_q800_via2_reset, - &mdc->parent_reset); + resettable_class_set_parent_phases(rc, NULL, mos6522_q800_via2_reset_hold, + NULL, &mdc->parent_phases); dc->vmsd = &vmstate_q800_via2; mdc->portB_write = mos6522_q800_via2_portB_write; } diff --git a/hw/misc/macio/cuda.c b/hw/misc/macio/cuda.c index 0d4c13319a..853e88bfed 100644 --- a/hw/misc/macio/cuda.c +++ b/hw/misc/macio/cuda.c @@ -589,12 +589,14 @@ static void mos6522_cuda_portB_write(MOS6522State *s) cuda_update(cs); } -static void mos6522_cuda_reset(DeviceState *dev) +static void mos6522_cuda_reset_hold(Object *obj) { - MOS6522State *ms = MOS6522(dev); + MOS6522State *ms = MOS6522(obj); MOS6522DeviceClass *mdc = MOS6522_GET_CLASS(ms); - mdc->parent_reset(dev); + if (mdc->parent_phases.hold) { + mdc->parent_phases.hold(obj); + } ms->timers[0].frequency = CUDA_TIMER_FREQ; ms->timers[1].frequency = (SCALE_US * 6000) / 4700; @@ -602,11 +604,11 @@ static void mos6522_cuda_reset(DeviceState *dev) static void mos6522_cuda_class_init(ObjectClass *oc, void *data) { - DeviceClass *dc = DEVICE_CLASS(oc); + ResettableClass *rc = RESETTABLE_CLASS(oc); MOS6522DeviceClass *mdc = MOS6522_CLASS(oc); - device_class_set_parent_reset(dc, mos6522_cuda_reset, - &mdc->parent_reset); + resettable_class_set_parent_phases(rc, NULL, mos6522_cuda_reset_hold, + NULL, &mdc->parent_phases); mdc->portB_write = mos6522_cuda_portB_write; mdc->get_timer1_counter_value = cuda_get_counter_value; mdc->get_timer2_counter_value = cuda_get_counter_value; diff --git a/hw/misc/macio/pmu.c b/hw/misc/macio/pmu.c index 70562ed8d0..97ef8c771b 100644 --- a/hw/misc/macio/pmu.c +++ b/hw/misc/macio/pmu.c @@ -797,14 +797,16 @@ static void mos6522_pmu_portB_write(MOS6522State *s) pmu_update(ps); } -static void mos6522_pmu_reset(DeviceState *dev) +static void mos6522_pmu_reset_hold(Object *obj) { - MOS6522State *ms = MOS6522(dev); + MOS6522State *ms = MOS6522(obj); MOS6522PMUState *mps = container_of(ms, MOS6522PMUState, parent_obj); PMUState *s = container_of(mps, PMUState, mos6522_pmu); MOS6522DeviceClass *mdc = MOS6522_GET_CLASS(ms); - mdc->parent_reset(dev); + if (mdc->parent_phases.hold) { + mdc->parent_phases.hold(obj); + } ms->timers[0].frequency = VIA_TIMER_FREQ; ms->timers[1].frequency = (SCALE_US * 6000) / 4700; @@ -814,11 +816,11 @@ static void mos6522_pmu_reset(DeviceState *dev) static void 
mos6522_pmu_class_init(ObjectClass *oc, void *data) { - DeviceClass *dc = DEVICE_CLASS(oc); + ResettableClass *rc = RESETTABLE_CLASS(oc); MOS6522DeviceClass *mdc = MOS6522_CLASS(oc); - device_class_set_parent_reset(dc, mos6522_pmu_reset, - &mdc->parent_reset); + resettable_class_set_parent_phases(rc, NULL, mos6522_pmu_reset_hold, + NULL, &mdc->parent_phases); mdc->portB_write = mos6522_pmu_portB_write; } diff --git a/hw/misc/meson.build b/hw/misc/meson.build index 95268eddc0..ed0598dc9e 100644 --- a/hw/misc/meson.build +++ b/hw/misc/meson.build @@ -51,6 +51,7 @@ softmmu_ss.add(when: 'CONFIG_IMX', if_true: files( 'imx25_ccm.c', 'imx31_ccm.c', 'imx6_ccm.c', + 'imx6_src.c', 'imx6ul_ccm.c', 'imx7_ccm.c', 'imx7_gpr.c', @@ -84,8 +85,8 @@ softmmu_ss.add(when: 'CONFIG_RASPI', if_true: files( )) softmmu_ss.add(when: 'CONFIG_SLAVIO', if_true: files('slavio_misc.c')) softmmu_ss.add(when: 'CONFIG_ZYNQ', if_true: files('zynq_slcr.c')) -specific_ss.add(when: 'CONFIG_XLNX_ZYNQMP_ARM', if_true: files('xlnx-zynqmp-crf.c')) -specific_ss.add(when: 'CONFIG_XLNX_ZYNQMP_ARM', if_true: files('xlnx-zynqmp-apu-ctrl.c')) +softmmu_ss.add(when: 'CONFIG_XLNX_ZYNQMP_ARM', if_true: files('xlnx-zynqmp-crf.c')) +softmmu_ss.add(when: 'CONFIG_XLNX_ZYNQMP_ARM', if_true: files('xlnx-zynqmp-apu-ctrl.c')) specific_ss.add(when: 'CONFIG_XLNX_VERSAL', if_true: files('xlnx-versal-crl.c')) softmmu_ss.add(when: 'CONFIG_XLNX_VERSAL', if_true: files( 'xlnx-versal-xramc.c', @@ -101,6 +102,7 @@ softmmu_ss.add(when: 'CONFIG_TZ_MPC', if_true: files('tz-mpc.c')) softmmu_ss.add(when: 'CONFIG_TZ_MSC', if_true: files('tz-msc.c')) softmmu_ss.add(when: 'CONFIG_TZ_PPC', if_true: files('tz-ppc.c')) softmmu_ss.add(when: 'CONFIG_IOTKIT_SECCTL', if_true: files('iotkit-secctl.c')) +softmmu_ss.add(when: 'CONFIG_IOTKIT_SYSCTL', if_true: files('iotkit-sysctl.c')) softmmu_ss.add(when: 'CONFIG_IOTKIT_SYSINFO', if_true: files('iotkit-sysinfo.c')) softmmu_ss.add(when: 'CONFIG_ARMSSE_CPU_PWRCTRL', if_true: files('armsse-cpu-pwrctrl.c')) softmmu_ss.add(when: 'CONFIG_ARMSSE_CPUID', if_true: files('armsse-cpuid.c')) @@ -126,15 +128,12 @@ softmmu_ss.add(when: 'CONFIG_GRLIB', if_true: files('grlib_ahb_apb_pnp.c')) specific_ss.add(when: 'CONFIG_AVR_POWER', if_true: files('avr_power.c')) -specific_ss.add(when: 'CONFIG_IMX', if_true: files('imx6_src.c')) -specific_ss.add(when: 'CONFIG_IOTKIT_SYSCTL', if_true: files('iotkit-sysctl.c')) - specific_ss.add(when: 'CONFIG_MAC_VIA', if_true: files('mac_via.c')) specific_ss.add(when: 'CONFIG_MIPS_CPS', if_true: files('mips_cmgcr.c', 'mips_cpc.c')) specific_ss.add(when: 'CONFIG_MIPS_ITU', if_true: files('mips_itu.c')) -specific_ss.add(when: 'CONFIG_SBSA_REF', if_true: files('sbsa_ec.c')) +softmmu_ss.add(when: 'CONFIG_SBSA_REF', if_true: files('sbsa_ec.c')) # HPPA devices softmmu_ss.add(when: 'CONFIG_LASI', if_true: files('lasi.c')) diff --git a/hw/misc/mos6522.c b/hw/misc/mos6522.c index fe38c44426..0ed631186c 100644 --- a/hw/misc/mos6522.c +++ b/hw/misc/mos6522.c @@ -643,9 +643,9 @@ const VMStateDescription vmstate_mos6522 = { } }; -static void mos6522_reset(DeviceState *dev) +static void mos6522_reset_hold(Object *obj) { - MOS6522State *s = MOS6522(dev); + MOS6522State *s = MOS6522(obj); s->b = 0; s->a = 0; @@ -705,9 +705,10 @@ static Property mos6522_properties[] = { static void mos6522_class_init(ObjectClass *oc, void *data) { DeviceClass *dc = DEVICE_CLASS(oc); + ResettableClass *rc = RESETTABLE_CLASS(oc); MOS6522DeviceClass *mdc = MOS6522_CLASS(oc); - dc->reset = mos6522_reset; + rc->phases.hold = mos6522_reset_hold; 
dc->vmsd = &vmstate_mos6522; device_class_set_props(dc, mos6522_properties); mdc->portB_write = mos6522_portB_write; diff --git a/hw/net/Kconfig b/hw/net/Kconfig index 6d795ec752..1cc1c5775e 100644 --- a/hw/net/Kconfig +++ b/hw/net/Kconfig @@ -51,7 +51,7 @@ config RTL8139_PCI config VMXNET3_PCI bool - default y if PCI_DEVICES + default y if PCI_DEVICES && PC_PCI depends on PCI config SMC91C111 diff --git a/hw/net/cadence_gem.c b/hw/net/cadence_gem.c index 24b3a0ff66..42ea2411a2 100644 --- a/hw/net/cadence_gem.c +++ b/hw/net/cadence_gem.c @@ -1429,7 +1429,7 @@ static uint64_t gem_read(void *opaque, hwaddr offset, unsigned size) { CadenceGEMState *s; uint32_t retval; - s = (CadenceGEMState *)opaque; + s = opaque; offset >>= 2; retval = s->regs[offset]; diff --git a/hw/net/rocker/rocker_of_dpa.c b/hw/net/rocker/rocker_of_dpa.c index b3b8c5bb6d..dfe4754469 100644 --- a/hw/net/rocker/rocker_of_dpa.c +++ b/hw/net/rocker/rocker_of_dpa.c @@ -2348,23 +2348,19 @@ static void of_dpa_flow_fill(void *cookie, void *value, void *user_data) if (memcmp(key->eth.src.a, zero_mac.a, ETH_ALEN) || memcmp(mask->eth.src.a, zero_mac.a, ETH_ALEN)) { - nkey->has_eth_src = true; nkey->eth_src = qemu_mac_strdup_printf(key->eth.src.a); } - if (nkey->has_eth_src && memcmp(mask->eth.src.a, ff_mac.a, ETH_ALEN)) { - nmask->has_eth_src = true; + if (nkey->eth_src && memcmp(mask->eth.src.a, ff_mac.a, ETH_ALEN)) { nmask->eth_src = qemu_mac_strdup_printf(mask->eth.src.a); } if (memcmp(key->eth.dst.a, zero_mac.a, ETH_ALEN) || memcmp(mask->eth.dst.a, zero_mac.a, ETH_ALEN)) { - nkey->has_eth_dst = true; nkey->eth_dst = qemu_mac_strdup_printf(key->eth.dst.a); } - if (nkey->has_eth_dst && memcmp(mask->eth.dst.a, ff_mac.a, ETH_ALEN)) { - nmask->has_eth_dst = true; + if (nkey->eth_dst && memcmp(mask->eth.dst.a, ff_mac.a, ETH_ALEN)) { nmask->eth_dst = qemu_mac_strdup_printf(mask->eth.dst.a); } @@ -2400,7 +2396,6 @@ static void of_dpa_flow_fill(void *cookie, void *value, void *user_data) if (key->ipv4.addr.dst || mask->ipv4.addr.dst) { char *dst = inet_ntoa(*(struct in_addr *)&key->ipv4.addr.dst); int dst_len = of_dpa_mask2prefix(mask->ipv4.addr.dst); - nkey->has_ip_dst = true; nkey->ip_dst = g_strdup_printf("%s/%d", dst, dst_len); } break; @@ -2501,12 +2496,10 @@ static void of_dpa_group_fill(void *key, void *value, void *user_data) ngroup->set_vlan_id = ntohs(group->l2_rewrite.vlan_id); } if (memcmp(group->l2_rewrite.src_mac.a, zero_mac.a, ETH_ALEN)) { - ngroup->has_set_eth_src = true; ngroup->set_eth_src = qemu_mac_strdup_printf(group->l2_rewrite.src_mac.a); } if (memcmp(group->l2_rewrite.dst_mac.a, zero_mac.a, ETH_ALEN)) { - ngroup->has_set_eth_dst = true; ngroup->set_eth_dst = qemu_mac_strdup_printf(group->l2_rewrite.dst_mac.a); } @@ -2532,12 +2525,10 @@ static void of_dpa_group_fill(void *key, void *value, void *user_data) ngroup->set_vlan_id = ntohs(group->l3_unicast.vlan_id); } if (memcmp(group->l3_unicast.src_mac.a, zero_mac.a, ETH_ALEN)) { - ngroup->has_set_eth_src = true; ngroup->set_eth_src = qemu_mac_strdup_printf(group->l3_unicast.src_mac.a); } if (memcmp(group->l3_unicast.dst_mac.a, zero_mac.a, ETH_ALEN)) { - ngroup->has_set_eth_dst = true; ngroup->set_eth_dst = qemu_mac_strdup_printf(group->l3_unicast.dst_mac.a); } diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c index aba12759d5..9cbdfa5547 100644 --- a/hw/net/virtio-net.c +++ b/hw/net/virtio-net.c @@ -457,8 +457,7 @@ static void rxfilter_notify(NetClientState *nc) if (nc->rxfilter_notify_enabled) { char *path = object_get_canonical_path(OBJECT(n->qdev)); - 
qapi_event_send_nic_rx_filter_changed(!!n->netclient_name, - n->netclient_name, path); + qapi_event_send_nic_rx_filter_changed(n->netclient_name, path); g_free(path); /* disable event notification to avoid events flooding */ @@ -2472,7 +2471,7 @@ static size_t virtio_net_rsc_receive6(void *opq, NetClientState *nc, VirtioNetRscChain *chain; VirtioNetRscUnit unit; - chain = (VirtioNetRscChain *)opq; + chain = opq; hdr_len = ((VirtIONet *)(chain->n))->guest_hdr_len; if (size < (hdr_len + sizeof(struct eth_header) + sizeof(struct ip6_header) diff --git a/hw/nvme/ctrl.c b/hw/nvme/ctrl.c index e54276dc1d..4a0c51a947 100644 --- a/hw/nvme/ctrl.c +++ b/hw/nvme/ctrl.c @@ -4028,14 +4028,14 @@ static uint16_t nvme_zone_mgmt_recv(NvmeCtrl *n, NvmeRequest *req) nr_zones++; } } - header = (NvmeZoneReportHeader *)buf; + header = buf; header->nr_zones = cpu_to_le64(nr_zones); buf_p = buf + sizeof(NvmeZoneReportHeader); for (; zone_idx < ns->num_zones && max_zones > 0; zone_idx++) { zone = &ns->zone_array[zone_idx]; if (nvme_zone_matches_filter(zrasf, zone)) { - z = (NvmeZoneDescr *)buf_p; + z = buf_p; buf_p += sizeof(NvmeZoneDescr); z->zt = zone->d.zt; diff --git a/hw/nvram/fw_cfg.c b/hw/nvram/fw_cfg.c index 6edf5ea3e9..a00881bc64 100644 --- a/hw/nvram/fw_cfg.c +++ b/hw/nvram/fw_cfg.c @@ -201,7 +201,7 @@ static void fw_cfg_bootsplash(FWCfgState *s) } /* insert splash file if user configurated */ - if (current_machine->boot_config.has_splash) { + if (current_machine->boot_config.splash) { const char *boot_splash_filename = current_machine->boot_config.splash; filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, boot_splash_filename); if (filename == NULL) { diff --git a/hw/pci-bridge/cxl_root_port.c b/hw/pci-bridge/cxl_root_port.c index fb213fa06e..6664783974 100644 --- a/hw/pci-bridge/cxl_root_port.c +++ b/hw/pci-bridge/cxl_root_port.c @@ -138,12 +138,14 @@ static void cxl_rp_realize(DeviceState *dev, Error **errp) component_bar); } -static void cxl_rp_reset(DeviceState *dev) +static void cxl_rp_reset_hold(Object *obj) { - PCIERootPortClass *rpc = PCIE_ROOT_PORT_GET_CLASS(dev); - CXLRootPort *crp = CXL_ROOT_PORT(dev); + PCIERootPortClass *rpc = PCIE_ROOT_PORT_GET_CLASS(obj); + CXLRootPort *crp = CXL_ROOT_PORT(obj); - rpc->parent_reset(dev); + if (rpc->parent_phases.hold) { + rpc->parent_phases.hold(obj); + } latch_registers(crp); } @@ -199,6 +201,7 @@ static void cxl_root_port_class_init(ObjectClass *oc, void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PCIDeviceClass *k = PCI_DEVICE_CLASS(oc); + ResettableClass *rc = RESETTABLE_CLASS(oc); PCIERootPortClass *rpc = PCIE_ROOT_PORT_CLASS(oc); k->vendor_id = PCI_VENDOR_ID_INTEL; @@ -209,7 +212,8 @@ static void cxl_root_port_class_init(ObjectClass *oc, void *data) k->config_write = cxl_rp_write_config; device_class_set_parent_realize(dc, cxl_rp_realize, &rpc->parent_realize); - device_class_set_parent_reset(dc, cxl_rp_reset, &rpc->parent_reset); + resettable_class_set_parent_phases(rc, NULL, cxl_rp_reset_hold, NULL, + &rpc->parent_phases); rpc->aer_offset = GEN_PCIE_ROOT_PORT_AER_OFFSET; rpc->acs_offset = GEN_PCIE_ROOT_PORT_ACS_OFFSET; diff --git a/hw/pci-bridge/pcie_root_port.c b/hw/pci-bridge/pcie_root_port.c index 460e48269d..36bc0bafa7 100644 --- a/hw/pci-bridge/pcie_root_port.c +++ b/hw/pci-bridge/pcie_root_port.c @@ -43,9 +43,10 @@ static void rp_write_config(PCIDevice *d, uint32_t address, pcie_aer_root_write_config(d, address, val, len, root_cmd); } -static void rp_reset(DeviceState *qdev) +static void rp_reset_hold(Object *obj) { - PCIDevice *d = 
PCI_DEVICE(qdev); + PCIDevice *d = PCI_DEVICE(obj); + DeviceState *qdev = DEVICE(obj); rp_aer_vector_update(d); pcie_cap_root_reset(d); @@ -171,13 +172,14 @@ static void rp_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); k->is_bridge = true; k->config_write = rp_write_config; k->realize = rp_realize; k->exit = rp_exit; set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); - dc->reset = rp_reset; + rc->phases.hold = rp_reset_hold; device_class_set_props(dc, rp_props); } diff --git a/hw/pci-host/pnv_phb.c b/hw/pci-host/pnv_phb.c index 0b26b43736..c62b08538a 100644 --- a/hw/pci-host/pnv_phb.c +++ b/hw/pci-host/pnv_phb.c @@ -199,14 +199,16 @@ static void pnv_phb_class_init(ObjectClass *klass, void *data) dc->user_creatable = true; } -static void pnv_phb_root_port_reset(DeviceState *dev) +static void pnv_phb_root_port_reset_hold(Object *obj) { - PCIERootPortClass *rpc = PCIE_ROOT_PORT_GET_CLASS(dev); - PnvPHBRootPort *phb_rp = PNV_PHB_ROOT_PORT(dev); - PCIDevice *d = PCI_DEVICE(dev); + PCIERootPortClass *rpc = PCIE_ROOT_PORT_GET_CLASS(obj); + PnvPHBRootPort *phb_rp = PNV_PHB_ROOT_PORT(obj); + PCIDevice *d = PCI_DEVICE(obj); uint8_t *conf = d->config; - rpc->parent_reset(dev); + if (rpc->parent_phases.hold) { + rpc->parent_phases.hold(obj); + } if (phb_rp->version == 3) { return; @@ -300,6 +302,7 @@ static Property pnv_phb_root_port_properties[] = { static void pnv_phb_root_port_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); PCIERootPortClass *rpc = PCIE_ROOT_PORT_CLASS(klass); @@ -308,9 +311,8 @@ static void pnv_phb_root_port_class_init(ObjectClass *klass, void *data) device_class_set_props(dc, pnv_phb_root_port_properties); device_class_set_parent_realize(dc, pnv_phb_root_port_realize, &rpc->parent_realize); - device_class_set_parent_reset(dc, pnv_phb_root_port_reset, - &rpc->parent_reset); - dc->reset = &pnv_phb_root_port_reset; + resettable_class_set_parent_phases(rc, NULL, pnv_phb_root_port_reset_hold, + NULL, &rpc->parent_phases); dc->user_creatable = true; k->vendor_id = PCI_VENDOR_ID_IBM; diff --git a/hw/pci-host/pnv_phb3_msi.c b/hw/pci-host/pnv_phb3_msi.c index 2f4112907b..41e63b066f 100644 --- a/hw/pci-host/pnv_phb3_msi.c +++ b/hw/pci-host/pnv_phb3_msi.c @@ -228,22 +228,19 @@ static void phb3_msi_resend(ICSState *ics) } } -static void phb3_msi_reset(DeviceState *dev) +static void phb3_msi_reset_hold(Object *obj) { - Phb3MsiState *msi = PHB3_MSI(dev); - ICSStateClass *icsc = ICS_GET_CLASS(dev); + Phb3MsiState *msi = PHB3_MSI(obj); + ICSStateClass *icsc = ICS_GET_CLASS(obj); - icsc->parent_reset(dev); + if (icsc->parent_phases.hold) { + icsc->parent_phases.hold(obj); + } memset(msi->rba, 0, sizeof(msi->rba)); msi->rba_sum = 0; } -static void phb3_msi_reset_handler(void *dev) -{ - phb3_msi_reset(dev); -} - void pnv_phb3_msi_update_config(Phb3MsiState *msi, uint32_t base, uint32_t count) { @@ -272,8 +269,6 @@ static void phb3_msi_realize(DeviceState *dev, Error **errp) } msi->qirqs = qemu_allocate_irqs(phb3_msi_set_irq, msi, ics->nr_irqs); - - qemu_register_reset(phb3_msi_reset_handler, dev); } static void phb3_msi_instance_init(Object *obj) @@ -294,11 +289,12 @@ static void phb3_msi_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ICSStateClass *isc = ICS_CLASS(klass); + ResettableClass *rc = 
RESETTABLE_CLASS(klass); device_class_set_parent_realize(dc, phb3_msi_realize, &isc->parent_realize); - device_class_set_parent_reset(dc, phb3_msi_reset, - &isc->parent_reset); + resettable_class_set_parent_phases(rc, NULL, phb3_msi_reset_hold, NULL, + &isc->parent_phases); isc->reject = phb3_msi_reject; isc->resend = phb3_msi_resend; diff --git a/hw/pci/msi.c b/hw/pci/msi.c index 058d1d1ef1..1cadf150bc 100644 --- a/hw/pci/msi.c +++ b/hw/pci/msi.c @@ -317,7 +317,6 @@ bool msi_is_masked(const PCIDevice *dev, unsigned int vector) void msi_set_mask(PCIDevice *dev, int vector, bool mask, Error **errp) { - ERRP_GUARD(); uint16_t flags = pci_get_word(dev->config + msi_flags_off(dev)); bool msi64bit = flags & PCI_MSI_FLAGS_64BIT; uint32_t irq_state, vector_mask, pending; diff --git a/hw/pci/pci.c b/hw/pci/pci.c index 2f450f6a72..c61348dca0 100644 --- a/hw/pci/pci.c +++ b/hw/pci/pci.c @@ -378,14 +378,14 @@ static void pci_do_device_reset(PCIDevice *dev) */ void pci_device_reset(PCIDevice *dev) { - qdev_reset_all(&dev->qdev); + device_cold_reset(&dev->qdev); pci_do_device_reset(dev); } /* * Trigger pci bus reset under a given bus. - * Called via qbus_reset_all on RST# assert, after the devices - * have been reset qdev_reset_all-ed already. + * Called via bus_cold_reset on RST# assert, after the devices + * have been reset device_cold_reset-ed already. */ static void pcibus_reset(BusState *qbus) { @@ -1879,7 +1879,6 @@ static PciDeviceInfo *qmp_query_pci_device(PCIDevice *dev, PCIBus *bus, info->class_info->q_class = class; desc = get_class_desc(class); if (desc->desc) { - info->class_info->has_desc = true; info->class_info->desc = g_strdup(desc->desc); } @@ -1897,7 +1896,6 @@ static PciDeviceInfo *qmp_query_pci_device(PCIDevice *dev, PCIBus *bus, type = dev->config[PCI_HEADER_TYPE] & ~PCI_HEADER_TYPE_MULTI_FUNCTION; if (type == PCI_HEADER_TYPE_BRIDGE) { - info->has_pci_bridge = true; info->pci_bridge = qmp_query_pci_bridge(dev, bus, bus_num); } else if (type == PCI_HEADER_TYPE_NORMAL) { info->id->has_subsystem = info->id->has_subsystem_vendor = true; diff --git a/hw/pci/pci_bridge.c b/hw/pci/pci_bridge.c index da34c8ebcd..b2b180edd6 100644 --- a/hw/pci/pci_bridge.c +++ b/hw/pci/pci_bridge.c @@ -275,7 +275,7 @@ void pci_bridge_write_config(PCIDevice *d, newctl = pci_get_word(d->config + PCI_BRIDGE_CONTROL); if (~oldctl & newctl & PCI_BRIDGE_CTL_BUS_RESET) { /* Trigger hot reset on 0->1 transition. 
*/ - qbus_reset_all(BUS(&s->sec_bus)); + bus_cold_reset(BUS(&s->sec_bus)); } } diff --git a/hw/ppc/ppc4xx_sdram.c b/hw/ppc/ppc4xx_sdram.c index 8d7137faf3..a24c80b1d2 100644 --- a/hw/ppc/ppc4xx_sdram.c +++ b/hw/ppc/ppc4xx_sdram.c @@ -192,17 +192,13 @@ static inline hwaddr sdram_ddr_base(uint32_t bcr) static hwaddr sdram_ddr_size(uint32_t bcr) { - hwaddr size; - int sh; + int sh = (bcr >> 17) & 0x7; - sh = (bcr >> 17) & 0x7; if (sh == 7) { - size = -1; - } else { - size = (4 * MiB) << sh; + return -1; } - return size; + return (4 * MiB) << sh; } static uint32_t sdram_ddr_dcr_read(void *opaque, int dcrn) @@ -520,13 +516,10 @@ static inline hwaddr sdram_ddr2_base(uint32_t bcr) static hwaddr sdram_ddr2_size(uint32_t bcr) { - hwaddr size; int sh; sh = 1024 - ((bcr >> 6) & 0x3ff); - size = 8 * MiB * sh; - - return size; + return 8 * MiB * sh; } static uint32_t sdram_ddr2_dcr_read(void *opaque, int dcrn) diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c index 66b414d2e9..dc850032ae 100644 --- a/hw/ppc/spapr.c +++ b/hw/ppc/spapr.c @@ -3728,7 +3728,7 @@ void spapr_memory_unplug_rollback(SpaprMachineState *spapr, DeviceState *dev) qapi_event_send_mem_unplug_error(dev->id ? : "", qapi_error); - qapi_event_send_device_unplug_guest_error(!!dev->id, dev->id, + qapi_event_send_device_unplug_guest_error(dev->id, dev->canonical_path); } diff --git a/hw/ppc/spapr_drc.c b/hw/ppc/spapr_drc.c index 76bc5d42a0..4923435a8b 100644 --- a/hw/ppc/spapr_drc.c +++ b/hw/ppc/spapr_drc.c @@ -175,8 +175,7 @@ static uint32_t drc_unisolate_logical(SpaprDrc *drc) "for device %s", drc->dev->id); } - qapi_event_send_device_unplug_guest_error(!!drc->dev->id, - drc->dev->id, + qapi_event_send_device_unplug_guest_error(drc->dev->id, drc->dev->canonical_path); } diff --git a/hw/rdma/vmw/pvrdma_cmd.c b/hw/rdma/vmw/pvrdma_cmd.c index da7ddfa548..1eca6328c9 100644 --- a/hw/rdma/vmw/pvrdma_cmd.c +++ b/hw/rdma/vmw/pvrdma_cmd.c @@ -182,13 +182,10 @@ static int create_pd(PVRDMADev *dev, union pvrdma_cmd_req *req, { struct pvrdma_cmd_create_pd *cmd = &req->create_pd; struct pvrdma_cmd_create_pd_resp *resp = &rsp->create_pd_resp; - int rc; memset(resp, 0, sizeof(*resp)); - rc = rdma_rm_alloc_pd(&dev->rdma_dev_res, &dev->backend_dev, - &resp->pd_handle, cmd->ctx_handle); - - return rc; + return rdma_rm_alloc_pd(&dev->rdma_dev_res, &dev->backend_dev, + &resp->pd_handle, cmd->ctx_handle); } static int destroy_pd(PVRDMADev *dev, union pvrdma_cmd_req *req, @@ -269,8 +266,7 @@ static int create_cq_ring(PCIDevice *pci_dev , PvrdmaRing **ring, r = g_malloc(sizeof(*r)); *ring = r; - r->ring_state = (PvrdmaRingState *) - rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE); + r->ring_state = rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE); if (!r->ring_state) { rdma_error_report("Failed to map to CQ ring state"); @@ -405,8 +401,7 @@ static int create_qp_rings(PCIDevice *pci_dev, uint64_t pdir_dma, *rings = sr; /* Create send ring */ - sr->ring_state = (PvrdmaRingState *) - rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE); + sr->ring_state = rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE); if (!sr->ring_state) { rdma_error_report("Failed to map to QP ring state"); goto out_free_sr_mem; @@ -506,20 +501,17 @@ static int modify_qp(PVRDMADev *dev, union pvrdma_cmd_req *req, union pvrdma_cmd_resp *rsp) { struct pvrdma_cmd_modify_qp *cmd = &req->modify_qp; - int rc; /* No need to verify sgid_index since it is u8 */ - rc = rdma_rm_modify_qp(&dev->rdma_dev_res, &dev->backend_dev, - cmd->qp_handle, cmd->attr_mask, - cmd->attrs.ah_attr.grh.sgid_index, - 
(union ibv_gid *)&cmd->attrs.ah_attr.grh.dgid, - cmd->attrs.dest_qp_num, - (enum ibv_qp_state)cmd->attrs.qp_state, - cmd->attrs.qkey, cmd->attrs.rq_psn, - cmd->attrs.sq_psn); - - return rc; + return rdma_rm_modify_qp(&dev->rdma_dev_res, &dev->backend_dev, + cmd->qp_handle, cmd->attr_mask, + cmd->attrs.ah_attr.grh.sgid_index, + (union ibv_gid *)&cmd->attrs.ah_attr.grh.dgid, + cmd->attrs.dest_qp_num, + (enum ibv_qp_state)cmd->attrs.qp_state, + cmd->attrs.qkey, cmd->attrs.rq_psn, + cmd->attrs.sq_psn); } static int query_qp(PVRDMADev *dev, union pvrdma_cmd_req *req, @@ -528,15 +520,14 @@ static int query_qp(PVRDMADev *dev, union pvrdma_cmd_req *req, struct pvrdma_cmd_query_qp *cmd = &req->query_qp; struct pvrdma_cmd_query_qp_resp *resp = &rsp->query_qp_resp; struct ibv_qp_init_attr init_attr; - int rc; memset(resp, 0, sizeof(*resp)); - rc = rdma_rm_query_qp(&dev->rdma_dev_res, &dev->backend_dev, cmd->qp_handle, - (struct ibv_qp_attr *)&resp->attrs, cmd->attr_mask, - &init_attr); - - return rc; + return rdma_rm_query_qp(&dev->rdma_dev_res, &dev->backend_dev, + cmd->qp_handle, + (struct ibv_qp_attr *)&resp->attrs, + cmd->attr_mask, + &init_attr); } static int destroy_qp(PVRDMADev *dev, union pvrdma_cmd_req *req, @@ -562,34 +553,27 @@ static int create_bind(PVRDMADev *dev, union pvrdma_cmd_req *req, union pvrdma_cmd_resp *rsp) { struct pvrdma_cmd_create_bind *cmd = &req->create_bind; - int rc; union ibv_gid *gid = (union ibv_gid *)&cmd->new_gid; if (cmd->index >= MAX_PORT_GIDS) { return -EINVAL; } - rc = rdma_rm_add_gid(&dev->rdma_dev_res, &dev->backend_dev, - dev->backend_eth_device_name, gid, cmd->index); - - return rc; + return rdma_rm_add_gid(&dev->rdma_dev_res, &dev->backend_dev, + dev->backend_eth_device_name, gid, cmd->index); } static int destroy_bind(PVRDMADev *dev, union pvrdma_cmd_req *req, union pvrdma_cmd_resp *rsp) { - int rc; - struct pvrdma_cmd_destroy_bind *cmd = &req->destroy_bind; if (cmd->index >= MAX_PORT_GIDS) { return -EINVAL; } - rc = rdma_rm_del_gid(&dev->rdma_dev_res, &dev->backend_dev, - dev->backend_eth_device_name, cmd->index); - - return rc; + return rdma_rm_del_gid(&dev->rdma_dev_res, &dev->backend_dev, + dev->backend_eth_device_name, cmd->index); } static int create_uc(PVRDMADev *dev, union pvrdma_cmd_req *req, @@ -597,12 +581,9 @@ static int create_uc(PVRDMADev *dev, union pvrdma_cmd_req *req, { struct pvrdma_cmd_create_uc *cmd = &req->create_uc; struct pvrdma_cmd_create_uc_resp *resp = &rsp->create_uc_resp; - int rc; memset(resp, 0, sizeof(*resp)); - rc = rdma_rm_alloc_uc(&dev->rdma_dev_res, cmd->pfn, &resp->ctx_handle); - - return rc; + return rdma_rm_alloc_uc(&dev->rdma_dev_res, cmd->pfn, &resp->ctx_handle); } static int destroy_uc(PVRDMADev *dev, union pvrdma_cmd_req *req, @@ -646,8 +627,7 @@ static int create_srq_ring(PCIDevice *pci_dev, PvrdmaRing **ring, r = g_malloc(sizeof(*r)); *ring = r; - r->ring_state = (PvrdmaRingState *) - rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE); + r->ring_state = rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE); if (!r->ring_state) { rdma_error_report("Failed to map tp SRQ ring state"); goto out_free_ring_mem; diff --git a/hw/rdma/vmw/pvrdma_qp_ops.c b/hw/rdma/vmw/pvrdma_qp_ops.c index bd7cbf2bdf..c30c8344f6 100644 --- a/hw/rdma/vmw/pvrdma_qp_ops.c +++ b/hw/rdma/vmw/pvrdma_qp_ops.c @@ -149,7 +149,7 @@ void pvrdma_qp_send(PVRDMADev *dev, uint32_t qp_handle) ring = (PvrdmaRing *)qp->opaque; - wqe = (struct PvrdmaSqWqe *)pvrdma_ring_next_elem_read(ring); + wqe = pvrdma_ring_next_elem_read(ring); while (wqe) { 
CompHandlerCtx *comp_ctx; @@ -212,7 +212,7 @@ void pvrdma_qp_recv(PVRDMADev *dev, uint32_t qp_handle) ring = &((PvrdmaRing *)qp->opaque)[1]; - wqe = (struct PvrdmaRqWqe *)pvrdma_ring_next_elem_read(ring); + wqe = pvrdma_ring_next_elem_read(ring); while (wqe) { CompHandlerCtx *comp_ctx; @@ -254,7 +254,7 @@ void pvrdma_srq_recv(PVRDMADev *dev, uint32_t srq_handle) ring = (PvrdmaRing *)srq->opaque; - wqe = (struct PvrdmaRqWqe *)pvrdma_ring_next_elem_read(ring); + wqe = pvrdma_ring_next_elem_read(ring); while (wqe) { CompHandlerCtx *comp_ctx; diff --git a/hw/remote/vfio-user-obj.c b/hw/remote/vfio-user-obj.c index 4e36bb8bcf..fe1fdfb5f7 100644 --- a/hw/remote/vfio-user-obj.c +++ b/hw/remote/vfio-user-obj.c @@ -678,7 +678,7 @@ static int vfu_object_device_reset(vfu_ctx_t *vfu_ctx, vfu_reset_type_t type) return 0; } - qdev_reset_all(DEVICE(o->pci_dev)); + device_cold_reset(DEVICE(o->pci_dev)); return 0; } @@ -719,7 +719,6 @@ static void vfu_object_machine_done(Notifier *notifier, void *data) */ static void vfu_object_init_ctx(VfuObject *o, Error **errp) { - ERRP_GUARD(); DeviceState *dev = NULL; vfu_pci_type_t pci_type = VFU_PCI_TYPE_CONVENTIONAL; int ret; diff --git a/hw/s390x/s390-pci-bus.c b/hw/s390x/s390-pci-bus.c index 977e7daa15..02751f3597 100644 --- a/hw/s390x/s390-pci-bus.c +++ b/hw/s390x/s390-pci-bus.c @@ -24,6 +24,8 @@ #include "hw/pci/msi.h" #include "qemu/error-report.h" #include "qemu/module.h" +#include "sysemu/reset.h" +#include "sysemu/runstate.h" #ifndef DEBUG_S390PCI_BUS #define DEBUG_S390PCI_BUS 0 @@ -150,10 +152,30 @@ out: psccb->header.response_code = cpu_to_be16(rc); } +static void s390_pci_shutdown_notifier(Notifier *n, void *opaque) +{ + S390PCIBusDevice *pbdev = container_of(n, S390PCIBusDevice, + shutdown_notifier); + + pci_device_reset(pbdev->pdev); +} + +static void s390_pci_reset_cb(void *opaque) +{ + S390PCIBusDevice *pbdev = opaque; + + pci_device_reset(pbdev->pdev); +} + static void s390_pci_perform_unplug(S390PCIBusDevice *pbdev) { HotplugHandler *hotplug_ctrl; + if (pbdev->pft == ZPCI_PFT_ISM) { + notifier_remove(&pbdev->shutdown_notifier); + qemu_unregister_reset(s390_pci_reset_cb, pbdev); + } + /* Unplug the PCI device */ if (pbdev->pdev) { DeviceState *pdev = DEVICE(pbdev->pdev); @@ -1111,6 +1133,12 @@ static void s390_pcihost_plug(HotplugHandler *hotplug_dev, DeviceState *dev, pbdev->fh |= FH_SHM_VFIO; pbdev->forwarding_assist = false; } + /* Register shutdown notifier and reset callback for ISM devices */ + if (pbdev->pft == ZPCI_PFT_ISM) { + pbdev->shutdown_notifier.notify = s390_pci_shutdown_notifier; + qemu_register_shutdown_notifier(&pbdev->shutdown_notifier); + qemu_register_reset(s390_pci_reset_cb, pbdev); + } } else { pbdev->fh |= FH_SHM_EMUL; /* Always intercept emulated devices */ diff --git a/hw/s390x/s390-pci-inst.c b/hw/s390x/s390-pci-inst.c index 7cc4bcf850..9abe95130c 100644 --- a/hw/s390x/s390-pci-inst.c +++ b/hw/s390x/s390-pci-inst.c @@ -272,7 +272,7 @@ int clp_service_call(S390CPU *cpu, uint8_t r2, uintptr_t ra) stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FHOP); goto out; } - device_legacy_reset(DEVICE(pbdev)); + device_cold_reset(DEVICE(pbdev)); pbdev->fh &= ~FH_MASK_ENABLE; pbdev->state = ZPCI_FS_DISABLED; stl_p(&ressetpci->fh, pbdev->fh); @@ -640,6 +640,8 @@ static uint32_t s390_pci_update_iotlb(S390PCIIOMMU *iommu, } g_hash_table_remove(iommu->iotlb, &entry->iova); inc_dma_avail(iommu); + /* Don't notify the iommu yet, maybe we can bundle contiguous unmaps */ + goto out; } else { if (cache) { if (cache->perm == entry->perm && @@ -663,15 
+665,44 @@ static uint32_t s390_pci_update_iotlb(S390PCIIOMMU *iommu, dec_dma_avail(iommu); } + /* + * All associated iotlb entries have already been cleared, trigger the + * unmaps. + */ memory_region_notify_iommu(&iommu->iommu_mr, 0, event); out: return iommu->dma_limit ? iommu->dma_limit->avail : 1; } +static void s390_pci_batch_unmap(S390PCIIOMMU *iommu, uint64_t iova, + uint64_t len) +{ + uint64_t remain = len, start = iova, end = start + len - 1, mask, size; + IOMMUTLBEvent event = { + .type = IOMMU_NOTIFIER_UNMAP, + .entry = { + .target_as = &address_space_memory, + .translated_addr = 0, + .perm = IOMMU_NONE, + }, + }; + + while (remain >= TARGET_PAGE_SIZE) { + mask = dma_aligned_pow2_mask(start, end, 64); + size = mask + 1; + event.entry.iova = start; + event.entry.addr_mask = mask; + memory_region_notify_iommu(&iommu->iommu_mr, 0, event); + start += size; + remain -= size; + } +} + int rpcit_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra) { CPUS390XState *env = &cpu->env; + uint64_t iova, coalesce = 0; uint32_t fh; uint16_t error = 0; S390PCIBusDevice *pbdev; @@ -742,6 +773,21 @@ int rpcit_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra) break; } + /* + * If this is an unmap of a PTE, let's try to coalesce multiple unmaps + * into as few notifier events as possible. + */ + if (entry.perm == IOMMU_NONE && entry.len == TARGET_PAGE_SIZE) { + if (coalesce == 0) { + iova = entry.iova; + } + coalesce += entry.len; + } else if (coalesce > 0) { + /* Unleash the coalesced unmap before processing a new map */ + s390_pci_batch_unmap(iommu, iova, coalesce); + coalesce = 0; + } + start += entry.len; while (entry.iova < start && entry.iova < end) { if (dma_avail > 0 || entry.perm == IOMMU_NONE) { @@ -759,6 +805,11 @@ int rpcit_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra) } } } + if (coalesce) { + /* Unleash the coalesced unmap before finishing rpcit */ + s390_pci_batch_unmap(iommu, iova, coalesce); + coalesce = 0; + } if (again && dma_avail > 0) goto retry; err: diff --git a/hw/s390x/s390-pci-vfio.c b/hw/s390x/s390-pci-vfio.c index 5f0adb0b4a..f51190d466 100644 --- a/hw/s390x/s390-pci-vfio.c +++ b/hw/s390x/s390-pci-vfio.c @@ -84,6 +84,7 @@ S390PCIDMACount *s390_pci_start_dma_count(S390pciState *s, cnt->users = 1; cnt->avail = avail; QTAILQ_INSERT_TAIL(&s->zpci_dma_limit, cnt, link); + pbdev->iommu->max_dma_limit = avail; return cnt; } @@ -103,6 +104,7 @@ static void s390_pci_read_base(S390PCIBusDevice *pbdev, struct vfio_info_cap_header *hdr; struct vfio_device_info_cap_zpci_base *cap; VFIOPCIDevice *vpci = container_of(pbdev->pdev, VFIOPCIDevice, pdev); + uint64_t vfio_size; hdr = vfio_get_device_info_cap(info, VFIO_DEVICE_INFO_CAP_ZPCI_BASE); @@ -122,6 +124,17 @@ static void s390_pci_read_base(S390PCIBusDevice *pbdev, /* The following values remain 0 until we support other FMB formats */ pbdev->zpci_fn.fmbl = 0; pbdev->zpci_fn.pft = 0; + /* Store function type separately for type-specific behavior */ + pbdev->pft = cap->pft; + + /* + * If appropriate, reduce the size of the supported DMA aperture reported + * to the guest based upon the vfio DMA limit. 
+ */ + vfio_size = pbdev->iommu->max_dma_limit << TARGET_PAGE_BITS; + if (vfio_size < (cap->end_dma - cap->start_dma + 1)) { + pbdev->zpci_fn.edma = cap->start_dma + vfio_size - 1; + } } static bool get_host_fh(S390PCIBusDevice *pbdev, struct vfio_device_info *info, diff --git a/hw/s390x/s390-virtio-ccw.c b/hw/s390x/s390-virtio-ccw.c index 2e64ffab45..fab79045dd 100644 --- a/hw/s390x/s390-virtio-ccw.c +++ b/hw/s390x/s390-virtio-ccw.c @@ -118,7 +118,7 @@ static void subsystem_reset(void) for (i = 0; i < ARRAY_SIZE(reset_dev_types); i++) { dev = DEVICE(object_resolve_path_type("", reset_dev_types[i], NULL)); if (dev) { - qdev_reset_all(dev); + device_cold_reset(dev); } } } diff --git a/hw/usb/dev-uas.c b/hw/usb/dev-uas.c index 5192b062d6..88f99c05d5 100644 --- a/hw/usb/dev-uas.c +++ b/hw/usb/dev-uas.c @@ -791,7 +791,7 @@ static void usb_uas_task(UASDevice *uas, uas_iu *iu) case UAS_TMF_LOGICAL_UNIT_RESET: trace_usb_uas_tmf_logical_unit_reset(uas->dev.addr, tag, lun); - qdev_reset_all(&dev->qdev); + device_cold_reset(&dev->qdev); usb_uas_queue_response(uas, tag, UAS_RC_TMF_COMPLETE); break; diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c index 8f635844af..b8aaa99ab5 100644 --- a/hw/virtio/vhost-user.c +++ b/hw/virtio/vhost-user.c @@ -2533,11 +2533,7 @@ vhost_user_crypto_close_session(struct vhost_dev *dev, uint64_t session_id) static bool vhost_user_mem_section_filter(struct vhost_dev *dev, MemoryRegionSection *section) { - bool result; - - result = memory_region_get_fd(section->mr) >= 0; - - return result; + return memory_region_get_fd(section->mr) >= 0; } static int vhost_user_get_inflight_fd(struct vhost_dev *dev, diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c index 7468e44b87..bc1c79b325 100644 --- a/hw/virtio/vhost-vdpa.c +++ b/hw/virtio/vhost-vdpa.c @@ -963,6 +963,7 @@ static bool vhost_vdpa_svq_map_rings(struct vhost_dev *dev, struct vhost_vring_addr *addr, Error **errp) { + ERRP_GUARD(); DMAMap device_region, driver_region; struct vhost_vring_addr svq_addr; struct vhost_vdpa *v = dev->opaque; @@ -971,7 +972,6 @@ static bool vhost_vdpa_svq_map_rings(struct vhost_dev *dev, size_t avail_offset; bool ok; - ERRP_GUARD(); vhost_svq_get_vring_addr(svq, &svq_addr); driver_region = (DMAMap) { diff --git a/hw/virtio/virtio-balloon.c b/hw/virtio/virtio-balloon.c index 73ac5eb675..746f07c4d2 100644 --- a/hw/virtio/virtio-balloon.c +++ b/hw/virtio/virtio-balloon.c @@ -241,36 +241,34 @@ static void balloon_stats_poll_cb(void *opaque) static void balloon_stats_get_all(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - Error *err = NULL; VirtIOBalloon *s = VIRTIO_BALLOON(obj); + bool ok = false; int i; - if (!visit_start_struct(v, name, NULL, 0, &err)) { - goto out; + if (!visit_start_struct(v, name, NULL, 0, errp)) { + return; } - if (!visit_type_int(v, "last-update", &s->stats_last_update, &err)) { + if (!visit_type_int(v, "last-update", &s->stats_last_update, errp)) { goto out_end; } - if (!visit_start_struct(v, "stats", NULL, 0, &err)) { + if (!visit_start_struct(v, "stats", NULL, 0, errp)) { goto out_end; } for (i = 0; i < VIRTIO_BALLOON_S_NR; i++) { - if (!visit_type_uint64(v, balloon_stat_names[i], &s->stats[i], &err)) { + if (!visit_type_uint64(v, balloon_stat_names[i], &s->stats[i], errp)) { goto out_nested; } } - visit_check_struct(v, &err); + ok = visit_check_struct(v, errp); out_nested: visit_end_struct(v, NULL); - if (!err) { - visit_check_struct(v, &err); + if (ok) { + visit_check_struct(v, errp); } out_end: visit_end_struct(v, NULL); 
-out: - error_propagate(errp, err); } static void balloon_stats_get_poll_interval(Object *obj, Visitor *v, diff --git a/hw/virtio/virtio-iommu.c b/hw/virtio/virtio-iommu.c index 62e07ec2e4..23c470977e 100644 --- a/hw/virtio/virtio-iommu.c +++ b/hw/virtio/virtio-iommu.c @@ -775,8 +775,7 @@ static void virtio_iommu_handle_command(VirtIODevice *vdev, VirtQueue *vq) output_size = s->config.probe_size + sizeof(tail); buf = g_malloc0(output_size); - ptail = (struct virtio_iommu_req_tail *) - (buf + s->config.probe_size); + ptail = buf + s->config.probe_size; ptail->status = virtio_iommu_handle_probe(s, iov, iov_cnt, buf); break; } diff --git a/hw/virtio/virtio-mem-pci.c b/hw/virtio/virtio-mem-pci.c index 5c5c1e3ae3..e8c338c5d9 100644 --- a/hw/virtio/virtio-mem-pci.c +++ b/hw/virtio/virtio-mem-pci.c @@ -65,7 +65,6 @@ static void virtio_mem_pci_fill_device_info(const MemoryDeviceState *md, DeviceState *dev = DEVICE(md); if (dev->id) { - vi->has_id = true; vi->id = g_strdup(dev->id); } @@ -90,8 +89,7 @@ static void virtio_mem_pci_size_change_notify(Notifier *notifier, void *data) char *qom_path = object_get_canonical_path(OBJECT(dev)); const uint64_t * const size_p = data; - qapi_event_send_memory_device_size_change(!!dev->id, dev->id, *size_p, - qom_path); + qapi_event_send_memory_device_size_change(dev->id, *size_p, qom_path); g_free(qom_path); } diff --git a/hw/virtio/virtio-mem.c b/hw/virtio/virtio-mem.c index ed170def48..d96bde1fab 100644 --- a/hw/virtio/virtio-mem.c +++ b/hw/virtio/virtio-mem.c @@ -1094,12 +1094,9 @@ static void virtio_mem_set_requested_size(Object *obj, Visitor *v, Error **errp) { VirtIOMEM *vmem = VIRTIO_MEM(obj); - Error *err = NULL; uint64_t value; - visit_type_size(v, name, &value, &err); - if (err) { - error_propagate(errp, err); + if (!visit_type_size(v, name, &value, errp)) { return; } @@ -1159,7 +1156,6 @@ static void virtio_mem_set_block_size(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { VirtIOMEM *vmem = VIRTIO_MEM(obj); - Error *err = NULL; uint64_t value; if (DEVICE(obj)->realized) { @@ -1167,9 +1163,7 @@ static void virtio_mem_set_block_size(Object *obj, Visitor *v, const char *name, return; } - visit_type_size(v, name, &value, &err); - if (err) { - error_propagate(errp, err); + if (!visit_type_size(v, name, &value, errp)) { return; } diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c index a1c9dfa7bb..7873083b86 100644 --- a/hw/virtio/virtio-pci.c +++ b/hw/virtio/virtio-pci.c @@ -2008,9 +2008,10 @@ static void virtio_pci_reset(DeviceState *qdev) } } -static void virtio_pci_bus_reset(DeviceState *qdev) +static void virtio_pci_bus_reset_hold(Object *obj) { - PCIDevice *dev = PCI_DEVICE(qdev); + PCIDevice *dev = PCI_DEVICE(obj); + DeviceState *qdev = DEVICE(obj); virtio_pci_reset(qdev); @@ -2071,6 +2072,7 @@ static void virtio_pci_class_init(ObjectClass *klass, void *data) DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); VirtioPCIClass *vpciklass = VIRTIO_PCI_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); device_class_set_props(dc, virtio_pci_properties); k->realize = virtio_pci_realize; @@ -2080,7 +2082,7 @@ static void virtio_pci_class_init(ObjectClass *klass, void *data) k->class_id = PCI_CLASS_OTHERS; device_class_set_parent_realize(dc, virtio_pci_dc_realize, &vpciklass->parent_dc_realize); - dc->reset = virtio_pci_bus_reset; + rc->phases.hold = virtio_pci_bus_reset_hold; } static const TypeInfo virtio_pci_info = { diff --git a/hw/virtio/virtio-pmem-pci.c 
b/hw/virtio/virtio-pmem-pci.c index 7d9f4ec189..1b89ade9d1 100644 --- a/hw/virtio/virtio-pmem-pci.c +++ b/hw/virtio/virtio-pmem-pci.c @@ -70,7 +70,6 @@ static void virtio_pmem_pci_fill_device_info(const MemoryDeviceState *md, DeviceState *dev = DEVICE(md); if (dev->id) { - vi->has_id = true; vi->id = g_strdup(dev->id); } diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c index eb6347ab5d..2118efbe72 100644 --- a/hw/virtio/virtio.c +++ b/hw/virtio/virtio.c @@ -4701,7 +4701,6 @@ VirtioStatus *qmp_x_query_virtio_status(const char *path, Error **errp) status->disable_legacy_check = vdev->disable_legacy_check; status->bus_name = g_strdup(vdev->bus_name); status->use_guest_notifier_mask = vdev->use_guest_notifier_mask; - status->has_vhost_dev = vdev->vhost_started; if (vdev->vhost_started) { VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev); diff --git a/include/block/aio.h b/include/block/aio.h index d128558f1d..0f65a3cc9e 100644 --- a/include/block/aio.h +++ b/include/block/aio.h @@ -22,6 +22,7 @@ #include "qemu/event_notifier.h" #include "qemu/thread.h" #include "qemu/timer.h" +#include "block/graph-lock.h" typedef struct BlockAIOCB BlockAIOCB; typedef void BlockCompletionFunc(void *opaque, int ret); @@ -127,6 +128,14 @@ struct AioContext { /* Used by AioContext users to protect from multi-threaded access. */ QemuRecMutex lock; + /* + * Keep track of readers and writers of the block layer graph. + * This is essential to avoid performing additions and removal + * of nodes and edges from block graph while some + * other thread is traversing it. + */ + BdrvGraphRWlock *bdrv_graph; + /* The list of registered AIO handlers. Protected by ctx->list_lock. */ AioHandlerList aio_handlers; diff --git a/include/block/block-common.h b/include/block/block-common.h index 297704c1e9..4749c46a5e 100644 --- a/include/block/block-common.h +++ b/include/block/block-common.h @@ -29,20 +29,35 @@ #include "qemu/iov.h" #include "qemu/coroutine.h" #include "block/accounting.h" -#include "block/dirty-bitmap.h" -#include "block/blockjob.h" #include "qemu/hbitmap.h" #include "qemu/transactions.h" /* - * generated_co_wrapper + * co_wrapper{*}: Function specifiers used by block-coroutine-wrapper.py * - * Function specifier, which does nothing but mark functions to be + * Function specifiers, which do nothing but mark functions to be * generated by scripts/block-coroutine-wrapper.py * - * Read more in docs/devel/block-coroutine-wrapper.rst + * Usage: read docs/devel/block-coroutine-wrapper.rst + * + * There are 4 kind of specifiers: + * - co_wrapper functions can be called by only non-coroutine context, because + * they always generate a new coroutine. + * - co_wrapper_mixed functions can be called by both coroutine and + * non-coroutine context. + * - co_wrapper_bdrv_rdlock are co_wrapper functions but automatically take and + * release the graph rdlock when creating a new coroutine + * - co_wrapper_mixed_bdrv_rdlock are co_wrapper_mixed functions but + * automatically take and release the graph rdlock when creating a new + * coroutine. 
*/ -#define generated_co_wrapper +#define co_wrapper +#define co_wrapper_mixed +#define co_wrapper_bdrv_rdlock +#define co_wrapper_mixed_bdrv_rdlock + +#include "block/dirty-bitmap.h" +#include "block/blockjob.h" /* block.c */ typedef struct BlockDriver BlockDriver; diff --git a/include/block/block-copy.h b/include/block/block-copy.h index ba0b425d78..8cea4f9b90 100644 --- a/include/block/block-copy.h +++ b/include/block/block-copy.h @@ -36,8 +36,9 @@ void block_copy_set_progress_meter(BlockCopyState *s, ProgressMeter *pm); void block_copy_state_free(BlockCopyState *s); void block_copy_reset(BlockCopyState *s, int64_t offset, int64_t bytes); -int64_t block_copy_reset_unallocated(BlockCopyState *s, - int64_t offset, int64_t *count); +int64_t coroutine_fn block_copy_reset_unallocated(BlockCopyState *s, + int64_t offset, + int64_t *count); int coroutine_fn block_copy(BlockCopyState *s, int64_t offset, int64_t bytes, bool ignore_ratelimit, uint64_t timeout_ns, diff --git a/include/block/block-global-state.h b/include/block/block-global-state.h index c7bd4a2088..b0a3cfe6b8 100644 --- a/include/block/block-global-state.h +++ b/include/block/block-global-state.h @@ -55,9 +55,14 @@ BlockDriver *bdrv_find_protocol(const char *filename, bool allow_protocol_prefix, Error **errp); BlockDriver *bdrv_find_format(const char *format_name); -int bdrv_create(BlockDriver *drv, const char* filename, - QemuOpts *opts, Error **errp); -int bdrv_create_file(const char *filename, QemuOpts *opts, Error **errp); + +int coroutine_fn bdrv_co_create(BlockDriver *drv, const char *filename, + QemuOpts *opts, Error **errp); +int co_wrapper bdrv_create(BlockDriver *drv, const char *filename, + QemuOpts *opts, Error **errp); + +int coroutine_fn bdrv_co_create_file(const char *filename, QemuOpts *opts, + Error **errp); BlockDriverState *bdrv_new(void); int bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top, @@ -82,6 +87,9 @@ int bdrv_open_file_child(const char *filename, BlockDriverState *bdrv_open_blockdev_ref(BlockdevRef *ref, Error **errp); int bdrv_set_backing_hd(BlockDriverState *bs, BlockDriverState *backing_hd, Error **errp); +int bdrv_set_backing_hd_drained(BlockDriverState *bs, + BlockDriverState *backing_hd, + Error **errp); int bdrv_open_backing_file(BlockDriverState *bs, QDict *parent_options, const char *bdref_key, Error **errp); BlockDriverState *bdrv_open(const char *filename, const char *reference, @@ -144,6 +152,7 @@ int bdrv_inactivate_all(void); int bdrv_flush_all(void); void bdrv_close_all(void); void bdrv_drain_all_begin(void); +void bdrv_drain_all_begin_nopoll(void); void bdrv_drain_all_end(void); void bdrv_drain_all(void); diff --git a/include/block/block-io.h b/include/block/block-io.h index b099d7db45..2ed6214909 100644 --- a/include/block/block-io.h +++ b/include/block/block-io.h @@ -39,19 +39,24 @@ * to catch when they are accidentally called by the wrong API. 
*/ -int generated_co_wrapper bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset, - int64_t bytes, - BdrvRequestFlags flags); +int co_wrapper_mixed_bdrv_rdlock +bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset, int64_t bytes, + BdrvRequestFlags flags); + int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags); -int generated_co_wrapper bdrv_pread(BdrvChild *child, int64_t offset, - int64_t bytes, void *buf, - BdrvRequestFlags flags); -int generated_co_wrapper bdrv_pwrite(BdrvChild *child, int64_t offset, - int64_t bytes, const void *buf, - BdrvRequestFlags flags); -int generated_co_wrapper bdrv_pwrite_sync(BdrvChild *child, int64_t offset, - int64_t bytes, const void *buf, - BdrvRequestFlags flags); + +int co_wrapper_mixed_bdrv_rdlock +bdrv_pread(BdrvChild *child, int64_t offset, int64_t bytes, void *buf, + BdrvRequestFlags flags); + +int co_wrapper_mixed_bdrv_rdlock +bdrv_pwrite(BdrvChild *child, int64_t offset,int64_t bytes, + const void *buf, BdrvRequestFlags flags); + +int co_wrapper_mixed_bdrv_rdlock +bdrv_pwrite_sync(BdrvChild *child, int64_t offset, int64_t bytes, + const void *buf, BdrvRequestFlags flags); + int coroutine_fn bdrv_co_pwrite_sync(BdrvChild *child, int64_t offset, int64_t bytes, const void *buf, BdrvRequestFlags flags); @@ -94,14 +99,29 @@ bool bdrv_can_write_zeroes_with_unmap(BlockDriverState *bs); int bdrv_block_status(BlockDriverState *bs, int64_t offset, int64_t bytes, int64_t *pnum, int64_t *map, BlockDriverState **file); + +int coroutine_fn bdrv_co_block_status_above(BlockDriverState *bs, + BlockDriverState *base, + int64_t offset, int64_t bytes, + int64_t *pnum, int64_t *map, + BlockDriverState **file); int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base, int64_t offset, int64_t bytes, int64_t *pnum, int64_t *map, BlockDriverState **file); + +int coroutine_fn bdrv_co_is_allocated(BlockDriverState *bs, int64_t offset, + int64_t bytes, int64_t *pnum); int bdrv_is_allocated(BlockDriverState *bs, int64_t offset, int64_t bytes, int64_t *pnum); + +int coroutine_fn bdrv_co_is_allocated_above(BlockDriverState *top, + BlockDriverState *base, + bool include_base, int64_t offset, + int64_t bytes, int64_t *pnum); int bdrv_is_allocated_above(BlockDriverState *top, BlockDriverState *base, bool include_base, int64_t offset, int64_t bytes, int64_t *pnum); + int coroutine_fn bdrv_co_is_zero_fast(BlockDriverState *bs, int64_t offset, int64_t bytes); @@ -200,8 +220,14 @@ AioContext *child_of_bds_get_parent_aio_context(BdrvChild *c); void bdrv_io_plug(BlockDriverState *bs); void bdrv_io_unplug(BlockDriverState *bs); -bool bdrv_can_store_new_dirty_bitmap(BlockDriverState *bs, const char *name, - uint32_t granularity, Error **errp); +bool coroutine_fn bdrv_co_can_store_new_dirty_bitmap(BlockDriverState *bs, + const char *name, + uint32_t granularity, + Error **errp); +bool co_wrapper bdrv_can_store_new_dirty_bitmap(BlockDriverState *bs, + const char *name, + uint32_t granularity, + Error **errp); /** * @@ -237,21 +263,6 @@ int coroutine_fn bdrv_co_copy_range(BdrvChild *src, int64_t src_offset, int64_t bytes, BdrvRequestFlags read_flags, BdrvRequestFlags write_flags); -/** - * bdrv_drained_end_no_poll: - * - * Same as bdrv_drained_end(), but do not poll for the subgraph to - * actually become unquiesced. Therefore, no graph changes will occur - * with this function. - * - * *drained_end_counter is incremented for every background operation - * that is scheduled, and will be decremented for every operation once - * it settles. 
The caller must poll until it reaches 0. The counter - * should be accessed using atomic operations only. - */ -void bdrv_drained_end_no_poll(BlockDriverState *bs, int *drained_end_counter); - - /* * "I/O or GS" API functions. These functions can run without * the BQL, but only in one specific iothread/main loop. @@ -281,47 +292,54 @@ void bdrv_drained_end_no_poll(BlockDriverState *bs, int *drained_end_counter); void bdrv_drain(BlockDriverState *bs); -int generated_co_wrapper +int co_wrapper_mixed_bdrv_rdlock bdrv_truncate(BdrvChild *child, int64_t offset, bool exact, PreallocMode prealloc, BdrvRequestFlags flags, Error **errp); -int generated_co_wrapper bdrv_check(BlockDriverState *bs, BdrvCheckResult *res, - BdrvCheckMode fix); +int co_wrapper_mixed_bdrv_rdlock +bdrv_check(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix); /* Invalidate any cached metadata used by image formats */ -int generated_co_wrapper bdrv_invalidate_cache(BlockDriverState *bs, - Error **errp); -int generated_co_wrapper bdrv_flush(BlockDriverState *bs); -int generated_co_wrapper bdrv_pdiscard(BdrvChild *child, int64_t offset, - int64_t bytes); -int generated_co_wrapper +int co_wrapper_mixed_bdrv_rdlock +bdrv_invalidate_cache(BlockDriverState *bs, Error **errp); + +int co_wrapper_mixed_bdrv_rdlock bdrv_flush(BlockDriverState *bs); + +int co_wrapper_mixed_bdrv_rdlock +bdrv_pdiscard(BdrvChild *child, int64_t offset, int64_t bytes); + +int co_wrapper_mixed_bdrv_rdlock bdrv_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos); -int generated_co_wrapper + +int co_wrapper_mixed_bdrv_rdlock bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos); /** * bdrv_parent_drained_begin_single: * - * Begin a quiesced section for the parent of @c. If @poll is true, wait for - * any pending activity to cease. + * Begin a quiesced section for the parent of @c. + */ +void bdrv_parent_drained_begin_single(BdrvChild *c); + +/** + * bdrv_parent_drained_poll_single: + * + * Returns true if there is any pending activity to cease before @c can be + * called quiesced, false otherwise. */ -void bdrv_parent_drained_begin_single(BdrvChild *c, bool poll); +bool bdrv_parent_drained_poll_single(BdrvChild *c); /** * bdrv_parent_drained_end_single: * * End a quiesced section for the parent of @c. - * - * This polls @bs's AioContext until all scheduled sub-drained_ends - * have settled, which may result in graph changes. */ void bdrv_parent_drained_end_single(BdrvChild *c); /** * bdrv_drain_poll: * - * Poll for pending requests in @bs, its parents (except for @ignore_parent), - * and if @recursive is true its children as well (used for subtree drain). + * Poll for pending requests in @bs and its parents (except for @ignore_parent). * * If @ignore_bds_parents is true, parents that are BlockDriverStates must * ignore the drain request because they will be drained separately (used for @@ -329,8 +347,8 @@ void bdrv_parent_drained_end_single(BdrvChild *c); * * This is part of bdrv_drained_begin. */ -bool bdrv_drain_poll(BlockDriverState *bs, bool recursive, - BdrvChild *ignore_parent, bool ignore_bds_parents); +bool bdrv_drain_poll(BlockDriverState *bs, BdrvChild *ignore_parent, + bool ignore_bds_parents); /** * bdrv_drained_begin: @@ -348,31 +366,13 @@ void bdrv_drained_begin(BlockDriverState *bs); * Quiesces a BDS like bdrv_drained_begin(), but does not wait for already * running requests to complete. 
*/ -void bdrv_do_drained_begin_quiesce(BlockDriverState *bs, - BdrvChild *parent, bool ignore_bds_parents); - -/** - * Like bdrv_drained_begin, but recursively begins a quiesced section for - * exclusive access to all child nodes as well. - */ -void bdrv_subtree_drained_begin(BlockDriverState *bs); +void bdrv_do_drained_begin_quiesce(BlockDriverState *bs, BdrvChild *parent); /** * bdrv_drained_end: * * End a quiescent section started by bdrv_drained_begin(). - * - * This polls @bs's AioContext until all scheduled sub-drained_ends - * have settled. On one hand, that may result in graph changes. On - * the other, this requires that the caller either runs in the main - * loop; or that all involved nodes (@bs and all of its parents) are - * in the caller's AioContext. */ void bdrv_drained_end(BlockDriverState *bs); -/** - * End a quiescent section started by bdrv_subtree_drained_begin(). - */ -void bdrv_subtree_drained_end(BlockDriverState *bs); - #endif /* BLOCK_IO_H */ diff --git a/include/block/block_int-common.h b/include/block/block_int-common.h index 31ae91e56e..c34c525fa6 100644 --- a/include/block/block_int-common.h +++ b/include/block/block_int-common.h @@ -641,8 +641,8 @@ struct BlockDriver { /* * Invalidate any cached meta-data. */ - void coroutine_fn (*bdrv_co_invalidate_cache)(BlockDriverState *bs, - Error **errp); + void coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_invalidate_cache)( + BlockDriverState *bs, Error **errp); /* * Flushes all data for all layers by calling bdrv_co_flush for underlying @@ -701,12 +701,11 @@ struct BlockDriver { Error **errp); BlockStatsSpecific *(*bdrv_get_specific_stats)(BlockDriverState *bs); - int coroutine_fn (*bdrv_save_vmstate)(BlockDriverState *bs, - QEMUIOVector *qiov, - int64_t pos); - int coroutine_fn (*bdrv_load_vmstate)(BlockDriverState *bs, - QEMUIOVector *qiov, - int64_t pos); + int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_save_vmstate)( + BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos); + + int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_load_vmstate)( + BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos); /* removable device specific */ bool (*bdrv_is_inserted)(BlockDriverState *bs); @@ -724,9 +723,8 @@ struct BlockDriver { * Returns 0 for completed check, -errno for internal errors. * The check results are stored in result. */ - int coroutine_fn (*bdrv_co_check)(BlockDriverState *bs, - BdrvCheckResult *result, - BdrvCheckMode fix); + int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_check)( + BlockDriverState *bs, BdrvCheckResult *result, BdrvCheckMode fix); void (*bdrv_debug_event)(BlockDriverState *bs, BlkdebugEvent event); @@ -735,17 +733,19 @@ struct BlockDriver { void (*bdrv_io_unplug)(BlockDriverState *bs); /** - * bdrv_co_drain_begin is called if implemented in the beginning of a + * bdrv_drain_begin is called if implemented in the beginning of a * drain operation to drain and stop any internal sources of requests in * the driver. - * bdrv_co_drain_end is called if implemented at the end of the drain. + * bdrv_drain_end is called if implemented at the end of the drain. * * They should be used by the driver to e.g. manage scheduled I/O * requests, or toggle an internal state. After the end of the drain new * requests will continue normally. + * + * Implementations of both functions must not call aio_poll(). 
*/ - void coroutine_fn (*bdrv_co_drain_begin)(BlockDriverState *bs); - void coroutine_fn (*bdrv_co_drain_end)(BlockDriverState *bs); + void (*bdrv_drain_begin)(BlockDriverState *bs); + void (*bdrv_drain_end)(BlockDriverState *bs); bool (*bdrv_supports_persistent_dirty_bitmap)(BlockDriverState *bs); bool coroutine_fn (*bdrv_co_can_store_new_dirty_bitmap)( @@ -896,8 +896,8 @@ struct BdrvChildClass { void (*activate)(BdrvChild *child, Error **errp); int (*inactivate)(BdrvChild *child); - void (*attach)(BdrvChild *child); - void (*detach)(BdrvChild *child); + void GRAPH_WRLOCK_PTR (*attach)(BdrvChild *child); + void GRAPH_WRLOCK_PTR (*detach)(BdrvChild *child); /* * Notifies the parent that the filename of its child has changed (e.g. @@ -937,15 +937,11 @@ struct BdrvChildClass { * These functions must not change the graph (and therefore also must not * call aio_poll(), which could change the graph indirectly). * - * If drained_end() schedules background operations, it must atomically - * increment *drained_end_counter for each such operation and atomically - * decrement it once the operation has settled. - * * Note that this can be nested. If drained_begin() was called twice, new * I/O is allowed only after drained_end() was called twice, too. */ void (*drained_begin)(BdrvChild *child); - void (*drained_end)(BdrvChild *child, int *drained_end_counter); + void (*drained_end)(BdrvChild *child); /* * Returns whether the parent has pending requests for the child. This @@ -982,13 +978,13 @@ struct BdrvChild { bool frozen; /* - * How many times the parent of this child has been drained + * True if the parent of this child has been drained by this BdrvChild * (through klass->drained_*). - * Usually, this is equal to bs->quiesce_counter (potentially - * reduced by bdrv_drain_all_count). It may differ while the + * + * It is generally true if bs->quiesce_counter > 0. It may differ while the * child is entering or leaving a drained section. */ - int parent_quiesce_counter; + bool quiesced_parent; QLIST_ENTRY(BdrvChild) next; QLIST_ENTRY(BdrvChild) next_parent; @@ -1186,7 +1182,6 @@ struct BlockDriverState { /* Accessed with atomic ops. */ int quiesce_counter; - int recursive_quiesce_counter; unsigned int write_gen; /* Current data generation */ diff --git a/include/block/block_int-global-state.h b/include/block/block_int-global-state.h index b49f4eb35b..2f0993f6e9 100644 --- a/include/block/block_int-global-state.h +++ b/include/block/block_int-global-state.h @@ -310,21 +310,4 @@ void bdrv_remove_aio_context_notifier(BlockDriverState *bs, */ void bdrv_drain_all_end_quiesce(BlockDriverState *bs); -/** - * Make sure that the function is running under both drain and BQL. - * The latter protects from concurrent writings - * from the GS API, while the former prevents concurrent reads - * from I/O. - */ -static inline void assert_bdrv_graph_writable(BlockDriverState *bs) -{ - /* - * TODO: this function is incomplete. Because the users of this - * assert lack the necessary drains, check only for BQL. 
- * Once the necessary drains are added, - * assert also for qatomic_read(&bs->quiesce_counter) > 0 - */ - assert(qemu_in_main_thread()); -} - #endif /* BLOCK_INT_GLOBAL_STATE_H */ diff --git a/include/block/block_int-io.h b/include/block/block_int-io.h index 4b0b3e17ef..8bc061ebb8 100644 --- a/include/block/block_int-io.h +++ b/include/block/block_int-io.h @@ -179,16 +179,4 @@ void bdrv_bsc_invalidate_range(BlockDriverState *bs, */ void bdrv_bsc_fill(BlockDriverState *bs, int64_t offset, int64_t bytes); - -/* - * "I/O or GS" API functions. These functions can run without - * the BQL, but only in one specific iothread/main loop. - * - * See include/block/block-io.h for more information about - * the "I/O or GS" API. - */ - -void bdrv_apply_subtree_drain(BdrvChild *child, BlockDriverState *new_parent); -void bdrv_unapply_subtree_drain(BdrvChild *child, BlockDriverState *old_parent); - #endif /* BLOCK_INT_IO_H */ diff --git a/include/block/block_int.h b/include/block/block_int.h index 7d50b6bbd1..b35b0138ed 100644 --- a/include/block/block_int.h +++ b/include/block/block_int.h @@ -26,6 +26,7 @@ #include "block_int-global-state.h" #include "block_int-io.h" +#include "block/graph-lock.h" /* DO NOT ADD ANYTHING IN HERE. USE ONE OF THE HEADERS INCLUDED ABOVE */ diff --git a/include/block/dirty-bitmap.h b/include/block/dirty-bitmap.h index 6528336c4c..c3700cec76 100644 --- a/include/block/dirty-bitmap.h +++ b/include/block/dirty-bitmap.h @@ -34,8 +34,14 @@ int bdrv_dirty_bitmap_check(const BdrvDirtyBitmap *bitmap, uint32_t flags, Error **errp); void bdrv_release_dirty_bitmap(BdrvDirtyBitmap *bitmap); void bdrv_release_named_dirty_bitmaps(BlockDriverState *bs); -int bdrv_remove_persistent_dirty_bitmap(BlockDriverState *bs, const char *name, - Error **errp); + +int coroutine_fn bdrv_co_remove_persistent_dirty_bitmap(BlockDriverState *bs, + const char *name, + Error **errp); +int co_wrapper bdrv_remove_persistent_dirty_bitmap(BlockDriverState *bs, + const char *name, + Error **errp); + void bdrv_disable_dirty_bitmap(BdrvDirtyBitmap *bitmap); void bdrv_enable_dirty_bitmap(BdrvDirtyBitmap *bitmap); void bdrv_enable_dirty_bitmap_locked(BdrvDirtyBitmap *bitmap); diff --git a/include/block/graph-lock.h b/include/block/graph-lock.h new file mode 100644 index 0000000000..4c92cd8edf --- /dev/null +++ b/include/block/graph-lock.h @@ -0,0 +1,280 @@ +/* + * Graph lock: rwlock to protect block layer graph manipulations (add/remove + * edges and nodes) + * + * Copyright (c) 2022 Red Hat + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ +#ifndef GRAPH_LOCK_H +#define GRAPH_LOCK_H + +#include "qemu/osdep.h" +#include "qemu/clang-tsa.h" + +#include "qemu/coroutine.h" + +/** + * Graph Lock API + * This API provides a rwlock used to protect block layer + * graph modifications like edge (BdrvChild) and node (BlockDriverState) + * addition and removal. 
+ * Currently we have 1 writer only, the Main loop, and many + * readers, mostly coroutines running in other AioContexts and thus other threads. + * + * We distinguish between the writer (main loop, under BQL) that modifies the + * graph, and readers (all other coroutines running in various AioContext), + * that go through the graph edges, reading + * BlockDriverState ->parents and ->children. + * + * The writer (main loop) has "exclusive" access, so it first waits for + * the current read to finish, and then prevents incoming ones from + * entering while it has the exclusive access. + * + * The readers (coroutines in multiple AioContext) are free to + * access the graph as long as the writer is not modifying the graph. + * In case it is, they go in a CoQueue and sleep until the writer + * is done. + * + * If a coroutine changes AioContext, the counters in the original and new + * AioContext are left intact, since the writer does not care where the + * reader is, but only if there is one. + * As a result, some AioContexts might have a negative reader count, to + * balance the positive count of the AioContext that took the lock. + * This also means that when an AioContext is deleted it may have a nonzero + * reader count. In that case we transfer the count to a global shared counter + * so that the writer is always aware of all readers. + */ +typedef struct BdrvGraphRWlock BdrvGraphRWlock; + +/* Dummy lock object to use for Thread Safety Analysis (TSA) */ +typedef struct TSA_CAPABILITY("mutex") BdrvGraphLock { +} BdrvGraphLock; + +extern BdrvGraphLock graph_lock; + +/* + * clang doesn't check consistency in locking annotations between forward + * declarations and the function definition. Having the annotation on the + * definition, but not the declaration in a header file, may give the reader + * a false sense of security because the condition actually remains unchecked + * for callers in other source files. + * + * Therefore, as a convention, for public functions, GRAPH_RDLOCK and + * GRAPH_WRLOCK annotations should be present only in the header file. + */ +#define GRAPH_WRLOCK TSA_REQUIRES(graph_lock) +#define GRAPH_RDLOCK TSA_REQUIRES_SHARED(graph_lock) + +/* + * TSA annotations are not part of function types, so checks are defeated when + * using a function pointer. As a workaround, annotate function pointers with + * this macro that will require that the lock is at least taken while reading + * the pointer. In most cases this is equivalent to actually protecting the + * function call. + */ +#define GRAPH_RDLOCK_PTR TSA_GUARDED_BY(graph_lock) +#define GRAPH_WRLOCK_PTR TSA_GUARDED_BY(graph_lock) + +/* + * register_aiocontext: + * Add AioContext @ctx to the list of AioContext. + * This list is used to obtain the total number of readers + * currently running the graph. + */ +void register_aiocontext(AioContext *ctx); + +/* + * unregister_aiocontext: + * Removes AioContext @ctx from the list of AioContext. + */ +void unregister_aiocontext(AioContext *ctx); + +/* + * bdrv_graph_wrlock: + * Start an exclusive write operation to modify the graph. This means we are + * adding or removing an edge or a node in the block layer graph. Nobody else + * is allowed to access the graph. + * + * Must only be called from outside bdrv_graph_co_rdlock. + * + * The wrlock can only be taken from the main loop, with BQL held, as only the + * main loop is allowed to modify the graph. + * + * This function polls. Callers must not hold the lock of any AioContext other + * than the current one. 
+ */ +void bdrv_graph_wrlock(void) TSA_ACQUIRE(graph_lock) TSA_NO_TSA; + +/* + * bdrv_graph_wrunlock: + * Write finished, reset global has_writer to 0 and restart + * all readers that are waiting. + */ +void bdrv_graph_wrunlock(void) TSA_RELEASE(graph_lock) TSA_NO_TSA; + +/* + * bdrv_graph_co_rdlock: + * Read the bs graph. This usually means traversing all nodes in + * the graph, therefore it can't happen while another thread is + * modifying it. + * Increases the reader counter of the current aiocontext, + * and if has_writer is set, it means that the writer is modifying + * the graph, therefore wait in a coroutine queue. + * The writer will then wake this coroutine once it is done. + * + * This lock should be taken from Iothreads (IO_CODE() class of functions) + * because it signals the writer that there are some + * readers currently running, or waits until the current + * write is finished before continuing. + * Calling this function from the Main Loop with BQL held + * is not necessary, since the Main Loop itself is the only + * writer, thus won't be able to read and write at the same time. + * The only exception to that is when we can't take the lock in the + * function/coroutine itself, and need to delegate to the caller (usually the + * main loop) to take it and wait until the coroutine ends, so that + * we always signal that a reader is running. + */ +void coroutine_fn TSA_ACQUIRE_SHARED(graph_lock) TSA_NO_TSA +bdrv_graph_co_rdlock(void); + +/* + * bdrv_graph_rdunlock: + * Read terminated, decrease the count of readers in the current aiocontext. + * If the writer is waiting for reads to finish (has_writer == 1), signal + * the writer that we are done via aio_wait_kick() to let it continue. + */ +void coroutine_fn TSA_RELEASE_SHARED(graph_lock) TSA_NO_TSA +bdrv_graph_co_rdunlock(void); + +/* + * bdrv_graph_rd{un}lock_main_loop: + * Just a placeholder to mark where the graph rdlock should be taken + * in the main loop. It is just asserting that we are not + * in a coroutine and are in GLOBAL_STATE_CODE. + */ +void TSA_ACQUIRE_SHARED(graph_lock) TSA_NO_TSA +bdrv_graph_rdlock_main_loop(void); + +void TSA_RELEASE_SHARED(graph_lock) TSA_NO_TSA +bdrv_graph_rdunlock_main_loop(void); + +/* + * assert_bdrv_graph_readable: + * Make sure that the reader is either the main loop, + * or there is at least a reader holding the rdlock. + * In this way an incoming writer is aware of the read and waits. + */ +void GRAPH_RDLOCK assert_bdrv_graph_readable(void); + +/* + * assert_bdrv_graph_writable: + * Make sure that the writer is the main loop and has set @has_writer, + * so that incoming readers will pause. + */ +void GRAPH_WRLOCK assert_bdrv_graph_writable(void); + +/* + * Calling this function tells TSA that we know that the lock is effectively + * taken even though we cannot prove it (yet) with GRAPH_RDLOCK. This can be + * useful in intermediate stages of a conversion to using the GRAPH_RDLOCK + * macro. + */ +static inline void TSA_ASSERT_SHARED(graph_lock) TSA_NO_TSA +assume_graph_lock(void) +{ +} + +typedef struct GraphLockable { } GraphLockable; + +/* + * In C, compound literals have the lifetime of an automatic variable. + * In C++ it would be different, but then C++ wouldn't need QemuLockable + * either... + */ +#define GML_OBJ_() (&(GraphLockable) { }) + +/* + * This is not marked as TSA_ACQUIRE() because TSA doesn't understand the + * cleanup attribute and would therefore complain that the graph is never + * unlocked. 
TSA_ASSERT() makes sure that the following calls know that we + * hold the lock while unlocking is left unchecked. + */ +static inline GraphLockable * TSA_ASSERT(graph_lock) TSA_NO_TSA +graph_lockable_auto_lock(GraphLockable *x) +{ + bdrv_graph_co_rdlock(); + return x; +} + +static inline void TSA_NO_TSA +graph_lockable_auto_unlock(GraphLockable *x) +{ + bdrv_graph_co_rdunlock(); +} + +G_DEFINE_AUTOPTR_CLEANUP_FUNC(GraphLockable, graph_lockable_auto_unlock) + +#define WITH_GRAPH_RDLOCK_GUARD_(var) \ + for (g_autoptr(GraphLockable) var = graph_lockable_auto_lock(GML_OBJ_()); \ + var; \ + graph_lockable_auto_unlock(var), var = NULL) + +#define WITH_GRAPH_RDLOCK_GUARD() \ + WITH_GRAPH_RDLOCK_GUARD_(glue(graph_lockable_auto, __COUNTER__)) + +#define GRAPH_RDLOCK_GUARD(x) \ + g_autoptr(GraphLockable) \ + glue(graph_lockable_auto, __COUNTER__) G_GNUC_UNUSED = \ + graph_lockable_auto_lock(GML_OBJ_()) + + +typedef struct GraphLockableMainloop { } GraphLockableMainloop; + +/* + * In C, compound literals have the lifetime of an automatic variable. + * In C++ it would be different, but then C++ wouldn't need QemuLockable + * either... + */ +#define GMLML_OBJ_() (&(GraphLockableMainloop) { }) + +/* + * This is not marked as TSA_ACQUIRE() because TSA doesn't understand the + * cleanup attribute and would therefore complain that the graph is never + * unlocked. TSA_ASSERT() makes sure that the following calls know that we + * hold the lock while unlocking is left unchecked. + */ +static inline GraphLockableMainloop * TSA_ASSERT(graph_lock) TSA_NO_TSA +graph_lockable_auto_lock_mainloop(GraphLockableMainloop *x) +{ + bdrv_graph_rdlock_main_loop(); + return x; +} + +static inline void TSA_NO_TSA +graph_lockable_auto_unlock_mainloop(GraphLockableMainloop *x) +{ + bdrv_graph_rdunlock_main_loop(); +} + +G_DEFINE_AUTOPTR_CLEANUP_FUNC(GraphLockableMainloop, + graph_lockable_auto_unlock_mainloop) + +#define GRAPH_RDLOCK_GUARD_MAINLOOP(x) \ + g_autoptr(GraphLockableMainloop) \ + glue(graph_lockable_auto, __COUNTER__) G_GNUC_UNUSED = \ + graph_lockable_auto_lock_mainloop(GMLML_OBJ_()) + +#endif /* GRAPH_LOCK_H */ + diff --git a/include/hw/arm/smmuv3.h b/include/hw/arm/smmuv3.h index c641e60735..f1921fdf9e 100644 --- a/include/hw/arm/smmuv3.h +++ b/include/hw/arm/smmuv3.h @@ -77,7 +77,7 @@ struct SMMUv3Class { /*< public >*/ DeviceRealize parent_realize; - DeviceReset parent_reset; + ResettablePhases parent_phases; }; #define TYPE_ARM_SMMUV3 "arm-smmuv3" diff --git a/include/hw/arm/virt.h b/include/hw/arm/virt.h index 6ec479ca2b..c7dd59d7f1 100644 --- a/include/hw/arm/virt.h +++ b/include/hw/arm/virt.h @@ -125,6 +125,7 @@ struct VirtMachineClass { bool no_pmu; bool claim_edge_triggered_timers; bool smbios_old_sys_ver; + bool no_highmem_compact; bool no_highmem_ecam; bool no_ged; /* Machines < 4.2 have no support for ACPI GED device */ bool kvm_no_adjvtime; @@ -144,6 +145,7 @@ struct VirtMachineState { PFlashCFI01 *flash[2]; bool secure; bool highmem; + bool highmem_compact; bool highmem_ecam; bool highmem_mmio; bool highmem_redists; diff --git a/include/hw/input/ps2.h b/include/hw/input/ps2.h index ff777582cd..cd61a634c3 100644 --- a/include/hw/input/ps2.h +++ b/include/hw/input/ps2.h @@ -36,7 +36,7 @@ struct PS2DeviceClass { SysBusDeviceClass parent_class; - DeviceReset parent_reset; + ResettablePhases parent_phases; }; /* diff --git a/include/hw/misc/mos6522.h b/include/hw/misc/mos6522.h index 0bc22a8395..05872fffc9 100644 --- a/include/hw/misc/mos6522.h +++ b/include/hw/misc/mos6522.h @@ -157,7 +157,7 @@ 
OBJECT_DECLARE_TYPE(MOS6522State, MOS6522DeviceClass, MOS6522) struct MOS6522DeviceClass { DeviceClass parent_class; - DeviceReset parent_reset; + ResettablePhases parent_phases; void (*portB_write)(MOS6522State *dev); void (*portA_write)(MOS6522State *dev); /* These are used to influence the CUDA MacOS timebase calibration */ diff --git a/include/hw/misc/xlnx-zynqmp-apu-ctrl.h b/include/hw/misc/xlnx-zynqmp-apu-ctrl.h index b8ca9434af..c3bf3c1583 100644 --- a/include/hw/misc/xlnx-zynqmp-apu-ctrl.h +++ b/include/hw/misc/xlnx-zynqmp-apu-ctrl.h @@ -13,7 +13,7 @@ #include "hw/sysbus.h" #include "hw/register.h" -#include "target/arm/cpu.h" +#include "target/arm/cpu-qom.h" #define TYPE_XLNX_ZYNQMP_APU_CTRL "xlnx.apu-ctrl" OBJECT_DECLARE_SIMPLE_TYPE(XlnxZynqMPAPUCtrl, XLNX_ZYNQMP_APU_CTRL) diff --git a/include/hw/pci/pci.h b/include/hw/pci/pci.h index 6ccaaf5154..06e2d5f889 100644 --- a/include/hw/pci/pci.h +++ b/include/hw/pci/pci.h @@ -921,11 +921,8 @@ PCI_DMA_DEFINE_LDST(q_be, q_be, 64); static inline void *pci_dma_map(PCIDevice *dev, dma_addr_t addr, dma_addr_t *plen, DMADirection dir) { - void *buf; - - buf = dma_memory_map(pci_get_address_space(dev), addr, plen, dir, - MEMTXATTRS_UNSPECIFIED); - return buf; + return dma_memory_map(pci_get_address_space(dev), addr, plen, dir, + MEMTXATTRS_UNSPECIFIED); } static inline void pci_dma_unmap(PCIDevice *dev, void *buffer, dma_addr_t len, diff --git a/include/hw/pci/pcie_port.h b/include/hw/pci/pcie_port.h index 7b8193061a..d9b5d07504 100644 --- a/include/hw/pci/pcie_port.h +++ b/include/hw/pci/pcie_port.h @@ -80,7 +80,7 @@ DECLARE_CLASS_CHECKERS(PCIERootPortClass, PCIE_ROOT_PORT, struct PCIERootPortClass { PCIDeviceClass parent_class; DeviceRealize parent_realize; - DeviceReset parent_reset; + ResettablePhases parent_phases; uint8_t (*aer_vector)(const PCIDevice *dev); int (*interrupts_init)(PCIDevice *dev, Error **errp); diff --git a/include/hw/ppc/xics.h b/include/hw/ppc/xics.h index 00b80b08c2..95ead0dd7c 100644 --- a/include/hw/ppc/xics.h +++ b/include/hw/ppc/xics.h @@ -95,7 +95,7 @@ struct ICSStateClass { DeviceClass parent_class; DeviceRealize parent_realize; - DeviceReset parent_reset; + ResettablePhases parent_phases; void (*reject)(ICSState *s, uint32_t irq); void (*resend)(ICSState *s); diff --git a/include/hw/qdev-core.h b/include/hw/qdev-core.h index 785dd5a56e..35fddb19a6 100644 --- a/include/hw/qdev-core.h +++ b/include/hw/qdev-core.h @@ -744,32 +744,6 @@ int qdev_walk_children(DeviceState *dev, void *opaque); /** - * @qdev_reset_all: - * Reset @dev. See @qbus_reset_all() for more details. - * - * Note: This function is deprecated and will be removed when it becomes unused. - * Please use device_cold_reset() now. - */ -void qdev_reset_all(DeviceState *dev); -void qdev_reset_all_fn(void *opaque); - -/** - * @qbus_reset_all: - * @bus: Bus to be reset. - * - * Reset @bus and perform a bus-level ("hard") reset of all devices connected - * to it, including recursive processing of all buses below @bus itself. A - * hard reset means that qbus_reset_all will reset all state of the device. - * For PCI devices, for example, this will include the base address registers - * or configuration space. - * - * Note: This function is deprecated and will be removed when it becomes unused. - * Please use bus_cold_reset() now. - */ -void qbus_reset_all(BusState *bus); -void qbus_reset_all_fn(void *opaque); - -/** * device_cold_reset: * Reset device @dev and perform a recursive processing using the resettable * interface. It triggers a RESET_TYPE_COLD. 
@@ -802,15 +776,6 @@ BusState *sysbus_get_default(void); char *qdev_get_fw_dev_path(DeviceState *dev); char *qdev_get_own_fw_dev_path_from_handler(BusState *bus, DeviceState *dev); -/** - * device_legacy_reset: - * - * Reset a single device (by calling the reset method). - * Note: This function is deprecated and will be removed when it becomes unused. - * Please use device_cold_reset() now. - */ -void device_legacy_reset(DeviceState *dev); - void device_class_set_props(DeviceClass *dc, Property *props); /** diff --git a/include/hw/s390x/s390-pci-bus.h b/include/hw/s390x/s390-pci-bus.h index 0605fcea24..e0a9f9385b 100644 --- a/include/hw/s390x/s390-pci-bus.h +++ b/include/hw/s390x/s390-pci-bus.h @@ -39,6 +39,9 @@ #define UID_CHECKING_ENABLED 0x01 #define ZPCI_DTSM 0x40 +/* zPCI Function Types */ +#define ZPCI_PFT_ISM 5 + OBJECT_DECLARE_SIMPLE_TYPE(S390pciState, S390_PCI_HOST_BRIDGE) OBJECT_DECLARE_SIMPLE_TYPE(S390PCIBus, S390_PCI_BUS) OBJECT_DECLARE_SIMPLE_TYPE(S390PCIBusDevice, S390_PCI_DEVICE) @@ -278,6 +281,7 @@ struct S390PCIIOMMU { uint64_t g_iota; uint64_t pba; uint64_t pal; + uint64_t max_dma_limit; GHashTable *iotlb; S390PCIDMACount *dma_limit; }; @@ -343,6 +347,7 @@ struct S390PCIBusDevice { uint16_t noi; uint16_t maxstbl; uint8_t sum; + uint8_t pft; S390PCIGroup *pci_group; ClpRspQueryPci zpci_fn; S390MsixInfo msix; @@ -351,6 +356,7 @@ struct S390PCIBusDevice { MemoryRegion msix_notify_mr; IndAddr *summary_ind; IndAddr *indicator; + Notifier shutdown_notifier; bool pci_unplug_request_processed; bool unplug_requested; bool interp; diff --git a/include/io/channel.h b/include/io/channel.h index c680ee7480..f1b7e05f81 100644 --- a/include/io/channel.h +++ b/include/io/channel.h @@ -350,7 +350,7 @@ int qio_channel_readv_all(QIOChannel *ioc, int qio_channel_writev_all(QIOChannel *ioc, const struct iovec *iov, size_t niov, - Error **erp); + Error **errp); /** * qio_channel_readv: diff --git a/include/monitor/monitor.h b/include/monitor/monitor.h index 737e750670..1e6f4c9bd7 100644 --- a/include/monitor/monitor.h +++ b/include/monitor/monitor.h @@ -46,8 +46,7 @@ int monitor_read_password(MonitorHMP *mon, ReadLineFunc *readline_func, void *opaque); AddfdInfo *monitor_fdset_add_fd(int fd, bool has_fdset_id, int64_t fdset_id, - bool has_opaque, const char *opaque, - Error **errp); + const char *opaque, Error **errp); int monitor_fdset_dup_fd_add(int64_t fdset_id, int flags); void monitor_fdset_dup_fd_remove(int dup_fd); int64_t monitor_fdset_dup_fd_find(int dup_fd); diff --git a/include/qemu/clang-tsa.h b/include/qemu/clang-tsa.h new file mode 100644 index 0000000000..ba06fb8c92 --- /dev/null +++ b/include/qemu/clang-tsa.h @@ -0,0 +1,114 @@ +#ifndef CLANG_TSA_H +#define CLANG_TSA_H + +/* + * Copyright 2018 Jarkko Hietaniemi <jhi@iki.fi> + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without + * limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +/* http://clang.llvm.org/docs/ThreadSafetyAnalysis.html + * + * TSA is available since clang 3.6-ish. + */ +#ifdef __clang__ +# define TSA(x) __attribute__((x)) +#else +# define TSA(x) /* No TSA, make TSA attributes no-ops. */ +#endif + +/* TSA_CAPABILITY() is used to annotate typedefs: + * + * typedef pthread_mutex_t TSA_CAPABILITY("mutex") tsa_mutex; + */ +#define TSA_CAPABILITY(x) TSA(capability(x)) + +/* TSA_GUARDED_BY() is used to annotate global variables, + * the data is guarded: + * + * Foo foo TSA_GUARDED_BY(mutex); + */ +#define TSA_GUARDED_BY(x) TSA(guarded_by(x)) + +/* TSA_PT_GUARDED_BY() is used to annotate global pointers, the data + * behind the pointer is guarded. + * + * Foo* ptr TSA_PT_GUARDED_BY(mutex); + */ +#define TSA_PT_GUARDED_BY(x) TSA(pt_guarded_by(x)) + +/* The TSA_REQUIRES() is used to annotate functions: the caller of the + * function MUST hold the resource, the function will NOT release it. + * + * More than one mutex may be specified, comma-separated. + * + * void Foo(void) TSA_REQUIRES(mutex); + */ +#define TSA_REQUIRES(...) TSA(requires_capability(__VA_ARGS__)) +#define TSA_REQUIRES_SHARED(...) TSA(requires_shared_capability(__VA_ARGS__)) + +/* TSA_EXCLUDES() is used to annotate functions: the caller of the + * function MUST NOT hold resource, the function first acquires the + * resource, and then releases it. + * + * More than one mutex may be specified, comma-separated. + * + * void Foo(void) TSA_EXCLUDES(mutex); + */ +#define TSA_EXCLUDES(...) TSA(locks_excluded(__VA_ARGS__)) + +/* TSA_ACQUIRE() is used to annotate functions: the caller of the + * function MUST NOT hold the resource, the function will acquire the + * resource, but NOT release it. + * + * More than one mutex may be specified, comma-separated. + * + * void Foo(void) TSA_ACQUIRE(mutex); + */ +#define TSA_ACQUIRE(...) TSA(acquire_capability(__VA_ARGS__)) +#define TSA_ACQUIRE_SHARED(...) TSA(acquire_shared_capability(__VA_ARGS__)) + +/* TSA_RELEASE() is used to annotate functions: the caller of the + * function MUST hold the resource, but the function will then release it. + * + * More than one mutex may be specified, comma-separated. + * + * void Foo(void) TSA_RELEASE(mutex); + */ +#define TSA_RELEASE(...) TSA(release_capability(__VA_ARGS__)) +#define TSA_RELEASE_SHARED(...) TSA(release_shared_capability(__VA_ARGS__)) + +/* TSA_NO_TSA is used to annotate functions. Use only when you need to. + * + * void Foo(void) TSA_NO_TSA; + */ +#define TSA_NO_TSA TSA(no_thread_safety_analysis) + +/* + * TSA_ASSERT() is used to annotate functions: This function will assert that + * the lock is held. When it returns, the caller of the function is assumed to + * already hold the resource. + * + * More than one mutex may be specified, comma-separated. + */ +#define TSA_ASSERT(...) TSA(assert_capability(__VA_ARGS__)) +#define TSA_ASSERT_SHARED(...) 
TSA(assert_shared_capability(__VA_ARGS__)) + +#endif /* #ifndef CLANG_TSA_H */ diff --git a/include/qemu/config-file.h b/include/qemu/config-file.h index 321e7c7c03..b82a778123 100644 --- a/include/qemu/config-file.h +++ b/include/qemu/config-file.h @@ -22,7 +22,7 @@ int qemu_read_config_file(const char *filename, QEMUConfigCB *f, Error **errp); /* Parse QDict options as a replacement for a config file (allowing multiple enumerated (0..(n-1)) configuration "sections") */ -void qemu_config_parse_qdict(QDict *options, QemuOptsList **lists, +bool qemu_config_parse_qdict(QDict *options, QemuOptsList **lists, Error **errp); #endif /* QEMU_CONFIG_FILE_H */ diff --git a/include/sysemu/block-backend-io.h b/include/sysemu/block-backend-io.h index 50f5aa2e07..7ec6d978d4 100644 --- a/include/sysemu/block-backend-io.h +++ b/include/sysemu/block-backend-io.h @@ -92,6 +92,15 @@ int coroutine_fn blk_co_copy_range(BlockBackend *blk_in, int64_t off_in, int64_t bytes, BdrvRequestFlags read_flags, BdrvRequestFlags write_flags); +int coroutine_fn blk_co_block_status_above(BlockBackend *blk, + BlockDriverState *base, + int64_t offset, int64_t bytes, + int64_t *pnum, int64_t *map, + BlockDriverState **file); +int coroutine_fn blk_co_is_allocated_above(BlockBackend *blk, + BlockDriverState *base, + bool include_base, int64_t offset, + int64_t bytes, int64_t *pnum); /* * "I/O or GS" API functions. These functions can run without @@ -101,77 +110,77 @@ int coroutine_fn blk_co_copy_range(BlockBackend *blk_in, int64_t off_in, * the "I/O or GS" API. */ -int generated_co_wrapper blk_pread(BlockBackend *blk, int64_t offset, - int64_t bytes, void *buf, - BdrvRequestFlags flags); +int co_wrapper_mixed blk_pread(BlockBackend *blk, int64_t offset, + int64_t bytes, void *buf, + BdrvRequestFlags flags); int coroutine_fn blk_co_pread(BlockBackend *blk, int64_t offset, int64_t bytes, void *buf, BdrvRequestFlags flags); -int generated_co_wrapper blk_preadv(BlockBackend *blk, int64_t offset, - int64_t bytes, QEMUIOVector *qiov, - BdrvRequestFlags flags); +int co_wrapper_mixed blk_preadv(BlockBackend *blk, int64_t offset, + int64_t bytes, QEMUIOVector *qiov, + BdrvRequestFlags flags); int coroutine_fn blk_co_preadv(BlockBackend *blk, int64_t offset, int64_t bytes, QEMUIOVector *qiov, BdrvRequestFlags flags); -int generated_co_wrapper blk_preadv_part(BlockBackend *blk, int64_t offset, - int64_t bytes, QEMUIOVector *qiov, - size_t qiov_offset, - BdrvRequestFlags flags); +int co_wrapper_mixed blk_preadv_part(BlockBackend *blk, int64_t offset, + int64_t bytes, QEMUIOVector *qiov, + size_t qiov_offset, + BdrvRequestFlags flags); int coroutine_fn blk_co_preadv_part(BlockBackend *blk, int64_t offset, int64_t bytes, QEMUIOVector *qiov, size_t qiov_offset, BdrvRequestFlags flags); -int generated_co_wrapper blk_pwrite(BlockBackend *blk, int64_t offset, - int64_t bytes, const void *buf, - BdrvRequestFlags flags); +int co_wrapper_mixed blk_pwrite(BlockBackend *blk, int64_t offset, + int64_t bytes, const void *buf, + BdrvRequestFlags flags); int coroutine_fn blk_co_pwrite(BlockBackend *blk, int64_t offset, int64_t bytes, const void *buf, BdrvRequestFlags flags); -int generated_co_wrapper blk_pwritev(BlockBackend *blk, int64_t offset, - int64_t bytes, QEMUIOVector *qiov, - BdrvRequestFlags flags); +int co_wrapper_mixed blk_pwritev(BlockBackend *blk, int64_t offset, + int64_t bytes, QEMUIOVector *qiov, + BdrvRequestFlags flags); int coroutine_fn blk_co_pwritev(BlockBackend *blk, int64_t offset, int64_t bytes, QEMUIOVector *qiov, BdrvRequestFlags 
flags); -int generated_co_wrapper blk_pwritev_part(BlockBackend *blk, int64_t offset, - int64_t bytes, QEMUIOVector *qiov, - size_t qiov_offset, - BdrvRequestFlags flags); +int co_wrapper_mixed blk_pwritev_part(BlockBackend *blk, int64_t offset, + int64_t bytes, QEMUIOVector *qiov, + size_t qiov_offset, + BdrvRequestFlags flags); int coroutine_fn blk_co_pwritev_part(BlockBackend *blk, int64_t offset, int64_t bytes, QEMUIOVector *qiov, size_t qiov_offset, BdrvRequestFlags flags); -int generated_co_wrapper blk_pwrite_compressed(BlockBackend *blk, - int64_t offset, int64_t bytes, - const void *buf); +int co_wrapper_mixed blk_pwrite_compressed(BlockBackend *blk, + int64_t offset, int64_t bytes, + const void *buf); int coroutine_fn blk_co_pwrite_compressed(BlockBackend *blk, int64_t offset, int64_t bytes, const void *buf); -int generated_co_wrapper blk_pwrite_zeroes(BlockBackend *blk, int64_t offset, - int64_t bytes, - BdrvRequestFlags flags); +int co_wrapper_mixed blk_pwrite_zeroes(BlockBackend *blk, int64_t offset, + int64_t bytes, + BdrvRequestFlags flags); int coroutine_fn blk_co_pwrite_zeroes(BlockBackend *blk, int64_t offset, int64_t bytes, BdrvRequestFlags flags); -int generated_co_wrapper blk_pdiscard(BlockBackend *blk, int64_t offset, - int64_t bytes); +int co_wrapper_mixed blk_pdiscard(BlockBackend *blk, int64_t offset, + int64_t bytes); int coroutine_fn blk_co_pdiscard(BlockBackend *blk, int64_t offset, int64_t bytes); -int generated_co_wrapper blk_flush(BlockBackend *blk); +int co_wrapper_mixed blk_flush(BlockBackend *blk); int coroutine_fn blk_co_flush(BlockBackend *blk); -int generated_co_wrapper blk_ioctl(BlockBackend *blk, unsigned long int req, - void *buf); +int co_wrapper_mixed blk_ioctl(BlockBackend *blk, unsigned long int req, + void *buf); int coroutine_fn blk_co_ioctl(BlockBackend *blk, unsigned long int req, void *buf); -int generated_co_wrapper blk_truncate(BlockBackend *blk, int64_t offset, - bool exact, PreallocMode prealloc, - BdrvRequestFlags flags, Error **errp); +int co_wrapper_mixed blk_truncate(BlockBackend *blk, int64_t offset, + bool exact, PreallocMode prealloc, + BdrvRequestFlags flags, Error **errp); int coroutine_fn blk_co_truncate(BlockBackend *blk, int64_t offset, bool exact, PreallocMode prealloc, BdrvRequestFlags flags, Error **errp); diff --git a/iothread.c b/iothread.c index 529194a566..3862a64471 100644 --- a/iothread.c +++ b/iothread.c @@ -155,8 +155,8 @@ static void iothread_init_gcontext(IOThread *iothread) static void iothread_set_aio_context_params(EventLoopBase *base, Error **errp) { - IOThread *iothread = IOTHREAD(base); ERRP_GUARD(); + IOThread *iothread = IOTHREAD(base); if (!iothread->ctx) { return; @@ -156,8 +156,7 @@ static JobInfo *job_query_single_locked(Job *job, Error **errp) .status = job->status, .current_progress = progress_current, .total_progress = progress_total, - .has_error = !!job->err, - .error = job->err ? \ + .error = job->err ? 
g_strdup(error_get_pretty(job->err)) : NULL, }; diff --git a/linux-user/syscall.c b/linux-user/syscall.c index 24b25759be..1f8c10f8ef 100644 --- a/linux-user/syscall.c +++ b/linux-user/syscall.c @@ -5471,7 +5471,7 @@ static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp, for (i = 0; i < se->nb_fields; i++) { if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) { assert(*field_types == TYPE_PTRVOID); - target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]); + target_rt_dev_ptr = argptr + src_offsets[i]; host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]); if (*target_rt_dev_ptr != 0) { *host_rt_dev_ptr = (unsigned long)lock_user_string( diff --git a/migration/block-dirty-bitmap.c b/migration/block-dirty-bitmap.c index 9aba7d9c22..283017d7d3 100644 --- a/migration/block-dirty-bitmap.c +++ b/migration/block-dirty-bitmap.c @@ -551,7 +551,7 @@ static int add_bitmaps_to_list(DBMSaveState *s, BlockDriverState *bs, } bitmap_alias = bmap_inner->alias; - if (bmap_inner->has_transform) { + if (bmap_inner->transform) { bitmap_transform = bmap_inner->transform; } } else { @@ -821,7 +821,7 @@ static int dirty_bitmap_load_start(QEMUFile *f, DBMLoadState *s) } if (s->bmap_inner && - s->bmap_inner->has_transform && + s->bmap_inner->transform && s->bmap_inner->transform->has_persistent) { persistent = s->bmap_inner->transform->persistent; } else { diff --git a/migration/colo.c b/migration/colo.c index 2b71722fd6..232c8d44b1 100644 --- a/migration/colo.c +++ b/migration/colo.c @@ -250,7 +250,6 @@ ReplicationStatus *qmp_query_xen_replication_status(Error **errp) replication_get_error_all(&err); if (err) { s->error = true; - s->has_desc = true; s->desc = g_strdup(error_get_pretty(err)); } else { s->error = false; diff --git a/migration/dirtyrate.c b/migration/dirtyrate.c index d6f1e01a70..4bfb97fc68 100644 --- a/migration/dirtyrate.c +++ b/migration/dirtyrate.c @@ -111,7 +111,6 @@ static void global_dirty_log_sync(unsigned int flag, bool one_shot) static DirtyPageRecord *vcpu_dirty_stat_alloc(VcpuStat *stat) { CPUState *cpu; - DirtyPageRecord *records; int nvcpu = 0; CPU_FOREACH(cpu) { @@ -121,9 +120,7 @@ static DirtyPageRecord *vcpu_dirty_stat_alloc(VcpuStat *stat) stat->nvcpu = nvcpu; stat->rates = g_new0(DirtyRateVcpu, nvcpu); - records = g_new0(DirtyPageRecord, nvcpu); - - return records; + return g_new0(DirtyPageRecord, nvcpu); } static void vcpu_dirty_stat_collect(VcpuStat *stat, @@ -473,7 +470,6 @@ find_block_matched(RAMBlock *block, int count, struct RamblockDirtyInfo *infos) { int i; - struct RamblockDirtyInfo *matched; for (i = 0; i < count; i++) { if (!strcmp(infos[i].idstr, qemu_ram_get_idstr(block))) { @@ -492,9 +488,7 @@ find_block_matched(RAMBlock *block, int count, return NULL; } - matched = &infos[i]; - - return matched; + return &infos[i]; } static bool compare_page_hash_info(struct RamblockDirtyInfo *info, diff --git a/migration/migration.c b/migration/migration.c index f485eea5fb..52b5d39244 100644 --- a/migration/migration.c +++ b/migration/migration.c @@ -918,11 +918,8 @@ MigrationParameters *qmp_query_migrate_parameters(Error **errp) params->cpu_throttle_increment = s->parameters.cpu_throttle_increment; params->has_cpu_throttle_tailslow = true; params->cpu_throttle_tailslow = s->parameters.cpu_throttle_tailslow; - params->has_tls_creds = true; params->tls_creds = g_strdup(s->parameters.tls_creds); - params->has_tls_hostname = true; params->tls_hostname = g_strdup(s->parameters.tls_hostname); - params->has_tls_authz = true; params->tls_authz = 
g_strdup(s->parameters.tls_authz ? s->parameters.tls_authz : ""); params->has_max_bandwidth = true; @@ -1047,15 +1044,14 @@ static void populate_ram_info(MigrationInfo *info, MigrationState *s) { size_t page_size = qemu_target_page_size(); - info->has_ram = true; info->ram = g_malloc0(sizeof(*info->ram)); - info->ram->transferred = ram_counters.transferred; + info->ram->transferred = stat64_get(&ram_atomic_counters.transferred); info->ram->total = ram_bytes_total(); - info->ram->duplicate = ram_counters.duplicate; + info->ram->duplicate = stat64_get(&ram_atomic_counters.duplicate); /* legacy value. It is not used anymore */ info->ram->skipped = 0; - info->ram->normal = ram_counters.normal; - info->ram->normal_bytes = ram_counters.normal * page_size; + info->ram->normal = stat64_get(&ram_atomic_counters.normal); + info->ram->normal_bytes = info->ram->normal * page_size; info->ram->mbps = s->mbps; info->ram->dirty_sync_count = ram_counters.dirty_sync_count; info->ram->dirty_sync_missed_zero_copy = @@ -1066,10 +1062,9 @@ static void populate_ram_info(MigrationInfo *info, MigrationState *s) info->ram->pages_per_second = s->pages_per_second; info->ram->precopy_bytes = ram_counters.precopy_bytes; info->ram->downtime_bytes = ram_counters.downtime_bytes; - info->ram->postcopy_bytes = ram_counters.postcopy_bytes; + info->ram->postcopy_bytes = stat64_get(&ram_atomic_counters.postcopy_bytes); if (migrate_use_xbzrle()) { - info->has_xbzrle_cache = true; info->xbzrle_cache = g_malloc0(sizeof(*info->xbzrle_cache)); info->xbzrle_cache->cache_size = migrate_xbzrle_cache_size(); info->xbzrle_cache->bytes = xbzrle_counters.bytes; @@ -1081,7 +1076,6 @@ static void populate_ram_info(MigrationInfo *info, MigrationState *s) } if (migrate_use_compression()) { - info->has_compression = true; info->compression = g_malloc0(sizeof(*info->compression)); info->compression->pages = compression_counters.pages; info->compression->busy = compression_counters.busy; @@ -1106,7 +1100,6 @@ static void populate_ram_info(MigrationInfo *info, MigrationState *s) static void populate_disk_info(MigrationInfo *info) { if (blk_mig_active()) { - info->has_disk = true; info->disk = g_malloc0(sizeof(*info->disk)); info->disk->transferred = blk_mig_bytes_transferred(); info->disk->remaining = blk_mig_bytes_remaining(); @@ -1171,7 +1164,6 @@ static void fill_source_migration_info(MigrationInfo *info) case MIGRATION_STATUS_FAILED: info->has_status = true; if (s->error) { - info->has_error_desc = true; info->error_desc = g_strdup(error_get_pretty(s->error)); } break; @@ -1575,7 +1567,7 @@ static bool migrate_params_check(MigrationParameters *params, Error **errp) #ifdef CONFIG_LINUX if (migrate_use_zero_copy_send() && ((params->has_multifd_compression && params->multifd_compression) || - (params->has_tls_creds && params->tls_creds && *params->tls_creds))) { + (params->tls_creds && *params->tls_creds))) { error_setg(errp, "Zero copy only available for non-compressed non-TLS multifd migration"); return false; @@ -1624,12 +1616,12 @@ static void migrate_params_test_apply(MigrateSetParameters *params, dest->cpu_throttle_tailslow = params->cpu_throttle_tailslow; } - if (params->has_tls_creds) { + if (params->tls_creds) { assert(params->tls_creds->type == QTYPE_QSTRING); dest->tls_creds = params->tls_creds->u.s; } - if (params->has_tls_hostname) { + if (params->tls_hostname) { assert(params->tls_hostname->type == QTYPE_QSTRING); dest->tls_hostname = params->tls_hostname->u.s; } @@ -1721,19 +1713,19 @@ static void 
migrate_params_apply(MigrateSetParameters *params, Error **errp) s->parameters.cpu_throttle_tailslow = params->cpu_throttle_tailslow; } - if (params->has_tls_creds) { + if (params->tls_creds) { g_free(s->parameters.tls_creds); assert(params->tls_creds->type == QTYPE_QSTRING); s->parameters.tls_creds = g_strdup(params->tls_creds->u.s); } - if (params->has_tls_hostname) { + if (params->tls_hostname) { g_free(s->parameters.tls_hostname); assert(params->tls_hostname->type == QTYPE_QSTRING); s->parameters.tls_hostname = g_strdup(params->tls_hostname->u.s); } - if (params->has_tls_authz) { + if (params->tls_authz) { g_free(s->parameters.tls_authz); assert(params->tls_authz->type == QTYPE_QSTRING); s->parameters.tls_authz = g_strdup(params->tls_authz->u.s); @@ -1810,14 +1802,14 @@ void qmp_migrate_set_parameters(MigrateSetParameters *params, Error **errp) MigrationParameters tmp; /* TODO Rewrite "" to null instead */ - if (params->has_tls_creds + if (params->tls_creds && params->tls_creds->type == QTYPE_QNULL) { qobject_unref(params->tls_creds->u.n); params->tls_creds->type = QTYPE_QSTRING; params->tls_creds->u.s = strdup(""); } /* TODO Rewrite "" to null instead */ - if (params->has_tls_hostname + if (params->tls_hostname && params->tls_hostname->type == QTYPE_QNULL) { qobject_unref(params->tls_hostname->u.n); params->tls_hostname->type = QTYPE_QSTRING; @@ -2848,8 +2840,11 @@ static int migrate_handle_rp_resume_ack(MigrationState *s, uint32_t value) return 0; } -/* Release ms->rp_state.from_dst_file in a safe way */ -static void migration_release_from_dst_file(MigrationState *ms) +/* + * Release ms->rp_state.from_dst_file (and postcopy_qemufile_src if + * existed) in a safe way. + */ +static void migration_release_dst_files(MigrationState *ms) { QEMUFile *file; @@ -2862,6 +2857,18 @@ static void migration_release_from_dst_file(MigrationState *ms) ms->rp_state.from_dst_file = NULL; } + /* + * Do the same to postcopy fast path socket too if there is. No + * locking needed because this qemufile should only be managed by + * return path thread. + */ + if (ms->postcopy_qemufile_src) { + migration_ioc_unregister_yank_from_file(ms->postcopy_qemufile_src); + qemu_file_shutdown(ms->postcopy_qemufile_src); + qemu_fclose(ms->postcopy_qemufile_src); + ms->postcopy_qemufile_src = NULL; + } + qemu_fclose(file); } @@ -3006,7 +3013,7 @@ out: * Maybe there is something we can do: it looks like a * network down issue, and we pause for a recovery. */ - migration_release_from_dst_file(ms); + migration_release_dst_files(ms); rp = NULL; if (postcopy_pause_return_path_thread(ms)) { /* @@ -3024,7 +3031,7 @@ out: } trace_source_return_path_thread_end(); - migration_release_from_dst_file(ms); + migration_release_dst_files(ms); rcu_unregister_thread(); return NULL; } @@ -3547,18 +3554,6 @@ static MigThrError postcopy_pause(MigrationState *s) qemu_file_shutdown(file); qemu_fclose(file); - /* - * Do the same to postcopy fast path socket too if there is. No - * locking needed because no racer as long as we do this before setting - * status to paused. 
- */ - if (s->postcopy_qemufile_src) { - migration_ioc_unregister_yank_from_file(s->postcopy_qemufile_src); - qemu_file_shutdown(s->postcopy_qemufile_src); - qemu_fclose(s->postcopy_qemufile_src); - s->postcopy_qemufile_src = NULL; - } - migrate_set_state(&s->state, s->state, MIGRATION_STATUS_POSTCOPY_PAUSED); @@ -4399,8 +4394,6 @@ static Property migration_properties[] = { DEFINE_PROP_SIZE("announce-step", MigrationState, parameters.announce_step, DEFAULT_MIGRATE_ANNOUNCE_STEP), - DEFINE_PROP_BOOL("x-postcopy-preempt-break-huge", MigrationState, - postcopy_preempt_break_huge, true), DEFINE_PROP_STRING("tls-creds", MigrationState, parameters.tls_creds), DEFINE_PROP_STRING("tls-hostname", MigrationState, parameters.tls_hostname), DEFINE_PROP_STRING("tls-authz", MigrationState, parameters.tls_authz), @@ -4492,9 +4485,6 @@ static void migration_instance_init(Object *obj) params->has_announce_max = true; params->has_announce_rounds = true; params->has_announce_step = true; - params->has_tls_creds = true; - params->has_tls_hostname = true; - params->has_tls_authz = true; qemu_sem_init(&ms->postcopy_pause_sem, 0); qemu_sem_init(&ms->postcopy_pause_rp_sem, 0); diff --git a/migration/migration.h b/migration/migration.h index cdad8aceaa..ae4ffd3454 100644 --- a/migration/migration.h +++ b/migration/migration.h @@ -340,13 +340,6 @@ struct MigrationState { bool send_configuration; /* Whether we send section footer during migration */ bool send_section_footer; - /* - * Whether we allow break sending huge pages when postcopy preempt is - * enabled. When disabled, we won't interrupt precopy within sending a - * host huge page, which is the old behavior of vanilla postcopy. - * NOTE: this parameter is ignored if postcopy preempt is not enabled. - */ - bool postcopy_preempt_break_huge; /* Needed by postcopy-pause state */ QemuSemaphore postcopy_pause_sem; diff --git a/migration/multifd-zlib.c b/migration/multifd-zlib.c index 18213a9513..37770248e1 100644 --- a/migration/multifd-zlib.c +++ b/migration/multifd-zlib.c @@ -116,7 +116,6 @@ static void zlib_send_cleanup(MultiFDSendParams *p, Error **errp) static int zlib_send_prepare(MultiFDSendParams *p, Error **errp) { struct zlib_data *z = p->data; - size_t page_size = qemu_target_page_size(); z_stream *zs = &z->zs; uint32_t out_size = 0; int ret; @@ -135,8 +134,8 @@ static int zlib_send_prepare(MultiFDSendParams *p, Error **errp) * with compression. zlib does not guarantee that this is safe, * therefore copy the page before calling deflate(). 
*/ - memcpy(z->buf, p->pages->block->host + p->normal[i], page_size); - zs->avail_in = page_size; + memcpy(z->buf, p->pages->block->host + p->normal[i], p->page_size); + zs->avail_in = p->page_size; zs->next_in = z->buf; zs->avail_out = available; @@ -242,12 +241,11 @@ static void zlib_recv_cleanup(MultiFDRecvParams *p) static int zlib_recv_pages(MultiFDRecvParams *p, Error **errp) { struct zlib_data *z = p->data; - size_t page_size = qemu_target_page_size(); z_stream *zs = &z->zs; uint32_t in_size = p->next_packet_size; /* we measure the change of total_out */ uint32_t out_size = zs->total_out; - uint32_t expected_size = p->normal_num * page_size; + uint32_t expected_size = p->normal_num * p->page_size; uint32_t flags = p->flags & MULTIFD_FLAG_COMPRESSION_MASK; int ret; int i; @@ -274,7 +272,7 @@ static int zlib_recv_pages(MultiFDRecvParams *p, Error **errp) flush = Z_SYNC_FLUSH; } - zs->avail_out = page_size; + zs->avail_out = p->page_size; zs->next_out = p->host + p->normal[i]; /* @@ -288,8 +286,8 @@ static int zlib_recv_pages(MultiFDRecvParams *p, Error **errp) do { ret = inflate(zs, flush); } while (ret == Z_OK && zs->avail_in - && (zs->total_out - start) < page_size); - if (ret == Z_OK && (zs->total_out - start) < page_size) { + && (zs->total_out - start) < p->page_size); + if (ret == Z_OK && (zs->total_out - start) < p->page_size) { error_setg(errp, "multifd %u: inflate generated too few output", p->id); return -1; diff --git a/migration/multifd-zstd.c b/migration/multifd-zstd.c index d788d309f2..f4a8e1ed1f 100644 --- a/migration/multifd-zstd.c +++ b/migration/multifd-zstd.c @@ -113,7 +113,6 @@ static void zstd_send_cleanup(MultiFDSendParams *p, Error **errp) static int zstd_send_prepare(MultiFDSendParams *p, Error **errp) { struct zstd_data *z = p->data; - size_t page_size = qemu_target_page_size(); int ret; uint32_t i; @@ -128,7 +127,7 @@ static int zstd_send_prepare(MultiFDSendParams *p, Error **errp) flush = ZSTD_e_flush; } z->in.src = p->pages->block->host + p->normal[i]; - z->in.size = page_size; + z->in.size = p->page_size; z->in.pos = 0; /* @@ -241,8 +240,7 @@ static int zstd_recv_pages(MultiFDRecvParams *p, Error **errp) { uint32_t in_size = p->next_packet_size; uint32_t out_size = 0; - size_t page_size = qemu_target_page_size(); - uint32_t expected_size = p->normal_num * page_size; + uint32_t expected_size = p->normal_num * p->page_size; uint32_t flags = p->flags & MULTIFD_FLAG_COMPRESSION_MASK; struct zstd_data *z = p->data; int ret; @@ -265,7 +263,7 @@ static int zstd_recv_pages(MultiFDRecvParams *p, Error **errp) for (i = 0; i < p->normal_num; i++) { z->out.dst = p->host + p->normal[i]; - z->out.size = page_size; + z->out.size = p->page_size; z->out.pos = 0; /* @@ -279,8 +277,8 @@ static int zstd_recv_pages(MultiFDRecvParams *p, Error **errp) do { ret = ZSTD_decompressStream(z->zds, &z->out, &z->in); } while (ret > 0 && (z->in.size - z->in.pos > 0) - && (z->out.pos < page_size)); - if (ret > 0 && (z->out.pos < page_size)) { + && (z->out.pos < p->page_size)); + if (ret > 0 && (z->out.pos < p->page_size)) { error_setg(errp, "multifd %u: decompressStream buffer too small", p->id); return -1; diff --git a/migration/multifd.c b/migration/multifd.c index 509bbbe3bf..000ca4d4ec 100644 --- a/migration/multifd.c +++ b/migration/multifd.c @@ -87,15 +87,14 @@ static void nocomp_send_cleanup(MultiFDSendParams *p, Error **errp) static int nocomp_send_prepare(MultiFDSendParams *p, Error **errp) { MultiFDPages_t *pages = p->pages; - size_t page_size = qemu_target_page_size(); for (int 
i = 0; i < p->normal_num; i++) { p->iov[p->iovs_num].iov_base = pages->block->host + p->normal[i]; - p->iov[p->iovs_num].iov_len = page_size; + p->iov[p->iovs_num].iov_len = p->page_size; p->iovs_num++; } - p->next_packet_size = p->normal_num * page_size; + p->next_packet_size = p->normal_num * p->page_size; p->flags |= MULTIFD_FLAG_NOCOMP; return 0; } @@ -139,7 +138,6 @@ static void nocomp_recv_cleanup(MultiFDRecvParams *p) static int nocomp_recv_pages(MultiFDRecvParams *p, Error **errp) { uint32_t flags = p->flags & MULTIFD_FLAG_COMPRESSION_MASK; - size_t page_size = qemu_target_page_size(); if (flags != MULTIFD_FLAG_NOCOMP) { error_setg(errp, "multifd %u: flags received %x flags expected %x", @@ -148,7 +146,7 @@ static int nocomp_recv_pages(MultiFDRecvParams *p, Error **errp) } for (int i = 0; i < p->normal_num; i++) { p->iov[i].iov_base = p->host + p->normal[i]; - p->iov[i].iov_len = page_size; + p->iov[i].iov_len = p->page_size; } return qio_channel_readv_all(p->c, p->iov, p->normal_num, errp); } @@ -281,8 +279,6 @@ static void multifd_send_fill_packet(MultiFDSendParams *p) static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp) { MultiFDPacket_t *packet = p->packet; - size_t page_size = qemu_target_page_size(); - uint32_t page_count = MULTIFD_PACKET_SIZE / page_size; RAMBlock *block; int i; @@ -309,10 +305,10 @@ static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp) * If we received a packet that is 100 times bigger than expected * just stop migration. It is a magic number. */ - if (packet->pages_alloc > page_count) { + if (packet->pages_alloc > p->page_count) { error_setg(errp, "multifd: received packet " "with size %u and expected a size of %u", - packet->pages_alloc, page_count) ; + packet->pages_alloc, p->page_count) ; return -1; } @@ -344,7 +340,7 @@ static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp) for (i = 0; i < p->normal_num; i++) { uint64_t offset = be64_to_cpu(packet->offset[i]); - if (offset > (block->used_length - page_size)) { + if (offset > (block->used_length - p->page_size)) { error_setg(errp, "multifd: offset too long %" PRIu64 " (max " RAM_ADDR_FMT ")", offset, block->used_length); @@ -433,11 +429,10 @@ static int multifd_send_pages(QEMUFile *f) p->packet_num = multifd_send_state->packet_num++; multifd_send_state->pages = p->pages; p->pages = pages; - transferred = ((uint64_t) pages->num) * qemu_target_page_size() - + p->packet_len; + transferred = ((uint64_t) pages->num) * p->page_size + p->packet_len; qemu_file_acct_rate_limit(f, transferred); ram_counters.multifd_bytes += transferred; - ram_counters.transferred += transferred; + stat64_add(&ram_atomic_counters.transferred, transferred); qemu_mutex_unlock(&p->mutex); qemu_sem_post(&p->sem); @@ -629,7 +624,7 @@ int multifd_send_sync_main(QEMUFile *f) p->pending_job++; qemu_file_acct_rate_limit(f, p->packet_len); ram_counters.multifd_bytes += p->packet_len; - ram_counters.transferred += p->packet_len; + stat64_add(&ram_atomic_counters.transferred, p->packet_len); qemu_mutex_unlock(&p->mutex); qemu_sem_post(&p->sem); @@ -947,6 +942,8 @@ int multifd_save_setup(Error **errp) /* We need one extra place for the packet header */ p->iov = g_new0(struct iovec, page_count + 1); p->normal = g_new0(ram_addr_t, page_count); + p->page_size = qemu_target_page_size(); + p->page_count = page_count; if (migrate_use_zero_copy_send()) { p->write_flags = QIO_CHANNEL_WRITE_FLAG_ZERO_COPY; @@ -1194,6 +1191,8 @@ int multifd_load_setup(Error **errp) p->name = 
g_strdup_printf("multifdrecv_%d", i); p->iov = g_new0(struct iovec, page_count); p->normal = g_new0(ram_addr_t, page_count); + p->page_count = page_count; + p->page_size = qemu_target_page_size(); } for (i = 0; i < thread_count; i++) { diff --git a/migration/multifd.h b/migration/multifd.h index 519f498643..e2802a9ce2 100644 --- a/migration/multifd.h +++ b/migration/multifd.h @@ -80,6 +80,10 @@ typedef struct { bool registered_yank; /* packet allocated len */ uint32_t packet_len; + /* guest page size */ + uint32_t page_size; + /* number of pages in a full packet */ + uint32_t page_count; /* multifd flags for sending ram */ int write_flags; @@ -143,6 +147,10 @@ typedef struct { QIOChannel *c; /* packet allocated len */ uint32_t packet_len; + /* guest page size */ + uint32_t page_size; + /* number of pages in a full packet */ + uint32_t page_count; /* syncs main thread and channels */ QemuSemaphore sem_sync; diff --git a/migration/ram.c b/migration/ram.c index 1338e47665..334309f1c6 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -85,6 +85,26 @@ XBZRLECacheStats xbzrle_counters; +/* used by the search for pages to send */ +struct PageSearchStatus { + /* The migration channel used for a specific host page */ + QEMUFile *pss_channel; + /* Last block from where we have sent data */ + RAMBlock *last_sent_block; + /* Current block being searched */ + RAMBlock *block; + /* Current page to search from */ + unsigned long page; + /* Set once we wrap around */ + bool complete_round; + /* Whether we're sending a host page */ + bool host_page_sending; + /* The start/end of current host page. Invalid if host_page_sending==false */ + unsigned long host_page_start; + unsigned long host_page_end; +}; +typedef struct PageSearchStatus PageSearchStatus; + /* struct contains XBZRLE cache and a static page used by the compression */ static struct { @@ -162,6 +182,11 @@ out: return ret; } +static bool postcopy_preempt_active(void) +{ + return migrate_postcopy_preempt() && migration_in_postcopy(); +} + bool ramblock_is_ignored(RAMBlock *block) { return !qemu_ram_is_migratable(block) || @@ -296,30 +321,17 @@ struct RAMSrcPageRequest { QSIMPLEQ_ENTRY(RAMSrcPageRequest) next_req; }; -typedef struct { - /* - * Cached ramblock/offset values if preempted. They're only meaningful if - * preempted==true below. - */ - RAMBlock *ram_block; - unsigned long ram_page; - /* - * Whether a postcopy preemption just happened. Will be reset after - * precopy recovered to background migration. - */ - bool preempted; -} PostcopyPreemptState; - /* State of RAM for migration */ struct RAMState { - /* QEMUFile used for this migration */ - QEMUFile *f; + /* + * PageSearchStatus structures for the channels when send pages. + * Protected by the bitmap_mutex. 
+ */ + PageSearchStatus pss[RAM_CHANNEL_MAX]; /* UFFD file descriptor, used in 'write-tracking' migration */ int uffdio_fd; /* Last block that we have visited searching for dirty pages */ RAMBlock *last_seen_block; - /* Last block from where we have sent data */ - RAMBlock *last_sent_block; /* Last dirty target page we have sent */ ram_addr_t last_page; /* last ram version we have seen */ @@ -357,21 +369,18 @@ struct RAMState { uint64_t target_page_count; /* number of dirty bits in the bitmap */ uint64_t migration_dirty_pages; - /* Protects modification of the bitmap and migration dirty pages */ + /* + * Protects: + * - dirty/clear bitmap + * - migration_dirty_pages + * - pss structures + */ QemuMutex bitmap_mutex; /* The RAMBlock used in the last src_page_requests */ RAMBlock *last_req_rb; /* Queue of outstanding page requests from the destination */ QemuMutex src_page_req_mutex; QSIMPLEQ_HEAD(, RAMSrcPageRequest) src_page_requests; - - /* Postcopy preemption informations */ - PostcopyPreemptState postcopy_preempt_state; - /* - * Current channel we're using on src VM. Only valid if postcopy-preempt - * is enabled. - */ - unsigned int postcopy_channel; }; typedef struct RAMState RAMState; @@ -379,11 +388,6 @@ static RAMState *ram_state; static NotifierWithReturnList precopy_notifier_list; -static void postcopy_preempt_reset(RAMState *rs) -{ - memset(&rs->postcopy_preempt_state, 0, sizeof(PostcopyPreemptState)); -} - /* Whether postcopy has queued requests? */ static bool postcopy_has_request(RAMState *rs) { @@ -420,18 +424,25 @@ uint64_t ram_bytes_remaining(void) 0; } +/* + * NOTE: not all stats in ram_counters are used in reality. See comments + * for struct MigrationAtomicStats. The ultimate result of ram migration + * counters will be a merged version with both ram_counters and the atomic + * fields in ram_atomic_counters. + */ MigrationStats ram_counters; +MigrationAtomicStats ram_atomic_counters; -static void ram_transferred_add(uint64_t bytes) +void ram_transferred_add(uint64_t bytes) { if (runstate_is_running()) { ram_counters.precopy_bytes += bytes; } else if (migration_in_postcopy()) { - ram_counters.postcopy_bytes += bytes; + stat64_add(&ram_atomic_counters.postcopy_bytes, bytes); } else { ram_counters.downtime_bytes += bytes; } - ram_counters.transferred += bytes; + stat64_add(&ram_atomic_counters.transferred, bytes); } void dirty_sync_missed_zero_copy(void) @@ -439,39 +450,6 @@ void dirty_sync_missed_zero_copy(void) ram_counters.dirty_sync_missed_zero_copy++; } -/* used by the search for pages to send */ -struct PageSearchStatus { - /* Current block being searched */ - RAMBlock *block; - /* Current page to search from */ - unsigned long page; - /* Set once we wrap around */ - bool complete_round; - /* - * [POSTCOPY-ONLY] Whether current page is explicitly requested by - * postcopy. When set, the request is "urgent" because the dest QEMU - * threads are waiting for us. - */ - bool postcopy_requested; - /* - * [POSTCOPY-ONLY] The target channel to use to send current page. - * - * Note: This may _not_ match with the value in postcopy_requested - * above. Let's imagine the case where the postcopy request is exactly - * the page that we're sending in progress during precopy. In this case - * we'll have postcopy_requested set to true but the target channel - * will be the precopy channel (so that we don't split brain on that - * specific page since the precopy channel already contains partial of - * that page data). 
- * - * Besides that specific use case, postcopy_target_channel should - * always be equal to postcopy_requested, because by default we send - * postcopy pages via postcopy preempt channel. - */ - bool postcopy_target_channel; -}; -typedef struct PageSearchStatus PageSearchStatus; - CompressionStats compression_counters; struct CompressParam { @@ -517,11 +495,28 @@ static QemuThread *decompress_threads; static QemuMutex decomp_done_lock; static QemuCond decomp_done_cond; +static int ram_save_host_page_urgent(PageSearchStatus *pss); + static bool do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block, ram_addr_t offset, uint8_t *source_buf); -static void postcopy_preempt_restore(RAMState *rs, PageSearchStatus *pss, - bool postcopy_requested); +/* NOTE: page is the PFN not real ram_addr_t. */ +static void pss_init(PageSearchStatus *pss, RAMBlock *rb, ram_addr_t page) +{ + pss->block = rb; + pss->page = page; + pss->complete_round = false; +} + +/* + * Check whether two PSSs are actively sending the same page. Return true + * if it is, false otherwise. + */ +static bool pss_overlap(PageSearchStatus *pss1, PageSearchStatus *pss2) +{ + return pss1->host_page_sending && pss2->host_page_sending && + (pss1->host_page_start == pss2->host_page_start); +} static void *do_data_compress(void *opaque) { @@ -647,28 +642,30 @@ exit: * * Returns the number of bytes written * - * @f: QEMUFile where to send the data + * @pss: current PSS channel status * @block: block that contains the page we want to send * @offset: offset inside the block for the page * in the lower bits, it contains flags */ -static size_t save_page_header(RAMState *rs, QEMUFile *f, RAMBlock *block, +static size_t save_page_header(PageSearchStatus *pss, RAMBlock *block, ram_addr_t offset) { size_t size, len; + bool same_block = (block == pss->last_sent_block); + QEMUFile *f = pss->pss_channel; - if (block == rs->last_sent_block) { + if (same_block) { offset |= RAM_SAVE_FLAG_CONTINUE; } qemu_put_be64(f, offset); size = 8; - if (!(offset & RAM_SAVE_FLAG_CONTINUE)) { + if (!same_block) { len = strlen(block->idstr); qemu_put_byte(f, len); qemu_put_buffer(f, (uint8_t *)block->idstr, len); size += 1 + len; - rs->last_sent_block = block; + pss->last_sent_block = block; } return size; } @@ -719,7 +716,7 @@ void mig_throttle_counter_reset(void) rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); rs->num_dirty_pages_period = 0; - rs->bytes_xfer_prev = ram_counters.transferred; + rs->bytes_xfer_prev = stat64_get(&ram_atomic_counters.transferred); } /** @@ -736,10 +733,6 @@ void mig_throttle_counter_reset(void) */ static void xbzrle_cache_zero_page(RAMState *rs, ram_addr_t current_addr) { - if (!rs->xbzrle_enabled) { - return; - } - /* We don't care if this fails to allocate a new cache page * as long as it updated an old one */ cache_insert(XBZRLE.cache, current_addr, XBZRLE.zero_target_page, @@ -756,17 +749,19 @@ static void xbzrle_cache_zero_page(RAMState *rs, ram_addr_t current_addr) * -1 means that xbzrle would be longer than normal * * @rs: current RAM state + * @pss: current PSS channel * @current_data: pointer to the address of the page contents * @current_addr: addr of the page * @block: block that contains the page we want to send * @offset: offset inside the block for the page */ -static int save_xbzrle_page(RAMState *rs, uint8_t **current_data, - ram_addr_t current_addr, RAMBlock *block, - ram_addr_t offset) +static int save_xbzrle_page(RAMState *rs, PageSearchStatus *pss, + uint8_t **current_data, ram_addr_t 
current_addr, + RAMBlock *block, ram_addr_t offset) { int encoded_len = 0, bytes_xbzrle; uint8_t *prev_cached_page; + QEMUFile *file = pss->pss_channel; if (!cache_is_cached(XBZRLE.cache, current_addr, ram_counters.dirty_sync_count)) { @@ -831,11 +826,11 @@ static int save_xbzrle_page(RAMState *rs, uint8_t **current_data, } /* Send XBZRLE based compressed page */ - bytes_xbzrle = save_page_header(rs, rs->f, block, + bytes_xbzrle = save_page_header(pss, block, offset | RAM_SAVE_FLAG_XBZRLE); - qemu_put_byte(rs->f, ENCODING_FLAG_XBZRLE); - qemu_put_be16(rs->f, encoded_len); - qemu_put_buffer(rs->f, XBZRLE.encoded_buf, encoded_len); + qemu_put_byte(file, ENCODING_FLAG_XBZRLE); + qemu_put_be16(file, encoded_len); + qemu_put_buffer(file, XBZRLE.encoded_buf, encoded_len); bytes_xbzrle += encoded_len + 1 + 2; /* * Like compressed_size (please see update_compress_thread_counts), @@ -849,26 +844,38 @@ static int save_xbzrle_page(RAMState *rs, uint8_t **current_data, } /** - * migration_bitmap_find_dirty: find the next dirty page from start + * pss_find_next_dirty: find the next dirty page of current ramblock * - * Returns the page offset within memory region of the start of a dirty page + * This function updates pss->page to point to the next dirty page index + * within the ramblock to migrate, or the end of ramblock when nothing + * found. Note that when pss->host_page_sending==true it means we're + * during sending a host page, so we won't look for dirty page that is + * outside the host page boundary. * - * @rs: current RAM state - * @rb: RAMBlock where to search for dirty pages - * @start: page where we start the search + * @pss: the current page search status */ -static inline -unsigned long migration_bitmap_find_dirty(RAMState *rs, RAMBlock *rb, - unsigned long start) +static void pss_find_next_dirty(PageSearchStatus *pss) { + RAMBlock *rb = pss->block; unsigned long size = rb->used_length >> TARGET_PAGE_BITS; unsigned long *bitmap = rb->bmap; if (ramblock_is_ignored(rb)) { - return size; + /* Points directly to the end, so we know no dirty page */ + pss->page = size; + return; + } + + /* + * If during sending a host page, only look for dirty pages within the + * current host page being send. 
+ */ + if (pss->host_page_sending) { + assert(pss->host_page_end); + size = MIN(size, pss->host_page_end); } - return find_next_bit(bitmap, size, start); + pss->page = find_next_bit(bitmap, size, pss->page); } static void migration_clear_memory_region_dirty_bitmap(RAMBlock *rb, @@ -1083,8 +1090,9 @@ uint64_t ram_pagesize_summary(void) uint64_t ram_get_total_transferred_pages(void) { - return ram_counters.normal + ram_counters.duplicate + - compression_counters.pages + xbzrle_counters.pages; + return stat64_get(&ram_atomic_counters.normal) + + stat64_get(&ram_atomic_counters.duplicate) + + compression_counters.pages + xbzrle_counters.pages; } static void migration_update_rates(RAMState *rs, int64_t end_time) @@ -1143,8 +1151,8 @@ static void migration_trigger_throttle(RAMState *rs) { MigrationState *s = migrate_get_current(); uint64_t threshold = s->parameters.throttle_trigger_threshold; - - uint64_t bytes_xfer_period = ram_counters.transferred - rs->bytes_xfer_prev; + uint64_t bytes_xfer_period = + stat64_get(&ram_atomic_counters.transferred) - rs->bytes_xfer_prev; uint64_t bytes_dirty_period = rs->num_dirty_pages_period * TARGET_PAGE_SIZE; uint64_t bytes_dirty_threshold = bytes_xfer_period * threshold / 100; @@ -1207,7 +1215,7 @@ static void migration_bitmap_sync(RAMState *rs) /* reset period counters */ rs->time_last_bitmap_sync = end_time; rs->num_dirty_pages_period = 0; - rs->bytes_xfer_prev = ram_counters.transferred; + rs->bytes_xfer_prev = stat64_get(&ram_atomic_counters.transferred); } if (migrate_use_events()) { qapi_event_send_migration_pass(ram_counters.dirty_sync_count); @@ -1234,7 +1242,7 @@ static void migration_bitmap_sync_precopy(RAMState *rs) } } -static void ram_release_page(const char *rbname, uint64_t offset) +void ram_release_page(const char *rbname, uint64_t offset) { if (!migrate_release_ram() || !migration_in_postcopy()) { return; @@ -1249,19 +1257,19 @@ static void ram_release_page(const char *rbname, uint64_t offset) * Returns the size of data written to the file, 0 means the page is not * a zero page * - * @rs: current RAM state - * @file: the file where the data is saved + * @pss: current PSS channel * @block: block that contains the page we want to send * @offset: offset inside the block for the page */ -static int save_zero_page_to_file(RAMState *rs, QEMUFile *file, +static int save_zero_page_to_file(PageSearchStatus *pss, RAMBlock *block, ram_addr_t offset) { uint8_t *p = block->host + offset; + QEMUFile *file = pss->pss_channel; int len = 0; if (buffer_is_zero(p, TARGET_PAGE_SIZE)) { - len += save_page_header(rs, file, block, offset | RAM_SAVE_FLAG_ZERO); + len += save_page_header(pss, block, offset | RAM_SAVE_FLAG_ZERO); qemu_put_byte(file, 0); len += 1; ram_release_page(block->idstr, offset); @@ -1274,16 +1282,17 @@ static int save_zero_page_to_file(RAMState *rs, QEMUFile *file, * * Returns the number of pages written. 
* - * @rs: current RAM state + * @pss: current PSS channel * @block: block that contains the page we want to send * @offset: offset inside the block for the page */ -static int save_zero_page(RAMState *rs, RAMBlock *block, ram_addr_t offset) +static int save_zero_page(PageSearchStatus *pss, RAMBlock *block, + ram_addr_t offset) { - int len = save_zero_page_to_file(rs, rs->f, block, offset); + int len = save_zero_page_to_file(pss, block, offset); if (len) { - ram_counters.duplicate++; + stat64_add(&ram_atomic_counters.duplicate, 1); ram_transferred_add(len); return 1; } @@ -1297,15 +1306,15 @@ static int save_zero_page(RAMState *rs, RAMBlock *block, ram_addr_t offset) * * Return true if the pages has been saved, otherwise false is returned. */ -static bool control_save_page(RAMState *rs, RAMBlock *block, ram_addr_t offset, - int *pages) +static bool control_save_page(PageSearchStatus *pss, RAMBlock *block, + ram_addr_t offset, int *pages) { uint64_t bytes_xmit = 0; int ret; *pages = -1; - ret = ram_control_save_page(rs->f, block->offset, offset, TARGET_PAGE_SIZE, - &bytes_xmit); + ret = ram_control_save_page(pss->pss_channel, block->offset, offset, + TARGET_PAGE_SIZE, &bytes_xmit); if (ret == RAM_SAVE_CONTROL_NOT_SUPP) { return false; } @@ -1320,9 +1329,9 @@ static bool control_save_page(RAMState *rs, RAMBlock *block, ram_addr_t offset, } if (bytes_xmit > 0) { - ram_counters.normal++; + stat64_add(&ram_atomic_counters.normal, 1); } else if (bytes_xmit == 0) { - ram_counters.duplicate++; + stat64_add(&ram_atomic_counters.duplicate, 1); } return true; @@ -1333,26 +1342,28 @@ static bool control_save_page(RAMState *rs, RAMBlock *block, ram_addr_t offset, * * Returns the number of pages written. * - * @rs: current RAM state + * @pss: current PSS channel * @block: block that contains the page we want to send * @offset: offset inside the block for the page * @buf: the page to be sent * @async: send to page asyncly */ -static int save_normal_page(RAMState *rs, RAMBlock *block, ram_addr_t offset, - uint8_t *buf, bool async) +static int save_normal_page(PageSearchStatus *pss, RAMBlock *block, + ram_addr_t offset, uint8_t *buf, bool async) { - ram_transferred_add(save_page_header(rs, rs->f, block, + QEMUFile *file = pss->pss_channel; + + ram_transferred_add(save_page_header(pss, block, offset | RAM_SAVE_FLAG_PAGE)); if (async) { - qemu_put_buffer_async(rs->f, buf, TARGET_PAGE_SIZE, + qemu_put_buffer_async(file, buf, TARGET_PAGE_SIZE, migrate_release_ram() && migration_in_postcopy()); } else { - qemu_put_buffer(rs->f, buf, TARGET_PAGE_SIZE); + qemu_put_buffer(file, buf, TARGET_PAGE_SIZE); } ram_transferred_add(TARGET_PAGE_SIZE); - ram_counters.normal++; + stat64_add(&ram_atomic_counters.normal, 1); return 1; } @@ -1382,8 +1393,8 @@ static int ram_save_page(RAMState *rs, PageSearchStatus *pss) XBZRLE_cache_lock(); if (rs->xbzrle_enabled && !migration_in_postcopy()) { - pages = save_xbzrle_page(rs, &p, current_addr, block, - offset); + pages = save_xbzrle_page(rs, pss, &p, current_addr, + block, offset); if (!rs->last_stage) { /* Can't send this cached data async, since the cache page * might get updated before it gets to the wire @@ -1394,7 +1405,7 @@ static int ram_save_page(RAMState *rs, PageSearchStatus *pss) /* XBZRLE overflow or normal page */ if (pages == -1) { - pages = save_normal_page(rs, block, offset, p, send_async); + pages = save_normal_page(pss, block, offset, p, send_async); } XBZRLE_cache_unlock(); @@ -1402,13 +1413,13 @@ static int ram_save_page(RAMState *rs, PageSearchStatus *pss) 
return pages; } -static int ram_save_multifd_page(RAMState *rs, RAMBlock *block, +static int ram_save_multifd_page(QEMUFile *file, RAMBlock *block, ram_addr_t offset) { - if (multifd_queue_page(rs->f, block, offset) < 0) { + if (multifd_queue_page(file, block, offset) < 0) { return -1; } - ram_counters.normal++; + stat64_add(&ram_atomic_counters.normal, 1); return 1; } @@ -1417,14 +1428,15 @@ static bool do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block, ram_addr_t offset, uint8_t *source_buf) { RAMState *rs = ram_state; + PageSearchStatus *pss = &rs->pss[RAM_CHANNEL_PRECOPY]; uint8_t *p = block->host + offset; int ret; - if (save_zero_page_to_file(rs, f, block, offset)) { + if (save_zero_page_to_file(pss, block, offset)) { return true; } - save_page_header(rs, f, block, offset | RAM_SAVE_FLAG_COMPRESS_PAGE); + save_page_header(pss, block, offset | RAM_SAVE_FLAG_COMPRESS_PAGE); /* * copy it to a internal buffer to avoid it being modified by VM @@ -1446,7 +1458,7 @@ update_compress_thread_counts(const CompressParam *param, int bytes_xmit) ram_transferred_add(bytes_xmit); if (param->zero_page) { - ram_counters.duplicate++; + stat64_add(&ram_atomic_counters.duplicate, 1); return; } @@ -1459,6 +1471,7 @@ static bool save_page_use_compression(RAMState *rs); static void flush_compressed_data(RAMState *rs) { + MigrationState *ms = migrate_get_current(); int idx, len, thread_count; if (!save_page_use_compression(rs)) { @@ -1477,7 +1490,7 @@ static void flush_compressed_data(RAMState *rs) for (idx = 0; idx < thread_count; idx++) { qemu_mutex_lock(&comp_param[idx].mutex); if (!comp_param[idx].quit) { - len = qemu_put_qemu_file(rs->f, comp_param[idx].file); + len = qemu_put_qemu_file(ms->to_dst_file, comp_param[idx].file); /* * it's safe to fetch zero_page without holding comp_done_lock * as there is no further request submitted to the thread, @@ -1496,11 +1509,11 @@ static inline void set_compress_params(CompressParam *param, RAMBlock *block, param->offset = offset; } -static int compress_page_with_multi_thread(RAMState *rs, RAMBlock *block, - ram_addr_t offset) +static int compress_page_with_multi_thread(RAMBlock *block, ram_addr_t offset) { int idx, thread_count, bytes_xmit = -1, pages = -1; bool wait = migrate_compress_wait_thread(); + MigrationState *ms = migrate_get_current(); thread_count = migrate_compress_threads(); qemu_mutex_lock(&comp_done_lock); @@ -1508,7 +1521,8 @@ retry: for (idx = 0; idx < thread_count; idx++) { if (comp_param[idx].done) { comp_param[idx].done = false; - bytes_xmit = qemu_put_qemu_file(rs->f, comp_param[idx].file); + bytes_xmit = qemu_put_qemu_file(ms->to_dst_file, + comp_param[idx].file); qemu_mutex_lock(&comp_param[idx].mutex); set_compress_params(&comp_param[idx], block, offset); qemu_cond_signal(&comp_param[idx].cond); @@ -1544,14 +1558,9 @@ retry: */ static bool find_dirty_block(RAMState *rs, PageSearchStatus *pss, bool *again) { - /* - * This is not a postcopy requested page, mark it "not urgent", and use - * precopy channel to send it. 
- */ - pss->postcopy_requested = false; - pss->postcopy_target_channel = RAM_CHANNEL_PRECOPY; + /* Update pss->page for the next dirty bit in ramblock */ + pss_find_next_dirty(pss); - pss->page = migration_bitmap_find_dirty(rs, pss->block, pss->page); if (pss->complete_round && pss->block == rs->last_seen_block && pss->page >= rs->last_page) { /* @@ -1696,7 +1705,7 @@ static int ram_save_release_protection(RAMState *rs, PageSearchStatus *pss, uint64_t run_length = (pss->page - start_page) << TARGET_PAGE_BITS; /* Flush async buffers before un-protect. */ - qemu_fflush(rs->f); + qemu_fflush(pss->pss_channel); /* Un-protect memory range. */ res = uffd_change_protection(rs->uffdio_fd, page_address, run_length, false, false); @@ -1999,55 +2008,6 @@ void ram_write_tracking_stop(void) } #endif /* defined(__linux__) */ -/* - * Check whether two addr/offset of the ramblock falls onto the same host huge - * page. Returns true if so, false otherwise. - */ -static bool offset_on_same_huge_page(RAMBlock *rb, uint64_t addr1, - uint64_t addr2) -{ - size_t page_size = qemu_ram_pagesize(rb); - - addr1 = ROUND_DOWN(addr1, page_size); - addr2 = ROUND_DOWN(addr2, page_size); - - return addr1 == addr2; -} - -/* - * Whether a previous preempted precopy huge page contains current requested - * page? Returns true if so, false otherwise. - * - * This should really happen very rarely, because it means when we were sending - * during background migration for postcopy we're sending exactly the page that - * some vcpu got faulted on on dest node. When it happens, we probably don't - * need to do much but drop the request, because we know right after we restore - * the precopy stream it'll be serviced. It'll slightly affect the order of - * postcopy requests to be serviced (e.g. it'll be the same as we move current - * request to the end of the queue) but it shouldn't be a big deal. The most - * imporant thing is we can _never_ try to send a partial-sent huge page on the - * POSTCOPY channel again, otherwise that huge page will got "split brain" on - * two channels (PRECOPY, POSTCOPY). - */ -static bool postcopy_preempted_contains(RAMState *rs, RAMBlock *block, - ram_addr_t offset) -{ - PostcopyPreemptState *state = &rs->postcopy_preempt_state; - - /* No preemption at all? */ - if (!state->preempted) { - return false; - } - - /* Not even the same ramblock? */ - if (state->ram_block != block) { - return false; - } - - return offset_on_same_huge_page(block, offset, - state->ram_page << TARGET_PAGE_BITS); -} - /** * get_queued_page: unqueue a page from the postcopy requests * @@ -2087,20 +2047,7 @@ static bool get_queued_page(RAMState *rs, PageSearchStatus *pss) } while (block && !dirty); - if (block) { - /* See comment above postcopy_preempted_contains() */ - if (postcopy_preempted_contains(rs, block, offset)) { - trace_postcopy_preempt_hit(block->idstr, offset); - /* - * If what we preempted previously was exactly what we're - * requesting right now, restore the preempted precopy - * immediately, boosting its priority as it's requested by - * postcopy. - */ - postcopy_preempt_restore(rs, pss, true); - return true; - } - } else { + if (!block) { /* * Poll write faults too if background snapshot is enabled; that's * when we have vcpus got blocked by the write protected pages. @@ -2122,9 +2069,6 @@ static bool get_queued_page(RAMState *rs, PageSearchStatus *pss) * really rare. 
*/ pss->complete_round = false; - /* Mark it an urgent request, meanwhile using POSTCOPY channel */ - pss->postcopy_requested = true; - pss->postcopy_target_channel = RAM_CHANNEL_POSTCOPY; } return !!block; @@ -2202,6 +2146,56 @@ int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len) return -1; } + /* + * When with postcopy preempt, we send back the page directly in the + * rp-return thread. + */ + if (postcopy_preempt_active()) { + ram_addr_t page_start = start >> TARGET_PAGE_BITS; + size_t page_size = qemu_ram_pagesize(ramblock); + PageSearchStatus *pss = &ram_state->pss[RAM_CHANNEL_POSTCOPY]; + int ret = 0; + + qemu_mutex_lock(&rs->bitmap_mutex); + + pss_init(pss, ramblock, page_start); + /* + * Always use the preempt channel, and make sure it's there. It's + * safe to access without lock, because when rp-thread is running + * we should be the only one who operates on the qemufile + */ + pss->pss_channel = migrate_get_current()->postcopy_qemufile_src; + assert(pss->pss_channel); + + /* + * It must be either one or multiple of host page size. Just + * assert; if something wrong we're mostly split brain anyway. + */ + assert(len % page_size == 0); + while (len) { + if (ram_save_host_page_urgent(pss)) { + error_report("%s: ram_save_host_page_urgent() failed: " + "ramblock=%s, start_addr=0x"RAM_ADDR_FMT, + __func__, ramblock->idstr, start); + ret = -1; + break; + } + /* + * NOTE: after ram_save_host_page_urgent() succeeded, pss->page + * will automatically be moved and point to the next host page + * we're going to send, so no need to update here. + * + * Normally QEMU never sends >1 host page in requests, so + * logically we don't even need that as the loop should only + * run once, but just to be consistent. + */ + len -= page_size; + }; + qemu_mutex_unlock(&rs->bitmap_mutex); + + return ret; + } + struct RAMSrcPageRequest *new_entry = g_new0(struct RAMSrcPageRequest, 1); new_entry->rb = ramblock; @@ -2240,7 +2234,8 @@ static bool save_page_use_compression(RAMState *rs) * has been properly handled by compression, otherwise needs other * paths to handle it */ -static bool save_compress_page(RAMState *rs, RAMBlock *block, ram_addr_t offset) +static bool save_compress_page(RAMState *rs, PageSearchStatus *pss, + RAMBlock *block, ram_addr_t offset) { if (!save_page_use_compression(rs)) { return false; @@ -2256,12 +2251,12 @@ static bool save_compress_page(RAMState *rs, RAMBlock *block, ram_addr_t offset) * We post the fist page as normal page as compression will take * much CPU resource. 
*/ - if (block != rs->last_sent_block) { + if (block != pss->last_sent_block) { flush_compressed_data(rs); return false; } - if (compress_page_with_multi_thread(rs, block, offset) > 0) { + if (compress_page_with_multi_thread(block, offset) > 0) { return true; } @@ -2283,20 +2278,20 @@ static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss) ram_addr_t offset = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS; int res; - if (control_save_page(rs, block, offset, &res)) { + if (control_save_page(pss, block, offset, &res)) { return res; } - if (save_compress_page(rs, block, offset)) { + if (save_compress_page(rs, pss, block, offset)) { return 1; } - res = save_zero_page(rs, block, offset); + res = save_zero_page(pss, block, offset); if (res > 0) { /* Must let xbzrle know, otherwise a previous (now 0'd) cached * page would be stale */ - if (!save_page_use_compression(rs)) { + if (rs->xbzrle_enabled) { XBZRLE_cache_lock(); xbzrle_cache_zero_page(rs, block->offset + offset); XBZRLE_cache_unlock(); @@ -2311,133 +2306,97 @@ static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss) * still see partially copied pages which is data corruption. */ if (migrate_use_multifd() && !migration_in_postcopy()) { - return ram_save_multifd_page(rs, block, offset); + return ram_save_multifd_page(pss->pss_channel, block, offset); } return ram_save_page(rs, pss); } -static bool postcopy_needs_preempt(RAMState *rs, PageSearchStatus *pss) +/* Should be called before sending a host page */ +static void pss_host_page_prepare(PageSearchStatus *pss) { - MigrationState *ms = migrate_get_current(); - - /* Not enabled eager preempt? Then never do that. */ - if (!migrate_postcopy_preempt()) { - return false; - } + /* How many guest pages are there in one host page? */ + size_t guest_pfns = qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS; - /* If the user explicitly disabled breaking of huge page, skip */ - if (!ms->postcopy_preempt_break_huge) { - return false; - } + pss->host_page_sending = true; + pss->host_page_start = ROUND_DOWN(pss->page, guest_pfns); + pss->host_page_end = ROUND_UP(pss->page + 1, guest_pfns); +} - /* If the ramblock we're sending is a small page? Never bother. */ - if (qemu_ram_pagesize(pss->block) == TARGET_PAGE_SIZE) { - return false; - } +/* + * Whether the page pointed by PSS is within the host page being sent. + * Must be called after a previous pss_host_page_prepare(). + */ +static bool pss_within_range(PageSearchStatus *pss) +{ + ram_addr_t ram_addr; - /* Not in postcopy at all? */ - if (!migration_in_postcopy()) { - return false; - } + assert(pss->host_page_sending); - /* - * If we're already handling a postcopy request, don't preempt as this page - * has got the same high priority. - */ - if (pss->postcopy_requested) { + /* Over host-page boundary? */ + if (pss->page >= pss->host_page_end) { return false; } - /* If there's postcopy requests, then check it up! */ - return postcopy_has_request(rs); -} - -/* Returns true if we preempted precopy, false otherwise */ -static void postcopy_do_preempt(RAMState *rs, PageSearchStatus *pss) -{ - PostcopyPreemptState *p_state = &rs->postcopy_preempt_state; - - trace_postcopy_preempt_triggered(pss->block->idstr, pss->page); + ram_addr = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS; - /* - * Time to preempt precopy. Cache current PSS into preempt state, so that - * after handling the postcopy pages we can recover to it. 
We need to do - * so because the dest VM will have partial of the precopy huge page kept - * over in its tmp huge page caches; better move on with it when we can. - */ - p_state->ram_block = pss->block; - p_state->ram_page = pss->page; - p_state->preempted = true; + return offset_in_ramblock(pss->block, ram_addr); } -/* Whether we're preempted by a postcopy request during sending a huge page */ -static bool postcopy_preempt_triggered(RAMState *rs) +static void pss_host_page_finish(PageSearchStatus *pss) { - return rs->postcopy_preempt_state.preempted; + pss->host_page_sending = false; + /* This is not needed, but just to reset it */ + pss->host_page_start = pss->host_page_end = 0; } -static void postcopy_preempt_restore(RAMState *rs, PageSearchStatus *pss, - bool postcopy_requested) +/* + * Send an urgent host page specified by `pss'. Need to be called with + * bitmap_mutex held. + * + * Returns 0 if save host page succeeded, false otherwise. + */ +static int ram_save_host_page_urgent(PageSearchStatus *pss) { - PostcopyPreemptState *state = &rs->postcopy_preempt_state; - - assert(state->preempted); + bool page_dirty, sent = false; + RAMState *rs = ram_state; + int ret = 0; - pss->block = state->ram_block; - pss->page = state->ram_page; + trace_postcopy_preempt_send_host_page(pss->block->idstr, pss->page); + pss_host_page_prepare(pss); - /* Whether this is a postcopy request? */ - pss->postcopy_requested = postcopy_requested; /* - * When restoring a preempted page, the old data resides in PRECOPY - * slow channel, even if postcopy_requested is set. So always use - * PRECOPY channel here. + * If precopy is sending the same page, let it be done in precopy, or + * we could send the same page in two channels and none of them will + * receive the whole page. */ - pss->postcopy_target_channel = RAM_CHANNEL_PRECOPY; - - trace_postcopy_preempt_restored(pss->block->idstr, pss->page); - - /* Reset preempt state, most importantly, set preempted==false */ - postcopy_preempt_reset(rs); -} - -static void postcopy_preempt_choose_channel(RAMState *rs, PageSearchStatus *pss) -{ - MigrationState *s = migrate_get_current(); - unsigned int channel = pss->postcopy_target_channel; - QEMUFile *next; - - if (channel != rs->postcopy_channel) { - if (channel == RAM_CHANNEL_PRECOPY) { - next = s->to_dst_file; - } else { - next = s->postcopy_qemufile_src; - } - /* Update and cache the current channel */ - rs->f = next; - rs->postcopy_channel = channel; - - /* - * If channel switched, reset last_sent_block since the old sent block - * may not be on the same channel. - */ - rs->last_sent_block = NULL; - - trace_postcopy_preempt_switch_channel(channel); + if (pss_overlap(pss, &ram_state->pss[RAM_CHANNEL_PRECOPY])) { + trace_postcopy_preempt_hit(pss->block->idstr, + pss->page << TARGET_PAGE_BITS); + return 0; } - trace_postcopy_preempt_send_host_page(pss->block->idstr, pss->page); -} - -/* We need to make sure rs->f always points to the default channel elsewhere */ -static void postcopy_preempt_reset_channel(RAMState *rs) -{ - if (migrate_postcopy_preempt() && migration_in_postcopy()) { - rs->postcopy_channel = RAM_CHANNEL_PRECOPY; - rs->f = migrate_get_current()->to_dst_file; - trace_postcopy_preempt_reset_channel(); + do { + page_dirty = migration_bitmap_clear_dirty(rs, pss->block, pss->page); + + if (page_dirty) { + /* Be strict to return code; it must be 1, or what else? 
*/ + if (ram_save_target_page(rs, pss) != 1) { + error_report_once("%s: ram_save_target_page failed", __func__); + ret = -1; + goto out; + } + sent = true; + } + pss_find_next_dirty(pss); + } while (pss_within_range(pss)); +out: + pss_host_page_finish(pss); + /* For urgent requests, flush immediately if sent */ + if (sent) { + qemu_fflush(pss->pss_channel); } + return ret; } /** @@ -2448,9 +2407,14 @@ static void postcopy_preempt_reset_channel(RAMState *rs) * a host page in which case the remainder of the hostpage is sent. * Only dirty target pages are sent. Note that the host page size may * be a huge page for this block. + * * The saving stops at the boundary of the used_length of the block * if the RAMBlock isn't a multiple of the host page size. * + * The caller must be with ram_state.bitmap_mutex held to call this + * function. Note that this function can temporarily release the lock, but + * when the function is returned it'll make sure the lock is still held. + * * Returns the number of pages written or negative on error * * @rs: current RAM state @@ -2458,11 +2422,10 @@ static void postcopy_preempt_reset_channel(RAMState *rs) */ static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss) { + bool page_dirty, preempt_active = postcopy_preempt_active(); int tmppages, pages = 0; size_t pagesize_bits = qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS; - unsigned long hostpage_boundary = - QEMU_ALIGN_UP(pss->page + 1, pagesize_bits); unsigned long start_page = pss->page; int res; @@ -2471,51 +2434,49 @@ static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss) return 0; } - if (migrate_postcopy_preempt() && migration_in_postcopy()) { - postcopy_preempt_choose_channel(rs, pss); - } + /* Update host page boundary information */ + pss_host_page_prepare(pss); do { - if (postcopy_needs_preempt(rs, pss)) { - postcopy_do_preempt(rs, pss); - break; - } + page_dirty = migration_bitmap_clear_dirty(rs, pss->block, pss->page); /* Check the pages is dirty and if it is send it */ - if (migration_bitmap_clear_dirty(rs, pss->block, pss->page)) { - tmppages = ram_save_target_page(rs, pss); - if (tmppages < 0) { - return tmppages; - } - - pages += tmppages; + if (page_dirty) { /* - * Allow rate limiting to happen in the middle of huge pages if - * something is sent in the current iteration. + * Properly yield the lock only in postcopy preempt mode + * because both migration thread and rp-return thread can + * operate on the bitmaps. */ - if (pagesize_bits > 1 && tmppages > 0) { - migration_rate_limit(); + if (preempt_active) { + qemu_mutex_unlock(&rs->bitmap_mutex); + } + tmppages = ram_save_target_page(rs, pss); + if (tmppages >= 0) { + pages += tmppages; + /* + * Allow rate limiting to happen in the middle of huge pages if + * something is sent in the current iteration. 
+ */ + if (pagesize_bits > 1 && tmppages > 0) { + migration_rate_limit(); + } } + if (preempt_active) { + qemu_mutex_lock(&rs->bitmap_mutex); + } + } else { + tmppages = 0; } - pss->page = migration_bitmap_find_dirty(rs, pss->block, pss->page); - } while ((pss->page < hostpage_boundary) && - offset_in_ramblock(pss->block, - ((ram_addr_t)pss->page) << TARGET_PAGE_BITS)); - /* The offset we leave with is the min boundary of host page and block */ - pss->page = MIN(pss->page, hostpage_boundary); - /* - * When with postcopy preempt mode, flush the data as soon as possible for - * postcopy requests, because we've already sent a whole huge page, so the - * dst node should already have enough resource to atomically filling in - * the current missing page. - * - * More importantly, when using separate postcopy channel, we must do - * explicit flush or it won't flush until the buffer is full. - */ - if (migrate_postcopy_preempt() && pss->postcopy_requested) { - qemu_fflush(rs->f); - } + if (tmppages < 0) { + pss_host_page_finish(pss); + return tmppages; + } + + pss_find_next_dirty(pss); + } while (pss_within_range(pss)); + + pss_host_page_finish(pss); res = ram_save_release_protection(rs, pss, start_page); return (res < 0 ? res : pages); @@ -2536,7 +2497,7 @@ static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss) */ static int ram_find_and_save_block(RAMState *rs) { - PageSearchStatus pss; + PageSearchStatus *pss = &rs->pss[RAM_CHANNEL_PRECOPY]; int pages = 0; bool again, found; @@ -2557,35 +2518,24 @@ static int ram_find_and_save_block(RAMState *rs) rs->last_page = 0; } - pss.block = rs->last_seen_block; - pss.page = rs->last_page; - pss.complete_round = false; + pss_init(pss, rs->last_seen_block, rs->last_page); do { again = true; - found = get_queued_page(rs, &pss); + found = get_queued_page(rs, pss); if (!found) { - /* - * Recover previous precopy ramblock/offset if postcopy has - * preempted precopy. Otherwise find the next dirty bit. 
- */ - if (postcopy_preempt_triggered(rs)) { - postcopy_preempt_restore(rs, &pss, false); - found = true; - } else { - /* priority queue empty, so just search for something dirty */ - found = find_dirty_block(rs, &pss, &again); - } + /* priority queue empty, so just search for something dirty */ + found = find_dirty_block(rs, pss, &again); } if (found) { - pages = ram_save_host_page(rs, &pss); + pages = ram_save_host_page(rs, pss); } } while (!pages && again); - rs->last_seen_block = pss.block; - rs->last_page = pss.page; + rs->last_seen_block = pss->block; + rs->last_page = pss->page; return pages; } @@ -2595,9 +2545,9 @@ void acct_update_position(QEMUFile *f, size_t size, bool zero) uint64_t pages = size / TARGET_PAGE_SIZE; if (zero) { - ram_counters.duplicate += pages; + stat64_add(&ram_atomic_counters.duplicate, pages); } else { - ram_counters.normal += pages; + stat64_add(&ram_atomic_counters.normal, pages); ram_transferred_add(size); qemu_file_credit_transfer(f, size); } @@ -2699,13 +2649,16 @@ static void ram_save_cleanup(void *opaque) static void ram_state_reset(RAMState *rs) { + int i; + + for (i = 0; i < RAM_CHANNEL_MAX; i++) { + rs->pss[i].last_sent_block = NULL; + } + rs->last_seen_block = NULL; - rs->last_sent_block = NULL; rs->last_page = 0; rs->last_version = ram_list.version; rs->xbzrle_enabled = false; - postcopy_preempt_reset(rs); - rs->postcopy_channel = RAM_CHANNEL_PRECOPY; } #define MAX_WAIT 50 /* ms, half buffered_file limit */ @@ -2894,8 +2847,8 @@ void ram_postcopy_send_discard_bitmap(MigrationState *ms) migration_bitmap_sync(rs); /* Easiest way to make sure we don't resume in the middle of a host-page */ + rs->pss[RAM_CHANNEL_PRECOPY].last_sent_block = NULL; rs->last_seen_block = NULL; - rs->last_sent_block = NULL; rs->last_page = 0; postcopy_each_ram_send_discard(ms); @@ -3132,7 +3085,7 @@ static void ram_state_resume_prepare(RAMState *rs, QEMUFile *out) ram_state_reset(rs); /* Update RAMState cache of output QEMUFile */ - rs->f = out; + rs->pss[RAM_CHANNEL_PRECOPY].pss_channel = out; trace_ram_state_resume_prepare(pages); } @@ -3223,7 +3176,7 @@ static int ram_save_setup(QEMUFile *f, void *opaque) return -1; } } - (*rsp)->f = f; + (*rsp)->pss[RAM_CHANNEL_PRECOPY].pss_channel = f; WITH_RCU_READ_LOCK_GUARD() { qemu_put_be64(f, ram_bytes_total_common(true) | RAM_SAVE_FLAG_MEM_SIZE); @@ -3349,8 +3302,6 @@ static int ram_save_iterate(QEMUFile *f, void *opaque) } qemu_mutex_unlock(&rs->bitmap_mutex); - postcopy_preempt_reset_channel(rs); - /* * Must occur before EOS (or any QEMUFile operation) * because of RDMA protocol. 
@@ -3360,7 +3311,7 @@ static int ram_save_iterate(QEMUFile *f, void *opaque) out: if (ret >= 0 && migration_is_setup_or_active(migrate_get_current()->state)) { - ret = multifd_send_sync_main(rs->f); + ret = multifd_send_sync_main(rs->pss[RAM_CHANNEL_PRECOPY].pss_channel); if (ret < 0) { return ret; } @@ -3406,6 +3357,7 @@ static int ram_save_complete(QEMUFile *f, void *opaque) /* try transferring iterative blocks of memory */ /* flush all remaining blocks regardless of rate limiting */ + qemu_mutex_lock(&rs->bitmap_mutex); while (true) { int pages; @@ -3419,6 +3371,7 @@ static int ram_save_complete(QEMUFile *f, void *opaque) break; } } + qemu_mutex_unlock(&rs->bitmap_mutex); flush_compressed_data(rs); ram_control_after_iterate(f, RAM_CONTROL_FINISH); @@ -3428,9 +3381,7 @@ static int ram_save_complete(QEMUFile *f, void *opaque) return ret; } - postcopy_preempt_reset_channel(rs); - - ret = multifd_send_sync_main(rs->f); + ret = multifd_send_sync_main(rs->pss[RAM_CHANNEL_PRECOPY].pss_channel); if (ret < 0) { return ret; } diff --git a/migration/ram.h b/migration/ram.h index c7af65ac74..81cbb0947c 100644 --- a/migration/ram.h +++ b/migration/ram.h @@ -32,7 +32,27 @@ #include "qapi/qapi-types-migration.h" #include "exec/cpu-common.h" #include "io/channel.h" +#include "qemu/stats64.h" +/* + * These are the migration statistic counters that need to be updated using + * atomic ops (can be accessed by more than one thread). Here since we + * cannot modify MigrationStats directly to use Stat64 as it was defined in + * the QAPI scheme, we define an internal structure to hold them, and we + * propagate the real values when QMP queries happen. + * + * IOW, the corresponding fields within ram_counters on these specific + * fields will be always zero and not being used at all; they're just + * placeholders to make it QAPI-compatible. 
+ */ +typedef struct { + Stat64 transferred; + Stat64 duplicate; + Stat64 normal; + Stat64 postcopy_bytes; +} MigrationAtomicStats; + +extern MigrationAtomicStats ram_atomic_counters; extern MigrationStats ram_counters; extern XBZRLECacheStats xbzrle_counters; extern CompressionStats compression_counters; @@ -65,6 +85,9 @@ int ram_load_postcopy(QEMUFile *f, int channel); void ram_handle_compressed(void *host, uint8_t ch, uint64_t size); +void ram_transferred_add(uint64_t bytes); +void ram_release_page(const char *rbname, uint64_t offset); + int ramblock_recv_bitmap_test(RAMBlock *rb, void *host_addr); bool ramblock_recv_bitmap_test_byte_offset(RAMBlock *rb, uint64_t byte_offset); void ramblock_recv_bitmap_set(RAMBlock *rb, void *host_addr); diff --git a/migration/tls.c b/migration/tls.c index 73e8c9d3c2..4d2166a209 100644 --- a/migration/tls.c +++ b/migration/tls.c @@ -126,7 +126,6 @@ QIOChannelTLS *migration_tls_client_create(MigrationState *s, Error **errp) { QCryptoTLSCreds *creds; - QIOChannelTLS *tioc; creds = migration_tls_get_creds( s, QCRYPTO_TLS_CREDS_ENDPOINT_CLIENT, errp); @@ -138,10 +137,7 @@ QIOChannelTLS *migration_tls_client_create(MigrationState *s, hostname = s->parameters.tls_hostname; } - tioc = qio_channel_tls_new_client( - ioc, creds, hostname, errp); - - return tioc; + return qio_channel_tls_new_client(ioc, creds, hostname, errp); } void migration_tls_channel_connect(MigrationState *s, diff --git a/monitor/hmp-cmds.c b/monitor/hmp-cmds.c index 01b789a79e..b847b26041 100644 --- a/monitor/hmp-cmds.c +++ b/monitor/hmp-cmds.c @@ -104,7 +104,7 @@ void hmp_info_name(Monitor *mon, const QDict *qdict) NameInfo *info; info = qmp_query_name(NULL); - if (info->has_name) { + if (info->name) { monitor_printf(mon, "%s\n", info->name); } qapi_free_NameInfo(info); @@ -219,8 +219,7 @@ void hmp_info_migrate(Monitor *mon, const QDict *qdict) if (info->has_status) { monitor_printf(mon, "Migration status: %s", MigrationStatus_str(info->status)); - if (info->status == MIGRATION_STATUS_FAILED && - info->has_error_desc) { + if (info->status == MIGRATION_STATUS_FAILED && info->error_desc) { monitor_printf(mon, " (%s)\n", info->error_desc); } else { monitor_printf(mon, "\n"); @@ -242,7 +241,7 @@ void hmp_info_migrate(Monitor *mon, const QDict *qdict) } } - if (info->has_ram) { + if (info->ram) { monitor_printf(mon, "transferred ram: %" PRIu64 " kbytes\n", info->ram->transferred >> 10); monitor_printf(mon, "throughput: %0.2f mbps\n", @@ -295,7 +294,7 @@ void hmp_info_migrate(Monitor *mon, const QDict *qdict) } } - if (info->has_disk) { + if (info->disk) { monitor_printf(mon, "transferred disk: %" PRIu64 " kbytes\n", info->disk->transferred >> 10); monitor_printf(mon, "remaining disk: %" PRIu64 " kbytes\n", @@ -304,7 +303,7 @@ void hmp_info_migrate(Monitor *mon, const QDict *qdict) info->disk->total >> 10); } - if (info->has_xbzrle_cache) { + if (info->xbzrle_cache) { monitor_printf(mon, "cache size: %" PRIu64 " bytes\n", info->xbzrle_cache->cache_size); monitor_printf(mon, "xbzrle transferred: %" PRIu64 " kbytes\n", @@ -321,7 +320,7 @@ void hmp_info_migrate(Monitor *mon, const QDict *qdict) info->xbzrle_cache->overflow); } - if (info->has_compression) { + if (info->compression) { monitor_printf(mon, "compression pages: %" PRIu64 " pages\n", info->compression->pages); monitor_printf(mon, "compression busy: %" PRIu64 "\n", @@ -368,7 +367,7 @@ void hmp_info_migrate(Monitor *mon, const QDict *qdict) monitor_printf(mon, "]\n"); } - if (info->has_vfio) { + if (info->vfio) { monitor_printf(mon, "vfio 
device transferred: %" PRIu64 " kbytes\n", info->vfio->transferred >> 10); } @@ -448,11 +447,11 @@ void hmp_info_migrate_parameters(Monitor *mon, const QDict *qdict) monitor_printf(mon, "%s: %u\n", MigrationParameter_str(MIGRATION_PARAMETER_MAX_CPU_THROTTLE), params->max_cpu_throttle); - assert(params->has_tls_creds); + assert(params->tls_creds); monitor_printf(mon, "%s: '%s'\n", MigrationParameter_str(MIGRATION_PARAMETER_TLS_CREDS), params->tls_creds); - assert(params->has_tls_hostname); + assert(params->tls_hostname); monitor_printf(mon, "%s: '%s'\n", MigrationParameter_str(MIGRATION_PARAMETER_TLS_HOSTNAME), params->tls_hostname); @@ -549,11 +548,9 @@ static void hmp_info_vnc_clients(Monitor *mon, VncClientInfoList *client) hmp_info_VncBasicInfo(mon, qapi_VncClientInfo_base(cinfo), "Client"); monitor_printf(mon, " x509_dname: %s\n", - cinfo->has_x509_dname ? - cinfo->x509_dname : "none"); + cinfo->x509_dname ?: "none"); monitor_printf(mon, " sasl_username: %s\n", - cinfo->has_sasl_username ? - cinfo->sasl_username : "none"); + cinfo->sasl_username ?: "none"); client = client->next; } @@ -598,7 +595,7 @@ void hmp_info_vnc(Monitor *mon, const QDict *qdict) hmp_info_vnc_authcrypt(mon, " ", info->auth, info->has_vencrypt ? &info->vencrypt : NULL); } - if (info->has_display) { + if (info->display) { monitor_printf(mon, " Display: %s\n", info->display); } info2l = info2l->next; @@ -710,7 +707,7 @@ static void hmp_info_pci_device(Monitor *mon, const PciDeviceInfo *dev) dev->slot, dev->function); monitor_printf(mon, " "); - if (dev->class_info->has_desc) { + if (dev->class_info->desc) { monitor_puts(mon, dev->class_info->desc); } else { monitor_printf(mon, "Class %04" PRId64, dev->class_info->q_class); @@ -728,7 +725,7 @@ static void hmp_info_pci_device(Monitor *mon, const PciDeviceInfo *dev) dev->irq, (char)('A' + dev->irq_pin - 1)); } - if (dev->has_pci_bridge) { + if (dev->pci_bridge) { monitor_printf(mon, " BUS %" PRId64 ".\n", dev->pci_bridge->bus->number); monitor_printf(mon, " secondary bus %" PRId64 ".\n", @@ -774,7 +771,7 @@ static void hmp_info_pci_device(Monitor *mon, const PciDeviceInfo *dev) monitor_printf(mon, " id \"%s\"\n", dev->qdev_id); - if (dev->has_pci_bridge) { + if (dev->pci_bridge) { if (dev->pci_bridge->has_devices) { PciDeviceInfoList *cdev; for (cdev = dev->pci_bridge->devices; cdev; cdev = cdev->next) { @@ -865,10 +862,10 @@ void hmp_info_tpm(Monitor *mon, const QDict *qdict) case TPM_TYPE_PASSTHROUGH: tpo = ti->options->u.passthrough.data; monitor_printf(mon, "%s%s%s%s", - tpo->has_path ? ",path=" : "", - tpo->has_path ? tpo->path : "", - tpo->has_cancel_path ? ",cancel-path=" : "", - tpo->has_cancel_path ? tpo->cancel_path : ""); + tpo->path ? ",path=" : "", + tpo->path ?: "", + tpo->cancel_path ? 
",cancel-path=" : "", + tpo->cancel_path ?: ""); break; case TPM_TYPE_EMULATOR: teo = ti->options->u.emulator.data; @@ -1105,7 +1102,6 @@ void hmp_announce_self(Monitor *mon, const QDict *qdict) params->interfaces = strList_from_comma_list(interfaces_str); params->has_interfaces = params->interfaces != NULL; params->id = g_strdup(id); - params->has_id = !!params->id; qmp_announce_self(params, NULL); qapi_free_AnnounceParameters(params); } @@ -1237,19 +1233,16 @@ void hmp_migrate_set_parameter(Monitor *mon, const QDict *qdict) visit_type_uint8(v, param, &p->max_cpu_throttle, &err); break; case MIGRATION_PARAMETER_TLS_CREDS: - p->has_tls_creds = true; p->tls_creds = g_new0(StrOrNull, 1); p->tls_creds->type = QTYPE_QSTRING; visit_type_str(v, param, &p->tls_creds->u.s, &err); break; case MIGRATION_PARAMETER_TLS_HOSTNAME: - p->has_tls_hostname = true; p->tls_hostname = g_new0(StrOrNull, 1); p->tls_hostname->type = QTYPE_QSTRING; visit_type_str(v, param, &p->tls_hostname->u.s, &err); break; case MIGRATION_PARAMETER_TLS_AUTHZ: - p->has_tls_authz = true; p->tls_authz = g_new0(StrOrNull, 1); p->tls_authz->type = QTYPE_QSTRING; visit_type_str(v, param, &p->tls_authz->u.s, &err); @@ -1361,7 +1354,7 @@ void hmp_client_migrate_info(Monitor *mon, const QDict *qdict) qmp_client_migrate_info(protocol, hostname, has_port, port, has_tls_port, tls_port, - !!cert_subject, cert_subject, &err); + cert_subject, &err); hmp_handle_error(mon, err); } @@ -1406,7 +1399,6 @@ void hmp_set_password(Monitor *mon, const QDict *qdict) } if (opts.protocol == DISPLAY_PROTOCOL_VNC) { - opts.u.vnc.has_display = !!display; opts.u.vnc.display = (char *)display; } @@ -1434,7 +1426,6 @@ void hmp_expire_password(Monitor *mon, const QDict *qdict) } if (opts.protocol == DISPLAY_PROTOCOL_VNC) { - opts.u.vnc.has_display = !!display; opts.u.vnc.display = (char *)display; } @@ -1496,8 +1487,7 @@ void hmp_change(Monitor *mon, const QDict *qdict) } } - qmp_blockdev_change_medium(true, device, false, NULL, target, - !!arg, arg, true, force, + qmp_blockdev_change_medium(device, NULL, target, arg, true, force, !!read_only, read_only_mode, &err); } @@ -1520,7 +1510,7 @@ static void hmp_migrate_status_cb(void *opaque) info = qmp_query_migrate(NULL); if (!info->has_status || info->status == MIGRATION_STATUS_ACTIVE || info->status == MIGRATION_STATUS_SETUP) { - if (info->has_disk) { + if (info->disk) { int progress; if (info->disk->remaining) { @@ -1538,7 +1528,7 @@ static void hmp_migrate_status_cb(void *opaque) if (status->is_block_migration) { monitor_printf(status->mon, "\n"); } - if (info->has_error_desc) { + if (info->error_desc) { error_report("%s", info->error_desc); } monitor_resume(status->mon); @@ -1720,7 +1710,7 @@ hmp_screendump(Monitor *mon, const QDict *qdict) goto end; } - qmp_screendump(filename, id != NULL, id, id != NULL, head, + qmp_screendump(filename, id, id != NULL, head, input_format != NULL, format, &err); end: hmp_handle_error(mon, err); @@ -2016,35 +2006,35 @@ void hmp_rocker_of_dpa_flows(Monitor *mon, const QDict *qdict) } } - if (key->has_eth_src) { + if (key->eth_src) { if ((strcmp(key->eth_src, "01:00:00:00:00:00") == 0) && - (mask->has_eth_src) && + mask->eth_src && (strcmp(mask->eth_src, "01:00:00:00:00:00") == 0)) { monitor_printf(mon, " src <any mcast/bcast>"); } else if ((strcmp(key->eth_src, "00:00:00:00:00:00") == 0) && - (mask->has_eth_src) && + mask->eth_src && (strcmp(mask->eth_src, "01:00:00:00:00:00") == 0)) { monitor_printf(mon, " src <any ucast>"); } else { monitor_printf(mon, " src %s", key->eth_src); 
- if (mask->has_eth_src) { + if (mask->eth_src) { monitor_printf(mon, "(%s)", mask->eth_src); } } } - if (key->has_eth_dst) { + if (key->eth_dst) { if ((strcmp(key->eth_dst, "01:00:00:00:00:00") == 0) && - (mask->has_eth_dst) && + mask->eth_dst && (strcmp(mask->eth_dst, "01:00:00:00:00:00") == 0)) { monitor_printf(mon, " dst <any mcast/bcast>"); } else if ((strcmp(key->eth_dst, "00:00:00:00:00:00") == 0) && - (mask->has_eth_dst) && + mask->eth_dst && (strcmp(mask->eth_dst, "01:00:00:00:00:00") == 0)) { monitor_printf(mon, " dst <any ucast>"); } else { monitor_printf(mon, " dst %s", key->eth_dst); - if (mask->has_eth_dst) { + if (mask->eth_dst) { monitor_printf(mon, "(%s)", mask->eth_dst); } } @@ -2064,7 +2054,7 @@ void hmp_rocker_of_dpa_flows(Monitor *mon, const QDict *qdict) } } - if (key->has_ip_dst) { + if (key->ip_dst) { monitor_printf(mon, " dst %s", key->ip_dst); } @@ -2143,7 +2133,7 @@ void hmp_rocker_of_dpa_groups(Monitor *mon, const QDict *qdict) group->set_vlan_id & VLAN_VID_MASK); } - if (group->has_set_eth_src) { + if (group->set_eth_src) { if (!set) { set = true; monitor_printf(mon, " set"); @@ -2151,7 +2141,7 @@ void hmp_rocker_of_dpa_groups(Monitor *mon, const QDict *qdict) monitor_printf(mon, " src %s", group->set_eth_src); } - if (group->has_set_eth_dst) { + if (group->set_eth_dst) { if (!set) { monitor_printf(mon, " set"); } @@ -2559,7 +2549,7 @@ void hmp_virtio_status(Monitor *mon, const QDict *qdict) monitor_printf(mon, "%s:\n", path); monitor_printf(mon, " device_name: %s %s\n", - s->name, s->has_vhost_dev ? "(vhost)" : ""); + s->name, s->vhost_dev ? "(vhost)" : ""); monitor_printf(mon, " device_id: %d\n", s->device_id); monitor_printf(mon, " vhost_started: %s\n", s->vhost_started ? "true" : "false"); @@ -2595,7 +2585,7 @@ void hmp_virtio_status(Monitor *mon, const QDict *qdict) monitor_printf(mon, " Backend features:\n"); hmp_virtio_dump_features(mon, s->backend_features); - if (s->has_vhost_dev) { + if (s->vhost_dev) { monitor_printf(mon, " VHost:\n"); monitor_printf(mon, " nvqs: %d\n", s->vhost_dev->nvqs); diff --git a/monitor/misc.c b/monitor/misc.c index 205487e2b9..bf3f1c67ca 100644 --- a/monitor/misc.c +++ b/monitor/misc.c @@ -25,10 +25,8 @@ #include "qemu/osdep.h" #include "monitor-internal.h" #include "monitor/qdev.h" -#include "hw/usb.h" #include "hw/pci/pci.h" #include "sysemu/watchdog.h" -#include "hw/loader.h" #include "exec/gdbstub.h" #include "net/net.h" #include "net/slirp.h" @@ -39,16 +37,13 @@ #include "ui/input.h" #include "audio/audio.h" #include "disas/disas.h" -#include "sysemu/balloon.h" #include "qemu/timer.h" #include "qemu/log.h" #include "sysemu/hw_accel.h" #include "sysemu/runstate.h" #include "authz/list.h" #include "qapi/util.h" -#include "sysemu/blockdev.h" #include "sysemu/sysemu.h" -#include "sysemu/tpm.h" #include "sysemu/device_tree.h" #include "qapi/qmp/qdict.h" #include "qapi/qmp/qerror.h" @@ -77,7 +72,6 @@ #include "qapi/qapi-init-commands.h" #include "qapi/error.h" #include "qapi/qmp-event.h" -#include "sysemu/cpus.h" #include "qemu/cutils.h" #if defined(TARGET_S390X) @@ -398,7 +392,7 @@ static void hmp_info_trace_events(Monitor *mon, const QDict *qdict) void qmp_client_migrate_info(const char *protocol, const char *hostname, bool has_port, int64_t port, bool has_tls_port, int64_t tls_port, - bool has_cert_subject, const char *cert_subject, + const char *cert_subject, Error **errp) { if (strcmp(protocol, "spice") == 0) { @@ -1086,6 +1080,7 @@ int monitor_get_fd(Monitor *mon, const char *fdname, Error **errp) } fd = monfd->fd; + 
assert(fd >= 0); /* caller takes ownership of fd */ QLIST_REMOVE(monfd, next); @@ -1132,7 +1127,7 @@ void monitor_fdsets_cleanup(void) } } -AddfdInfo *qmp_add_fd(bool has_fdset_id, int64_t fdset_id, bool has_opaque, +AddfdInfo *qmp_add_fd(bool has_fdset_id, int64_t fdset_id, const char *opaque, Error **errp) { int fd; @@ -1145,8 +1140,7 @@ AddfdInfo *qmp_add_fd(bool has_fdset_id, int64_t fdset_id, bool has_opaque, goto error; } - fdinfo = monitor_fdset_add_fd(fd, has_fdset_id, fdset_id, - has_opaque, opaque, errp); + fdinfo = monitor_fdset_add_fd(fd, has_fdset_id, fdset_id, opaque, errp); if (fdinfo) { return fdinfo; } @@ -1214,12 +1208,7 @@ FdsetInfoList *qmp_query_fdsets(Error **errp) fdsetfd_info = g_malloc0(sizeof(*fdsetfd_info)); fdsetfd_info->fd = mon_fdset_fd->fd; - if (mon_fdset_fd->opaque) { - fdsetfd_info->has_opaque = true; - fdsetfd_info->opaque = g_strdup(mon_fdset_fd->opaque); - } else { - fdsetfd_info->has_opaque = false; - } + fdsetfd_info->opaque = g_strdup(mon_fdset_fd->opaque); QAPI_LIST_PREPEND(fdset_info->fds, fdsetfd_info); } @@ -1231,8 +1220,7 @@ FdsetInfoList *qmp_query_fdsets(Error **errp) } AddfdInfo *monitor_fdset_add_fd(int fd, bool has_fdset_id, int64_t fdset_id, - bool has_opaque, const char *opaque, - Error **errp) + const char *opaque, Error **errp) { MonFdset *mon_fdset = NULL; MonFdsetFd *mon_fdset_fd; @@ -1300,9 +1288,7 @@ AddfdInfo *monitor_fdset_add_fd(int fd, bool has_fdset_id, int64_t fdset_id, mon_fdset_fd = g_malloc0(sizeof(*mon_fdset_fd)); mon_fdset_fd->fd = fd; mon_fdset_fd->removed = false; - if (has_opaque) { - mon_fdset_fd->opaque = g_strdup(opaque); - } + mon_fdset_fd->opaque = g_strdup(opaque); QLIST_INSERT_HEAD(&mon_fdset->fds, mon_fdset_fd, next); fdinfo = g_malloc0(sizeof(*fdinfo)); @@ -1403,23 +1389,16 @@ void monitor_fdset_dup_fd_remove(int dup_fd) int monitor_fd_param(Monitor *mon, const char *fdname, Error **errp) { int fd; - Error *local_err = NULL; if (!qemu_isdigit(fdname[0]) && mon) { - fd = monitor_get_fd(mon, fdname, &local_err); + fd = monitor_get_fd(mon, fdname, errp); } else { fd = qemu_parse_fd(fdname); - if (fd == -1) { - error_setg(&local_err, "Invalid file descriptor number '%s'", + if (fd < 0) { + error_setg(errp, "Invalid file descriptor number '%s'", fdname); } } - if (local_err) { - error_propagate(errp, local_err); - assert(fd == -1); - } else { - assert(fd != -1); - } return fd; } diff --git a/monitor/monitor.c b/monitor/monitor.c index 86949024f6..7ed7bd5342 100644 --- a/monitor/monitor.c +++ b/monitor/monitor.c @@ -711,8 +711,8 @@ void monitor_init_globals_core(void) int monitor_init(MonitorOptions *opts, bool allow_hmp, Error **errp) { + ERRP_GUARD(); Chardev *chr; - Error *local_err = NULL; chr = qemu_chr_find(opts->chardev); if (chr == NULL) { @@ -726,7 +726,7 @@ int monitor_init(MonitorOptions *opts, bool allow_hmp, Error **errp) switch (opts->mode) { case MONITOR_MODE_CONTROL: - monitor_init_qmp(chr, opts->pretty, &local_err); + monitor_init_qmp(chr, opts->pretty, errp); break; case MONITOR_MODE_READLINE: if (!allow_hmp) { @@ -737,17 +737,13 @@ int monitor_init(MonitorOptions *opts, bool allow_hmp, Error **errp) error_setg(errp, "'pretty' is not compatible with HMP monitors"); return -1; } - monitor_init_hmp(chr, true, &local_err); + monitor_init_hmp(chr, true, errp); break; default: g_assert_not_reached(); } - if (local_err) { - error_propagate(errp, local_err); - return -1; - } - return 0; + return *errp ? 
-1 : 0; } int monitor_init_opts(QemuOpts *opts, Error **errp) diff --git a/monitor/qmp-cmds.c b/monitor/qmp-cmds.c index 81c8fdadf8..2932b3f3a5 100644 --- a/monitor/qmp-cmds.c +++ b/monitor/qmp-cmds.c @@ -50,11 +50,7 @@ NameInfo *qmp_query_name(Error **errp) { NameInfo *info = g_malloc0(sizeof(*info)); - if (qemu_name) { - info->has_name = true; - info->name = g_strdup(qemu_name); - } - + info->name = g_strdup(qemu_name); return info; } @@ -474,9 +470,9 @@ static bool invoke_stats_cb(StatsCallbacks *entry, StatsFilter *filter, StatsRequest *request, Error **errp) { + ERRP_GUARD(); strList *targets = NULL; strList *names = NULL; - ERRP_GUARD(); if (request) { if (request->provider != entry->provider) { @@ -541,9 +537,9 @@ StatsSchemaList *qmp_query_stats_schemas(bool has_provider, StatsProvider provider, Error **errp) { + ERRP_GUARD(); StatsSchemaList *stats_results = NULL; StatsCallbacks *entry; - ERRP_GUARD(); QTAILQ_FOREACH(entry, &stats_callbacks, next) { if (!has_provider || provider == entry->provider) { @@ -564,10 +560,7 @@ void add_stats_entry(StatsResultList **stats_results, StatsProvider provider, StatsResult *entry = g_new0(StatsResult, 1); entry->provider = provider; - if (qom_path) { - entry->has_qom_path = true; - entry->qom_path = g_strdup(qom_path); - } + entry->qom_path = g_strdup(qom_path); entry->stats = stats_list; QAPI_LIST_PREPEND(*stats_results, entry); diff --git a/nbd/server.c b/nbd/server.c index ada16089f3..67ed333578 100644 --- a/nbd/server.c +++ b/nbd/server.c @@ -1638,6 +1638,7 @@ static int nbd_export_create(BlockExport *blk_exp, BlockExportOptions *exp_args, { NBDExport *exp = container_of(blk_exp, NBDExport, common); BlockExportOptionsNbd *arg = &exp_args->u.nbd; + const char *name = arg->name ?: exp_args->node_name; BlockBackend *blk = blk_exp->blk; int64_t size; uint64_t perm, shared_perm; @@ -1653,12 +1654,8 @@ static int nbd_export_create(BlockExport *blk_exp, BlockExportOptions *exp_args, return -EINVAL; } - if (!arg->has_name) { - arg->name = exp_args->node_name; - } - - if (strlen(arg->name) > NBD_MAX_STRING_SIZE) { - error_setg(errp, "export name '%s' too long", arg->name); + if (strlen(name) > NBD_MAX_STRING_SIZE) { + error_setg(errp, "export name '%s' too long", name); return -EINVAL; } @@ -1667,8 +1664,8 @@ static int nbd_export_create(BlockExport *blk_exp, BlockExportOptions *exp_args, return -EINVAL; } - if (nbd_export_find(arg->name)) { - error_setg(errp, "NBD server already has export named '%s'", arg->name); + if (nbd_export_find(name)) { + error_setg(errp, "NBD server already has export named '%s'", name); return -EEXIST; } @@ -1688,7 +1685,7 @@ static int nbd_export_create(BlockExport *blk_exp, BlockExportOptions *exp_args, } QTAILQ_INIT(&exp->clients); - exp->name = g_strdup(arg->name); + exp->name = g_strdup(name); exp->description = g_strdup(arg->description); exp->nbdflags = (NBD_FLAG_HAS_FLAGS | NBD_FLAG_SEND_FLUSH | NBD_FLAG_SEND_FUA | NBD_FLAG_SEND_CACHE); @@ -1991,7 +1988,7 @@ static int coroutine_fn nbd_co_send_structured_error(NBDClient *client, } /* Do a sparse read and send the structured reply to the client. - * Returns -errno if sending fails. bdrv_block_status_above() failure is + * Returns -errno if sending fails. blk_co_block_status_above() failure is * reported to the client, at which point this function succeeds. 
*/ static int coroutine_fn nbd_co_send_sparse_read(NBDClient *client, @@ -2007,10 +2004,10 @@ static int coroutine_fn nbd_co_send_sparse_read(NBDClient *client, while (progress < size) { int64_t pnum; - int status = bdrv_block_status_above(blk_bs(exp->common.blk), NULL, - offset + progress, - size - progress, &pnum, NULL, - NULL); + int status = blk_co_block_status_above(exp->common.blk, NULL, + offset + progress, + size - progress, &pnum, NULL, + NULL); bool final; if (status < 0) { @@ -2141,14 +2138,15 @@ static int nbd_extent_array_add(NBDExtentArray *ea, return 0; } -static int blockstatus_to_extents(BlockDriverState *bs, uint64_t offset, - uint64_t bytes, NBDExtentArray *ea) +static int coroutine_fn blockstatus_to_extents(BlockBackend *blk, + uint64_t offset, uint64_t bytes, + NBDExtentArray *ea) { while (bytes) { uint32_t flags; int64_t num; - int ret = bdrv_block_status_above(bs, NULL, offset, bytes, &num, - NULL, NULL); + int ret = blk_co_block_status_above(blk, NULL, offset, bytes, &num, + NULL, NULL); if (ret < 0) { return ret; @@ -2168,13 +2166,14 @@ static int blockstatus_to_extents(BlockDriverState *bs, uint64_t offset, return 0; } -static int blockalloc_to_extents(BlockDriverState *bs, uint64_t offset, - uint64_t bytes, NBDExtentArray *ea) +static int coroutine_fn blockalloc_to_extents(BlockBackend *blk, + uint64_t offset, uint64_t bytes, + NBDExtentArray *ea) { while (bytes) { int64_t num; - int ret = bdrv_is_allocated_above(bs, NULL, false, offset, bytes, - &num); + int ret = blk_co_is_allocated_above(blk, NULL, false, offset, bytes, + &num); if (ret < 0) { return ret; @@ -2220,20 +2219,21 @@ static int nbd_co_send_extents(NBDClient *client, uint64_t handle, } /* Get block status from the exported device and send it to the client */ -static int nbd_co_send_block_status(NBDClient *client, uint64_t handle, - BlockDriverState *bs, uint64_t offset, - uint32_t length, bool dont_fragment, - bool last, uint32_t context_id, - Error **errp) +static int +coroutine_fn nbd_co_send_block_status(NBDClient *client, uint64_t handle, + BlockBackend *blk, uint64_t offset, + uint32_t length, bool dont_fragment, + bool last, uint32_t context_id, + Error **errp) { int ret; unsigned int nb_extents = dont_fragment ? 
1 : NBD_MAX_BLOCK_STATUS_EXTENTS; g_autoptr(NBDExtentArray) ea = nbd_extent_array_new(nb_extents); if (context_id == NBD_META_ID_BASE_ALLOCATION) { - ret = blockstatus_to_extents(bs, offset, length, ea); + ret = blockstatus_to_extents(blk, offset, length, ea); } else { - ret = blockalloc_to_extents(bs, offset, length, ea); + ret = blockalloc_to_extents(blk, offset, length, ea); } if (ret < 0) { return nbd_co_send_structured_error( @@ -2560,7 +2560,7 @@ static coroutine_fn int nbd_handle_request(NBDClient *client, if (client->export_meta.base_allocation) { ret = nbd_co_send_block_status(client, request->handle, - blk_bs(exp->common.blk), + exp->common.blk, request->from, request->len, dont_fragment, !--contexts_remaining, @@ -2573,7 +2573,7 @@ static coroutine_fn int nbd_handle_request(NBDClient *client, if (client->export_meta.allocation_depth) { ret = nbd_co_send_block_status(client, request->handle, - blk_bs(exp->common.blk), + exp->common.blk, request->from, request->len, dont_fragment, !--contexts_remaining, diff --git a/net/announce.c b/net/announce.c index 62c60192a3..9e99044422 100644 --- a/net/announce.c +++ b/net/announce.c @@ -46,7 +46,7 @@ void qemu_announce_timer_del(AnnounceTimer *timer, bool free_named) } qapi_free_strList(timer->params.interfaces); timer->params.interfaces = NULL; - if (free_named && timer->params.has_id) { + if (free_named && timer->params.id) { AnnounceTimer *list_timer; /* * Sanity check: There should only be one timer on the list with @@ -157,7 +157,7 @@ static void qemu_announce_self_iter(NICState *nic, void *opaque) skip = false; } - trace_qemu_announce_self_iter(timer->params.has_id ? timer->params.id : "_", + trace_qemu_announce_self_iter(timer->params.id ?: "_", nic->ncs->name, qemu_ether_ntoa(&nic->conf->macaddr), skip); @@ -199,9 +199,9 @@ void qemu_announce_self(AnnounceTimer *timer, AnnounceParameters *params) void qmp_announce_self(AnnounceParameters *params, Error **errp) { AnnounceTimer *named_timer; - if (!params->has_id) { + + if (!params->id) { params->id = g_strdup(""); - params->has_id = true; } named_timer = g_datalist_get_data(&named_timers, params->id); diff --git a/net/colo-compare.c b/net/colo-compare.c index 787c740f14..7f9e6f89ce 100644 --- a/net/colo-compare.c +++ b/net/colo-compare.c @@ -1135,22 +1135,17 @@ static void set_max_queue_size(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - Error *local_err = NULL; uint64_t value; - visit_type_uint64(v, name, &value, &local_err); - if (local_err) { - goto out; + if (!visit_type_uint64(v, name, &value, errp)) { + return; } if (!value) { - error_setg(&local_err, "Property '%s.%s' requires a positive value", + error_setg(errp, "Property '%s.%s' requires a positive value", object_get_typename(obj), name); - goto out; + return; } max_queue_size = value; - -out: - error_propagate(errp, local_err); } static void compare_pri_rs_finalize(SocketReadState *pri_rs) @@ -274,7 +274,7 @@ int net_init_hubport(const Netdev *netdev, const char *name, assert(!peer); hubport = &netdev->u.hubport; - if (hubport->has_netdev) { + if (hubport->netdev) { hubpeer = qemu_find_netdev(hubport->netdev); if (!hubpeer) { error_setg(errp, "netdev '%s' not found", hubport->netdev); diff --git a/net/l2tpv3.c b/net/l2tpv3.c index 350041a0d6..5852e42738 100644 --- a/net/l2tpv3.c +++ b/net/l2tpv3.c @@ -578,7 +578,7 @@ int net_init_l2tpv3(const Netdev *netdev, if (l2tpv3->has_udp && l2tpv3->udp) { s->udp = true; - if (!(l2tpv3->has_srcport && l2tpv3->has_dstport)) { + if (!(l2tpv3->srcport && 
l2tpv3->dstport)) { error_setg(errp, "need both src and dst port for udp"); goto outerr; } else { @@ -964,7 +964,7 @@ static int net_init_nic(const Netdev *netdev, const char *name, memset(nd, 0, sizeof(*nd)); - if (nic->has_netdev) { + if (nic->netdev) { nd->netdev = qemu_find_netdev(nic->netdev); if (!nd->netdev) { error_setg(errp, "netdev '%s' not found", nic->netdev); @@ -975,19 +975,19 @@ static int net_init_nic(const Netdev *netdev, const char *name, nd->netdev = peer; } nd->name = g_strdup(name); - if (nic->has_model) { + if (nic->model) { nd->model = g_strdup(nic->model); } - if (nic->has_addr) { + if (nic->addr) { nd->devaddr = g_strdup(nic->addr); } - if (nic->has_macaddr && + if (nic->macaddr && net_parse_macaddr(nd->macaddr.a, nic->macaddr) < 0) { error_setg(errp, "invalid syntax for ethernet address"); return -1; } - if (nic->has_macaddr && + if (nic->macaddr && is_multicast_ether_addr(nd->macaddr.a)) { error_setg(errp, "NIC cannot have multicast MAC address (odd 1st byte)"); @@ -1081,7 +1081,7 @@ static int net_client_init1(const Netdev *netdev, bool is_netdev, Error **errp) /* Do not add to a hub if it's a nic with a netdev= parameter. */ if (netdev->type != NET_CLIENT_DRIVER_NIC || - !netdev->u.nic.has_netdev) { + !netdev->u.nic.netdev) { peer = net_hub_add_port(0, NULL, NULL); } } @@ -1295,8 +1295,7 @@ void print_net_client(Monitor *mon, NetClientState *nc) } } -RxFilterInfoList *qmp_query_rx_filter(bool has_name, const char *name, - Error **errp) +RxFilterInfoList *qmp_query_rx_filter(const char *name, Error **errp) { NetClientState *nc; RxFilterInfoList *filter_list = NULL, **tail = &filter_list; @@ -1304,13 +1303,13 @@ RxFilterInfoList *qmp_query_rx_filter(bool has_name, const char *name, QTAILQ_FOREACH(nc, &net_clients, next) { RxFilterInfo *info; - if (has_name && strcmp(nc->name, name) != 0) { + if (name && strcmp(nc->name, name) != 0) { continue; } /* only query rx-filter information of NIC */ if (nc->info->type != NET_CLIENT_DRIVER_NIC) { - if (has_name) { + if (name) { error_setg(errp, "net client(%s) isn't a NIC", name); assert(!filter_list); return NULL; @@ -1327,19 +1326,19 @@ RxFilterInfoList *qmp_query_rx_filter(bool has_name, const char *name, if (nc->info->query_rx_filter) { info = nc->info->query_rx_filter(nc); QAPI_LIST_APPEND(tail, info); - } else if (has_name) { + } else if (name) { error_setg(errp, "net client(%s) doesn't support" " rx-filter querying", name); assert(!filter_list); return NULL; } - if (has_name) { + if (name) { break; } } - if (filter_list == NULL && has_name) { + if (filter_list == NULL && name) { error_setg(errp, "invalid net client name: %s", name); } diff --git a/net/slirp.c b/net/slirp.c index 14a8d59277..2ee3f1a0d7 100644 --- a/net/slirp.c +++ b/net/slirp.c @@ -1153,8 +1153,8 @@ int net_init_slirp(const Netdev *netdev, const char *name, ipv6 = 0; } - vnet = user->has_net ? g_strdup(user->net) : - user->has_ip ? g_strdup_printf("%s/24", user->ip) : + vnet = user->net ? g_strdup(user->net) : + user->ip ? 
g_strdup_printf("%s/24", user->ip) : NULL; dnssearch = slirp_dnssearch(user->dnssearch); diff --git a/net/socket.c b/net/socket.c index e62137c839..b67437a1f0 100644 --- a/net/socket.c +++ b/net/socket.c @@ -705,19 +705,19 @@ int net_init_socket(const Netdev *netdev, const char *name, assert(netdev->type == NET_CLIENT_DRIVER_SOCKET); sock = &netdev->u.socket; - if (sock->has_fd + sock->has_listen + sock->has_connect + sock->has_mcast + - sock->has_udp != 1) { + if (!!sock->fd + !!sock->listen + !!sock->connect + !!sock->mcast + + !!sock->udp != 1) { error_setg(errp, "exactly one of listen=, connect=, mcast= or udp=" " is required"); return -1; } - if (sock->has_localaddr && !sock->has_mcast && !sock->has_udp) { + if (sock->localaddr && !sock->mcast && !sock->udp) { error_setg(errp, "localaddr= is only valid with mcast= or udp="); return -1; } - if (sock->has_fd) { + if (sock->fd) { int fd, ret; fd = monitor_fd_param(monitor_cur(), sock->fd, errp); @@ -737,7 +737,7 @@ int net_init_socket(const Netdev *netdev, const char *name, return 0; } - if (sock->has_listen) { + if (sock->listen) { if (net_socket_listen_init(peer, "socket", name, sock->listen, errp) < 0) { return -1; @@ -745,7 +745,7 @@ int net_init_socket(const Netdev *netdev, const char *name, return 0; } - if (sock->has_connect) { + if (sock->connect) { if (net_socket_connect_init(peer, "socket", name, sock->connect, errp) < 0) { return -1; @@ -753,7 +753,7 @@ int net_init_socket(const Netdev *netdev, const char *name, return 0; } - if (sock->has_mcast) { + if (sock->mcast) { /* if sock->localaddr is missing, it has been initialized to "all bits * zero" */ if (net_socket_mcast_init(peer, "socket", name, sock->mcast, @@ -763,8 +763,8 @@ int net_init_socket(const Netdev *netdev, const char *name, return 0; } - assert(sock->has_udp); - if (!sock->has_localaddr) { + assert(sock->udp); + if (!sock->localaddr) { error_setg(errp, "localaddr= is mandatory with udp="); return -1; } diff --git a/net/tap-win32.c b/net/tap-win32.c index a49c28ba5d..f327d62ab0 100644 --- a/net/tap-win32.c +++ b/net/tap-win32.c @@ -807,7 +807,7 @@ int net_init_tap(const Netdev *netdev, const char *name, assert(netdev->type == NET_CLIENT_DRIVER_TAP); tap = &netdev->u.tap; - if (!tap->has_ifname) { + if (!tap->ifname) { error_report("tap: no interface name"); return -1; } @@ -611,8 +611,8 @@ int net_init_bridge(const Netdev *netdev, const char *name, assert(netdev->type == NET_CLIENT_DRIVER_BRIDGE); bridge = &netdev->u.bridge; - helper = bridge->has_helper ? bridge->helper : NULL; - br = bridge->has_br ? bridge->br : DEFAULT_BRIDGE_INTERFACE; + helper = bridge->helper; + br = bridge->br ?: DEFAULT_BRIDGE_INTERFACE; fd = net_bridge_run_helper(helper, br, errp); if (fd == -1) { @@ -688,9 +688,9 @@ static void net_init_tap_one(const NetdevTapOptions *tap, NetClientState *peer, goto failed; } - if (tap->has_fd || tap->has_fds) { + if (tap->fd || tap->fds) { qemu_set_info_str(&s->nc, "fd=%d", fd); - } else if (tap->has_helper) { + } else if (tap->helper) { qemu_set_info_str(&s->nc, "helper=%s", tap->helper); } else { qemu_set_info_str(&s->nc, "ifname=%s,script=%s,downscript=%s", ifname, @@ -812,21 +812,21 @@ int net_init_tap(const Netdev *netdev, const char *name, assert(netdev->type == NET_CLIENT_DRIVER_TAP); tap = &netdev->u.tap; queues = tap->has_queues ? tap->queues : 1; - vhostfdname = tap->has_vhostfd ? tap->vhostfd : NULL; - script = tap->has_script ? tap->script : NULL; - downscript = tap->has_downscript ? 
tap->downscript : NULL; + vhostfdname = tap->vhostfd; + script = tap->script; + downscript = tap->downscript; /* QEMU hubs do not support multiqueue tap, in this case peer is set. * For -netdev, peer is always NULL. */ - if (peer && (tap->has_queues || tap->has_fds || tap->has_vhostfds)) { + if (peer && (tap->has_queues || tap->fds || tap->vhostfds)) { error_setg(errp, "Multiqueue tap cannot be used with hubs"); return -1; } - if (tap->has_fd) { - if (tap->has_ifname || tap->has_script || tap->has_downscript || - tap->has_vnet_hdr || tap->has_helper || tap->has_queues || - tap->has_fds || tap->has_vhostfds) { + if (tap->fd) { + if (tap->ifname || tap->script || tap->downscript || + tap->has_vnet_hdr || tap->helper || tap->has_queues || + tap->fds || tap->vhostfds) { error_setg(errp, "ifname=, script=, downscript=, vnet_hdr=, " "helper=, queues=, fds=, and vhostfds= " "are invalid with fd="); @@ -859,14 +859,14 @@ int net_init_tap(const Netdev *netdev, const char *name, close(fd); return -1; } - } else if (tap->has_fds) { + } else if (tap->fds) { char **fds; char **vhost_fds; int nfds = 0, nvhosts = 0; - if (tap->has_ifname || tap->has_script || tap->has_downscript || - tap->has_vnet_hdr || tap->has_helper || tap->has_queues || - tap->has_vhostfd) { + if (tap->ifname || tap->script || tap->downscript || + tap->has_vnet_hdr || tap->helper || tap->has_queues || + tap->vhostfd) { error_setg(errp, "ifname=, script=, downscript=, vnet_hdr=, " "helper=, queues=, and vhostfd= " "are invalid with fds="); @@ -877,7 +877,7 @@ int net_init_tap(const Netdev *netdev, const char *name, vhost_fds = g_new0(char *, MAX_TAP_QUEUES); nfds = get_fds(tap->fds, fds, MAX_TAP_QUEUES); - if (tap->has_vhostfds) { + if (tap->vhostfds) { nvhosts = get_fds(tap->vhostfds, vhost_fds, MAX_TAP_QUEUES); if (nfds != nvhosts) { error_setg(errp, "The number of fds passed does not match " @@ -916,7 +916,7 @@ int net_init_tap(const Netdev *netdev, const char *name, net_init_tap_one(tap, peer, "tap", name, ifname, script, downscript, - tap->has_vhostfds ? vhost_fds[i] : NULL, + tap->vhostfds ? vhost_fds[i] : NULL, vnet_hdr, fd, &err); if (err) { error_propagate(errp, err); @@ -935,17 +935,16 @@ free_fail: g_free(fds); g_free(vhost_fds); return ret; - } else if (tap->has_helper) { - if (tap->has_ifname || tap->has_script || tap->has_downscript || - tap->has_vnet_hdr || tap->has_queues || tap->has_vhostfds) { + } else if (tap->helper) { + if (tap->ifname || tap->script || tap->downscript || + tap->has_vnet_hdr || tap->has_queues || tap->vhostfds) { error_setg(errp, "ifname=, script=, downscript=, vnet_hdr=, " "queues=, and vhostfds= are invalid with helper="); return -1; } fd = net_bridge_run_helper(tap->helper, - tap->has_br ? 
- tap->br : DEFAULT_BRIDGE_INTERFACE, + tap->br ?: DEFAULT_BRIDGE_INTERFACE, errp); if (fd == -1) { return -1; @@ -972,7 +971,7 @@ free_fail: } else { g_autofree char *default_script = NULL; g_autofree char *default_downscript = NULL; - if (tap->has_vhostfds) { + if (tap->vhostfds) { error_setg(errp, "vhostfds= is invalid if fds= wasn't specified"); return -1; } @@ -985,7 +984,7 @@ free_fail: get_relocated_path(DEFAULT_NETWORK_DOWN_SCRIPT); } - if (tap->has_ifname) { + if (tap->ifname) { pstrcpy(ifname, sizeof ifname, tap->ifname); } else { ifname[0] = '\0'; @@ -998,7 +997,7 @@ free_fail: return -1; } - if (queues > 1 && i == 0 && !tap->has_ifname) { + if (queues > 1 && i == 0 && !tap->ifname) { if (tap_fd_get_ifname(fd, ifname)) { error_setg(errp, "Fail to get ifname"); close(fd); diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c index 2b4b85d8f8..260e474863 100644 --- a/net/vhost-vdpa.c +++ b/net/vhost-vdpa.c @@ -635,19 +635,19 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name, assert(netdev->type == NET_CLIENT_DRIVER_VHOST_VDPA); opts = &netdev->u.vhost_vdpa; - if (!opts->has_vhostdev && !opts->has_vhostfd) { + if (!opts->vhostdev && !opts->vhostfd) { error_setg(errp, "vhost-vdpa: neither vhostdev= nor vhostfd= was specified"); return -1; } - if (opts->has_vhostdev && opts->has_vhostfd) { + if (opts->vhostdev && opts->vhostfd) { error_setg(errp, "vhost-vdpa: vhostdev= and vhostfd= are mutually exclusive"); return -1; } - if (opts->has_vhostdev) { + if (opts->vhostdev) { vdpa_device_fd = qemu_open(opts->vhostdev, O_RDWR, errp); if (vdpa_device_fd == -1) { return -errno; diff --git a/net/vmnet-host.c b/net/vmnet-host.c index 05f8d78864..1f95f7343a 100644 --- a/net/vmnet-host.c +++ b/net/vmnet-host.c @@ -26,7 +26,7 @@ static bool validate_options(const Netdev *netdev, Error **errp) MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_VERSION_11_0 QemuUUID net_uuid; - if (options->has_net_uuid && + if (options->net_uuid && qemu_uuid_parse(options->net_uuid, &net_uuid) < 0) { error_setg(errp, "Invalid UUID provided in 'net-uuid'"); return false; @@ -39,7 +39,7 @@ static bool validate_options(const Netdev *netdev, Error **errp) return false; } - if (options->has_net_uuid) { + if (options->net_uuid) { error_setg(errp, "vmnet-host.net-uuid feature is " "unavailable: outdated vmnet.framework API"); @@ -47,12 +47,12 @@ static bool validate_options(const Netdev *netdev, Error **errp) } #endif - if ((options->has_start_address || - options->has_end_address || - options->has_subnet_mask) && - !(options->has_start_address && - options->has_end_address && - options->has_subnet_mask)) { + if ((options->start_address || + options->end_address || + options->subnet_mask) && + !(options->start_address && + options->end_address && + options->subnet_mask)) { error_setg(errp, "'start-address', 'end-address', 'subnet-mask' " "should be provided together"); @@ -79,7 +79,7 @@ static xpc_object_t build_if_desc(const Netdev *netdev) options->isolated); QemuUUID net_uuid; - if (options->has_net_uuid) { + if (options->net_uuid) { qemu_uuid_parse(options->net_uuid, &net_uuid); xpc_dictionary_set_uuid(if_desc, vmnet_network_identifier_key, @@ -87,7 +87,7 @@ static xpc_object_t build_if_desc(const Netdev *netdev) } #endif - if (options->has_start_address) { + if (options->start_address) { xpc_dictionary_set_string(if_desc, vmnet_start_address_key, options->start_address); diff --git a/net/vmnet-shared.c b/net/vmnet-shared.c index 18cadc72bd..40c7306a75 100644 --- a/net/vmnet-shared.c +++ b/net/vmnet-shared.c @@ 
-31,12 +31,12 @@ static bool validate_options(const Netdev *netdev, Error **errp) } #endif - if ((options->has_start_address || - options->has_end_address || - options->has_subnet_mask) && - !(options->has_start_address && - options->has_end_address && - options->has_subnet_mask)) { + if ((options->start_address || + options->end_address || + options->subnet_mask) && + !(options->start_address && + options->end_address && + options->subnet_mask)) { error_setg(errp, "'start-address', 'end-address', 'subnet-mask' " "should be provided together" @@ -58,13 +58,13 @@ static xpc_object_t build_if_desc(const Netdev *netdev) VMNET_SHARED_MODE ); - if (options->has_nat66_prefix) { + if (options->nat66_prefix) { xpc_dictionary_set_string(if_desc, vmnet_nat66_prefix_key, options->nat66_prefix); } - if (options->has_start_address) { + if (options->start_address) { xpc_dictionary_set_string(if_desc, vmnet_start_address_key, options->start_address); diff --git a/qemu-img.c b/qemu-img.c index a9b3a8103c..439d8de1e3 100644 --- a/qemu-img.c +++ b/qemu-img.c @@ -2915,15 +2915,15 @@ static ImageInfoList *collect_image_info_list(bool image_opts, image_opts = false; if (chain) { - if (info->has_full_backing_filename) { + if (info->full_backing_filename) { filename = info->full_backing_filename; - } else if (info->has_backing_filename) { + } else if (info->backing_filename) { error_report("Could not determine absolute backing filename," " but backing filename '%s' present", info->backing_filename); goto err; } - if (info->has_backing_filename_format) { + if (info->backing_filename_format) { fmt = info->backing_filename_format; } } @@ -3046,7 +3046,7 @@ static int dump_map_entry(OutputFormat output_format, MapEntry *e, printf("%#-16"PRIx64"%#-16"PRIx64"%#-16"PRIx64"%s\n", e->start, e->length, e->has_offset ? e->offset : 0, - e->has_filename ? e->filename : ""); + e->filename ?: ""); } /* This format ignores the distinction between 0, ZERO and ZERO|DATA. * Modify the flags here to allow more coalescing. 
@@ -3127,7 +3127,6 @@ static int get_block_status(BlockDriverState *bs, int64_t offset, .has_offset = has_offset, .depth = depth, .present = !!(ret & BDRV_BLOCK_ALLOCATED), - .has_filename = filename, .filename = filename, }; @@ -3143,11 +3142,11 @@ static inline bool entry_mergeable(const MapEntry *curr, const MapEntry *next) curr->data != next->data || curr->depth != next->depth || curr->present != next->present || - curr->has_filename != next->has_filename || + !curr->filename != !next->filename || curr->has_offset != next->has_offset) { return false; } - if (curr->has_filename && strcmp(curr->filename, next->filename)) { + if (curr->filename && strcmp(curr->filename, next->filename)) { return false; } if (curr->has_offset && curr->offset + curr->length != next->offset) { diff --git a/qemu-nbd.c b/qemu-nbd.c index 0cd5aa6f02..6ff45308a9 100644 --- a/qemu-nbd.c +++ b/qemu-nbd.c @@ -1107,9 +1107,7 @@ int main(int argc, char **argv) .has_writable = true, .writable = !readonly, .u.nbd = { - .has_name = true, .name = g_strdup(export_name), - .has_description = !!export_description, .description = g_strdup(export_description), .has_bitmaps = !!bitmaps, .bitmaps = bitmaps, diff --git a/qga/commands-posix.c b/qga/commands-posix.c index 32493d6383..1a28326ec7 100644 --- a/qga/commands-posix.c +++ b/qga/commands-posix.c @@ -71,7 +71,7 @@ static void ga_wait_child(pid_t pid, int *status, Error **errp) g_assert(rpid == pid); } -void qmp_guest_shutdown(bool has_mode, const char *mode, Error **errp) +void qmp_guest_shutdown(const char *mode, Error **errp) { const char *shutdown_flag; Error *local_err = NULL; @@ -93,7 +93,7 @@ void qmp_guest_shutdown(bool has_mode, const char *mode, Error **errp) #endif slog("guest-shutdown called, mode: %s", mode); - if (!has_mode || strcmp(mode, "powerdown") == 0) { + if (!mode || strcmp(mode, "powerdown") == 0) { shutdown_flag = powerdown_flag; } else if (strcmp(mode, "halt") == 0) { shutdown_flag = halt_flag; @@ -404,14 +404,14 @@ end: return f; } -int64_t qmp_guest_file_open(const char *path, bool has_mode, const char *mode, +int64_t qmp_guest_file_open(const char *path, const char *mode, Error **errp) { FILE *fh; Error *local_err = NULL; int64_t handle; - if (!has_mode) { + if (!mode) { mode = "r"; } slog("guest-file-open called, filepath: %s, mode: %s", path, mode); @@ -1037,7 +1037,6 @@ static bool build_guest_fsinfo_for_ccw_dev(char const *syspath, return false; } - disk->has_ccw_address = true; disk->ccw_address = g_new0(GuestCCWAddress, 1); disk->ccw_address->cssid = cssid; disk->ccw_address->ssid = ssid; @@ -1084,12 +1083,10 @@ static void build_guest_fsinfo_for_real_device(char const *syspath, devnode = udev_device_get_devnode(udevice); if (devnode != NULL) { disk->dev = g_strdup(devnode); - disk->has_dev = true; } serial = udev_device_get_property_value(udevice, "ID_SERIAL"); if (serial != NULL && *serial != 0) { disk->serial = g_strdup(serial); - disk->has_serial = true; } } @@ -1108,7 +1105,7 @@ static void build_guest_fsinfo_for_real_device(char const *syspath, has_hwinf = false; } - if (has_hwinf || disk->has_dev || disk->has_serial) { + if (has_hwinf || disk->dev || disk->serial) { QAPI_LIST_PREPEND(fs->disk, disk); } else { qapi_free_GuestDiskAddress(disk); @@ -1411,7 +1408,6 @@ static void get_nvme_smart(GuestDiskInfo *disk) return; } - disk->has_smart = true; disk->smart = g_new0(GuestDiskSmart, 1); disk->smart->type = GUEST_DISK_BUS_TYPE_NVME; @@ -1449,7 +1445,7 @@ static void get_nvme_smart(GuestDiskInfo *disk) static void 
get_disk_smart(GuestDiskInfo *disk) { - if (disk->has_address + if (disk->address && (disk->address->bus_type == GUEST_DISK_BUS_TYPE_NVME)) { get_nvme_smart(disk); } @@ -1502,7 +1498,6 @@ GuestDiskInfoList *qmp_guest_get_disks(Error **errp) disk->name = dev_name; disk->partition = false; disk->alias = get_alias_for_syspath(disk_dir); - disk->has_alias = (disk->alias != NULL); QAPI_LIST_PREPEND(ret, disk); /* Get address for non-virtual devices */ @@ -1522,8 +1517,6 @@ GuestDiskInfoList *qmp_guest_get_disks(Error **errp) error_get_pretty(local_err)); error_free(local_err); local_err = NULL; - } else if (disk->address != NULL) { - disk->has_address = true; } } @@ -1641,7 +1634,6 @@ qmp_guest_fstrim(bool has_minimum, int64_t minimum, Error **errp) if (fd == -1) { result->error = g_strdup_printf("failed to open: %s", strerror(errno)); - result->has_error = true; continue; } @@ -1656,7 +1648,6 @@ qmp_guest_fstrim(bool has_minimum, int64_t minimum, Error **errp) r.minlen = has_minimum ? minimum : 0; ret = ioctl(fd, FITRIM, &r); if (ret == -1) { - result->has_error = true; if (errno == ENOTTY || errno == EOPNOTSUPP) { result->error = g_strdup("trim not supported"); } else { @@ -2967,7 +2958,7 @@ GuestNetworkInterfaceList *qmp_guest_network_get_interfaces(Error **errp) QAPI_LIST_APPEND(tail, info); } - if (!info->has_hardware_address) { + if (!info->hardware_address) { if (!guest_get_hw_addr(ifa, mac_addr, &obtained, errp)) { goto error; } @@ -2977,7 +2968,6 @@ GuestNetworkInterfaceList *qmp_guest_network_get_interfaces(Error **errp) (int) mac_addr[0], (int) mac_addr[1], (int) mac_addr[2], (int) mac_addr[3], (int) mac_addr[4], (int) mac_addr[5]); - info->has_hardware_address = true; } } @@ -3037,14 +3027,12 @@ GuestNetworkInterfaceList *qmp_guest_network_get_interfaces(Error **errp) info->has_ip_addresses = true; - if (!info->has_statistics) { + if (!info->statistics) { interface_stat = g_malloc0(sizeof(*interface_stat)); if (guest_get_network_stats(info->name, interface_stat) == -1) { - info->has_statistics = false; g_free(interface_stat); } else { info->statistics = interface_stat; - info->has_statistics = true; } } } @@ -3351,11 +3339,8 @@ GuestOSInfo *qmp_guest_get_osinfo(Error **errp) if (uname(&kinfo) != 0) { error_setg_errno(errp, errno, "uname failed"); } else { - info->has_kernel_version = true; info->kernel_version = g_strdup(kinfo.version); - info->has_kernel_release = true; info->kernel_release = g_strdup(kinfo.release); - info->has_machine = true; info->machine = g_strdup(kinfo.machine); } @@ -3375,7 +3360,6 @@ GuestOSInfo *qmp_guest_get_osinfo(Error **errp) value = g_key_file_get_value(osrelease, "os-release", osfield, NULL); \ if (value != NULL) { \ ga_osrelease_replace_special(value); \ - info->has_ ## field = true; \ info->field = value; \ } \ } while (0) diff --git a/qga/commands-win32.c b/qga/commands-win32.c index ec9f55b453..4df50ea710 100644 --- a/qga/commands-win32.c +++ b/qga/commands-win32.c @@ -193,8 +193,7 @@ static void handle_set_nonblocking(HANDLE fh) SetNamedPipeHandleState(fh, &pipe_state, NULL, NULL); } -int64_t qmp_guest_file_open(const char *path, bool has_mode, - const char *mode, Error **errp) +int64_t qmp_guest_file_open(const char *path, const char *mode, Error **errp) { int64_t fd = -1; HANDLE fh; @@ -206,7 +205,7 @@ int64_t qmp_guest_file_open(const char *path, bool has_mode, GError *gerr = NULL; wchar_t *w_path = NULL; - if (!has_mode) { + if (!mode) { mode = "r"; } slog("guest-file-open called, filepath: %s, mode: %s", path, mode); @@ -275,13 +274,12 @@ 
static void acquire_privilege(const char *name, Error **errp) { HANDLE token = NULL; TOKEN_PRIVILEGES priv; - Error *local_err = NULL; if (OpenProcessToken(GetCurrentProcess(), TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY, &token)) { if (!LookupPrivilegeValue(NULL, name, &priv.Privileges[0].Luid)) { - error_setg(&local_err, QERR_QGA_COMMAND_FAILED, + error_setg(errp, QERR_QGA_COMMAND_FAILED, "no luid for requested privilege"); goto out; } @@ -290,13 +288,13 @@ static void acquire_privilege(const char *name, Error **errp) priv.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED; if (!AdjustTokenPrivileges(token, FALSE, &priv, 0, NULL, 0)) { - error_setg(&local_err, QERR_QGA_COMMAND_FAILED, + error_setg(errp, QERR_QGA_COMMAND_FAILED, "unable to acquire requested privilege"); goto out; } } else { - error_setg(&local_err, QERR_QGA_COMMAND_FAILED, + error_setg(errp, QERR_QGA_COMMAND_FAILED, "failed to open privilege token"); } @@ -304,7 +302,6 @@ out: if (token) { CloseHandle(token); } - error_propagate(errp, local_err); } static void execute_async(DWORD WINAPI (*func)(LPVOID), LPVOID opaque, @@ -317,14 +314,14 @@ static void execute_async(DWORD WINAPI (*func)(LPVOID), LPVOID opaque, } } -void qmp_guest_shutdown(bool has_mode, const char *mode, Error **errp) +void qmp_guest_shutdown(const char *mode, Error **errp) { Error *local_err = NULL; UINT shutdown_flag = EWX_FORCE; slog("guest-shutdown called, mode: %s", mode); - if (!has_mode || strcmp(mode, "powerdown") == 0) { + if (!mode || strcmp(mode, "powerdown") == 0) { shutdown_flag |= EWX_POWEROFF; } else if (strcmp(mode, "halt") == 0) { shutdown_flag |= EWX_SHUTDOWN; @@ -833,7 +830,6 @@ static void get_disk_properties(HANDLE vol_h, GuestDiskAddress *disk, g_debug("serial number \"%s\"", serial); if (*serial != 0) { disk->serial = g_strndup(serial, len); - disk->has_serial = true; } } out_free: @@ -951,7 +947,6 @@ static GuestDiskAddressList *build_guest_disk_info(char *guid, Error **errp) /* Possibly CD-ROM or a shared drive. 
Try to pass the volume */ g_debug("volume not on disk"); disk = g_new0(GuestDiskAddress, 1); - disk->has_dev = true; disk->dev = g_strdup(name); get_single_disk_info(0xffffffff, disk, &local_err); if (local_err) { @@ -983,7 +978,6 @@ static GuestDiskAddressList *build_guest_disk_info(char *guid, Error **errp) * See also Naming Files, Paths and Namespaces: * https://docs.microsoft.com/en-us/windows/desktop/FileIO/naming-a-file#win32-device-namespaces */ - disk->has_dev = true; disk->dev = g_strdup_printf("\\\\.\\PhysicalDrive%lu", extents->Extents[i].DiskNumber); @@ -1078,7 +1072,6 @@ GuestDiskInfoList *qmp_guest_get_disks(Error **errp) g_debug(" number: %lu", sdn.DeviceNumber); address = g_new0(GuestDiskAddress, 1); - address->has_dev = true; address->dev = g_strdup(disk->name); get_single_disk_info(sdn.DeviceNumber, address, &local_err); if (local_err) { @@ -1089,7 +1082,6 @@ GuestDiskInfoList *qmp_guest_get_disks(Error **errp) address = NULL; } else { disk->address = address; - disk->has_address = true; } QAPI_LIST_PREPEND(ret, disk); @@ -1369,7 +1361,6 @@ qmp_guest_fstrim(bool has_minimum, int64_t minimum, Error **errp) g_free(uc_path); if (!path) { - res->has_error = true; res->error = g_strdup(gerr->message); g_error_free(gerr); break; @@ -1387,7 +1378,6 @@ qmp_guest_fstrim(bool has_minimum, int64_t minimum, Error **errp) if (!g_spawn_sync(NULL, argv, NULL, G_SPAWN_SEARCH_PATH, NULL, NULL, &out /* stdout */, NULL /* stdin */, NULL, &gerr)) { - res->has_error = true; res->error = g_strdup(gerr->message); g_error_free(gerr); } else { @@ -1403,7 +1393,6 @@ qmp_guest_fstrim(bool has_minimum, int64_t minimum, Error **errp) if (g_strstr_len(lines[i], -1, "(0x") == NULL) { continue; } - res->has_error = true; res->error = g_strdup(lines[i]); break; } @@ -1683,8 +1672,6 @@ GuestNetworkInterfaceList *qmp_guest_network_get_interfaces(Error **errp) (int) mac_addr[0], (int) mac_addr[1], (int) mac_addr[2], (int) mac_addr[3], (int) mac_addr[4], (int) mac_addr[5]); - - info->has_hardware_address = true; } head_addr = NULL; @@ -1713,15 +1700,13 @@ GuestNetworkInterfaceList *qmp_guest_network_get_interfaces(Error **errp) info->has_ip_addresses = true; info->ip_addresses = head_addr; } - if (!info->has_statistics) { + if (!info->statistics) { interface_stat = g_malloc0(sizeof(*interface_stat)); - if (guest_get_network_stats(addr->AdapterName, - interface_stat) == -1) { - info->has_statistics = false; + if (guest_get_network_stats(addr->AdapterName, interface_stat) + == -1) { g_free(interface_stat); } else { info->statistics = interface_stat; - info->has_statistics = true; } } } @@ -2113,7 +2098,6 @@ GuestUserList *qmp_guest_get_users(Error **errp) user->user = g_strdup(info->UserName); user->domain = g_strdup(info->Domain); - user->has_domain = true; user->login_time = login_time; @@ -2332,29 +2316,19 @@ GuestOSInfo *qmp_guest_get_osinfo(Error **errp) info = g_new0(GuestOSInfo, 1); - info->has_kernel_version = true; info->kernel_version = g_strdup_printf("%lu.%lu", os_version.dwMajorVersion, os_version.dwMinorVersion); - info->has_kernel_release = true; info->kernel_release = g_strdup_printf("%lu", os_version.dwBuildNumber); - info->has_machine = true; info->machine = ga_get_current_arch(); - info->has_id = true; info->id = g_strdup("mswindows"); - info->has_name = true; info->name = g_strdup("Microsoft Windows"); - info->has_pretty_name = true; info->pretty_name = product_name; - info->has_version = true; info->version = ga_get_win_name(&os_version, false); - info->has_version_id = true; 
info->version_id = ga_get_win_name(&os_version, true); - info->has_variant = true; info->variant = g_strdup(server ? "server" : "client"); - info->has_variant_id = true; info->variant_id = g_strdup(server ? "server" : "client"); return info; @@ -2478,7 +2452,6 @@ GuestDeviceInfoList *qmp_guest_get_devices(Error **errp) device_id = g_match_info_fetch(match_info, 2); device->id = g_new0(GuestDeviceId, 1); - device->has_id = true; device->id->type = GUEST_DEVICE_TYPE_PCI; id = &device->id->u.pci; id->vendor_id = g_ascii_strtoull(vendor_id, NULL, 16); @@ -2502,7 +2475,6 @@ GuestDeviceInfoList *qmp_guest_get_devices(Error **errp) error_setg(errp, "conversion to utf8 failed (driver version)"); return NULL; } - device->has_driver_version = true; date = (LPFILETIME)cm_get_property(dev_info_data.DevInst, &qga_DEVPKEY_Device_DriverDate, &cm_type); diff --git a/qga/commands.c b/qga/commands.c index 7ff551d092..360077364e 100644 --- a/qga/commands.c +++ b/qga/commands.c @@ -206,14 +206,12 @@ GuestExecStatus *qmp_guest_exec_status(int64_t pid, Error **errp) } #endif if (gei->out.length > 0) { - ges->has_out_data = true; ges->out_data = g_base64_encode(gei->out.data, gei->out.length); g_free(gei->out.data); ges->has_out_truncated = gei->out.truncated; } if (gei->err.length > 0) { - ges->has_err_data = true; ges->err_data = g_base64_encode(gei->err.data, gei->err.length); g_free(gei->err.data); ges->has_err_truncated = gei->err.truncated; @@ -385,7 +383,7 @@ close: GuestExec *qmp_guest_exec(const char *path, bool has_arg, strList *arg, bool has_env, strList *env, - bool has_input_data, const char *input_data, + const char *input_data, bool has_capture_output, bool capture_output, Error **errp) { @@ -406,7 +404,7 @@ GuestExec *qmp_guest_exec(const char *path, arglist.value = (char *)path; arglist.next = has_arg ? arg : NULL; - if (has_input_data) { + if (input_data) { input = qbase64_decode(input_data, -1, &ninput, errp); if (!input) { return NULL; @@ -423,7 +421,7 @@ GuestExec *qmp_guest_exec(const char *path, } ret = g_spawn_async_with_pipes(NULL, argv, envp, flags, - guest_exec_task_setup, NULL, &pid, has_input_data ? &in_fd : NULL, + guest_exec_task_setup, NULL, &pid, input_data ? &in_fd : NULL, has_output ? &out_fd : NULL, has_output ? 
&err_fd : NULL, &gerr); if (!ret) { error_setg(errp, QERR_QGA_COMMAND_FAILED, gerr->message); @@ -438,7 +436,7 @@ GuestExec *qmp_guest_exec(const char *path, gei->has_output = has_output; g_child_watch_add(pid, guest_exec_child_watch, gei); - if (has_input_data) { + if (input_data) { gei->in.data = g_steal_pointer(&input); gei->in.size = ninput; #ifdef G_OS_WIN32 @@ -547,7 +545,6 @@ GuestTimezone *qmp_guest_get_timezone(Error **errp) info->offset = g_time_zone_get_offset(tz, intv); name = g_time_zone_get_abbreviation(tz, intv); if (name != NULL) { - info->has_zone = true; info->zone = g_strdup(name); } g_time_zone_unref(tz); diff --git a/qom/qom-qmp-cmds.c b/qom/qom-qmp-cmds.c index 2e63a4c184..7c087299de 100644 --- a/qom/qom-qmp-cmds.c +++ b/qom/qom-qmp-cmds.c @@ -99,15 +99,13 @@ static void qom_list_types_tramp(ObjectClass *klass, void *data) info->name = g_strdup(object_class_get_name(klass)); info->has_abstract = info->abstract = object_class_is_abstract(klass); if (parent) { - info->has_parent = true; info->parent = g_strdup(object_class_get_name(parent)); } QAPI_LIST_PREPEND(*pret, info); } -ObjectTypeInfoList *qmp_qom_list_types(bool has_implements, - const char *implements, +ObjectTypeInfoList *qmp_qom_list_types(const char *implements, bool has_abstract, bool abstract, Error **errp) @@ -168,10 +166,8 @@ ObjectPropertyInfoList *qmp_device_list_properties(const char *typename, info = g_new0(ObjectPropertyInfo, 1); info->name = g_strdup(prop->name); info->type = g_strdup(prop->type); - info->has_description = !!prop->description; info->description = g_strdup(prop->description); info->default_value = qobject_ref(prop->defval); - info->has_default_value = !!info->default_value; QAPI_LIST_PREPEND(prop_list, info); } @@ -215,7 +211,6 @@ ObjectPropertyInfoList *qmp_qom_list_properties(const char *typename, info = g_malloc0(sizeof(*info)); info->name = g_strdup(prop->name); info->type = g_strdup(prop->type); - info->has_description = !!prop->description; info->description = g_strdup(prop->description); QAPI_LIST_PREPEND(prop_list, info); diff --git a/replay/replay-debugging.c b/replay/replay-debugging.c index 1cde50e9f3..3e60549a4a 100644 --- a/replay/replay-debugging.c +++ b/replay/replay-debugging.c @@ -50,7 +50,6 @@ ReplayInfo *qmp_query_replay(Error **errp) retval->mode = replay_mode; if (replay_get_filename()) { retval->filename = g_strdup(replay_get_filename()); - retval->has_filename = true; } retval->icount = replay_get_current_icount(); return retval; diff --git a/replay/replay-time.c b/replay/replay-time.c index 00ebcb7a49..ee0ebfcf09 100644 --- a/replay/replay-time.c +++ b/replay/replay-time.c @@ -48,7 +48,6 @@ void replay_read_next_clock(ReplayClockKind kind) /*! Reads next clock event from the input. */ int64_t replay_read_clock(ReplayClockKind kind, int64_t raw_icount) { - int64_t ret; g_assert(replay_file && replay_mutex_locked()); replay_advance_current_icount(raw_icount); @@ -56,7 +55,5 @@ int64_t replay_read_clock(ReplayClockKind kind, int64_t raw_icount) if (replay_next_event_is(EVENT_CLOCK + kind)) { replay_read_next_clock(kind); } - ret = replay_state.cached_clock[kind]; - - return ret; + return replay_state.cached_clock[kind]; } diff --git a/scripts/block-coroutine-wrapper.py b/scripts/block-coroutine-wrapper.py index 08be813407..6e087fa0b7 100644 --- a/scripts/block-coroutine-wrapper.py +++ b/scripts/block-coroutine-wrapper.py @@ -2,7 +2,7 @@ """Generate coroutine wrappers for block subsystem. 
The program parses one or several concatenated c files from stdin, -searches for functions with the 'generated_co_wrapper' specifier +searches for functions with the 'co_wrapper' specifier and generates corresponding wrappers on stdout. Usage: block-coroutine-wrapper.py generated-file.c FILE.[ch]... @@ -62,10 +62,28 @@ class ParamDecl: class FuncDecl: - def __init__(self, return_type: str, name: str, args: str) -> None: + def __init__(self, return_type: str, name: str, args: str, + variant: str) -> None: self.return_type = return_type.strip() self.name = name.strip() + self.struct_name = snake_to_camel(self.name) self.args = [ParamDecl(arg.strip()) for arg in args.split(',')] + self.create_only_co = 'mixed' not in variant + self.graph_rdlock = 'bdrv_rdlock' in variant + + subsystem, subname = self.name.split('_', 1) + self.co_name = f'{subsystem}_co_{subname}' + + t = self.args[0].type + if t == 'BlockDriverState *': + ctx = 'bdrv_get_aio_context(bs)' + elif t == 'BdrvChild *': + ctx = 'bdrv_get_aio_context(child->bs)' + elif t == 'BlockBackend *': + ctx = 'blk_get_aio_context(blk)' + else: + ctx = 'qemu_get_aio_context()' + self.ctx = ctx def gen_list(self, format: str) -> str: return ', '.join(format.format_map(arg.__dict__) for arg in self.args) @@ -74,17 +92,20 @@ class FuncDecl: return '\n'.join(format.format_map(arg.__dict__) for arg in self.args) -# Match wrappers declared with a generated_co_wrapper mark -func_decl_re = re.compile(r'^int\s*generated_co_wrapper\s*' +# Match wrappers declared with a co_wrapper mark +func_decl_re = re.compile(r'^(?P<return_type>[a-zA-Z][a-zA-Z0-9_]* [\*]?)' + r'\s*co_wrapper' + r'(?P<variant>(_[a-z][a-z0-9_]*)?)\s*' r'(?P<wrapper_name>[a-z][a-z0-9_]*)' r'\((?P<args>[^)]*)\);$', re.MULTILINE) def func_decl_iter(text: str) -> Iterator: for m in func_decl_re.finditer(text): - yield FuncDecl(return_type='int', + yield FuncDecl(return_type=m.group('return_type'), name=m.group('wrapper_name'), - args=m.group('args')) + args=m.group('args'), + variant=m.group('variant')) def snake_to_camel(func_name: str) -> str: @@ -97,24 +118,75 @@ def snake_to_camel(func_name: str) -> str: return ''.join(words) +def create_mixed_wrapper(func: FuncDecl) -> str: + """ + Checks if we are already in coroutine + """ + name = func.co_name + struct_name = func.struct_name + graph_assume_lock = 'assume_graph_lock();' if func.graph_rdlock else '' + + return f"""\ +{func.return_type} {func.name}({ func.gen_list('{decl}') }) +{{ + if (qemu_in_coroutine()) {{ + {graph_assume_lock} + return {name}({ func.gen_list('{name}') }); + }} else {{ + {struct_name} s = {{ + .poll_state.ctx = {func.ctx}, + .poll_state.in_progress = true, + +{ func.gen_block(' .{name} = {name},') } + }}; + + s.poll_state.co = qemu_coroutine_create({name}_entry, &s); + + bdrv_poll_co(&s.poll_state); + return s.ret; + }} +}}""" + + +def create_co_wrapper(func: FuncDecl) -> str: + """ + Assumes we are not in coroutine, and creates one + """ + name = func.co_name + struct_name = func.struct_name + return f"""\ +{func.return_type} {func.name}({ func.gen_list('{decl}') }) +{{ + {struct_name} s = {{ + .poll_state.ctx = {func.ctx}, + .poll_state.in_progress = true, + +{ func.gen_block(' .{name} = {name},') } + }}; + assert(!qemu_in_coroutine()); + + s.poll_state.co = qemu_coroutine_create({name}_entry, &s); + + bdrv_poll_co(&s.poll_state); + return s.ret; +}}""" + + def gen_wrapper(func: FuncDecl) -> str: assert not '_co_' in func.name - assert func.return_type == 'int' - assert func.args[0].type in ['BlockDriverState 
*', 'BdrvChild *', - 'BlockBackend *'] - subsystem, subname = func.name.split('_', 1) + name = func.co_name + struct_name = func.struct_name - name = f'{subsystem}_co_{subname}' + graph_lock='' + graph_unlock='' + if func.graph_rdlock: + graph_lock=' bdrv_graph_co_rdlock();' + graph_unlock=' bdrv_graph_co_rdunlock();' - t = func.args[0].type - if t == 'BlockDriverState *': - bs = 'bs' - elif t == 'BdrvChild *': - bs = 'child->bs' - else: - bs = 'blk_bs(blk)' - struct_name = snake_to_camel(name) + creation_function = create_mixed_wrapper + if func.create_only_co: + creation_function = create_co_wrapper return f"""\ /* @@ -123,6 +195,7 @@ def gen_wrapper(func: FuncDecl) -> str: typedef struct {struct_name} {{ BdrvPollCo poll_state; + {func.return_type} ret; { func.gen_block(' {decl};') } }} {struct_name}; @@ -130,29 +203,15 @@ static void coroutine_fn {name}_entry(void *opaque) {{ {struct_name} *s = opaque; - s->poll_state.ret = {name}({ func.gen_list('s->{name}') }); +{graph_lock} + s->ret = {name}({ func.gen_list('s->{name}') }); +{graph_unlock} s->poll_state.in_progress = false; aio_wait_kick(); }} -int {func.name}({ func.gen_list('{decl}') }) -{{ - if (qemu_in_coroutine()) {{ - return {name}({ func.gen_list('{name}') }); - }} else {{ - {struct_name} s = {{ - .poll_state.bs = {bs}, - .poll_state.in_progress = true, - -{ func.gen_block(' .{name} = {name},') } - }}; - - s.poll_state.co = qemu_coroutine_create({name}_entry, &s); - - return bdrv_poll_co(&s.poll_state); - }} -}}""" +{creation_function(func)}""" def gen_wrappers(input_code: str) -> str: diff --git a/scripts/coccinelle/return_directly.cocci b/scripts/coccinelle/return_directly.cocci index 4cf50e75ea..6cb1b3c99a 100644 --- a/scripts/coccinelle/return_directly.cocci +++ b/scripts/coccinelle/return_directly.cocci @@ -11,9 +11,8 @@ identifier F; - T VAR; ... when != VAR -- VAR = -+ return - E; +- VAR = (E); - return VAR; ++ return E; ... when != VAR } diff --git a/scripts/make-release b/scripts/make-release index 05b14ecc95..44a9d86a04 100755 --- a/scripts/make-release +++ b/scripts/make-release @@ -10,14 +10,22 @@ # This work is licensed under the terms of the GNU GPLv2 or later. # See the COPYING file in the top-level directory. +if [ $# -ne 2 ]; then + echo "Usage:" + echo " $0 gitrepo version" + exit 0 +fi + src="$1" version="$2" destination=qemu-${version} -git clone "${src}" ${destination} +git clone --single-branch -b "v${version}" -c advice.detachedHead=false \ + "${src}" ${destination} + pushd ${destination} -git checkout "v${version}" -git submodule update --init + +git submodule update --init --single-branch (cd roms/seabios && git describe --tags --long --dirty > .version) (cd roms/skiboot && ./make_version.sh > .version) # Fetch edk2 submodule's submodules, since it won't have access to them via @@ -28,7 +36,7 @@ git submodule update --init # submodule dependencies, so we continue to handle these on a case-by-case # basis for now. 
(cd roms/edk2 && \ - git submodule update --init -- \ + git submodule update --init --depth 1 -- \ ArmPkg/Library/ArmSoftFloatLib/berkeley-softfloat-3 \ BaseTools/Source/C/BrotliCompress/brotli \ CryptoPkg/Library/OpensslLib/openssl \ diff --git a/scripts/qapi/commands.py b/scripts/qapi/commands.py index 38ca38a7b9..79c5e5c3a9 100644 --- a/scripts/qapi/commands.py +++ b/scripts/qapi/commands.py @@ -64,7 +64,7 @@ def gen_call(name: str, elif arg_type: assert not arg_type.variants for memb in arg_type.members: - if memb.optional: + if memb.need_has(): argstr += 'arg.has_%s, ' % c_name(memb.name) argstr += 'arg.%s, ' % c_name(memb.name) @@ -83,7 +83,7 @@ def gen_call(name: str, trace_qmp_enter_%(name)s(req_json->str); } - ''', +''', upper=upper, name=name) ret += mcgen(''' @@ -124,13 +124,13 @@ def gen_call(name: str, trace_qmp_exit_%(name)s(ret_json->str, true); } - ''', +''', upper=upper, name=name) else: ret += mcgen(''' trace_qmp_exit_%(name)s("{}", true); - ''', +''', name=name) return ret @@ -316,7 +316,6 @@ class QAPISchemaGenCommandVisitor(QAPISchemaModularCVisitor): #include "qapi/error.h" #include "%(visit)s.h" #include "%(commands)s.h" - ''', commands=commands, visit=visit)) diff --git a/scripts/qapi/events.py b/scripts/qapi/events.py index 27b44c49f5..3cf01e96b6 100644 --- a/scripts/qapi/events.py +++ b/scripts/qapi/events.py @@ -60,7 +60,7 @@ def gen_param_var(typ: QAPISchemaObjectType) -> str: for memb in typ.members: ret += sep sep = ', ' - if memb.optional: + if memb.need_has(): ret += 'has_' + c_name(memb.name) + sep if memb.type.name == 'str': # Cast away const added in build_params() @@ -196,7 +196,6 @@ class QAPISchemaGenEventVisitor(QAPISchemaModularCVisitor): #include "qapi/error.h" #include "qapi/qmp/qdict.h" #include "qapi/qmp-event.h" - ''', events=events, visit=visit, prefix=self._prefix)) diff --git a/scripts/qapi/gen.py b/scripts/qapi/gen.py index 113b49134d..b5a8d03e8e 100644 --- a/scripts/qapi/gen.py +++ b/scripts/qapi/gen.py @@ -121,7 +121,7 @@ def build_params(arg_type: Optional[QAPISchemaObjectType], for memb in arg_type.members: ret += sep sep = ', ' - if memb.optional: + if memb.need_has(): ret += 'bool has_%s, ' % c_name(memb.name) ret += '%s %s' % (memb.type.c_param_type(), c_name(memb.name)) diff --git a/scripts/qapi/schema.py b/scripts/qapi/schema.py index 3728340c37..cd8661125c 100644 --- a/scripts/qapi/schema.py +++ b/scripts/qapi/schema.py @@ -253,6 +253,11 @@ class QAPISchemaType(QAPISchemaEntity): return None return self.name + def need_has_if_optional(self): + # When FOO is a pointer, has_FOO == !!FOO, i.e. has_FOO is redundant. + # Except for arrays; see QAPISchemaArrayType.need_has_if_optional(). + return not self.c_type().endswith(POINTER_SUFFIX) + def check(self, schema): QAPISchemaEntity.check(self, schema) for feat in self.features: @@ -352,6 +357,11 @@ class QAPISchemaArrayType(QAPISchemaType): self._element_type_name = element_type self.element_type = None + def need_has_if_optional(self): + # When FOO is an array, we still need has_FOO to distinguish + # absent (!has_FOO) from present and empty (has_FOO && !FOO). 
+ return True + def check(self, schema): super().check(schema) self.element_type = schema.resolve_type( @@ -745,6 +755,10 @@ class QAPISchemaObjectTypeMember(QAPISchemaMember): self.optional = optional self.features = features or [] + def need_has(self): + assert self.type + return self.optional and self.type.need_has_if_optional() + def check(self, schema): assert self.defined_in self.type = schema.resolve_type(self._type_name, self.info, diff --git a/scripts/qapi/types.py b/scripts/qapi/types.py index 477d027001..c39d054d2c 100644 --- a/scripts/qapi/types.py +++ b/scripts/qapi/types.py @@ -142,7 +142,7 @@ def gen_struct_members(members: List[QAPISchemaObjectTypeMember]) -> str: ret = '' for memb in members: ret += memb.ifcond.gen_if() - if memb.optional: + if memb.need_has(): ret += mcgen(''' bool has_%(c_name)s; ''', diff --git a/scripts/qapi/visit.py b/scripts/qapi/visit.py index 380fa197f5..26a584ee4c 100644 --- a/scripts/qapi/visit.py +++ b/scripts/qapi/visit.py @@ -71,6 +71,16 @@ bool visit_type_%(c_name)s_members(Visitor *v, %(c_name)s *obj, Error **errp) ''', c_name=c_name(name)) + sep = '' + for memb in members: + if memb.optional and not memb.need_has(): + ret += mcgen(''' + bool has_%(c_name)s = !!obj->%(c_name)s; +''', + c_name=c_name(memb.name)) + sep = '\n' + ret += sep + if base: ret += mcgen(''' if (!visit_type_%(c_type)s_members(v, (%(c_type)s *)obj, errp)) { @@ -82,10 +92,13 @@ bool visit_type_%(c_name)s_members(Visitor *v, %(c_name)s *obj, Error **errp) for memb in members: ret += memb.ifcond.gen_if() if memb.optional: + has = 'has_' + c_name(memb.name) + if memb.need_has(): + has = 'obj->' + has ret += mcgen(''' - if (visit_optional(v, "%(name)s", &obj->has_%(c_name)s)) { + if (visit_optional(v, "%(name)s", &%(has)s)) { ''', - name=memb.name, c_name=c_name(memb.name)) + name=memb.name, has=has) indent.increase() special_features = gen_special_features(memb.features) if special_features != '0': diff --git a/semihosting/console.c b/semihosting/console.c index 0f976fe8cb..5d61e8207e 100644 --- a/semihosting/console.c +++ b/semihosting/console.c @@ -43,10 +43,8 @@ static SemihostingConsole console; static int console_can_read(void *opaque) { SemihostingConsole *c = opaque; - int ret; g_assert(qemu_mutex_iothread_locked()); - ret = (int) fifo8_num_free(&c->fifo); - return ret; + return (int)fifo8_num_free(&c->fifo); } static void console_wake_up(gpointer data, gpointer user_data) diff --git a/softmmu/memory.c b/softmmu/memory.c index bc0be3f62c..e05332d07f 100644 --- a/softmmu/memory.c +++ b/softmmu/memory.c @@ -2372,20 +2372,15 @@ void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr, int memory_region_get_fd(MemoryRegion *mr) { - int fd; - RCU_READ_LOCK_GUARD(); while (mr->alias) { mr = mr->alias; } - fd = mr->ram_block->fd; - - return fd; + return mr->ram_block->fd; } void *memory_region_get_ram_ptr(MemoryRegion *mr) { - void *ptr; uint64_t offset = 0; RCU_READ_LOCK_GUARD(); @@ -2394,9 +2389,7 @@ void *memory_region_get_ram_ptr(MemoryRegion *mr) mr = mr->alias; } assert(mr->ram_block); - ptr = qemu_map_ram_ptr(mr->ram_block, offset); - - return ptr; + return qemu_map_ram_ptr(mr->ram_block, offset); } MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset) diff --git a/softmmu/physmem.c b/softmmu/physmem.c index 1b606a3002..edec095c7a 100644 --- a/softmmu/physmem.c +++ b/softmmu/physmem.c @@ -3236,7 +3236,6 @@ void *address_space_map(AddressSpace *as, hwaddr len = *plen; hwaddr l, xlat; MemoryRegion *mr; - void *ptr; FlatView *fv; if (len == 0) { @@ 
-3275,9 +3274,7 @@ void *address_space_map(AddressSpace *as, *plen = flatview_extend_translation(fv, addr, len, mr, xlat, l, is_write, attrs); fuzz_dma_read_cb(addr, *plen, mr); - ptr = qemu_ram_ptr_length(mr->ram_block, xlat, plen, true); - - return ptr; + return qemu_ram_ptr_length(mr->ram_block, xlat, plen, true); } /* Unmaps a memory region previously mapped by address_space_map(). @@ -3545,15 +3542,13 @@ bool cpu_physical_memory_is_io(hwaddr phys_addr) { MemoryRegion*mr; hwaddr l = 1; - bool res; RCU_READ_LOCK_GUARD(); mr = address_space_translate(&address_space_memory, phys_addr, &phys_addr, &l, false, MEMTXATTRS_UNSPECIFIED); - res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr)); - return res; + return !(memory_region_is_ram(mr) || memory_region_is_romd(mr)); } int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque) diff --git a/softmmu/runstate.c b/softmmu/runstate.c index 3dd83d5e5d..cab9f6fc07 100644 --- a/softmmu/runstate.c +++ b/softmmu/runstate.c @@ -484,18 +484,15 @@ void qemu_system_guest_panicked(GuestPanicInformation *info) */ if (panic_action == PANIC_ACTION_PAUSE || (panic_action == PANIC_ACTION_SHUTDOWN && shutdown_action == SHUTDOWN_ACTION_PAUSE)) { - qapi_event_send_guest_panicked(GUEST_PANIC_ACTION_PAUSE, - !!info, info); + qapi_event_send_guest_panicked(GUEST_PANIC_ACTION_PAUSE, info); vm_stop(RUN_STATE_GUEST_PANICKED); } else if (panic_action == PANIC_ACTION_SHUTDOWN || panic_action == PANIC_ACTION_EXIT_FAILURE) { - qapi_event_send_guest_panicked(GUEST_PANIC_ACTION_POWEROFF, - !!info, info); + qapi_event_send_guest_panicked(GUEST_PANIC_ACTION_POWEROFF, info); vm_stop(RUN_STATE_GUEST_PANICKED); qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_PANIC); } else { - qapi_event_send_guest_panicked(GUEST_PANIC_ACTION_RUN, - !!info, info); + qapi_event_send_guest_panicked(GUEST_PANIC_ACTION_RUN, info); } if (info) { @@ -522,13 +519,8 @@ void qemu_system_guest_panicked(GuestPanicInformation *info) void qemu_system_guest_crashloaded(GuestPanicInformation *info) { qemu_log_mask(LOG_GUEST_ERROR, "Guest crash loaded"); - - qapi_event_send_guest_crashloaded(GUEST_PANIC_ACTION_RUN, - !!info, info); - - if (info) { - qapi_free_GuestPanicInformation(info); - } + qapi_event_send_guest_crashloaded(GUEST_PANIC_ACTION_RUN, info); + qapi_free_GuestPanicInformation(info); } void qemu_system_reset_request(ShutdownCause reason) diff --git a/softmmu/vl.c b/softmmu/vl.c index 5115221efe..798e1dc933 100644 --- a/softmmu/vl.c +++ b/softmmu/vl.c @@ -611,7 +611,7 @@ static int parse_add_fd(void *opaque, QemuOpts *opts, Error **errp) } /* add the duplicate fd, and optionally the opaque string, to the fd set */ - fdinfo = monitor_fdset_add_fd(dupfd, true, fdset_id, !!fd_opaque, fd_opaque, + fdinfo = monitor_fdset_add_fd(dupfd, true, fdset_id, fd_opaque, &error_abort); g_free(fdinfo); diff --git a/stubs/graph-lock.c b/stubs/graph-lock.c new file mode 100644 index 0000000000..177aa0a8ba --- /dev/null +++ b/stubs/graph-lock.c @@ -0,0 +1,10 @@ +#include "qemu/osdep.h" +#include "block/graph-lock.h" + +void register_aiocontext(AioContext *ctx) +{ +} + +void unregister_aiocontext(AioContext *ctx) +{ +} diff --git a/stubs/meson.build b/stubs/meson.build index c96a74f095..981585cbdf 100644 --- a/stubs/meson.build +++ b/stubs/meson.build @@ -13,6 +13,7 @@ stub_ss.add(files('error-printf.c')) stub_ss.add(files('fdset.c')) stub_ss.add(files('gdbstub.c')) stub_ss.add(files('get-vm-name.c')) +stub_ss.add(files('graph-lock.c')) if linux_io_uring.found() stub_ss.add(files('io_uring.c')) endif 
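Most of the hunks above follow one QAPI convention change: optional members whose C type is a pointer no longer get a generated has_FOO flag, since a NULL pointer already encodes "absent", while optional scalars and lists keep the flag (a list must still distinguish "absent" from "present but empty"). A minimal sketch of what a generated struct and its presence checks might look like after this change follows; the type and member names here are invented for illustration and are not taken from the patch.

/* Hypothetical example only -- names are made up, not generated QAPI code. */
#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>

typedef struct ExampleStringList ExampleStringList;
struct ExampleStringList {
    ExampleStringList *next;
    char *value;
};

typedef struct ExampleOptions {
    char *name;                 /* optional str: NULL means absent, no has_name */
    bool has_count;             /* optional int: flag kept, 0 is a valid value */
    int64_t count;
    bool has_items;             /* optional list: !has_items = absent,          */
    ExampleStringList *items;   /* has_items && !items = present but empty      */
} ExampleOptions;

static int64_t example_use(const ExampleOptions *opts)
{
    if (opts->name) {           /* before the change this was opts->has_name */
        /* ... use opts->name ... */
    }
    return opts->has_count ? opts->count : 0;
}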
diff --git a/stubs/qdev.c b/stubs/qdev.c index 187659f707..6869f6f90a 100644 --- a/stubs/qdev.c +++ b/stubs/qdev.c @@ -15,15 +15,13 @@ #include "qemu/osdep.h" #include "qapi/qapi-events-qdev.h" -void qapi_event_send_device_deleted(bool has_device, - const char *device, +void qapi_event_send_device_deleted(const char *device, const char *path) { /* Nothing to do. */ } -void qapi_event_send_device_unplug_guest_error(bool has_device, - const char *device, +void qapi_event_send_device_unplug_guest_error(const char *device, const char *path) { /* Nothing to do. */ diff --git a/target/arm/cpu-qom.h b/target/arm/cpu-qom.h index 64c44cef2d..514c22ced9 100644 --- a/target/arm/cpu-qom.h +++ b/target/arm/cpu-qom.h @@ -43,7 +43,7 @@ void aarch64_cpu_register(const ARMCPUInfo *info); /** * ARMCPUClass: * @parent_realize: The parent class' realize handler. - * @parent_reset: The parent class' reset handler. + * @parent_phases: The parent class' reset phase handlers. * * An ARM CPU model. */ @@ -54,7 +54,7 @@ struct ARMCPUClass { const ARMCPUInfo *info; DeviceRealize parent_realize; - DeviceReset parent_reset; + ResettablePhases parent_phases; }; diff --git a/target/arm/cpu.c b/target/arm/cpu.c index 38d066c294..2fa022f62b 100644 --- a/target/arm/cpu.c +++ b/target/arm/cpu.c @@ -202,14 +202,16 @@ static void cp_reg_check_reset(gpointer key, gpointer value, gpointer opaque) assert(oldvalue == newvalue); } -static void arm_cpu_reset(DeviceState *dev) +static void arm_cpu_reset_hold(Object *obj) { - CPUState *s = CPU(dev); + CPUState *s = CPU(obj); ARMCPU *cpu = ARM_CPU(s); ARMCPUClass *acc = ARM_CPU_GET_CLASS(cpu); CPUARMState *env = &cpu->env; - acc->parent_reset(dev); + if (acc->parent_phases.hold) { + acc->parent_phases.hold(obj); + } memset(env, 0, offsetof(CPUARMState, end_reset_fields)); @@ -528,7 +530,7 @@ static void arm_cpu_reset(DeviceState *dev) arm_rebuild_hflags(env); } -#ifndef CONFIG_USER_ONLY +#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY) static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx, unsigned int target_el, @@ -725,7 +727,8 @@ static bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request) cc->tcg_ops->do_interrupt(cs); return true; } -#endif /* !CONFIG_USER_ONLY */ + +#endif /* CONFIG_TCG && !CONFIG_USER_ONLY */ void arm_cpu_update_virq(ARMCPU *cpu) { @@ -2210,12 +2213,15 @@ static void arm_cpu_class_init(ObjectClass *oc, void *data) ARMCPUClass *acc = ARM_CPU_CLASS(oc); CPUClass *cc = CPU_CLASS(acc); DeviceClass *dc = DEVICE_CLASS(oc); + ResettableClass *rc = RESETTABLE_CLASS(oc); device_class_set_parent_realize(dc, arm_cpu_realizefn, &acc->parent_realize); device_class_set_props(dc, arm_cpu_properties); - device_class_set_parent_reset(dc, arm_cpu_reset, &acc->parent_reset); + + resettable_class_set_parent_phases(rc, NULL, arm_cpu_reset_hold, NULL, + &acc->parent_phases); cc->class_by_name = arm_cpu_class_by_name; cc->has_work = arm_cpu_has_work; diff --git a/target/arm/cpu.h b/target/arm/cpu.h index 9aeed3c848..2b4bd20f9d 100644 --- a/target/arm/cpu.h +++ b/target/arm/cpu.h @@ -3757,6 +3757,16 @@ static inline bool isar_feature_aa32_tts2uxn(const ARMISARegisters *id) return FIELD_EX32(id->id_mmfr4, ID_MMFR4, XNX) != 0; } +static inline bool isar_feature_aa32_half_evt(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_mmfr4, ID_MMFR4, EVT) >= 1; +} + +static inline bool isar_feature_aa32_evt(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_mmfr4, ID_MMFR4, EVT) >= 2; +} + static inline bool isar_feature_aa32_dit(const 
ARMISARegisters *id) { return FIELD_EX32(id->id_pfr0, ID_PFR0, DIT) != 0; @@ -4029,6 +4039,16 @@ static inline bool isar_feature_aa64_ids(const ARMISARegisters *id) return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, IDS) != 0; } +static inline bool isar_feature_aa64_half_evt(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, EVT) >= 1; +} + +static inline bool isar_feature_aa64_evt(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, EVT) >= 2; +} + static inline bool isar_feature_aa64_bti(const ARMISARegisters *id) { return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, BT) != 0; @@ -4313,6 +4333,16 @@ static inline bool isar_feature_any_ras(const ARMISARegisters *id) return isar_feature_aa64_ras(id) || isar_feature_aa32_ras(id); } +static inline bool isar_feature_any_half_evt(const ARMISARegisters *id) +{ + return isar_feature_aa64_half_evt(id) || isar_feature_aa32_half_evt(id); +} + +static inline bool isar_feature_any_evt(const ARMISARegisters *id) +{ + return isar_feature_aa64_evt(id) || isar_feature_aa32_evt(id); +} + /* * Forward to the above feature tests given an ARMCPU pointer. */ diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c index 3d74f134f5..2cf2ca4ce5 100644 --- a/target/arm/cpu64.c +++ b/target/arm/cpu64.c @@ -792,6 +792,74 @@ static void aarch64_a53_initfn(Object *obj) define_cortex_a72_a57_a53_cp_reginfo(cpu); } +static void aarch64_a55_initfn(Object *obj) +{ + ARMCPU *cpu = ARM_CPU(obj); + + cpu->dtb_compatible = "arm,cortex-a55"; + set_feature(&cpu->env, ARM_FEATURE_V8); + set_feature(&cpu->env, ARM_FEATURE_NEON); + set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER); + set_feature(&cpu->env, ARM_FEATURE_AARCH64); + set_feature(&cpu->env, ARM_FEATURE_CBAR_RO); + set_feature(&cpu->env, ARM_FEATURE_EL2); + set_feature(&cpu->env, ARM_FEATURE_EL3); + set_feature(&cpu->env, ARM_FEATURE_PMU); + + /* Ordered by B2.4 AArch64 registers by functional group */ + cpu->clidr = 0x82000023; + cpu->ctr = 0x84448004; /* L1Ip = VIPT */ + cpu->dcz_blocksize = 4; /* 64 bytes */ + cpu->isar.id_aa64dfr0 = 0x0000000010305408ull; + cpu->isar.id_aa64isar0 = 0x0000100010211120ull; + cpu->isar.id_aa64isar1 = 0x0000000000100001ull; + cpu->isar.id_aa64mmfr0 = 0x0000000000101122ull; + cpu->isar.id_aa64mmfr1 = 0x0000000010212122ull; + cpu->isar.id_aa64mmfr2 = 0x0000000000001011ull; + cpu->isar.id_aa64pfr0 = 0x0000000010112222ull; + cpu->isar.id_aa64pfr1 = 0x0000000000000010ull; + cpu->id_afr0 = 0x00000000; + cpu->isar.id_dfr0 = 0x04010088; + cpu->isar.id_isar0 = 0x02101110; + cpu->isar.id_isar1 = 0x13112111; + cpu->isar.id_isar2 = 0x21232042; + cpu->isar.id_isar3 = 0x01112131; + cpu->isar.id_isar4 = 0x00011142; + cpu->isar.id_isar5 = 0x01011121; + cpu->isar.id_isar6 = 0x00000010; + cpu->isar.id_mmfr0 = 0x10201105; + cpu->isar.id_mmfr1 = 0x40000000; + cpu->isar.id_mmfr2 = 0x01260000; + cpu->isar.id_mmfr3 = 0x02122211; + cpu->isar.id_mmfr4 = 0x00021110; + cpu->isar.id_pfr0 = 0x10010131; + cpu->isar.id_pfr1 = 0x00011011; + cpu->isar.id_pfr2 = 0x00000011; + cpu->midr = 0x412FD050; /* r2p0 */ + cpu->revidr = 0; + + /* From B2.23 CCSIDR_EL1 */ + cpu->ccsidr[0] = 0x700fe01a; /* 32KB L1 dcache */ + cpu->ccsidr[1] = 0x200fe01a; /* 32KB L1 icache */ + cpu->ccsidr[2] = 0x703fe07a; /* 512KB L2 cache */ + + /* From B2.96 SCTLR_EL3 */ + cpu->reset_sctlr = 0x30c50838; + + /* From B4.45 ICH_VTR_EL2 */ + cpu->gic_num_lrs = 4; + cpu->gic_vpribits = 5; + cpu->gic_vprebits = 5; + cpu->gic_pribits = 5; + + cpu->isar.mvfr0 = 0x10110222; + cpu->isar.mvfr1 = 
0x13211111; + cpu->isar.mvfr2 = 0x00000043; + + /* From D5.4 AArch64 PMU register summary */ + cpu->isar.reset_pmcr_el0 = 0x410b3000; +} + static void aarch64_a72_initfn(Object *obj) { ARMCPU *cpu = ARM_CPU(obj); @@ -1186,6 +1254,7 @@ static void aarch64_max_initfn(Object *obj) t = FIELD_DP64(t, ID_AA64MMFR2, FWB, 1); /* FEAT_S2FWB */ t = FIELD_DP64(t, ID_AA64MMFR2, TTL, 1); /* FEAT_TTL */ t = FIELD_DP64(t, ID_AA64MMFR2, BBM, 2); /* FEAT_BBM at level 2 */ + t = FIELD_DP64(t, ID_AA64MMFR2, EVT, 2); /* FEAT_EVT */ t = FIELD_DP64(t, ID_AA64MMFR2, E0PD, 1); /* FEAT_E0PD */ cpu->isar.id_aa64mmfr2 = t; @@ -1243,6 +1312,7 @@ static const ARMCPUInfo aarch64_cpus[] = { { .name = "cortex-a35", .initfn = aarch64_a35_initfn }, { .name = "cortex-a57", .initfn = aarch64_a57_initfn }, { .name = "cortex-a53", .initfn = aarch64_a53_initfn }, + { .name = "cortex-a55", .initfn = aarch64_a55_initfn }, { .name = "cortex-a72", .initfn = aarch64_a72_initfn }, { .name = "cortex-a76", .initfn = aarch64_a76_initfn }, { .name = "a64fx", .initfn = aarch64_a64fx_initfn }, diff --git a/target/arm/cpu_tcg.c b/target/arm/cpu_tcg.c index 9a2cef7d05..568cbcfc52 100644 --- a/target/arm/cpu_tcg.c +++ b/target/arm/cpu_tcg.c @@ -65,6 +65,7 @@ void aa32_max_features(ARMCPU *cpu) t = FIELD_DP32(t, ID_MMFR4, AC2, 1); /* ACTLR2, HACTLR2 */ t = FIELD_DP32(t, ID_MMFR4, CNP, 1); /* FEAT_TTCNP */ t = FIELD_DP32(t, ID_MMFR4, XNX, 1); /* FEAT_XNX */ + t = FIELD_DP32(t, ID_MMFR4, EVT, 2); /* FEAT_EVT */ cpu->isar.id_mmfr4 = t; t = cpu->isar.id_mmfr5; diff --git a/target/arm/helper.c b/target/arm/helper.c index d8c8223ec3..bac2ea62c4 100644 --- a/target/arm/helper.c +++ b/target/arm/helper.c @@ -362,6 +362,30 @@ static CPAccessResult access_ttlb(CPUARMState *env, const ARMCPRegInfo *ri, return CP_ACCESS_OK; } +/* Check for traps from EL1 due to HCR_EL2.TTLB or TTLBIS. */ +static CPAccessResult access_ttlbis(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + if (arm_current_el(env) == 1 && + (arm_hcr_el2_eff(env) & (HCR_TTLB | HCR_TTLBIS))) { + return CP_ACCESS_TRAP_EL2; + } + return CP_ACCESS_OK; +} + +#ifdef TARGET_AARCH64 +/* Check for traps from EL1 due to HCR_EL2.TTLB or TTLBOS. 
*/ +static CPAccessResult access_ttlbos(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + if (arm_current_el(env) == 1 && + (arm_hcr_el2_eff(env) & (HCR_TTLB | HCR_TTLBOS))) { + return CP_ACCESS_TRAP_EL2; + } + return CP_ACCESS_OK; +} +#endif + static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { ARMCPU *cpu = env_archcpu(env); @@ -1871,11 +1895,12 @@ static void scr_reset(CPUARMState *env, const ARMCPRegInfo *ri) scr_write(env, ri, 0); } -static CPAccessResult access_aa64_tid2(CPUARMState *env, - const ARMCPRegInfo *ri, - bool isread) +static CPAccessResult access_tid4(CPUARMState *env, + const ARMCPRegInfo *ri, + bool isread) { - if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID2)) { + if (arm_current_el(env) == 1 && + (arm_hcr_el2_eff(env) & (HCR_TID2 | HCR_TID4))) { return CP_ACCESS_TRAP_EL2; } @@ -2106,12 +2131,12 @@ static const ARMCPRegInfo v7_cp_reginfo[] = { { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0, .access = PL1_R, - .accessfn = access_aa64_tid2, + .accessfn = access_tid4, .readfn = ccsidr_read, .type = ARM_CP_NO_RAW }, { .name = "CSSELR", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0, .access = PL1_RW, - .accessfn = access_aa64_tid2, + .accessfn = access_tid4, .writefn = csselr_write, .resetvalue = 0, .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s), offsetof(CPUARMState, cp15.csselr_ns) } }, @@ -2206,16 +2231,16 @@ static const ARMCPRegInfo v7_cp_reginfo[] = { static const ARMCPRegInfo v7mp_cp_reginfo[] = { /* 32 bit TLB invalidates, Inner Shareable */ { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0, - .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, + .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis, .writefn = tlbiall_is_write }, { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1, - .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, + .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis, .writefn = tlbimva_is_write }, { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2, - .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, + .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis, .writefn = tlbiasid_is_write }, { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3, - .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, + .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis, .writefn = tlbimvaa_is_write }, }; @@ -4249,9 +4274,7 @@ static CPAccessResult aa64_cacheop_poc_access(CPUARMState *env, return CP_ACCESS_OK; } -static CPAccessResult aa64_cacheop_pou_access(CPUARMState *env, - const ARMCPRegInfo *ri, - bool isread) +static CPAccessResult do_cacheop_pou_access(CPUARMState *env, uint64_t hcrflags) { /* Cache invalidate/clean to Point of Unification... */ switch (arm_current_el(env)) { @@ -4262,8 +4285,8 @@ static CPAccessResult aa64_cacheop_pou_access(CPUARMState *env, } /* fall through */ case 1: - /* ... EL1 must trap to EL2 if HCR_EL2.TPU is set. */ - if (arm_hcr_el2_eff(env) & HCR_TPU) { + /* ... EL1 must trap to EL2 if relevant HCR_EL2 flags are set. 
*/ + if (arm_hcr_el2_eff(env) & hcrflags) { return CP_ACCESS_TRAP_EL2; } break; @@ -4271,6 +4294,18 @@ static CPAccessResult aa64_cacheop_pou_access(CPUARMState *env, return CP_ACCESS_OK; } +static CPAccessResult access_ticab(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + return do_cacheop_pou_access(env, HCR_TICAB | HCR_TPU); +} + +static CPAccessResult access_tocu(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + return do_cacheop_pou_access(env, HCR_TOCU | HCR_TPU); +} + /* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions * Page D4-1736 (DDI0487A.b) */ @@ -4911,15 +4946,15 @@ static const ARMCPRegInfo v8_cp_reginfo[] = { { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0, .access = PL1_W, .type = ARM_CP_NOP, - .accessfn = aa64_cacheop_pou_access }, + .accessfn = access_ticab }, { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0, .access = PL1_W, .type = ARM_CP_NOP, - .accessfn = aa64_cacheop_pou_access }, + .accessfn = access_tocu }, { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1, .access = PL0_W, .type = ARM_CP_NOP, - .accessfn = aa64_cacheop_pou_access }, + .accessfn = access_tocu }, { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1, .access = PL1_W, .accessfn = aa64_cacheop_poc_access, @@ -4937,7 +4972,7 @@ static const ARMCPRegInfo v8_cp_reginfo[] = { { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1, .access = PL0_W, .type = ARM_CP_NOP, - .accessfn = aa64_cacheop_pou_access }, + .accessfn = access_tocu }, { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1, .access = PL0_W, .type = ARM_CP_NOP, @@ -4948,27 +4983,27 @@ static const ARMCPRegInfo v8_cp_reginfo[] = { /* TLBI operations */ { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0, - .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, + .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_vmalle1is_write }, { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1, - .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, + .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_vae1is_write }, { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2, - .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, + .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_vmalle1is_write }, { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3, - .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, + .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_vae1is_write }, { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5, - .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, + .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_vae1is_write }, { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7, - .access = PL1_W, .accessfn = access_ttlb, .type = 
ARM_CP_NO_RAW, + .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_vae1is_write }, { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0, @@ -5078,10 +5113,10 @@ static const ARMCPRegInfo v8_cp_reginfo[] = { #endif /* TLB invalidate last level of translation table walk */ { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5, - .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, + .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis, .writefn = tlbimva_is_write }, { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7, - .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, + .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis, .writefn = tlbimvaa_is_write }, { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5, .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, @@ -5114,13 +5149,13 @@ static const ARMCPRegInfo v8_cp_reginfo[] = { .writefn = tlbiipas2is_hyp_write }, /* 32 bit cache operations */ { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0, - .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access }, + .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_ticab }, { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6, .type = ARM_CP_NOP, .access = PL1_W }, { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0, - .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access }, + .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tocu }, { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1, - .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access }, + .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tocu }, { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6, .type = ARM_CP_NOP, .access = PL1_W }, { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7, @@ -5134,7 +5169,7 @@ static const ARMCPRegInfo v8_cp_reginfo[] = { { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2, .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw }, { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1, - .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access }, + .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tocu }, { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1, .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access }, { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2, @@ -5267,6 +5302,12 @@ static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask) } } + if (cpu_isar_feature(any_evt, cpu)) { + valid_mask |= HCR_TTLBIS | HCR_TTLBOS | HCR_TICAB | HCR_TOCU | HCR_TID4; + } else if (cpu_isar_feature(any_half_evt, cpu)) { + valid_mask |= HCR_TICAB | HCR_TOCU | HCR_TID4; + } + /* Clear RES0 bits. 
*/ value &= valid_mask; @@ -6720,35 +6761,35 @@ static const ARMCPRegInfo pauth_reginfo[] = { static const ARMCPRegInfo tlbirange_reginfo[] = { { .name = "TLBI_RVAE1IS", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 1, - .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, + .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_rvae1is_write }, { .name = "TLBI_RVAAE1IS", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 3, - .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, + .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_rvae1is_write }, { .name = "TLBI_RVALE1IS", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 5, - .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, + .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_rvae1is_write }, { .name = "TLBI_RVAALE1IS", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 7, - .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, + .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_rvae1is_write }, { .name = "TLBI_RVAE1OS", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1, - .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, + .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_rvae1is_write }, { .name = "TLBI_RVAAE1OS", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 3, - .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, + .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_rvae1is_write }, { .name = "TLBI_RVALE1OS", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 5, - .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, + .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_rvae1is_write }, { .name = "TLBI_RVAALE1OS", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 7, - .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, + .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_rvae1is_write }, { .name = "TLBI_RVAE1", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1, @@ -6835,27 +6876,27 @@ static const ARMCPRegInfo tlbirange_reginfo[] = { static const ARMCPRegInfo tlbios_reginfo[] = { { .name = "TLBI_VMALLE1OS", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 0, - .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, + .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_vmalle1is_write }, { .name = "TLBI_VAE1OS", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 1, - .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, + .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_vae1is_write }, { .name = "TLBI_ASIDE1OS", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 2, - .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, + .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_vmalle1is_write }, { .name = "TLBI_VAAE1OS", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn 
= 8, .crm = 1, .opc2 = 3, - .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, + .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_vae1is_write }, { .name = "TLBI_VALE1OS", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 5, - .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, + .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_vae1is_write }, { .name = "TLBI_VAALE1OS", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 7, - .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, + .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_vae1is_write }, { .name = "TLBI_ALLE2OS", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 0, @@ -7241,7 +7282,7 @@ static const ARMCPRegInfo ccsidr2_reginfo[] = { { .name = "CCSIDR2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 2, .access = PL1_R, - .accessfn = access_aa64_tid2, + .accessfn = access_tid4, .readfn = ccsidr2_read, .type = ARM_CP_NO_RAW }, }; @@ -7541,7 +7582,7 @@ void register_cp_regs_for_features(ARMCPU *cpu) .name = "CLIDR", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1, .access = PL1_R, .type = ARM_CP_CONST, - .accessfn = access_aa64_tid2, + .accessfn = access_tid4, .resetvalue = cpu->clidr }; define_one_arm_cp_reg(cpu, &clidr); diff --git a/target/arm/kvm-consts.h b/target/arm/kvm-consts.h index faacf96fdc..09967ec5e6 100644 --- a/target/arm/kvm-consts.h +++ b/target/arm/kvm-consts.h @@ -14,16 +14,16 @@ #ifndef ARM_KVM_CONSTS_H #define ARM_KVM_CONSTS_H +#ifdef NEED_CPU_H #ifdef CONFIG_KVM #include <linux/kvm.h> #include <linux/psci.h> - #define MISMATCH_CHECK(X, Y) QEMU_BUILD_BUG_ON(X != Y) +#endif +#endif -#else - +#ifndef MISMATCH_CHECK #define MISMATCH_CHECK(X, Y) QEMU_BUILD_BUG_ON(0) - #endif #define CP_REG_SIZE_SHIFT 52 diff --git a/target/arm/monitor.c b/target/arm/monitor.c index 80c64fa355..ecdd5ee817 100644 --- a/target/arm/monitor.c +++ b/target/arm/monitor.c @@ -221,7 +221,6 @@ CpuModelExpansionInfo *qmp_query_cpu_model_expansion(CpuModelExpansionType type, qobject_unref(qdict_out); } else { expansion_info->model->props = QOBJECT(qdict_out); - expansion_info->model->has_props = true; } object_unref(obj); diff --git a/target/avr/cpu-qom.h b/target/avr/cpu-qom.h index b5c3507d6d..01ea5f160b 100644 --- a/target/avr/cpu-qom.h +++ b/target/avr/cpu-qom.h @@ -31,7 +31,7 @@ OBJECT_DECLARE_CPU_TYPE(AVRCPU, AVRCPUClass, AVR_CPU) /** * AVRCPUClass: * @parent_realize: The parent class' realize handler. - * @parent_reset: The parent class' reset handler. + * @parent_phases: The parent class' reset phase handlers. * * A AVR CPU model. 
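The target/arm/kvm-consts.h hunk above is easier to read as the code it produces: the real MISMATCH_CHECK comparison is only defined when both NEED_CPU_H and CONFIG_KVM are in effect, and every other configuration falls back to a no-op build assertion. Reconstructed from the + lines of that hunk, purely as a readability aid:

#ifdef NEED_CPU_H
#ifdef CONFIG_KVM
#include <linux/kvm.h>
#include <linux/psci.h>
/* Cross-check QEMU's constants against the kernel's definitions. */
#define MISMATCH_CHECK(X, Y) QEMU_BUILD_BUG_ON(X != Y)
#endif
#endif

#ifndef MISMATCH_CHECK
/* No kernel headers in this configuration: keep the macro as a no-op. */
#define MISMATCH_CHECK(X, Y) QEMU_BUILD_BUG_ON(0)
#endif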
*/ @@ -40,7 +40,7 @@ struct AVRCPUClass { CPUClass parent_class; /*< public >*/ DeviceRealize parent_realize; - DeviceReset parent_reset; + ResettablePhases parent_phases; }; diff --git a/target/avr/cpu.c b/target/avr/cpu.c index c7295b488d..d0139804b9 100644 --- a/target/avr/cpu.c +++ b/target/avr/cpu.c @@ -67,14 +67,16 @@ static void avr_restore_state_to_opc(CPUState *cs, env->pc_w = data[0]; } -static void avr_cpu_reset(DeviceState *ds) +static void avr_cpu_reset_hold(Object *obj) { - CPUState *cs = CPU(ds); + CPUState *cs = CPU(obj); AVRCPU *cpu = AVR_CPU(cs); AVRCPUClass *mcc = AVR_CPU_GET_CLASS(cpu); CPUAVRState *env = &cpu->env; - mcc->parent_reset(ds); + if (mcc->parent_phases.hold) { + mcc->parent_phases.hold(obj); + } env->pc_w = 0; env->sregI = 1; @@ -223,9 +225,12 @@ static void avr_cpu_class_init(ObjectClass *oc, void *data) DeviceClass *dc = DEVICE_CLASS(oc); CPUClass *cc = CPU_CLASS(oc); AVRCPUClass *mcc = AVR_CPU_CLASS(oc); + ResettableClass *rc = RESETTABLE_CLASS(oc); device_class_set_parent_realize(dc, avr_cpu_realizefn, &mcc->parent_realize); - device_class_set_parent_reset(dc, avr_cpu_reset, &mcc->parent_reset); + + resettable_class_set_parent_phases(rc, NULL, avr_cpu_reset_hold, NULL, + &mcc->parent_phases); cc->class_by_name = avr_cpu_class_by_name; diff --git a/target/avr/cpu.h b/target/avr/cpu.h index 96419c0c2b..f19dd72926 100644 --- a/target/avr/cpu.h +++ b/target/avr/cpu.h @@ -215,8 +215,7 @@ static inline int cpu_interrupts_enabled(CPUAVRState *env) static inline uint8_t cpu_get_sreg(CPUAVRState *env) { - uint8_t sreg; - sreg = (env->sregC) << 0 + return (env->sregC) << 0 | (env->sregZ) << 1 | (env->sregN) << 2 | (env->sregV) << 3 @@ -224,7 +223,6 @@ static inline uint8_t cpu_get_sreg(CPUAVRState *env) | (env->sregH) << 5 | (env->sregT) << 6 | (env->sregI) << 7; - return sreg; } static inline void cpu_set_sreg(CPUAVRState *env, uint8_t sreg) diff --git a/target/cris/cpu-qom.h b/target/cris/cpu-qom.h index 71e8af0e70..431a1d536a 100644 --- a/target/cris/cpu-qom.h +++ b/target/cris/cpu-qom.h @@ -30,7 +30,7 @@ OBJECT_DECLARE_CPU_TYPE(CRISCPU, CRISCPUClass, CRIS_CPU) /** * CRISCPUClass: * @parent_realize: The parent class' realize handler. - * @parent_reset: The parent class' reset handler. + * @parent_phases: The parent class' reset phase handlers. * @vr: Version Register value. * * A CRIS CPU model. 
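The AVR hunks above show the conversion that repeats for CRIS, Hexagon, i386, LoongArch, m68k, MicroBlaze, MIPS, Nios II, OpenRISC, PowerPC, RISC-V, RX, SH4, SPARC, TriCore and Xtensa in the following hunks: the legacy DeviceReset handler becomes a Resettable "hold" phase, and the CPU class stores the parent's phase handlers instead of a single reset callback. A condensed sketch of the pattern, using a hypothetical FooCPU (the types are illustrative; the helper functions are the ones visible in the hunks):

static void foo_cpu_reset_hold(Object *obj)
{
    FooCPU *cpu = FOO_CPU(obj);                 /* illustrative types */
    FooCPUClass *fcc = FOO_CPU_GET_CLASS(cpu);

    /* Chain to the parent class' hold phase, if it registered one. */
    if (fcc->parent_phases.hold) {
        fcc->parent_phases.hold(obj);
    }

    /* ... target-specific register and state reset goes here ... */
}

static void foo_cpu_class_init(ObjectClass *oc, void *data)
{
    FooCPUClass *fcc = FOO_CPU_CLASS(oc);
    ResettableClass *rc = RESETTABLE_CLASS(oc);

    /* Only a hold phase is registered; enter and exit stay NULL. */
    resettable_class_set_parent_phases(rc, NULL, foo_cpu_reset_hold, NULL,
                                       &fcc->parent_phases);
}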
@@ -41,7 +41,7 @@ struct CRISCPUClass { /*< public >*/ DeviceRealize parent_realize; - DeviceReset parent_reset; + ResettablePhases parent_phases; uint32_t vr; }; diff --git a/target/cris/cpu.c b/target/cris/cpu.c index fb05dc6f9a..a6a93c2359 100644 --- a/target/cris/cpu.c +++ b/target/cris/cpu.c @@ -56,15 +56,17 @@ static bool cris_cpu_has_work(CPUState *cs) return cs->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI); } -static void cris_cpu_reset(DeviceState *dev) +static void cris_cpu_reset_hold(Object *obj) { - CPUState *s = CPU(dev); + CPUState *s = CPU(obj); CRISCPU *cpu = CRIS_CPU(s); CRISCPUClass *ccc = CRIS_CPU_GET_CLASS(cpu); CPUCRISState *env = &cpu->env; uint32_t vr; - ccc->parent_reset(dev); + if (ccc->parent_phases.hold) { + ccc->parent_phases.hold(obj); + } vr = env->pregs[PR_VR]; memset(env, 0, offsetof(CPUCRISState, end_reset_fields)); @@ -305,11 +307,13 @@ static void cris_cpu_class_init(ObjectClass *oc, void *data) DeviceClass *dc = DEVICE_CLASS(oc); CPUClass *cc = CPU_CLASS(oc); CRISCPUClass *ccc = CRIS_CPU_CLASS(oc); + ResettableClass *rc = RESETTABLE_CLASS(oc); device_class_set_parent_realize(dc, cris_cpu_realizefn, &ccc->parent_realize); - device_class_set_parent_reset(dc, cris_cpu_reset, &ccc->parent_reset); + resettable_class_set_parent_phases(rc, NULL, cris_cpu_reset_hold, NULL, + &ccc->parent_phases); cc->class_by_name = cris_cpu_class_by_name; cc->has_work = cris_cpu_has_work; diff --git a/target/hexagon/cpu.c b/target/hexagon/cpu.c index 03221fbdc2..658ca4ff78 100644 --- a/target/hexagon/cpu.c +++ b/target/hexagon/cpu.c @@ -281,14 +281,16 @@ static void hexagon_restore_state_to_opc(CPUState *cs, env->gpr[HEX_REG_PC] = data[0]; } -static void hexagon_cpu_reset(DeviceState *dev) +static void hexagon_cpu_reset_hold(Object *obj) { - CPUState *cs = CPU(dev); + CPUState *cs = CPU(obj); HexagonCPU *cpu = HEXAGON_CPU(cs); HexagonCPUClass *mcc = HEXAGON_CPU_GET_CLASS(cpu); CPUHexagonState *env = &cpu->env; - mcc->parent_reset(dev); + if (mcc->parent_phases.hold) { + mcc->parent_phases.hold(obj); + } set_default_nan_mode(1, &env->fp_status); set_float_detect_tininess(float_tininess_before_rounding, &env->fp_status); @@ -339,11 +341,13 @@ static void hexagon_cpu_class_init(ObjectClass *c, void *data) HexagonCPUClass *mcc = HEXAGON_CPU_CLASS(c); CPUClass *cc = CPU_CLASS(c); DeviceClass *dc = DEVICE_CLASS(c); + ResettableClass *rc = RESETTABLE_CLASS(c); device_class_set_parent_realize(dc, hexagon_cpu_realize, &mcc->parent_realize); - device_class_set_parent_reset(dc, hexagon_cpu_reset, &mcc->parent_reset); + resettable_class_set_parent_phases(rc, NULL, hexagon_cpu_reset_hold, NULL, + &mcc->parent_phases); cc->class_by_name = hexagon_cpu_class_by_name; cc->has_work = hexagon_cpu_has_work; diff --git a/target/hexagon/cpu.h b/target/hexagon/cpu.h index 2a65a57bab..794a0453fd 100644 --- a/target/hexagon/cpu.h +++ b/target/hexagon/cpu.h @@ -137,7 +137,7 @@ typedef struct HexagonCPUClass { CPUClass parent_class; /*< public >*/ DeviceRealize parent_realize; - DeviceReset parent_reset; + ResettablePhases parent_phases; } HexagonCPUClass; struct ArchCPU { diff --git a/target/i386/cpu-qom.h b/target/i386/cpu-qom.h index c557a522e1..2350f4ae60 100644 --- a/target/i386/cpu-qom.h +++ b/target/i386/cpu-qom.h @@ -42,7 +42,7 @@ typedef struct X86CPUModel X86CPUModel; * @migration_safe: See CpuDefinitionInfo::migration_safe * @static_model: See CpuDefinitionInfo::static * @parent_realize: The parent class' realize handler. - * @parent_reset: The parent class' reset handler. 
+ * @parent_phases: The parent class' reset phase handlers. * * An x86 CPU model or family. */ @@ -67,7 +67,7 @@ struct X86CPUClass { DeviceRealize parent_realize; DeviceUnrealize parent_unrealize; - DeviceReset parent_reset; + ResettablePhases parent_phases; }; diff --git a/target/i386/cpu-sysemu.c b/target/i386/cpu-sysemu.c index a6f47b7d11..fc97213a73 100644 --- a/target/i386/cpu-sysemu.c +++ b/target/i386/cpu-sysemu.c @@ -187,10 +187,8 @@ qmp_query_cpu_model_expansion(CpuModelExpansionType type, QDict *props = NULL; const char *base_name; - xc = x86_cpu_from_model(model->name, - model->has_props ? - qobject_to(QDict, model->props) : - NULL, &err); + xc = x86_cpu_from_model(model->name, qobject_to(QDict, model->props), + &err); if (err) { goto out; } @@ -198,7 +196,6 @@ qmp_query_cpu_model_expansion(CpuModelExpansionType type, props = qdict_new(); ret->model = g_new0(CpuModelInfo, 1); ret->model->props = QOBJECT(props); - ret->model->has_props = true; switch (type) { case CPU_MODEL_EXPANSION_TYPE_STATIC: diff --git a/target/i386/cpu.c b/target/i386/cpu.c index 22b681ca37..3410e5e470 100644 --- a/target/i386/cpu.c +++ b/target/i386/cpu.c @@ -4901,7 +4901,6 @@ static void x86_cpu_definition_entry(gpointer data, gpointer user_data) */ if (default_cpu_version != CPU_VERSION_LEGACY) { info->alias_of = x86_cpu_class_get_alias_of(cc); - info->has_alias_of = !!info->alias_of; } QAPI_LIST_PREPEND(*cpu_list, info); @@ -5878,9 +5877,9 @@ static void x86_cpu_set_sgxlepubkeyhash(CPUX86State *env) #endif } -static void x86_cpu_reset(DeviceState *dev) +static void x86_cpu_reset_hold(Object *obj) { - CPUState *s = CPU(dev); + CPUState *s = CPU(obj); X86CPU *cpu = X86_CPU(s); X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu); CPUX86State *env = &cpu->env; @@ -5888,7 +5887,9 @@ static void x86_cpu_reset(DeviceState *dev) uint64_t xcr0; int i; - xcc->parent_reset(dev); + if (xcc->parent_phases.hold) { + xcc->parent_phases.hold(obj); + } memset(env, 0, offsetof(CPUX86State, end_reset_fields)); @@ -7112,6 +7113,7 @@ static void x86_cpu_common_class_init(ObjectClass *oc, void *data) X86CPUClass *xcc = X86_CPU_CLASS(oc); CPUClass *cc = CPU_CLASS(oc); DeviceClass *dc = DEVICE_CLASS(oc); + ResettableClass *rc = RESETTABLE_CLASS(oc); FeatureWord w; device_class_set_parent_realize(dc, x86_cpu_realizefn, @@ -7120,7 +7122,8 @@ static void x86_cpu_common_class_init(ObjectClass *oc, void *data) &xcc->parent_unrealize); device_class_set_props(dc, x86_cpu_properties); - device_class_set_parent_reset(dc, x86_cpu_reset, &xcc->parent_reset); + resettable_class_set_parent_phases(rc, NULL, x86_cpu_reset_hold, NULL, + &xcc->parent_phases); cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP; cc->class_by_name = x86_cpu_class_by_name; diff --git a/target/i386/hax/hax-all.c b/target/i386/hax/hax-all.c index b185ee8de4..b7fb5385b2 100644 --- a/target/i386/hax/hax-all.c +++ b/target/i386/hax/hax-all.c @@ -388,7 +388,7 @@ static int hax_handle_io(CPUArchState *env, uint32_t df, uint16_t port, MemTxAttrs attrs = { 0 }; if (!df) { - ptr = (uint8_t *) buffer; + ptr = buffer; } else { ptr = buffer + size * count - size; } diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c index a213209379..0ab4e0734a 100644 --- a/target/i386/kvm/kvm.c +++ b/target/i386/kvm/kvm.c @@ -5689,7 +5689,6 @@ static void kvm_arch_set_notify_window(Object *obj, Visitor *v, Error **errp) { KVMState *s = KVM_STATE(obj); - Error *error = NULL; uint32_t value; if (s->fd != -1) { @@ -5697,9 +5696,7 @@ static void kvm_arch_set_notify_window(Object *obj, Visitor *v, 
return; } - visit_type_uint32(v, name, &value, &error); - if (error) { - error_propagate(errp, error); + if (!visit_type_uint32(v, name, &value, errp)) { return; } diff --git a/target/loongarch/cpu.c b/target/loongarch/cpu.c index 46b04cbdad..290ab4d526 100644 --- a/target/loongarch/cpu.c +++ b/target/loongarch/cpu.c @@ -128,13 +128,11 @@ static inline bool cpu_loongarch_hw_interrupts_pending(CPULoongArchState *env) { uint32_t pending; uint32_t status; - bool r; pending = FIELD_EX64(env->CSR_ESTAT, CSR_ESTAT, IS); status = FIELD_EX64(env->CSR_ECFG, CSR_ECFG, LIE); - r = (pending & status) != 0; - return r; + return (pending & status) != 0; } static void loongarch_cpu_do_interrupt(CPUState *cs) @@ -452,14 +450,16 @@ void loongarch_cpu_list(void) g_slist_free(list); } -static void loongarch_cpu_reset(DeviceState *dev) +static void loongarch_cpu_reset_hold(Object *obj) { - CPUState *cs = CPU(dev); + CPUState *cs = CPU(obj); LoongArchCPU *cpu = LOONGARCH_CPU(cs); LoongArchCPUClass *lacc = LOONGARCH_CPU_GET_CLASS(cpu); CPULoongArchState *env = &cpu->env; - lacc->parent_reset(dev); + if (lacc->parent_phases.hold) { + lacc->parent_phases.hold(obj); + } env->fcsr0_mask = FCSR0_M1 | FCSR0_M2 | FCSR0_M3; env->fcsr0 = 0x0; @@ -696,10 +696,12 @@ static void loongarch_cpu_class_init(ObjectClass *c, void *data) LoongArchCPUClass *lacc = LOONGARCH_CPU_CLASS(c); CPUClass *cc = CPU_CLASS(c); DeviceClass *dc = DEVICE_CLASS(c); + ResettableClass *rc = RESETTABLE_CLASS(c); device_class_set_parent_realize(dc, loongarch_cpu_realizefn, &lacc->parent_realize); - device_class_set_parent_reset(dc, loongarch_cpu_reset, &lacc->parent_reset); + resettable_class_set_parent_phases(rc, NULL, loongarch_cpu_reset_hold, NULL, + &lacc->parent_phases); cc->class_by_name = loongarch_cpu_class_by_name; cc->has_work = loongarch_cpu_has_work; diff --git a/target/loongarch/cpu.h b/target/loongarch/cpu.h index e15c633b0b..e35cf65597 100644 --- a/target/loongarch/cpu.h +++ b/target/loongarch/cpu.h @@ -356,7 +356,7 @@ OBJECT_DECLARE_CPU_TYPE(LoongArchCPU, LoongArchCPUClass, /** * LoongArchCPUClass: * @parent_realize: The parent class' realize handler. - * @parent_reset: The parent class' reset handler. + * @parent_phases: The parent class' reset phase handlers. * * A LoongArch CPU model. */ @@ -366,7 +366,7 @@ struct LoongArchCPUClass { /*< public >*/ DeviceRealize parent_realize; - DeviceReset parent_reset; + ResettablePhases parent_phases; }; /* diff --git a/target/m68k/cpu-qom.h b/target/m68k/cpu-qom.h index cd9687192c..0ec7750a92 100644 --- a/target/m68k/cpu-qom.h +++ b/target/m68k/cpu-qom.h @@ -30,7 +30,7 @@ OBJECT_DECLARE_CPU_TYPE(M68kCPU, M68kCPUClass, M68K_CPU) /* * M68kCPUClass: * @parent_realize: The parent class' realize handler. - * @parent_reset: The parent class' reset handler. + * @parent_phases: The parent class' reset phase handlers. * * A Motorola 68k CPU model. 
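The target/i386/kvm/kvm.c hunk above drops the local Error * and error_propagate() in favour of the boolean return value that QAPI visitors already provide, so the caller's errp can be passed straight through. A minimal sketch of the idiom (the property setter name and surrounding code are illustrative):

static void foo_set_notify_window(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    uint32_t value;

    /* visit_type_uint32() returns false on failure and has set *errp. */
    if (!visit_type_uint32(v, name, &value, errp)) {
        return;
    }

    /* ... apply 'value' to the object ... */
}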
*/ @@ -40,7 +40,7 @@ struct M68kCPUClass { /*< public >*/ DeviceRealize parent_realize; - DeviceReset parent_reset; + ResettablePhases parent_phases; }; diff --git a/target/m68k/cpu.c b/target/m68k/cpu.c index b67ddea2ae..99af1ab541 100644 --- a/target/m68k/cpu.c +++ b/target/m68k/cpu.c @@ -66,16 +66,18 @@ static void m68k_unset_feature(CPUM68KState *env, int feature) env->features &= ~BIT_ULL(feature); } -static void m68k_cpu_reset(DeviceState *dev) +static void m68k_cpu_reset_hold(Object *obj) { - CPUState *s = CPU(dev); + CPUState *s = CPU(obj); M68kCPU *cpu = M68K_CPU(s); M68kCPUClass *mcc = M68K_CPU_GET_CLASS(cpu); CPUM68KState *env = &cpu->env; floatx80 nan = floatx80_default_nan(NULL); int i; - mcc->parent_reset(dev); + if (mcc->parent_phases.hold) { + mcc->parent_phases.hold(obj); + } memset(env, 0, offsetof(CPUM68KState, end_reset_fields)); #ifdef CONFIG_SOFTMMU @@ -552,10 +554,12 @@ static void m68k_cpu_class_init(ObjectClass *c, void *data) M68kCPUClass *mcc = M68K_CPU_CLASS(c); CPUClass *cc = CPU_CLASS(c); DeviceClass *dc = DEVICE_CLASS(c); + ResettableClass *rc = RESETTABLE_CLASS(c); device_class_set_parent_realize(dc, m68k_cpu_realizefn, &mcc->parent_realize); - device_class_set_parent_reset(dc, m68k_cpu_reset, &mcc->parent_reset); + resettable_class_set_parent_phases(rc, NULL, m68k_cpu_reset_hold, NULL, + &mcc->parent_phases); cc->class_by_name = m68k_cpu_class_by_name; cc->has_work = m68k_cpu_has_work; diff --git a/target/microblaze/cpu-qom.h b/target/microblaze/cpu-qom.h index 255b39a45d..cda9220fa9 100644 --- a/target/microblaze/cpu-qom.h +++ b/target/microblaze/cpu-qom.h @@ -30,7 +30,7 @@ OBJECT_DECLARE_CPU_TYPE(MicroBlazeCPU, MicroBlazeCPUClass, MICROBLAZE_CPU) /** * MicroBlazeCPUClass: * @parent_realize: The parent class' realize handler. - * @parent_reset: The parent class' reset handler. + * @parent_phases: The parent class' reset phase handlers. * * A MicroBlaze CPU model. 
*/ @@ -40,7 +40,7 @@ struct MicroBlazeCPUClass { /*< public >*/ DeviceRealize parent_realize; - DeviceReset parent_reset; + ResettablePhases parent_phases; }; diff --git a/target/microblaze/cpu.c b/target/microblaze/cpu.c index 89e493f3ff..817681f9b2 100644 --- a/target/microblaze/cpu.c +++ b/target/microblaze/cpu.c @@ -162,14 +162,16 @@ static void microblaze_cpu_set_irq(void *opaque, int irq, int level) } #endif -static void mb_cpu_reset(DeviceState *dev) +static void mb_cpu_reset_hold(Object *obj) { - CPUState *s = CPU(dev); + CPUState *s = CPU(obj); MicroBlazeCPU *cpu = MICROBLAZE_CPU(s); MicroBlazeCPUClass *mcc = MICROBLAZE_CPU_GET_CLASS(cpu); CPUMBState *env = &cpu->env; - mcc->parent_reset(dev); + if (mcc->parent_phases.hold) { + mcc->parent_phases.hold(obj); + } memset(env, 0, offsetof(CPUMBState, end_reset_fields)); env->res_addr = RES_ADDR_NONE; @@ -399,10 +401,12 @@ static void mb_cpu_class_init(ObjectClass *oc, void *data) DeviceClass *dc = DEVICE_CLASS(oc); CPUClass *cc = CPU_CLASS(oc); MicroBlazeCPUClass *mcc = MICROBLAZE_CPU_CLASS(oc); + ResettableClass *rc = RESETTABLE_CLASS(oc); device_class_set_parent_realize(dc, mb_cpu_realizefn, &mcc->parent_realize); - device_class_set_parent_reset(dc, mb_cpu_reset, &mcc->parent_reset); + resettable_class_set_parent_phases(rc, NULL, mb_cpu_reset_hold, NULL, + &mcc->parent_phases); cc->class_by_name = mb_cpu_class_by_name; cc->has_work = mb_cpu_has_work; diff --git a/target/mips/cpu-qom.h b/target/mips/cpu-qom.h index e28b529607..0dffab453b 100644 --- a/target/mips/cpu-qom.h +++ b/target/mips/cpu-qom.h @@ -34,7 +34,7 @@ OBJECT_DECLARE_CPU_TYPE(MIPSCPU, MIPSCPUClass, MIPS_CPU) /** * MIPSCPUClass: * @parent_realize: The parent class' realize handler. - * @parent_reset: The parent class' reset handler. + * @parent_phases: The parent class' reset phase handlers. * * A MIPS CPU model. */ @@ -44,7 +44,7 @@ struct MIPSCPUClass { /*< public >*/ DeviceRealize parent_realize; - DeviceReset parent_reset; + ResettablePhases parent_phases; const struct mips_def_t *cpu_def; /* Used for the jazz board to modify mips_cpu_do_transaction_failed. 
*/ diff --git a/target/mips/cpu.c b/target/mips/cpu.c index 7a565466cb..c614b04607 100644 --- a/target/mips/cpu.c +++ b/target/mips/cpu.c @@ -182,14 +182,16 @@ static bool mips_cpu_has_work(CPUState *cs) #include "cpu-defs.c.inc" -static void mips_cpu_reset(DeviceState *dev) +static void mips_cpu_reset_hold(Object *obj) { - CPUState *cs = CPU(dev); + CPUState *cs = CPU(obj); MIPSCPU *cpu = MIPS_CPU(cs); MIPSCPUClass *mcc = MIPS_CPU_GET_CLASS(cpu); CPUMIPSState *env = &cpu->env; - mcc->parent_reset(dev); + if (mcc->parent_phases.hold) { + mcc->parent_phases.hold(obj); + } memset(env, 0, offsetof(CPUMIPSState, end_reset_fields)); @@ -562,10 +564,12 @@ static void mips_cpu_class_init(ObjectClass *c, void *data) MIPSCPUClass *mcc = MIPS_CPU_CLASS(c); CPUClass *cc = CPU_CLASS(c); DeviceClass *dc = DEVICE_CLASS(c); + ResettableClass *rc = RESETTABLE_CLASS(c); device_class_set_parent_realize(dc, mips_cpu_realizefn, &mcc->parent_realize); - device_class_set_parent_reset(dc, mips_cpu_reset, &mcc->parent_reset); + resettable_class_set_parent_phases(rc, NULL, mips_cpu_reset_hold, NULL, + &mcc->parent_phases); cc->class_by_name = mips_cpu_class_by_name; cc->has_work = mips_cpu_has_work; diff --git a/target/mips/tcg/dsp_helper.c b/target/mips/tcg/dsp_helper.c index 09b6e5fb15..7a4362c8ef 100644 --- a/target/mips/tcg/dsp_helper.c +++ b/target/mips/tcg/dsp_helper.c @@ -3281,15 +3281,12 @@ target_ulong helper_dextr_l(target_ulong ac, target_ulong shift, CPUMIPSState *env) { uint64_t temp[3]; - target_ulong ret; shift = shift & 0x3F; mipsdsp_rndrashift_acc(temp, ac, shift, env); - ret = (temp[1] << 63) | (temp[0] >> 1); - - return ret; + return (temp[1] << 63) | (temp[0] >> 1); } target_ulong helper_dextr_r_l(target_ulong ac, target_ulong shift, @@ -3297,7 +3294,6 @@ target_ulong helper_dextr_r_l(target_ulong ac, target_ulong shift, { uint64_t temp[3]; uint32_t temp128; - target_ulong ret; shift = shift & 0x3F; mipsdsp_rndrashift_acc(temp, ac, shift, env); @@ -3317,9 +3313,7 @@ target_ulong helper_dextr_r_l(target_ulong ac, target_ulong shift, set_DSPControl_overflow_flag(1, 23, env); } - ret = (temp[1] << 63) | (temp[0] >> 1); - - return ret; + return (temp[1] << 63) | (temp[0] >> 1); } target_ulong helper_dextr_rs_l(target_ulong ac, target_ulong shift, @@ -3327,7 +3321,6 @@ target_ulong helper_dextr_rs_l(target_ulong ac, target_ulong shift, { uint64_t temp[3]; uint32_t temp128; - target_ulong ret; shift = shift & 0x3F; mipsdsp_rndrashift_acc(temp, ac, shift, env); @@ -3354,9 +3347,7 @@ target_ulong helper_dextr_rs_l(target_ulong ac, target_ulong shift, set_DSPControl_overflow_flag(1, 23, env); } - ret = (temp[1] << 63) | (temp[0] >> 1); - - return ret; + return (temp[1] << 63) | (temp[0] >> 1); } #endif diff --git a/target/nios2/cpu.c b/target/nios2/cpu.c index 9a5351bc81..cff30823da 100644 --- a/target/nios2/cpu.c +++ b/target/nios2/cpu.c @@ -57,14 +57,16 @@ static bool nios2_cpu_has_work(CPUState *cs) return cs->interrupt_request & CPU_INTERRUPT_HARD; } -static void nios2_cpu_reset(DeviceState *dev) +static void nios2_cpu_reset_hold(Object *obj) { - CPUState *cs = CPU(dev); + CPUState *cs = CPU(obj); Nios2CPU *cpu = NIOS2_CPU(cs); Nios2CPUClass *ncc = NIOS2_CPU_GET_CLASS(cpu); CPUNios2State *env = &cpu->env; - ncc->parent_reset(dev); + if (ncc->parent_phases.hold) { + ncc->parent_phases.hold(obj); + } memset(env->ctrl, 0, sizeof(env->ctrl)); env->pc = cpu->reset_addr; @@ -371,11 +373,13 @@ static void nios2_cpu_class_init(ObjectClass *oc, void *data) DeviceClass *dc = DEVICE_CLASS(oc); CPUClass *cc = 
CPU_CLASS(oc); Nios2CPUClass *ncc = NIOS2_CPU_CLASS(oc); + ResettableClass *rc = RESETTABLE_CLASS(oc); device_class_set_parent_realize(dc, nios2_cpu_realizefn, &ncc->parent_realize); device_class_set_props(dc, nios2_properties); - device_class_set_parent_reset(dc, nios2_cpu_reset, &ncc->parent_reset); + resettable_class_set_parent_phases(rc, NULL, nios2_cpu_reset_hold, NULL, + &ncc->parent_phases); cc->class_by_name = nios2_cpu_class_by_name; cc->has_work = nios2_cpu_has_work; diff --git a/target/nios2/cpu.h b/target/nios2/cpu.h index f85581ee56..b1a5549074 100644 --- a/target/nios2/cpu.h +++ b/target/nios2/cpu.h @@ -37,7 +37,7 @@ OBJECT_DECLARE_CPU_TYPE(Nios2CPU, Nios2CPUClass, NIOS2_CPU) /** * Nios2CPUClass: - * @parent_reset: The parent class' reset handler. + * @parent_phases: The parent class' reset phase handlers. * * A Nios2 CPU model. */ @@ -47,7 +47,7 @@ struct Nios2CPUClass { /*< public >*/ DeviceRealize parent_realize; - DeviceReset parent_reset; + ResettablePhases parent_phases; }; #define TARGET_HAS_ICE 1 diff --git a/target/openrisc/cpu.c b/target/openrisc/cpu.c index de0176cd20..4c11a1f7ad 100644 --- a/target/openrisc/cpu.c +++ b/target/openrisc/cpu.c @@ -70,13 +70,15 @@ static void openrisc_disas_set_info(CPUState *cpu, disassemble_info *info) info->print_insn = print_insn_or1k; } -static void openrisc_cpu_reset(DeviceState *dev) +static void openrisc_cpu_reset_hold(Object *obj) { - CPUState *s = CPU(dev); + CPUState *s = CPU(obj); OpenRISCCPU *cpu = OPENRISC_CPU(s); OpenRISCCPUClass *occ = OPENRISC_CPU_GET_CLASS(cpu); - occ->parent_reset(dev); + if (occ->parent_phases.hold) { + occ->parent_phases.hold(obj); + } memset(&cpu->env, 0, offsetof(CPUOpenRISCState, end_reset_fields)); @@ -229,10 +231,12 @@ static void openrisc_cpu_class_init(ObjectClass *oc, void *data) OpenRISCCPUClass *occ = OPENRISC_CPU_CLASS(oc); CPUClass *cc = CPU_CLASS(occ); DeviceClass *dc = DEVICE_CLASS(oc); + ResettableClass *rc = RESETTABLE_CLASS(oc); device_class_set_parent_realize(dc, openrisc_cpu_realizefn, &occ->parent_realize); - device_class_set_parent_reset(dc, openrisc_cpu_reset, &occ->parent_reset); + resettable_class_set_parent_phases(rc, NULL, openrisc_cpu_reset_hold, NULL, + &occ->parent_phases); cc->class_by_name = openrisc_cpu_class_by_name; cc->has_work = openrisc_cpu_has_work; diff --git a/target/openrisc/cpu.h b/target/openrisc/cpu.h index 1d5efa5ca2..5f60749705 100644 --- a/target/openrisc/cpu.h +++ b/target/openrisc/cpu.h @@ -34,7 +34,7 @@ OBJECT_DECLARE_CPU_TYPE(OpenRISCCPU, OpenRISCCPUClass, OPENRISC_CPU) /** * OpenRISCCPUClass: * @parent_realize: The parent class' realize handler. - * @parent_reset: The parent class' reset handler. + * @parent_phases: The parent class' reset phase handlers. * * A OpenRISC CPU model. */ @@ -44,7 +44,7 @@ struct OpenRISCCPUClass { /*< public >*/ DeviceRealize parent_realize; - DeviceReset parent_reset; + ResettablePhases parent_phases; }; #define TARGET_INSN_START_EXTRA_WORDS 1 diff --git a/target/ppc/cpu-qom.h b/target/ppc/cpu-qom.h index 89ff88f28c..0fbd8b7246 100644 --- a/target/ppc/cpu-qom.h +++ b/target/ppc/cpu-qom.h @@ -143,7 +143,7 @@ typedef struct PPCHash64Options PPCHash64Options; /** * PowerPCCPUClass: * @parent_realize: The parent class' realize handler. - * @parent_reset: The parent class' reset handler. + * @parent_phases: The parent class' reset phase handlers. * * A PowerPC CPU model. 
*/ @@ -154,7 +154,7 @@ struct PowerPCCPUClass { DeviceRealize parent_realize; DeviceUnrealize parent_unrealize; - DeviceReset parent_reset; + ResettablePhases parent_phases; void (*parent_parse_features)(const char *type, char *str, Error **errp); uint32_t pvr; diff --git a/target/ppc/cpu_init.c b/target/ppc/cpu_init.c index cbf0081374..95d25856a0 100644 --- a/target/ppc/cpu_init.c +++ b/target/ppc/cpu_init.c @@ -7031,16 +7031,18 @@ static bool ppc_cpu_has_work(CPUState *cs) return cs->interrupt_request & CPU_INTERRUPT_HARD; } -static void ppc_cpu_reset(DeviceState *dev) +static void ppc_cpu_reset_hold(Object *obj) { - CPUState *s = CPU(dev); + CPUState *s = CPU(obj); PowerPCCPU *cpu = POWERPC_CPU(s); PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); CPUPPCState *env = &cpu->env; target_ulong msr; int i; - pcc->parent_reset(dev); + if (pcc->parent_phases.hold) { + pcc->parent_phases.hold(obj); + } msr = (target_ulong)0; msr |= (target_ulong)MSR_HVB; @@ -7267,6 +7269,7 @@ static void ppc_cpu_class_init(ObjectClass *oc, void *data) PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); CPUClass *cc = CPU_CLASS(oc); DeviceClass *dc = DEVICE_CLASS(oc); + ResettableClass *rc = RESETTABLE_CLASS(oc); device_class_set_parent_realize(dc, ppc_cpu_realize, &pcc->parent_realize); @@ -7275,7 +7278,8 @@ static void ppc_cpu_class_init(ObjectClass *oc, void *data) pcc->pvr_match = ppc_pvr_match_default; device_class_set_props(dc, ppc_cpu_properties); - device_class_set_parent_reset(dc, ppc_cpu_reset, &pcc->parent_reset); + resettable_class_set_parent_phases(rc, NULL, ppc_cpu_reset_hold, NULL, + &pcc->parent_phases); cc->class_by_name = ppc_cpu_class_by_name; cc->has_work = ppc_cpu_has_work; diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c index d14e95c9dc..6fe176e483 100644 --- a/target/riscv/cpu.c +++ b/target/riscv/cpu.c @@ -519,18 +519,20 @@ static void riscv_restore_state_to_opc(CPUState *cs, env->bins = data[1]; } -static void riscv_cpu_reset(DeviceState *dev) +static void riscv_cpu_reset_hold(Object *obj) { #ifndef CONFIG_USER_ONLY uint8_t iprio; int i, irq, rdzero; #endif - CPUState *cs = CPU(dev); + CPUState *cs = CPU(obj); RISCVCPU *cpu = RISCV_CPU(cs); RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu); CPURISCVState *env = &cpu->env; - mcc->parent_reset(dev); + if (mcc->parent_phases.hold) { + mcc->parent_phases.hold(obj); + } #ifndef CONFIG_USER_ONLY env->misa_mxl = env->misa_mxl_max; env->priv = PRV_M; @@ -1161,11 +1163,13 @@ static void riscv_cpu_class_init(ObjectClass *c, void *data) RISCVCPUClass *mcc = RISCV_CPU_CLASS(c); CPUClass *cc = CPU_CLASS(c); DeviceClass *dc = DEVICE_CLASS(c); + ResettableClass *rc = RESETTABLE_CLASS(c); device_class_set_parent_realize(dc, riscv_cpu_realize, &mcc->parent_realize); - device_class_set_parent_reset(dc, riscv_cpu_reset, &mcc->parent_reset); + resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL, + &mcc->parent_phases); cc->class_by_name = riscv_cpu_class_by_name; cc->has_work = riscv_cpu_has_work; diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h index 3a9e25053f..443d15a47c 100644 --- a/target/riscv/cpu.h +++ b/target/riscv/cpu.h @@ -395,7 +395,7 @@ OBJECT_DECLARE_CPU_TYPE(RISCVCPU, RISCVCPUClass, RISCV_CPU) /** * RISCVCPUClass: * @parent_realize: The parent class' realize handler. - * @parent_reset: The parent class' reset handler. + * @parent_phases: The parent class' reset phase handlers. * * A RISCV CPU model. 
*/ @@ -404,7 +404,7 @@ struct RISCVCPUClass { CPUClass parent_class; /*< public >*/ DeviceRealize parent_realize; - DeviceReset parent_reset; + ResettablePhases parent_phases; }; struct RISCVCPUConfig { diff --git a/target/riscv/debug.c b/target/riscv/debug.c index 26ea764407..e44848d0d7 100644 --- a/target/riscv/debug.c +++ b/target/riscv/debug.c @@ -243,15 +243,13 @@ static void do_trigger_action(CPURISCVState *env, target_ulong trigger_index) static uint32_t type2_breakpoint_size(CPURISCVState *env, target_ulong ctrl) { - uint32_t size, sizelo, sizehi = 0; + uint32_t sizelo, sizehi = 0; if (riscv_cpu_mxl(env) == MXL_RV64) { sizehi = extract32(ctrl, 21, 2); } sizelo = extract32(ctrl, 16, 2); - size = (sizehi << 2) | sizelo; - - return size; + return (sizehi << 2) | sizelo; } static inline bool type2_breakpoint_enabled(target_ulong ctrl) diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c index 0020b9a95d..00de879787 100644 --- a/target/riscv/vector_helper.c +++ b/target/riscv/vector_helper.c @@ -2791,31 +2791,25 @@ static inline uint16_t vssrl16(CPURISCVState *env, int vxrm, uint16_t a, uint16_t b) { uint8_t round, shift = b & 0xf; - uint16_t res; round = get_round(vxrm, a, shift); - res = (a >> shift) + round; - return res; + return (a >> shift) + round; } static inline uint32_t vssrl32(CPURISCVState *env, int vxrm, uint32_t a, uint32_t b) { uint8_t round, shift = b & 0x1f; - uint32_t res; round = get_round(vxrm, a, shift); - res = (a >> shift) + round; - return res; + return (a >> shift) + round; } static inline uint64_t vssrl64(CPURISCVState *env, int vxrm, uint64_t a, uint64_t b) { uint8_t round, shift = b & 0x3f; - uint64_t res; round = get_round(vxrm, a, shift); - res = (a >> shift) + round; - return res; + return (a >> shift) + round; } RVVCALL(OPIVV2_RM, vssrl_vv_b, OP_UUU_B, H1, H1, H1, vssrl8) RVVCALL(OPIVV2_RM, vssrl_vv_h, OP_UUU_H, H2, H2, H2, vssrl16) @@ -2839,41 +2833,33 @@ static inline int8_t vssra8(CPURISCVState *env, int vxrm, int8_t a, int8_t b) { uint8_t round, shift = b & 0x7; - int8_t res; round = get_round(vxrm, a, shift); - res = (a >> shift) + round; - return res; + return (a >> shift) + round; } static inline int16_t vssra16(CPURISCVState *env, int vxrm, int16_t a, int16_t b) { uint8_t round, shift = b & 0xf; - int16_t res; round = get_round(vxrm, a, shift); - res = (a >> shift) + round; - return res; + return (a >> shift) + round; } static inline int32_t vssra32(CPURISCVState *env, int vxrm, int32_t a, int32_t b) { uint8_t round, shift = b & 0x1f; - int32_t res; round = get_round(vxrm, a, shift); - res = (a >> shift) + round; - return res; + return (a >> shift) + round; } static inline int64_t vssra64(CPURISCVState *env, int vxrm, int64_t a, int64_t b) { uint8_t round, shift = b & 0x3f; - int64_t res; round = get_round(vxrm, a, shift); - res = (a >> shift) + round; - return res; + return (a >> shift) + round; } RVVCALL(OPIVV2_RM, vssra_vv_b, OP_SSS_B, H1, H1, H1, vssra8) diff --git a/target/rx/cpu-qom.h b/target/rx/cpu-qom.h index 4533759d96..1c8466a187 100644 --- a/target/rx/cpu-qom.h +++ b/target/rx/cpu-qom.h @@ -31,7 +31,7 @@ OBJECT_DECLARE_CPU_TYPE(RXCPU, RXCPUClass, RX_CPU) /* * RXCPUClass: * @parent_realize: The parent class' realize handler. - * @parent_reset: The parent class' reset handler. + * @parent_phases: The parent class' reset phase handlers. * * A RX CPU model. 
*/ @@ -41,7 +41,7 @@ struct RXCPUClass { /*< public >*/ DeviceRealize parent_realize; - DeviceReset parent_reset; + ResettablePhases parent_phases; }; #endif diff --git a/target/rx/cpu.c b/target/rx/cpu.c index 9003c6e9fe..219ef28e46 100644 --- a/target/rx/cpu.c +++ b/target/rx/cpu.c @@ -62,14 +62,16 @@ static bool rx_cpu_has_work(CPUState *cs) (CPU_INTERRUPT_HARD | CPU_INTERRUPT_FIR); } -static void rx_cpu_reset(DeviceState *dev) +static void rx_cpu_reset_hold(Object *obj) { - RXCPU *cpu = RX_CPU(dev); + RXCPU *cpu = RX_CPU(obj); RXCPUClass *rcc = RX_CPU_GET_CLASS(cpu); CPURXState *env = &cpu->env; uint32_t *resetvec; - rcc->parent_reset(dev); + if (rcc->parent_phases.hold) { + rcc->parent_phases.hold(obj); + } memset(env, 0, offsetof(CPURXState, end_reset_fields)); @@ -215,11 +217,12 @@ static void rx_cpu_class_init(ObjectClass *klass, void *data) DeviceClass *dc = DEVICE_CLASS(klass); CPUClass *cc = CPU_CLASS(klass); RXCPUClass *rcc = RX_CPU_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); device_class_set_parent_realize(dc, rx_cpu_realize, &rcc->parent_realize); - device_class_set_parent_reset(dc, rx_cpu_reset, - &rcc->parent_reset); + resettable_class_set_parent_phases(rc, NULL, rx_cpu_reset_hold, NULL, + &rcc->parent_phases); cc->class_by_name = rx_cpu_class_by_name; cc->has_work = rx_cpu_has_work; diff --git a/target/s390x/cpu_models_sysemu.c b/target/s390x/cpu_models_sysemu.c index d8a141a023..63981bf36b 100644 --- a/target/s390x/cpu_models_sysemu.c +++ b/target/s390x/cpu_models_sysemu.c @@ -210,7 +210,6 @@ static void cpu_info_from_model(CpuModelInfo *info, const S390CPUModel *model, qobject_unref(qdict); } else { info->props = QOBJECT(qdict); - info->has_props = true; } } diff --git a/target/s390x/helper.h b/target/s390x/helper.h index bf33d86f74..93923ca153 100644 --- a/target/s390x/helper.h +++ b/target/s390x/helper.h @@ -353,8 +353,8 @@ DEF_HELPER_FLAGS_3(tprot, TCG_CALL_NO_WG, i32, env, i64, i64) DEF_HELPER_2(iske, i64, env, i64) DEF_HELPER_3(sske, void, env, i64, i64) DEF_HELPER_2(rrbe, i32, env, i64) -DEF_HELPER_4(mvcs, i32, env, i64, i64, i64) -DEF_HELPER_4(mvcp, i32, env, i64, i64, i64) +DEF_HELPER_5(mvcs, i32, env, i64, i64, i64, i64) +DEF_HELPER_5(mvcp, i32, env, i64, i64, i64, i64) DEF_HELPER_4(sigp, i32, env, i64, i32, i32) DEF_HELPER_FLAGS_2(sacf, TCG_CALL_NO_WG, void, env, i64) DEF_HELPER_FLAGS_4(idte, TCG_CALL_NO_RWG, void, env, i64, i64, i32) diff --git a/target/s390x/tcg/insn-data.h.inc b/target/s390x/tcg/insn-data.h.inc index 54d4250c9f..79c6ab509a 100644 --- a/target/s390x/tcg/insn-data.h.inc +++ b/target/s390x/tcg/insn-data.h.inc @@ -1355,9 +1355,9 @@ E(0xb24b, LURA, RRE, Z, 0, ra2, new, r1_32, lura, 0, MO_TEUL, IF_PRIV) E(0xb905, LURAG, RRE, Z, 0, ra2, r1, 0, lura, 0, MO_TEUQ, IF_PRIV) /* MOVE TO PRIMARY */ - F(0xda00, MVCP, SS_d, Z, la1, a2, 0, 0, mvcp, 0, IF_PRIV) + C(0xda00, MVCP, SS_d, Z, la1, a2, 0, 0, mvcp, 0) /* MOVE TO SECONDARY */ - F(0xdb00, MVCS, SS_d, Z, la1, a2, 0, 0, mvcs, 0, IF_PRIV) + C(0xdb00, MVCS, SS_d, Z, la1, a2, 0, 0, mvcs, 0) /* PURGE TLB */ F(0xb20d, PTLB, S, Z, 0, 0, 0, 0, ptlb, 0, IF_PRIV) /* RESET REFERENCE BIT EXTENDED */ diff --git a/target/s390x/tcg/mem_helper.c b/target/s390x/tcg/mem_helper.c index 3758b9e688..cb82cd1c1d 100644 --- a/target/s390x/tcg/mem_helper.c +++ b/target/s390x/tcg/mem_helper.c @@ -51,7 +51,7 @@ static inline bool psw_key_valid(CPUS390XState *env, uint8_t psw_key) if (env->psw.mask & PSW_MASK_PSTATE) { /* PSW key has range 0..15, it is valid if the bit is 1 in the PKM */ - return pkm & (0x80 >> 
psw_key); + return pkm & (0x8000 >> psw_key); } return true; } @@ -2295,7 +2295,8 @@ uint32_t HELPER(rrbe)(CPUS390XState *env, uint64_t r2) return re >> 1; } -uint32_t HELPER(mvcs)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2) +uint32_t HELPER(mvcs)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2, + uint64_t key) { const uint8_t psw_as = (env->psw.mask & PSW_MASK_ASC) >> PSW_SHIFT_ASC; S390Access srca, desta; @@ -2310,6 +2311,10 @@ uint32_t HELPER(mvcs)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2) s390_program_interrupt(env, PGM_SPECIAL_OP, ra); } + if (!psw_key_valid(env, (key >> 4) & 0xf)) { + s390_program_interrupt(env, PGM_PRIVILEGED, ra); + } + l = wrap_length32(env, l); if (l > 256) { /* max 256 */ @@ -2319,14 +2324,14 @@ uint32_t HELPER(mvcs)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2) return cc; } - /* TODO: Access key handling */ srca = access_prepare(env, a2, l, MMU_DATA_LOAD, MMU_PRIMARY_IDX, ra); desta = access_prepare(env, a1, l, MMU_DATA_STORE, MMU_SECONDARY_IDX, ra); access_memmove(env, &desta, &srca, ra); return cc; } -uint32_t HELPER(mvcp)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2) +uint32_t HELPER(mvcp)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2, + uint64_t key) { const uint8_t psw_as = (env->psw.mask & PSW_MASK_ASC) >> PSW_SHIFT_ASC; S390Access srca, desta; @@ -2341,6 +2346,10 @@ uint32_t HELPER(mvcp)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2) s390_program_interrupt(env, PGM_SPECIAL_OP, ra); } + if (!psw_key_valid(env, (key >> 4) & 0xf)) { + s390_program_interrupt(env, PGM_PRIVILEGED, ra); + } + l = wrap_length32(env, l); if (l > 256) { /* max 256 */ @@ -2350,7 +2359,6 @@ uint32_t HELPER(mvcp)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2) return cc; } - /* TODO: Access key handling */ srca = access_prepare(env, a2, l, MMU_DATA_LOAD, MMU_SECONDARY_IDX, ra); desta = access_prepare(env, a1, l, MMU_DATA_STORE, MMU_PRIMARY_IDX, ra); access_memmove(env, &desta, &srca, ra); diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c index 1e599ac259..a339b277e9 100644 --- a/target/s390x/tcg/translate.c +++ b/target/s390x/tcg/translate.c @@ -3476,7 +3476,8 @@ static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o) static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o) { int r1 = get_field(s, l1); - gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2); + int r3 = get_field(s, r3); + gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2, regs[r3]); set_cc_static(s); return DISAS_NEXT; } @@ -3484,7 +3485,8 @@ static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o) static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o) { int r1 = get_field(s, l1); - gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2); + int r3 = get_field(s, r3); + gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2, regs[r3]); set_cc_static(s); return DISAS_NEXT; } diff --git a/target/sh4/cpu-qom.h b/target/sh4/cpu-qom.h index d4192d1090..89785a90f0 100644 --- a/target/sh4/cpu-qom.h +++ b/target/sh4/cpu-qom.h @@ -34,7 +34,7 @@ OBJECT_DECLARE_CPU_TYPE(SuperHCPU, SuperHCPUClass, SUPERH_CPU) /** * SuperHCPUClass: * @parent_realize: The parent class' realize handler. - * @parent_reset: The parent class' reset handler. + * @parent_phases: The parent class' reset phase handlers. 
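The s390x hunks above make two related fixes: psw_key_valid() now tests the 16-bit PSW-key mask with 0x8000 >> psw_key (one PKM bit per key 0..15, which the old 0x80 >> psw_key could not cover), and MVCP/MVCS gain a fifth helper argument carrying the R3 operand so the access key can be validated before any access is prepared. Because problem state is now policed through the PKM, the insn-data entries drop the IF_PRIV flag. A condensed sketch of the added check, with the surrounding helper code elided:

/* 'key' is the R3 register value; bits 4..7 hold the access key. */
uint8_t access_key = (key >> 4) & 0xf;

if (!psw_key_valid(env, access_key)) {
    /* Problem state with a key not enabled in the PKM: privileged op. */
    s390_program_interrupt(env, PGM_PRIVILEGED, ra);
}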
* @pvr: Processor Version Register * @prr: Processor Revision Register * @cvr: Cache Version Register @@ -47,7 +47,7 @@ struct SuperHCPUClass { /*< public >*/ DeviceRealize parent_realize; - DeviceReset parent_reset; + ResettablePhases parent_phases; uint32_t pvr; uint32_t prr; diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c index 453268392b..951eb6b9c8 100644 --- a/target/sh4/cpu.c +++ b/target/sh4/cpu.c @@ -87,14 +87,16 @@ static bool superh_cpu_has_work(CPUState *cs) return cs->interrupt_request & CPU_INTERRUPT_HARD; } -static void superh_cpu_reset(DeviceState *dev) +static void superh_cpu_reset_hold(Object *obj) { - CPUState *s = CPU(dev); + CPUState *s = CPU(obj); SuperHCPU *cpu = SUPERH_CPU(s); SuperHCPUClass *scc = SUPERH_CPU_GET_CLASS(cpu); CPUSH4State *env = &cpu->env; - scc->parent_reset(dev); + if (scc->parent_phases.hold) { + scc->parent_phases.hold(obj); + } memset(env, 0, offsetof(CPUSH4State, end_reset_fields)); @@ -274,11 +276,13 @@ static void superh_cpu_class_init(ObjectClass *oc, void *data) DeviceClass *dc = DEVICE_CLASS(oc); CPUClass *cc = CPU_CLASS(oc); SuperHCPUClass *scc = SUPERH_CPU_CLASS(oc); + ResettableClass *rc = RESETTABLE_CLASS(oc); device_class_set_parent_realize(dc, superh_cpu_realizefn, &scc->parent_realize); - device_class_set_parent_reset(dc, superh_cpu_reset, &scc->parent_reset); + resettable_class_set_parent_phases(rc, NULL, superh_cpu_reset_hold, NULL, + &scc->parent_phases); cc->class_by_name = superh_cpu_class_by_name; cc->has_work = superh_cpu_has_work; diff --git a/target/sparc/cpu-qom.h b/target/sparc/cpu-qom.h index 86ed37d933..78bf00b9a2 100644 --- a/target/sparc/cpu-qom.h +++ b/target/sparc/cpu-qom.h @@ -35,7 +35,7 @@ typedef struct sparc_def_t sparc_def_t; /** * SPARCCPUClass: * @parent_realize: The parent class' realize handler. - * @parent_reset: The parent class' reset handler. + * @parent_phases: The parent class' reset phase handlers. * * A SPARC CPU model. 
*/ @@ -45,7 +45,7 @@ struct SPARCCPUClass { /*< public >*/ DeviceRealize parent_realize; - DeviceReset parent_reset; + ResettablePhases parent_phases; sparc_def_t *cpu_def; }; diff --git a/target/sparc/cpu.c b/target/sparc/cpu.c index 4c3d08a875..1734ef8dc6 100644 --- a/target/sparc/cpu.c +++ b/target/sparc/cpu.c @@ -28,14 +28,16 @@ //#define DEBUG_FEATURES -static void sparc_cpu_reset(DeviceState *dev) +static void sparc_cpu_reset_hold(Object *obj) { - CPUState *s = CPU(dev); + CPUState *s = CPU(obj); SPARCCPU *cpu = SPARC_CPU(s); SPARCCPUClass *scc = SPARC_CPU_GET_CLASS(cpu); CPUSPARCState *env = &cpu->env; - scc->parent_reset(dev); + if (scc->parent_phases.hold) { + scc->parent_phases.hold(obj); + } memset(env, 0, offsetof(CPUSPARCState, end_reset_fields)); env->cwp = 0; @@ -889,12 +891,14 @@ static void sparc_cpu_class_init(ObjectClass *oc, void *data) SPARCCPUClass *scc = SPARC_CPU_CLASS(oc); CPUClass *cc = CPU_CLASS(oc); DeviceClass *dc = DEVICE_CLASS(oc); + ResettableClass *rc = RESETTABLE_CLASS(oc); device_class_set_parent_realize(dc, sparc_cpu_realizefn, &scc->parent_realize); device_class_set_props(dc, sparc_cpu_properties); - device_class_set_parent_reset(dc, sparc_cpu_reset, &scc->parent_reset); + resettable_class_set_parent_phases(rc, NULL, sparc_cpu_reset_hold, NULL, + &scc->parent_phases); cc->class_by_name = sparc_cpu_class_by_name; cc->parse_features = sparc_cpu_parse_features; diff --git a/target/tricore/cpu-qom.h b/target/tricore/cpu-qom.h index ee24e9fa76..612731daa0 100644 --- a/target/tricore/cpu-qom.h +++ b/target/tricore/cpu-qom.h @@ -32,7 +32,7 @@ struct TriCoreCPUClass { /*< public >*/ DeviceRealize parent_realize; - DeviceReset parent_reset; + ResettablePhases parent_phases; }; diff --git a/target/tricore/cpu.c b/target/tricore/cpu.c index 2c54a2825f..594cd1efd5 100644 --- a/target/tricore/cpu.c +++ b/target/tricore/cpu.c @@ -68,14 +68,16 @@ static void tricore_restore_state_to_opc(CPUState *cs, env->PC = data[0]; } -static void tricore_cpu_reset(DeviceState *dev) +static void tricore_cpu_reset_hold(Object *obj) { - CPUState *s = CPU(dev); + CPUState *s = CPU(obj); TriCoreCPU *cpu = TRICORE_CPU(s); TriCoreCPUClass *tcc = TRICORE_CPU_GET_CLASS(cpu); CPUTriCoreState *env = &cpu->env; - tcc->parent_reset(dev); + if (tcc->parent_phases.hold) { + tcc->parent_phases.hold(obj); + } cpu_state_reset(env); } @@ -180,11 +182,13 @@ static void tricore_cpu_class_init(ObjectClass *c, void *data) TriCoreCPUClass *mcc = TRICORE_CPU_CLASS(c); CPUClass *cc = CPU_CLASS(c); DeviceClass *dc = DEVICE_CLASS(c); + ResettableClass *rc = RESETTABLE_CLASS(c); device_class_set_parent_realize(dc, tricore_cpu_realizefn, &mcc->parent_realize); - device_class_set_parent_reset(dc, tricore_cpu_reset, &mcc->parent_reset); + resettable_class_set_parent_phases(rc, NULL, tricore_cpu_reset_hold, NULL, + &mcc->parent_phases); cc->class_by_name = tricore_cpu_class_by_name; cc->has_work = tricore_cpu_has_work; diff --git a/target/xtensa/cpu-qom.h b/target/xtensa/cpu-qom.h index 4fc35ee49b..419c7d8e4a 100644 --- a/target/xtensa/cpu-qom.h +++ b/target/xtensa/cpu-qom.h @@ -41,7 +41,7 @@ typedef struct XtensaConfig XtensaConfig; /** * XtensaCPUClass: * @parent_realize: The parent class' realize handler. - * @parent_reset: The parent class' reset handler. + * @parent_phases: The parent class' reset phase handlers. * @config: The CPU core configuration. * * An Xtensa CPU model. 
@@ -52,7 +52,7 @@ struct XtensaCPUClass { /*< public >*/ DeviceRealize parent_realize; - DeviceReset parent_reset; + ResettablePhases parent_phases; const XtensaConfig *config; }; diff --git a/target/xtensa/cpu.c b/target/xtensa/cpu.c index 09923301c4..2dc8f2d232 100644 --- a/target/xtensa/cpu.c +++ b/target/xtensa/cpu.c @@ -85,16 +85,18 @@ bool xtensa_abi_call0(void) } #endif -static void xtensa_cpu_reset(DeviceState *dev) +static void xtensa_cpu_reset_hold(Object *obj) { - CPUState *s = CPU(dev); + CPUState *s = CPU(obj); XtensaCPU *cpu = XTENSA_CPU(s); XtensaCPUClass *xcc = XTENSA_CPU_GET_CLASS(cpu); CPUXtensaState *env = &cpu->env; bool dfpu = xtensa_option_enabled(env->config, XTENSA_OPTION_DFP_COPROCESSOR); - xcc->parent_reset(dev); + if (xcc->parent_phases.hold) { + xcc->parent_phases.hold(obj); + } env->pc = env->config->exception_vector[EXC_RESET0 + env->static_vectors]; env->sregs[LITBASE] &= ~1; @@ -240,11 +242,13 @@ static void xtensa_cpu_class_init(ObjectClass *oc, void *data) DeviceClass *dc = DEVICE_CLASS(oc); CPUClass *cc = CPU_CLASS(oc); XtensaCPUClass *xcc = XTENSA_CPU_CLASS(cc); + ResettableClass *rc = RESETTABLE_CLASS(oc); device_class_set_parent_realize(dc, xtensa_cpu_realizefn, &xcc->parent_realize); - device_class_set_parent_reset(dc, xtensa_cpu_reset, &xcc->parent_reset); + resettable_class_set_parent_phases(rc, NULL, xtensa_cpu_reset_hold, NULL, + &xcc->parent_phases); cc->class_by_name = xtensa_cpu_class_by_name; cc->has_work = xtensa_cpu_has_work; diff --git a/tests/bench/benchmark-crypto-akcipher.c b/tests/bench/benchmark-crypto-akcipher.c index 15e69557ed..5e68cb0a1c 100644 --- a/tests/bench/benchmark-crypto-akcipher.c +++ b/tests/bench/benchmark-crypto-akcipher.c @@ -24,14 +24,12 @@ static QCryptoAkCipher *create_rsa_akcipher(const uint8_t *priv_key, QCryptoHashAlgorithm hash) { QCryptoAkCipherOptions opt; - QCryptoAkCipher *rsa; opt.alg = QCRYPTO_AKCIPHER_ALG_RSA; opt.u.rsa.padding_alg = padding; opt.u.rsa.hash_alg = hash; - rsa = qcrypto_akcipher_new(&opt, QCRYPTO_AKCIPHER_KEY_TYPE_PRIVATE, - priv_key, keylen, &error_abort); - return rsa; + return qcrypto_akcipher_new(&opt, QCRYPTO_AKCIPHER_KEY_TYPE_PRIVATE, + priv_key, keylen, &error_abort); } static void test_rsa_speed(const uint8_t *priv_key, size_t keylen, diff --git a/tests/qtest/e1000e-test.c b/tests/qtest/e1000e-test.c index 08adc5226d..3fc92046be 100644 --- a/tests/qtest/e1000e-test.c +++ b/tests/qtest/e1000e-test.c @@ -37,15 +37,15 @@ static void e1000e_send_verify(QE1000E *d, int *test_sockets, QGuestAllocator *alloc) { + static const char test[] = "TEST"; struct e1000_tx_desc descr; - static const int data_len = 64; char buffer[64]; int ret; uint32_t recv_len; /* Prepare test data buffer */ - uint64_t data = guest_alloc(alloc, data_len); - memwrite(data, "TEST", 5); + uint64_t data = guest_alloc(alloc, sizeof(buffer)); + memwrite(data, test, sizeof(test)); /* Prepare TX descriptor */ memset(&descr, 0, sizeof(descr)); @@ -54,7 +54,7 @@ static void e1000e_send_verify(QE1000E *d, int *test_sockets, QGuestAllocator *a E1000_TXD_CMD_EOP | E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | - data_len); + sizeof(buffer)); /* Put descriptor to the ring */ e1000e_tx_ring_push(d, &descr); @@ -69,9 +69,9 @@ static void e1000e_send_verify(QE1000E *d, int *test_sockets, QGuestAllocator *a /* Check data sent to the backend */ ret = recv(test_sockets[0], &recv_len, sizeof(recv_len), 0); g_assert_cmpint(ret, == , sizeof(recv_len)); - ret = recv(test_sockets[0], buffer, 64, 0); - g_assert_cmpint(ret, >=, 5); - 
g_assert_cmpstr(buffer, == , "TEST"); + ret = recv(test_sockets[0], buffer, sizeof(buffer), 0); + g_assert_cmpint(ret, ==, sizeof(buffer)); + g_assert_cmpstr(buffer, == , test); /* Free test data buffer */ guest_free(alloc, data); @@ -93,7 +93,6 @@ static void e1000e_receive_verify(QE1000E *d, int *test_sockets, QGuestAllocator }, }; - static const int data_len = 64; char buffer[64]; int ret; @@ -102,7 +101,7 @@ static void e1000e_receive_verify(QE1000E *d, int *test_sockets, QGuestAllocator g_assert_cmpint(ret, == , sizeof(test) + sizeof(len)); /* Prepare test data buffer */ - uint64_t data = guest_alloc(alloc, data_len); + uint64_t data = guest_alloc(alloc, sizeof(buffer)); /* Prepare RX descriptor */ memset(&descr, 0, sizeof(descr)); @@ -120,7 +119,7 @@ static void e1000e_receive_verify(QE1000E *d, int *test_sockets, QGuestAllocator /* Check data sent to the backend */ memread(data, buffer, sizeof(buffer)); - g_assert_cmpstr(buffer, == , "TEST"); + g_assert_cmpstr(buffer, == , test); /* Free test data buffer */ guest_free(alloc, data); diff --git a/tests/qtest/erst-test.c b/tests/qtest/erst-test.c index 4e768a126f..974e8bcfe5 100644 --- a/tests/qtest/erst-test.c +++ b/tests/qtest/erst-test.c @@ -154,10 +154,7 @@ static void test_acpi_erst_basic(void) int main(int argc, char **argv) { - int ret; - g_test_init(&argc, &argv, NULL); qtest_add_func("/acpi-erst/basic", test_acpi_erst_basic); - ret = g_test_run(); - return ret; + return g_test_run(); } diff --git a/tests/qtest/fuzz/qos_fuzz.c b/tests/qtest/fuzz/qos_fuzz.c index 3a3d9c16dd..e403d373a0 100644 --- a/tests/qtest/fuzz/qos_fuzz.c +++ b/tests/qtest/fuzz/qos_fuzz.c @@ -50,8 +50,7 @@ static void qos_set_machines_devices_available(void) machines_apply_to_node(mach_info); qapi_free_MachineInfoList(mach_info); - type_info = qmp_qom_list_types(true, "device", true, true, - &error_abort); + type_info = qmp_qom_list_types("device", true, true, &error_abort); types_apply_to_node(type_info); qapi_free_ObjectTypeInfoList(type_info); } diff --git a/tests/qtest/hexloader-test.c b/tests/qtest/hexloader-test.c index 8b7aa2d72d..3023548041 100644 --- a/tests/qtest/hexloader-test.c +++ b/tests/qtest/hexloader-test.c @@ -34,12 +34,8 @@ static void hex_loader_test(void) int main(int argc, char **argv) { - int ret; - g_test_init(&argc, &argv, NULL); qtest_add_func("/tmp/hex_loader", hex_loader_test); - ret = g_test_run(); - - return ret; + return g_test_run(); } diff --git a/tests/qtest/libqos/e1000e.c b/tests/qtest/libqos/e1000e.c index 80b3e3db90..37c794b130 100644 --- a/tests/qtest/libqos/e1000e.c +++ b/tests/qtest/libqos/e1000e.c @@ -32,7 +32,6 @@ #define E1000E_IVAR_TEST_CFG \ (((E1000E_RX0_MSG_ID | E1000_IVAR_INT_ALLOC_VALID) << E1000_IVAR_RXQ0_SHIFT) | \ ((E1000E_TX0_MSG_ID | E1000_IVAR_INT_ALLOC_VALID) << E1000_IVAR_TXQ0_SHIFT) | \ - ((E1000E_OTHER_MSG_ID | E1000_IVAR_INT_ALLOC_VALID) << E1000_IVAR_OTHER_SHIFT) | \ E1000_IVAR_TX_INT_EVERY_WB) #define E1000E_RING_LEN (0x1000) @@ -152,6 +151,7 @@ static void e1000e_pci_start_hw(QOSGraphObject *obj) /* Enable transmit */ e1000e_macreg_write(&d->e1000e, E1000_TCTL, E1000_TCTL_EN); + e1000e_macreg_write(&d->e1000e, E1000_RDBAL, (uint32_t)d->e1000e.rx_ring); e1000e_macreg_write(&d->e1000e, E1000_RDBAH, diff --git a/tests/qtest/libqos/e1000e.h b/tests/qtest/libqos/e1000e.h index a22f5fdbad..3bf285af42 100644 --- a/tests/qtest/libqos/e1000e.h +++ b/tests/qtest/libqos/e1000e.h @@ -24,7 +24,6 @@ #define E1000E_RX0_MSG_ID (0) #define E1000E_TX0_MSG_ID (1) -#define E1000E_OTHER_MSG_ID (2) #define 
E1000E_TDLEN (0x3808) #define E1000E_TDT (0x3818) diff --git a/tests/qtest/meson.build b/tests/qtest/meson.build index c07a5b1a5f..f0ebb5fac6 100644 --- a/tests/qtest/meson.build +++ b/tests/qtest/meson.build @@ -1,9 +1,3 @@ -# All QTests for now are POSIX-only, but the dependencies are -# really in libqtest, not in the testcases themselves. -if not config_host.has_key('CONFIG_POSIX') - subdir_done() -endif - slow_qtests = { 'ahci-test' : 60, 'bios-tables-test' : 120, diff --git a/tests/qtest/pvpanic-pci-test.c b/tests/qtest/pvpanic-pci-test.c index c82c365c26..2c05b376ba 100644 --- a/tests/qtest/pvpanic-pci-test.c +++ b/tests/qtest/pvpanic-pci-test.c @@ -86,13 +86,9 @@ static void test_panic(void) int main(int argc, char **argv) { - int ret; - g_test_init(&argc, &argv, NULL); qtest_add_func("/pvpanic-pci/panic", test_panic); qtest_add_func("/pvpanic-pci/panic-nopause", test_panic_nopause); - ret = g_test_run(); - - return ret; + return g_test_run(); } diff --git a/tests/qtest/pvpanic-test.c b/tests/qtest/pvpanic-test.c index bc7b7dfc39..78f1cf8186 100644 --- a/tests/qtest/pvpanic-test.c +++ b/tests/qtest/pvpanic-test.c @@ -59,13 +59,9 @@ static void test_panic(void) int main(int argc, char **argv) { - int ret; - g_test_init(&argc, &argv, NULL); qtest_add_func("/pvpanic/panic", test_panic); qtest_add_func("/pvpanic/panic-nopause", test_panic_nopause); - ret = g_test_run(); - - return ret; + return g_test_run(); } diff --git a/tests/qtest/qmp-cmd-test.c b/tests/qtest/qmp-cmd-test.c index 897e4e937b..98caf6fef6 100644 --- a/tests/qtest/qmp-cmd-test.c +++ b/tests/qtest/qmp-cmd-test.c @@ -174,7 +174,7 @@ static bool object_type_has_mandatory_members(SchemaInfo *type) g_assert(type->meta_type == SCHEMA_META_TYPE_OBJECT); for (tail = type->u.object.members; tail; tail = tail->next) { - if (!tail->value->has_q_default) { + if (!tail->value->q_default) { return true; } } diff --git a/tests/qtest/test-filter-mirror.c b/tests/qtest/test-filter-mirror.c index c8b0a92b53..248fc88699 100644 --- a/tests/qtest/test-filter-mirror.c +++ b/tests/qtest/test-filter-mirror.c @@ -76,12 +76,8 @@ static void test_mirror(void) int main(int argc, char **argv) { - int ret; - g_test_init(&argc, &argv, NULL); qtest_add_func("/netfilter/mirror", test_mirror); - ret = g_test_run(); - - return ret; + return g_test_run(); } diff --git a/tests/qtest/vhost-user-blk-test.c b/tests/qtest/vhost-user-blk-test.c index 07a4c2d500..dc37f5af4d 100644 --- a/tests/qtest/vhost-user-blk-test.c +++ b/tests/qtest/vhost-user-blk-test.c @@ -983,6 +983,12 @@ static void register_vhost_user_blk_test(void) .before = vhost_user_blk_test_setup, }; + if (!getenv("QTEST_QEMU_STORAGE_DAEMON_BINARY")) { + g_test_message("QTEST_QEMU_STORAGE_DAEMON_BINARY not defined, " + "skipping vhost-user-blk-test"); + return; + } + /* * tests for vhost-user-blk and vhost-user-blk-pci * The tests are borrowed from tests/virtio-blk-test.c. 
But some tests diff --git a/tests/qtest/virtio-ccw-test.c b/tests/qtest/virtio-ccw-test.c index d05236407b..2de77bb6fe 100644 --- a/tests/qtest/virtio-ccw-test.c +++ b/tests/qtest/virtio-ccw-test.c @@ -95,8 +95,6 @@ static void virtio_scsi_hotplug(void) int main(int argc, char **argv) { - int ret; - g_test_init(&argc, &argv, NULL); qtest_add_func("/virtio/balloon/nop", virtio_balloon_nop); qtest_add_func("/virtio/console/nop", virtconsole_nop); @@ -109,7 +107,5 @@ int main(int argc, char **argv) qtest_add_func("/virtio/scsi/nop", virtio_scsi_nop); qtest_add_func("/virtio/scsi/hotplug", virtio_scsi_hotplug); - ret = g_test_run(); - - return ret; + return g_test_run(); } diff --git a/tests/tcg/aarch64/system/semiheap.c b/tests/tcg/aarch64/system/semiheap.c index a254bd8982..693a1b037d 100644 --- a/tests/tcg/aarch64/system/semiheap.c +++ b/tests/tcg/aarch64/system/semiheap.c @@ -73,11 +73,11 @@ int main(int argc, char *argv[argc]) ml_printf("stack: %p <- %p\n", info.stack_limit, info.stack_base); /* finally can we read/write the heap */ - ptr_to_heap = (uint32_t *) info.heap_base; + ptr_to_heap = info.heap_base; for (i = 0; i < 512; i++) { *ptr_to_heap++ = i; } - ptr_to_heap = (uint32_t *) info.heap_base; + ptr_to_heap = info.heap_base; for (i = 0; i < 512; i++) { uint32_t tmp = *ptr_to_heap; if (tmp != i) { diff --git a/tests/tcg/multiarch/sha512.c b/tests/tcg/multiarch/sha512.c index e1729828b9..9e701bcf20 100644 --- a/tests/tcg/multiarch/sha512.c +++ b/tests/tcg/multiarch/sha512.c @@ -855,8 +855,6 @@ plan_tests(unsigned int tests) static int exit_status_(void) { - int r; - /* If there's no plan, just return the number of failures */ if(no_plan || !have_plan) { return failures; @@ -865,15 +863,12 @@ exit_status_(void) /* Ran too many tests? Return the number of tests that were run that shouldn't have been */ if(e_tests < test_count) { - r = test_count - e_tests; - return r; + return test_count - e_tests; } /* Return the number of tests that failed + the number of tests that weren't run */ - r = failures + e_tests - test_count; - - return r; + return failures + e_tests - test_count; } int diff --git a/tests/unit/test-bdrv-drain.c b/tests/unit/test-bdrv-drain.c index 09dc4a4891..8cedea4959 100644 --- a/tests/unit/test-bdrv-drain.c +++ b/tests/unit/test-bdrv-drain.c @@ -38,16 +38,26 @@ typedef struct BDRVTestState { bool sleep_in_drain_begin; } BDRVTestState; -static void coroutine_fn bdrv_test_co_drain_begin(BlockDriverState *bs) +static void coroutine_fn sleep_in_drain_begin(void *opaque) +{ + BlockDriverState *bs = opaque; + + qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 100000); + bdrv_dec_in_flight(bs); +} + +static void bdrv_test_drain_begin(BlockDriverState *bs) { BDRVTestState *s = bs->opaque; s->drain_count++; if (s->sleep_in_drain_begin) { - qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 100000); + Coroutine *co = qemu_coroutine_create(sleep_in_drain_begin, bs); + bdrv_inc_in_flight(bs); + aio_co_enter(bdrv_get_aio_context(bs), co); } } -static void coroutine_fn bdrv_test_co_drain_end(BlockDriverState *bs) +static void bdrv_test_drain_end(BlockDriverState *bs) { BDRVTestState *s = bs->opaque; s->drain_count--; @@ -101,8 +111,8 @@ static BlockDriver bdrv_test = { .bdrv_close = bdrv_test_close, .bdrv_co_preadv = bdrv_test_co_preadv, - .bdrv_co_drain_begin = bdrv_test_co_drain_begin, - .bdrv_co_drain_end = bdrv_test_co_drain_end, + .bdrv_drain_begin = bdrv_test_drain_begin, + .bdrv_drain_end = bdrv_test_drain_end, .bdrv_child_perm = bdrv_default_perms, @@ -146,7 +156,6 @@ static void 
call_in_coroutine(void (*entry)(void)) enum drain_type { BDRV_DRAIN_ALL, BDRV_DRAIN, - BDRV_SUBTREE_DRAIN, DRAIN_TYPE_MAX, }; @@ -155,7 +164,6 @@ static void do_drain_begin(enum drain_type drain_type, BlockDriverState *bs) switch (drain_type) { case BDRV_DRAIN_ALL: bdrv_drain_all_begin(); break; case BDRV_DRAIN: bdrv_drained_begin(bs); break; - case BDRV_SUBTREE_DRAIN: bdrv_subtree_drained_begin(bs); break; default: g_assert_not_reached(); } } @@ -165,7 +173,6 @@ static void do_drain_end(enum drain_type drain_type, BlockDriverState *bs) switch (drain_type) { case BDRV_DRAIN_ALL: bdrv_drain_all_end(); break; case BDRV_DRAIN: bdrv_drained_end(bs); break; - case BDRV_SUBTREE_DRAIN: bdrv_subtree_drained_end(bs); break; default: g_assert_not_reached(); } } @@ -261,11 +268,6 @@ static void test_drv_cb_drain(void) test_drv_cb_common(BDRV_DRAIN, false); } -static void test_drv_cb_drain_subtree(void) -{ - test_drv_cb_common(BDRV_SUBTREE_DRAIN, true); -} - static void test_drv_cb_co_drain_all(void) { call_in_coroutine(test_drv_cb_drain_all); @@ -276,11 +278,6 @@ static void test_drv_cb_co_drain(void) call_in_coroutine(test_drv_cb_drain); } -static void test_drv_cb_co_drain_subtree(void) -{ - call_in_coroutine(test_drv_cb_drain_subtree); -} - static void test_quiesce_common(enum drain_type drain_type, bool recursive) { BlockBackend *blk; @@ -299,7 +296,11 @@ static void test_quiesce_common(enum drain_type drain_type, bool recursive) do_drain_begin(drain_type, bs); - g_assert_cmpint(bs->quiesce_counter, ==, 1); + if (drain_type == BDRV_DRAIN_ALL) { + g_assert_cmpint(bs->quiesce_counter, ==, 2); + } else { + g_assert_cmpint(bs->quiesce_counter, ==, 1); + } g_assert_cmpint(backing->quiesce_counter, ==, !!recursive); do_drain_end(drain_type, bs); @@ -322,11 +323,6 @@ static void test_quiesce_drain(void) test_quiesce_common(BDRV_DRAIN, false); } -static void test_quiesce_drain_subtree(void) -{ - test_quiesce_common(BDRV_SUBTREE_DRAIN, true); -} - static void test_quiesce_co_drain_all(void) { call_in_coroutine(test_quiesce_drain_all); @@ -337,11 +333,6 @@ static void test_quiesce_co_drain(void) call_in_coroutine(test_quiesce_drain); } -static void test_quiesce_co_drain_subtree(void) -{ - call_in_coroutine(test_quiesce_drain_subtree); -} - static void test_nested(void) { BlockBackend *blk; @@ -361,8 +352,8 @@ static void test_nested(void) for (outer = 0; outer < DRAIN_TYPE_MAX; outer++) { for (inner = 0; inner < DRAIN_TYPE_MAX; inner++) { - int backing_quiesce = (outer != BDRV_DRAIN) + - (inner != BDRV_DRAIN); + int backing_quiesce = (outer == BDRV_DRAIN_ALL) + + (inner == BDRV_DRAIN_ALL); g_assert_cmpint(bs->quiesce_counter, ==, 0); g_assert_cmpint(backing->quiesce_counter, ==, 0); @@ -372,10 +363,10 @@ static void test_nested(void) do_drain_begin(outer, bs); do_drain_begin(inner, bs); - g_assert_cmpint(bs->quiesce_counter, ==, 2); + g_assert_cmpint(bs->quiesce_counter, ==, 2 + !!backing_quiesce); g_assert_cmpint(backing->quiesce_counter, ==, backing_quiesce); - g_assert_cmpint(s->drain_count, ==, 2); - g_assert_cmpint(backing_s->drain_count, ==, backing_quiesce); + g_assert_cmpint(s->drain_count, ==, 1); + g_assert_cmpint(backing_s->drain_count, ==, !!backing_quiesce); do_drain_end(inner, bs); do_drain_end(outer, bs); @@ -392,158 +383,6 @@ static void test_nested(void) blk_unref(blk); } -static void test_multiparent(void) -{ - BlockBackend *blk_a, *blk_b; - BlockDriverState *bs_a, *bs_b, *backing; - BDRVTestState *a_s, *b_s, *backing_s; - - blk_a = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, 
BLK_PERM_ALL); - bs_a = bdrv_new_open_driver(&bdrv_test, "test-node-a", BDRV_O_RDWR, - &error_abort); - a_s = bs_a->opaque; - blk_insert_bs(blk_a, bs_a, &error_abort); - - blk_b = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL); - bs_b = bdrv_new_open_driver(&bdrv_test, "test-node-b", BDRV_O_RDWR, - &error_abort); - b_s = bs_b->opaque; - blk_insert_bs(blk_b, bs_b, &error_abort); - - backing = bdrv_new_open_driver(&bdrv_test, "backing", 0, &error_abort); - backing_s = backing->opaque; - bdrv_set_backing_hd(bs_a, backing, &error_abort); - bdrv_set_backing_hd(bs_b, backing, &error_abort); - - g_assert_cmpint(bs_a->quiesce_counter, ==, 0); - g_assert_cmpint(bs_b->quiesce_counter, ==, 0); - g_assert_cmpint(backing->quiesce_counter, ==, 0); - g_assert_cmpint(a_s->drain_count, ==, 0); - g_assert_cmpint(b_s->drain_count, ==, 0); - g_assert_cmpint(backing_s->drain_count, ==, 0); - - do_drain_begin(BDRV_SUBTREE_DRAIN, bs_a); - - g_assert_cmpint(bs_a->quiesce_counter, ==, 1); - g_assert_cmpint(bs_b->quiesce_counter, ==, 1); - g_assert_cmpint(backing->quiesce_counter, ==, 1); - g_assert_cmpint(a_s->drain_count, ==, 1); - g_assert_cmpint(b_s->drain_count, ==, 1); - g_assert_cmpint(backing_s->drain_count, ==, 1); - - do_drain_begin(BDRV_SUBTREE_DRAIN, bs_b); - - g_assert_cmpint(bs_a->quiesce_counter, ==, 2); - g_assert_cmpint(bs_b->quiesce_counter, ==, 2); - g_assert_cmpint(backing->quiesce_counter, ==, 2); - g_assert_cmpint(a_s->drain_count, ==, 2); - g_assert_cmpint(b_s->drain_count, ==, 2); - g_assert_cmpint(backing_s->drain_count, ==, 2); - - do_drain_end(BDRV_SUBTREE_DRAIN, bs_b); - - g_assert_cmpint(bs_a->quiesce_counter, ==, 1); - g_assert_cmpint(bs_b->quiesce_counter, ==, 1); - g_assert_cmpint(backing->quiesce_counter, ==, 1); - g_assert_cmpint(a_s->drain_count, ==, 1); - g_assert_cmpint(b_s->drain_count, ==, 1); - g_assert_cmpint(backing_s->drain_count, ==, 1); - - do_drain_end(BDRV_SUBTREE_DRAIN, bs_a); - - g_assert_cmpint(bs_a->quiesce_counter, ==, 0); - g_assert_cmpint(bs_b->quiesce_counter, ==, 0); - g_assert_cmpint(backing->quiesce_counter, ==, 0); - g_assert_cmpint(a_s->drain_count, ==, 0); - g_assert_cmpint(b_s->drain_count, ==, 0); - g_assert_cmpint(backing_s->drain_count, ==, 0); - - bdrv_unref(backing); - bdrv_unref(bs_a); - bdrv_unref(bs_b); - blk_unref(blk_a); - blk_unref(blk_b); -} - -static void test_graph_change_drain_subtree(void) -{ - BlockBackend *blk_a, *blk_b; - BlockDriverState *bs_a, *bs_b, *backing; - BDRVTestState *a_s, *b_s, *backing_s; - - blk_a = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL); - bs_a = bdrv_new_open_driver(&bdrv_test, "test-node-a", BDRV_O_RDWR, - &error_abort); - a_s = bs_a->opaque; - blk_insert_bs(blk_a, bs_a, &error_abort); - - blk_b = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL); - bs_b = bdrv_new_open_driver(&bdrv_test, "test-node-b", BDRV_O_RDWR, - &error_abort); - b_s = bs_b->opaque; - blk_insert_bs(blk_b, bs_b, &error_abort); - - backing = bdrv_new_open_driver(&bdrv_test, "backing", 0, &error_abort); - backing_s = backing->opaque; - bdrv_set_backing_hd(bs_a, backing, &error_abort); - - g_assert_cmpint(bs_a->quiesce_counter, ==, 0); - g_assert_cmpint(bs_b->quiesce_counter, ==, 0); - g_assert_cmpint(backing->quiesce_counter, ==, 0); - g_assert_cmpint(a_s->drain_count, ==, 0); - g_assert_cmpint(b_s->drain_count, ==, 0); - g_assert_cmpint(backing_s->drain_count, ==, 0); - - do_drain_begin(BDRV_SUBTREE_DRAIN, bs_a); - do_drain_begin(BDRV_SUBTREE_DRAIN, bs_a); - do_drain_begin(BDRV_SUBTREE_DRAIN, bs_a); - 
do_drain_begin(BDRV_SUBTREE_DRAIN, bs_b); - do_drain_begin(BDRV_SUBTREE_DRAIN, bs_b); - - bdrv_set_backing_hd(bs_b, backing, &error_abort); - g_assert_cmpint(bs_a->quiesce_counter, ==, 5); - g_assert_cmpint(bs_b->quiesce_counter, ==, 5); - g_assert_cmpint(backing->quiesce_counter, ==, 5); - g_assert_cmpint(a_s->drain_count, ==, 5); - g_assert_cmpint(b_s->drain_count, ==, 5); - g_assert_cmpint(backing_s->drain_count, ==, 5); - - bdrv_set_backing_hd(bs_b, NULL, &error_abort); - g_assert_cmpint(bs_a->quiesce_counter, ==, 3); - g_assert_cmpint(bs_b->quiesce_counter, ==, 2); - g_assert_cmpint(backing->quiesce_counter, ==, 3); - g_assert_cmpint(a_s->drain_count, ==, 3); - g_assert_cmpint(b_s->drain_count, ==, 2); - g_assert_cmpint(backing_s->drain_count, ==, 3); - - bdrv_set_backing_hd(bs_b, backing, &error_abort); - g_assert_cmpint(bs_a->quiesce_counter, ==, 5); - g_assert_cmpint(bs_b->quiesce_counter, ==, 5); - g_assert_cmpint(backing->quiesce_counter, ==, 5); - g_assert_cmpint(a_s->drain_count, ==, 5); - g_assert_cmpint(b_s->drain_count, ==, 5); - g_assert_cmpint(backing_s->drain_count, ==, 5); - - do_drain_end(BDRV_SUBTREE_DRAIN, bs_b); - do_drain_end(BDRV_SUBTREE_DRAIN, bs_b); - do_drain_end(BDRV_SUBTREE_DRAIN, bs_a); - do_drain_end(BDRV_SUBTREE_DRAIN, bs_a); - do_drain_end(BDRV_SUBTREE_DRAIN, bs_a); - - g_assert_cmpint(bs_a->quiesce_counter, ==, 0); - g_assert_cmpint(bs_b->quiesce_counter, ==, 0); - g_assert_cmpint(backing->quiesce_counter, ==, 0); - g_assert_cmpint(a_s->drain_count, ==, 0); - g_assert_cmpint(b_s->drain_count, ==, 0); - g_assert_cmpint(backing_s->drain_count, ==, 0); - - bdrv_unref(backing); - bdrv_unref(bs_a); - bdrv_unref(bs_b); - blk_unref(blk_a); - blk_unref(blk_b); -} - static void test_graph_change_drain_all(void) { BlockBackend *blk_a, *blk_b; @@ -763,12 +602,6 @@ static void test_iothread_drain(void) test_iothread_common(BDRV_DRAIN, 1); } -static void test_iothread_drain_subtree(void) -{ - test_iothread_common(BDRV_SUBTREE_DRAIN, 0); - test_iothread_common(BDRV_SUBTREE_DRAIN, 1); -} - typedef struct TestBlockJob { BlockJob common; @@ -853,7 +686,6 @@ enum test_job_result { enum test_job_drain_node { TEST_JOB_DRAIN_SRC, TEST_JOB_DRAIN_SRC_CHILD, - TEST_JOB_DRAIN_SRC_PARENT, }; static void test_blockjob_common_drain_node(enum drain_type drain_type, @@ -891,9 +723,6 @@ static void test_blockjob_common_drain_node(enum drain_type drain_type, case TEST_JOB_DRAIN_SRC_CHILD: drain_bs = src_backing; break; - case TEST_JOB_DRAIN_SRC_PARENT: - drain_bs = src_overlay; - break; default: g_assert_not_reached(); } @@ -1045,10 +874,6 @@ static void test_blockjob_common(enum drain_type drain_type, bool use_iothread, TEST_JOB_DRAIN_SRC); test_blockjob_common_drain_node(drain_type, use_iothread, result, TEST_JOB_DRAIN_SRC_CHILD); - if (drain_type == BDRV_SUBTREE_DRAIN) { - test_blockjob_common_drain_node(drain_type, use_iothread, result, - TEST_JOB_DRAIN_SRC_PARENT); - } } static void test_blockjob_drain_all(void) @@ -1061,11 +886,6 @@ static void test_blockjob_drain(void) test_blockjob_common(BDRV_DRAIN, false, TEST_JOB_SUCCESS); } -static void test_blockjob_drain_subtree(void) -{ - test_blockjob_common(BDRV_SUBTREE_DRAIN, false, TEST_JOB_SUCCESS); -} - static void test_blockjob_error_drain_all(void) { test_blockjob_common(BDRV_DRAIN_ALL, false, TEST_JOB_FAIL_RUN); @@ -1078,12 +898,6 @@ static void test_blockjob_error_drain(void) test_blockjob_common(BDRV_DRAIN, false, TEST_JOB_FAIL_PREPARE); } -static void test_blockjob_error_drain_subtree(void) -{ - 
test_blockjob_common(BDRV_SUBTREE_DRAIN, false, TEST_JOB_FAIL_RUN); - test_blockjob_common(BDRV_SUBTREE_DRAIN, false, TEST_JOB_FAIL_PREPARE); -} - static void test_blockjob_iothread_drain_all(void) { test_blockjob_common(BDRV_DRAIN_ALL, true, TEST_JOB_SUCCESS); @@ -1094,11 +908,6 @@ static void test_blockjob_iothread_drain(void) test_blockjob_common(BDRV_DRAIN, true, TEST_JOB_SUCCESS); } -static void test_blockjob_iothread_drain_subtree(void) -{ - test_blockjob_common(BDRV_SUBTREE_DRAIN, true, TEST_JOB_SUCCESS); -} - static void test_blockjob_iothread_error_drain_all(void) { test_blockjob_common(BDRV_DRAIN_ALL, true, TEST_JOB_FAIL_RUN); @@ -1111,12 +920,6 @@ static void test_blockjob_iothread_error_drain(void) test_blockjob_common(BDRV_DRAIN, true, TEST_JOB_FAIL_PREPARE); } -static void test_blockjob_iothread_error_drain_subtree(void) -{ - test_blockjob_common(BDRV_SUBTREE_DRAIN, true, TEST_JOB_FAIL_RUN); - test_blockjob_common(BDRV_SUBTREE_DRAIN, true, TEST_JOB_FAIL_PREPARE); -} - typedef struct BDRVTestTopState { BdrvChild *wait_child; @@ -1263,14 +1066,6 @@ static void do_test_delete_by_drain(bool detach_instead_of_delete, bdrv_drain(child_bs); bdrv_unref(child_bs); break; - case BDRV_SUBTREE_DRAIN: - /* Would have to ref/unref bs here for !detach_instead_of_delete, but - * then the whole test becomes pointless because the graph changes - * don't occur during the drain any more. */ - assert(detach_instead_of_delete); - bdrv_subtree_drained_begin(bs); - bdrv_subtree_drained_end(bs); - break; case BDRV_DRAIN_ALL: bdrv_drain_all_begin(); bdrv_drain_all_end(); @@ -1305,11 +1100,6 @@ static void test_detach_by_drain(void) do_test_delete_by_drain(true, BDRV_DRAIN); } -static void test_detach_by_drain_subtree(void) -{ - do_test_delete_by_drain(true, BDRV_SUBTREE_DRAIN); -} - struct detach_by_parent_data { BlockDriverState *parent_b; @@ -1317,6 +1107,7 @@ struct detach_by_parent_data { BlockDriverState *c; BdrvChild *child_c; bool by_parent_cb; + bool detach_on_drain; }; static struct detach_by_parent_data detach_by_parent_data; @@ -1324,6 +1115,7 @@ static void detach_indirect_bh(void *opaque) { struct detach_by_parent_data *data = opaque; + bdrv_dec_in_flight(data->child_b->bs); bdrv_unref_child(data->parent_b, data->child_b); bdrv_ref(data->c); @@ -1338,12 +1130,21 @@ static void detach_by_parent_aio_cb(void *opaque, int ret) g_assert_cmpint(ret, ==, 0); if (data->by_parent_cb) { + bdrv_inc_in_flight(data->child_b->bs); detach_indirect_bh(data); } } static void detach_by_driver_cb_drained_begin(BdrvChild *child) { + struct detach_by_parent_data *data = &detach_by_parent_data; + + if (!data->detach_on_drain) { + return; + } + data->detach_on_drain = false; + + bdrv_inc_in_flight(data->child_b->bs); aio_bh_schedule_oneshot(qemu_get_current_aio_context(), detach_indirect_bh, &detach_by_parent_data); child_of_bds.drained_begin(child); @@ -1384,8 +1185,14 @@ static void test_detach_indirect(bool by_parent_cb) detach_by_driver_cb_class = child_of_bds; detach_by_driver_cb_class.drained_begin = detach_by_driver_cb_drained_begin; + detach_by_driver_cb_class.drained_end = NULL; + detach_by_driver_cb_class.drained_poll = NULL; } + detach_by_parent_data = (struct detach_by_parent_data) { + .detach_on_drain = false, + }; + /* Create all involved nodes */ parent_a = bdrv_new_open_driver(&bdrv_test, "parent-a", BDRV_O_RDWR, &error_abort); @@ -1437,12 +1244,16 @@ static void test_detach_indirect(bool by_parent_cb) .child_b = child_b, .c = c, .by_parent_cb = by_parent_cb, + .detach_on_drain = true, }; acb = 
blk_aio_preadv(blk, 0, &qiov, 0, detach_by_parent_aio_cb, NULL); g_assert(acb != NULL); /* Drain and check the expected result */ - bdrv_subtree_drained_begin(parent_b); + bdrv_drained_begin(parent_b); + bdrv_drained_begin(a); + bdrv_drained_begin(b); + bdrv_drained_begin(c); g_assert(detach_by_parent_data.child_c != NULL); @@ -1457,12 +1268,15 @@ static void test_detach_indirect(bool by_parent_cb) g_assert(QLIST_NEXT(child_a, next) == NULL); g_assert_cmpint(parent_a->quiesce_counter, ==, 1); - g_assert_cmpint(parent_b->quiesce_counter, ==, 1); + g_assert_cmpint(parent_b->quiesce_counter, ==, 3); g_assert_cmpint(a->quiesce_counter, ==, 1); - g_assert_cmpint(b->quiesce_counter, ==, 0); + g_assert_cmpint(b->quiesce_counter, ==, 1); g_assert_cmpint(c->quiesce_counter, ==, 1); - bdrv_subtree_drained_end(parent_b); + bdrv_drained_end(parent_b); + bdrv_drained_end(a); + bdrv_drained_end(b); + bdrv_drained_end(c); bdrv_unref(parent_b); blk_unref(blk); @@ -1693,6 +1507,7 @@ static void test_blockjob_commit_by_drained_end(void) bdrv_drained_begin(bs_child); g_assert(!job_has_completed); bdrv_drained_end(bs_child); + aio_poll(qemu_get_aio_context(), false); g_assert(job_has_completed); bdrv_unref(bs_parents[0]); @@ -1848,6 +1663,7 @@ static void test_drop_intermediate_poll(void) g_assert(!job_has_completed); ret = bdrv_drop_intermediate(chain[1], chain[0], NULL); + aio_poll(qemu_get_aio_context(), false); g_assert(ret == 0); g_assert(job_has_completed); @@ -1856,6 +1672,7 @@ static void test_drop_intermediate_poll(void) typedef struct BDRVReplaceTestState { + bool setup_completed; bool was_drained; bool was_undrained; bool has_read; @@ -1916,52 +1733,78 @@ static int coroutine_fn bdrv_replace_test_co_preadv(BlockDriverState *bs, return 0; } +static void coroutine_fn bdrv_replace_test_drain_co(void *opaque) +{ + BlockDriverState *bs = opaque; + BDRVReplaceTestState *s = bs->opaque; + + /* Keep waking io_co up until it is done */ + while (s->io_co) { + aio_co_wake(s->io_co); + s->io_co = NULL; + qemu_coroutine_yield(); + } + s->drain_co = NULL; + bdrv_dec_in_flight(bs); +} + /** * If .drain_count is 0, wake up .io_co if there is one; and set * .was_drained. * Increment .drain_count. */ -static void coroutine_fn bdrv_replace_test_co_drain_begin(BlockDriverState *bs) +static void bdrv_replace_test_drain_begin(BlockDriverState *bs) { BDRVReplaceTestState *s = bs->opaque; - if (!s->drain_count) { - /* Keep waking io_co up until it is done */ - s->drain_co = qemu_coroutine_self(); - while (s->io_co) { - aio_co_wake(s->io_co); - s->io_co = NULL; - qemu_coroutine_yield(); - } - s->drain_co = NULL; + if (!s->setup_completed) { + return; + } + if (!s->drain_count) { + s->drain_co = qemu_coroutine_create(bdrv_replace_test_drain_co, bs); + bdrv_inc_in_flight(bs); + aio_co_enter(bdrv_get_aio_context(bs), s->drain_co); s->was_drained = true; } s->drain_count++; } +static void coroutine_fn bdrv_replace_test_read_entry(void *opaque) +{ + BlockDriverState *bs = opaque; + char data; + QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, &data, 1); + int ret; + + /* Queue a read request post-drain */ + ret = bdrv_replace_test_co_preadv(bs, 0, 1, &qiov, 0); + g_assert(ret >= 0); + bdrv_dec_in_flight(bs); +} + /** * Reduce .drain_count, set .was_undrained once it reaches 0. * If .drain_count reaches 0 and the node has a backing file, issue a * read request. 
*/ -static void coroutine_fn bdrv_replace_test_co_drain_end(BlockDriverState *bs) +static void bdrv_replace_test_drain_end(BlockDriverState *bs) { BDRVReplaceTestState *s = bs->opaque; + if (!s->setup_completed) { + return; + } + g_assert(s->drain_count > 0); if (!--s->drain_count) { - int ret; - s->was_undrained = true; if (bs->backing) { - char data; - QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, &data, 1); - - /* Queue a read request post-drain */ - ret = bdrv_replace_test_co_preadv(bs, 0, 1, &qiov, 0); - g_assert(ret >= 0); + Coroutine *co = qemu_coroutine_create(bdrv_replace_test_read_entry, + bs); + bdrv_inc_in_flight(bs); + aio_co_enter(bdrv_get_aio_context(bs), co); } } } @@ -1974,8 +1817,8 @@ static BlockDriver bdrv_replace_test = { .bdrv_close = bdrv_replace_test_close, .bdrv_co_preadv = bdrv_replace_test_co_preadv, - .bdrv_co_drain_begin = bdrv_replace_test_co_drain_begin, - .bdrv_co_drain_end = bdrv_replace_test_co_drain_end, + .bdrv_drain_begin = bdrv_replace_test_drain_begin, + .bdrv_drain_end = bdrv_replace_test_drain_end, .bdrv_child_perm = bdrv_default_perms, }; @@ -2051,6 +1894,7 @@ static void do_test_replace_child_mid_drain(int old_drain_count, bdrv_ref(old_child_bs); bdrv_attach_child(parent_bs, old_child_bs, "child", &child_of_bds, BDRV_CHILD_COW, &error_abort); + parent_s->setup_completed = true; for (i = 0; i < old_drain_count; i++) { bdrv_drained_begin(old_child_bs); @@ -2172,70 +2016,47 @@ int main(int argc, char **argv) g_test_add_func("/bdrv-drain/driver-cb/drain_all", test_drv_cb_drain_all); g_test_add_func("/bdrv-drain/driver-cb/drain", test_drv_cb_drain); - g_test_add_func("/bdrv-drain/driver-cb/drain_subtree", - test_drv_cb_drain_subtree); g_test_add_func("/bdrv-drain/driver-cb/co/drain_all", test_drv_cb_co_drain_all); g_test_add_func("/bdrv-drain/driver-cb/co/drain", test_drv_cb_co_drain); - g_test_add_func("/bdrv-drain/driver-cb/co/drain_subtree", - test_drv_cb_co_drain_subtree); - g_test_add_func("/bdrv-drain/quiesce/drain_all", test_quiesce_drain_all); g_test_add_func("/bdrv-drain/quiesce/drain", test_quiesce_drain); - g_test_add_func("/bdrv-drain/quiesce/drain_subtree", - test_quiesce_drain_subtree); g_test_add_func("/bdrv-drain/quiesce/co/drain_all", test_quiesce_co_drain_all); g_test_add_func("/bdrv-drain/quiesce/co/drain", test_quiesce_co_drain); - g_test_add_func("/bdrv-drain/quiesce/co/drain_subtree", - test_quiesce_co_drain_subtree); g_test_add_func("/bdrv-drain/nested", test_nested); - g_test_add_func("/bdrv-drain/multiparent", test_multiparent); - g_test_add_func("/bdrv-drain/graph-change/drain_subtree", - test_graph_change_drain_subtree); g_test_add_func("/bdrv-drain/graph-change/drain_all", test_graph_change_drain_all); g_test_add_func("/bdrv-drain/iothread/drain_all", test_iothread_drain_all); g_test_add_func("/bdrv-drain/iothread/drain", test_iothread_drain); - g_test_add_func("/bdrv-drain/iothread/drain_subtree", - test_iothread_drain_subtree); g_test_add_func("/bdrv-drain/blockjob/drain_all", test_blockjob_drain_all); g_test_add_func("/bdrv-drain/blockjob/drain", test_blockjob_drain); - g_test_add_func("/bdrv-drain/blockjob/drain_subtree", - test_blockjob_drain_subtree); g_test_add_func("/bdrv-drain/blockjob/error/drain_all", test_blockjob_error_drain_all); g_test_add_func("/bdrv-drain/blockjob/error/drain", test_blockjob_error_drain); - g_test_add_func("/bdrv-drain/blockjob/error/drain_subtree", - test_blockjob_error_drain_subtree); g_test_add_func("/bdrv-drain/blockjob/iothread/drain_all", test_blockjob_iothread_drain_all); 
g_test_add_func("/bdrv-drain/blockjob/iothread/drain", test_blockjob_iothread_drain); - g_test_add_func("/bdrv-drain/blockjob/iothread/drain_subtree", - test_blockjob_iothread_drain_subtree); g_test_add_func("/bdrv-drain/blockjob/iothread/error/drain_all", test_blockjob_iothread_error_drain_all); g_test_add_func("/bdrv-drain/blockjob/iothread/error/drain", test_blockjob_iothread_error_drain); - g_test_add_func("/bdrv-drain/blockjob/iothread/error/drain_subtree", - test_blockjob_iothread_error_drain_subtree); g_test_add_func("/bdrv-drain/deletion/drain", test_delete_by_drain); g_test_add_func("/bdrv-drain/detach/drain_all", test_detach_by_drain_all); g_test_add_func("/bdrv-drain/detach/drain", test_detach_by_drain); - g_test_add_func("/bdrv-drain/detach/drain_subtree", test_detach_by_drain_subtree); g_test_add_func("/bdrv-drain/detach/parent_cb", test_detach_by_parent_cb); g_test_add_func("/bdrv-drain/detach/driver_cb", test_detach_by_driver_cb); diff --git a/tests/unit/test-char.c b/tests/unit/test-char.c index 5b3b48ebac..649fdf64e1 100644 --- a/tests/unit/test-char.c +++ b/tests/unit/test-char.c @@ -1212,7 +1212,6 @@ static void char_file_fifo_test(void) char *fifo = g_build_filename(tmp_path, "fifo", NULL); char *out = g_build_filename(tmp_path, "out", NULL); ChardevFile file = { .in = fifo, - .has_in = true, .out = out }; ChardevBackend backend = { .type = CHARDEV_BACKEND_KIND_FILE, .u.file.data = &file }; diff --git a/tests/unit/test-crypto-block.c b/tests/unit/test-crypto-block.c index b629e240a9..347cd5f3d7 100644 --- a/tests/unit/test-crypto-block.c +++ b/tests/unit/test-crypto-block.c @@ -41,7 +41,6 @@ static QCryptoBlockCreateOptions qcow_create_opts = { .format = Q_CRYPTO_BLOCK_FORMAT_QCOW, .u.qcow = { - .has_key_secret = true, .key_secret = (char *)"sec0", }, }; @@ -49,7 +48,6 @@ static QCryptoBlockCreateOptions qcow_create_opts = { static QCryptoBlockOpenOptions qcow_open_opts = { .format = Q_CRYPTO_BLOCK_FORMAT_QCOW, .u.qcow = { - .has_key_secret = true, .key_secret = (char *)"sec0", }, }; @@ -59,7 +57,6 @@ static QCryptoBlockOpenOptions qcow_open_opts = { static QCryptoBlockOpenOptions luks_open_opts = { .format = Q_CRYPTO_BLOCK_FORMAT_LUKS, .u.luks = { - .has_key_secret = true, .key_secret = (char *)"sec0", }, }; @@ -69,7 +66,6 @@ static QCryptoBlockOpenOptions luks_open_opts = { static QCryptoBlockCreateOptions luks_create_opts_default = { .format = Q_CRYPTO_BLOCK_FORMAT_LUKS, .u.luks = { - .has_key_secret = true, .key_secret = (char *)"sec0", }, }; @@ -79,7 +75,6 @@ static QCryptoBlockCreateOptions luks_create_opts_default = { static QCryptoBlockCreateOptions luks_create_opts_aes256_cbc_plain64 = { .format = Q_CRYPTO_BLOCK_FORMAT_LUKS, .u.luks = { - .has_key_secret = true, .key_secret = (char *)"sec0", .has_cipher_alg = true, .cipher_alg = QCRYPTO_CIPHER_ALG_AES_256, @@ -94,7 +89,6 @@ static QCryptoBlockCreateOptions luks_create_opts_aes256_cbc_plain64 = { static QCryptoBlockCreateOptions luks_create_opts_aes256_cbc_essiv = { .format = Q_CRYPTO_BLOCK_FORMAT_LUKS, .u.luks = { - .has_key_secret = true, .key_secret = (char *)"sec0", .has_cipher_alg = true, .cipher_alg = QCRYPTO_CIPHER_ALG_AES_256, diff --git a/tests/unit/test-qmp-cmds.c b/tests/unit/test-qmp-cmds.c index 6085c09995..2373cd64cb 100644 --- a/tests/unit/test-qmp-cmds.c +++ b/tests/unit/test-qmp-cmds.c @@ -43,15 +43,15 @@ void qmp_user_def_cmd1(UserDefOne * ud1, Error **errp) { } -FeatureStruct1 *qmp_test_features0(bool has_fs0, FeatureStruct0 *fs0, - bool has_fs1, FeatureStruct1 *fs1, - bool has_fs2, 
FeatureStruct2 *fs2, - bool has_fs3, FeatureStruct3 *fs3, - bool has_fs4, FeatureStruct4 *fs4, - bool has_cfs1, CondFeatureStruct1 *cfs1, - bool has_cfs2, CondFeatureStruct2 *cfs2, - bool has_cfs3, CondFeatureStruct3 *cfs3, - bool has_cfs4, CondFeatureStruct4 *cfs4, +FeatureStruct1 *qmp_test_features0(FeatureStruct0 *fs0, + FeatureStruct1 *fs1, + FeatureStruct2 *fs2, + FeatureStruct3 *fs3, + FeatureStruct4 *fs4, + CondFeatureStruct1 *cfs1, + CondFeatureStruct2 *cfs2, + CondFeatureStruct3 *cfs3, + CondFeatureStruct4 *cfs4, Error **errp) { return g_new0(FeatureStruct1, 1); @@ -77,8 +77,7 @@ void qmp_test_command_cond_features3(Error **errp) { } -UserDefTwo *qmp_user_def_cmd2(UserDefOne *ud1a, - bool has_udb1, UserDefOne *ud1b, +UserDefTwo *qmp_user_def_cmd2(UserDefOne *ud1a, UserDefOne *ud1b, Error **errp) { UserDefTwo *ret; @@ -87,8 +86,8 @@ UserDefTwo *qmp_user_def_cmd2(UserDefOne *ud1a, ud1c->string = strdup(ud1a->string); ud1c->integer = ud1a->integer; - ud1d->string = strdup(has_udb1 ? ud1b->string : "blah0"); - ud1d->integer = has_udb1 ? ud1b->integer : 0; + ud1d->string = strdup(ud1b ? ud1b->string : "blah0"); + ud1d->integer = ud1b ? ud1b->integer : 0; ret = g_new0(UserDefTwo, 1); ret->string0 = strdup("blah1"); @@ -98,7 +97,6 @@ UserDefTwo *qmp_user_def_cmd2(UserDefOne *ud1a, ret->dict1->dict2->userdef = ud1c; ret->dict1->dict2->string = strdup("blah3"); ret->dict1->dict3 = g_new0(UserDefTwoDictDict, 1); - ret->dict1->has_dict3 = true; ret->dict1->dict3->userdef = ud1d; ret->dict1->dict3->string = strdup("blah4"); diff --git a/tests/unit/test-qmp-event.c b/tests/unit/test-qmp-event.c index 7d961d716a..3626d2372f 100644 --- a/tests/unit/test-qmp-event.c +++ b/tests/unit/test-qmp-event.c @@ -109,7 +109,7 @@ static void test_event_c(TestEventData *data, data->expect = qdict_from_jsonf_nofail( "{ 'event': 'EVENT_C', 'data': {" " 'a': 1, 'b': { 'integer': 2, 'string': 'test1' }, 'c': 'test2' } }"); - qapi_event_send_event_c(true, 1, true, &b, "test2"); + qapi_event_send_event_c(true, 1, &b, "test2"); g_assert(data->emitted); qobject_unref(data->expect); } @@ -135,7 +135,7 @@ static void test_event_d(TestEventData *data, " 'struct1': { 'integer': 2, 'string': 'test1', 'enum1': 'value1' }," " 'string': 'test2', 'enum2': 'value2' }," " 'b': 'test3', 'enum3': 'value3' } }"); - qapi_event_send_event_d(&a, "test3", false, NULL, true, ENUM_ONE_VALUE3); + qapi_event_send_event_d(&a, "test3", NULL, true, ENUM_ONE_VALUE3); g_assert(data->emitted); qobject_unref(data->expect); } diff --git a/tests/unit/test-qobject-input-visitor.c b/tests/unit/test-qobject-input-visitor.c index 5f614afdbf..77fbf985be 100644 --- a/tests/unit/test-qobject-input-visitor.c +++ b/tests/unit/test-qobject-input-visitor.c @@ -431,7 +431,7 @@ static void test_visitor_in_struct_nested(TestInputVisitorData *data, g_assert_cmpint(udp->dict1->dict2->userdef->integer, ==, 42); g_assert_cmpstr(udp->dict1->dict2->userdef->string, ==, "string"); g_assert_cmpstr(udp->dict1->dict2->string, ==, "string2"); - g_assert(udp->dict1->has_dict3 == false); + g_assert(!udp->dict1->dict3); } static void test_visitor_in_list(TestInputVisitorData *data, diff --git a/tests/unit/test-qobject-output-visitor.c b/tests/unit/test-qobject-output-visitor.c index 66b27fad66..7f054289fe 100644 --- a/tests/unit/test-qobject-output-visitor.c +++ b/tests/unit/test-qobject-output-visitor.c @@ -182,7 +182,6 @@ static void test_visitor_out_struct_nested(TestOutputVisitorData *data, ud2->dict1->dict2->string = g_strdup(strings[2]); ud2->dict1->dict3 = 
g_malloc0(sizeof(*ud2->dict1->dict3)); - ud2->dict1->has_dict3 = true; ud2->dict1->dict3->userdef = g_new0(UserDefOne, 1); ud2->dict1->dict3->userdef->string = g_strdup(string); ud2->dict1->dict3->userdef->integer = value; @@ -284,7 +283,6 @@ static void test_visitor_out_list_qapi_free(TestOutputVisitorData *data, value->dict1->dict2->userdef->string = g_strdup(string); value->dict1->dict2->userdef->integer = 42; value->dict1->dict2->string = g_strdup(string); - value->dict1->has_dict3 = false; QAPI_LIST_PREPEND(head, value); } diff --git a/tests/unit/test-visitor-serialization.c b/tests/unit/test-visitor-serialization.c index 667e8fed82..c2056c3eaa 100644 --- a/tests/unit/test-visitor-serialization.c +++ b/tests/unit/test-visitor-serialization.c @@ -223,7 +223,6 @@ static UserDefTwo *nested_struct_create(void) udnp->dict1->dict2->userdef->string = strdup("test_string"); udnp->dict1->dict2->string = strdup("test_string2"); udnp->dict1->dict3 = g_malloc0(sizeof(*udnp->dict1->dict3)); - udnp->dict1->has_dict3 = true; udnp->dict1->dict3->userdef = g_new0(UserDefOne, 1); udnp->dict1->dict3->userdef->integer = 43; udnp->dict1->dict3->userdef->string = strdup("test_string"); @@ -243,7 +242,7 @@ static void nested_struct_compare(UserDefTwo *udnp1, UserDefTwo *udnp2) udnp2->dict1->dict2->userdef->string); g_assert_cmpstr(udnp1->dict1->dict2->string, ==, udnp2->dict1->dict2->string); - g_assert(udnp1->dict1->has_dict3 == udnp2->dict1->has_dict3); + g_assert(!udnp1->dict1->dict3 == !udnp2->dict1->dict3); g_assert_cmpint(udnp1->dict1->dict3->userdef->integer, ==, udnp2->dict1->dict3->userdef->integer); g_assert_cmpstr(udnp1->dict1->dict3->userdef->string, ==, diff --git a/tests/vm/freebsd b/tests/vm/freebsd index d6ff4461ba..ba2ba23d24 100755 --- a/tests/vm/freebsd +++ b/tests/vm/freebsd @@ -28,8 +28,8 @@ class FreeBSDVM(basevm.BaseVM): name = "freebsd" arch = "x86_64" - link = "https://download.freebsd.org/ftp/releases/ISO-IMAGES/12.3/FreeBSD-12.3-RELEASE-amd64-disc1.iso.xz" - csum = "36dd0de50f1fe5f0a88e181e94657656de26fb64254412f74e80e128e8b938b4" + link = "https://download.freebsd.org/ftp/releases/ISO-IMAGES/12.4/FreeBSD-12.4-RELEASE-amd64-disc1.iso.xz" + csum = "1dcf6446e31bf3f81b582e9aba3319a258c29a937a2af6138ee4b181ed719a87" size = "20G" pkgs = [ # build tools diff --git a/tools/virtiofsd/fuse_lowlevel.c b/tools/virtiofsd/fuse_lowlevel.c index 2f08471627..194a1b813b 100644 --- a/tools/virtiofsd/fuse_lowlevel.c +++ b/tools/virtiofsd/fuse_lowlevel.c @@ -216,7 +216,6 @@ static int send_reply(fuse_req_t req, int error, const void *arg, int fuse_reply_iov(fuse_req_t req, const struct iovec *iov, int count) { - int res; g_autofree struct iovec *padded_iov = NULL; padded_iov = g_try_new(struct iovec, count + 1); @@ -227,9 +226,7 @@ int fuse_reply_iov(fuse_req_t req, const struct iovec *iov, int count) memcpy(padded_iov + 1, iov, count * sizeof(struct iovec)); count++; - res = send_reply_iov(req, 0, padded_iov, count); - - return res; + return send_reply_iov(req, 0, padded_iov, count); } @@ -589,7 +586,6 @@ int fuse_reply_ioctl_retry(fuse_req_t req, const struct iovec *in_iov, g_autofree struct fuse_ioctl_iovec *out_fiov = NULL; struct iovec iov[4]; size_t count = 1; - int res; memset(&arg, 0, sizeof(arg)); arg.flags |= FUSE_IOCTL_RETRY; @@ -601,15 +597,13 @@ int fuse_reply_ioctl_retry(fuse_req_t req, const struct iovec *in_iov, /* Can't handle non-compat 64bit ioctls on 32bit */ if (sizeof(void *) == 4 && req->ioctl_64bit) { - res = fuse_reply_err(req, EINVAL); - return res; + return 
fuse_reply_err(req, EINVAL); } if (in_count) { in_fiov = fuse_ioctl_iovec_copy(in_iov, in_count); if (!in_fiov) { - res = fuse_reply_err(req, ENOMEM); - return res; + return fuse_reply_err(req, ENOMEM); } iov[count].iov_base = (void *)in_fiov; @@ -619,8 +613,7 @@ int fuse_reply_ioctl_retry(fuse_req_t req, const struct iovec *in_iov, if (out_count) { out_fiov = fuse_ioctl_iovec_copy(out_iov, out_count); if (!out_fiov) { - res = fuse_reply_err(req, ENOMEM); - return res; + return fuse_reply_err(req, ENOMEM); } iov[count].iov_base = (void *)out_fiov; @@ -628,9 +621,7 @@ int fuse_reply_ioctl_retry(fuse_req_t req, const struct iovec *in_iov, count++; } - res = send_reply_iov(req, 0, iov, count); - - return res; + return send_reply_iov(req, 0, iov, count); } int fuse_reply_ioctl(fuse_req_t req, int result, const void *buf, size_t size) @@ -659,7 +650,6 @@ int fuse_reply_ioctl_iov(fuse_req_t req, int result, const struct iovec *iov, { g_autofree struct iovec *padded_iov = NULL; struct fuse_ioctl_out arg; - int res; padded_iov = g_try_new(struct iovec, count + 2); if (padded_iov == NULL) { @@ -673,9 +663,7 @@ int fuse_reply_ioctl_iov(fuse_req_t req, int result, const struct iovec *iov, memcpy(&padded_iov[2], iov, count * sizeof(struct iovec)); - res = send_reply_iov(req, 0, padded_iov, count + 2); - - return res; + return send_reply_iov(req, 0, padded_iov, count + 2); } int fuse_reply_poll(fuse_req_t req, unsigned revents) diff --git a/ui/cocoa.m b/ui/cocoa.m index 660d3e0935..e915c344a8 100644 --- a/ui/cocoa.m +++ b/ui/cocoa.m @@ -1481,8 +1481,8 @@ static CGEventRef handleTapEvent(CGEventTapProxy proxy, CGEventType type, CGEven __block Error *err = NULL; with_iothread_lock(^{ - qmp_eject(true, [drive cStringUsingEncoding: NSASCIIStringEncoding], - false, NULL, false, false, &err); + qmp_eject([drive cStringUsingEncoding: NSASCIIStringEncoding], + NULL, false, false, &err); }); handleAnyDeviceErrors(err); } @@ -1516,13 +1516,12 @@ static CGEventRef handleTapEvent(CGEventTapProxy proxy, CGEventType type, CGEven __block Error *err = NULL; with_iothread_lock(^{ - qmp_blockdev_change_medium(true, - [drive cStringUsingEncoding: + qmp_blockdev_change_medium([drive cStringUsingEncoding: NSASCIIStringEncoding], - false, NULL, + NULL, [file cStringUsingEncoding: NSASCIIStringEncoding], - true, "raw", + "raw", true, false, false, 0, &err); diff --git a/ui/console.c b/ui/console.c index 3c0d9b061a..9ff9217f9b 100644 --- a/ui/console.c +++ b/ui/console.c @@ -407,7 +407,7 @@ static void graphic_hw_update_bh(void *con) /* Safety: coroutine-only, concurrent-coroutine safe, main thread only */ void coroutine_fn -qmp_screendump(const char *filename, bool has_device, const char *device, +qmp_screendump(const char *filename, const char *device, bool has_head, int64_t head, bool has_format, ImageFormat format, Error **errp) { @@ -416,7 +416,7 @@ qmp_screendump(const char *filename, bool has_device, const char *device, DisplaySurface *surface; int fd; - if (has_device) { + if (device) { con = qemu_console_lookup_by_device_name(device, has_head ? 
head : 0, errp); if (!con) { diff --git a/ui/input.c b/ui/input.c index e2a90af889..8f4a87d1d7 100644 --- a/ui/input.c +++ b/ui/input.c @@ -124,7 +124,7 @@ qemu_input_find_handler(uint32_t mask, QemuConsole *con) return NULL; } -void qmp_input_send_event(bool has_device, const char *device, +void qmp_input_send_event(const char *device, bool has_head, int64_t head, InputEventList *events, Error **errp) { @@ -133,7 +133,7 @@ void qmp_input_send_event(bool has_device, const char *device, Error *err = NULL; con = NULL; - if (has_device) { + if (device) { if (!has_head) { head = 0; } diff --git a/ui/spice-core.c b/ui/spice-core.c index c3ac20ad43..72f8f1681c 100644 --- a/ui/spice-core.c +++ b/ui/spice-core.c @@ -222,7 +222,6 @@ static void channel_event(int event, SpiceChannelEventInfo *info) break; case SPICE_CHANNEL_EVENT_INITIALIZED: if (auth) { - server->has_auth = true; server->auth = g_strdup(auth); } add_channel_info(client, info); @@ -522,13 +521,9 @@ static SpiceInfo *qmp_query_spice_real(Error **errp) port = qemu_opt_get_number(opts, "port", 0); tls_port = qemu_opt_get_number(opts, "tls-port", 0); - info->has_auth = true; info->auth = g_strdup(auth); - - info->has_host = true; info->host = g_strdup(addr ? addr : "*"); - info->has_compiled_version = true; major = (SPICE_SERVER_VERSION & 0xff0000) >> 16; minor = (SPICE_SERVER_VERSION & 0xff00) >> 8; micro = SPICE_SERVER_VERSION & 0xff; @@ -51,7 +51,6 @@ bool qemu_console_fill_device_address(QemuConsole *con, size_t size, Error **errp) { - ERRP_GUARD(); DeviceState *dev = DEVICE(object_property_get_link(OBJECT(con), "device", &error_abort)); @@ -244,7 +244,6 @@ static VncServerInfo *vnc_server_info_get(VncDisplay *vd) info = g_malloc0(sizeof(*info)); vnc_init_basic_info_from_server_addr(vd->listener->sioc[0], qapi_VncServerInfo_base(info), &err); - info->has_auth = true; info->auth = g_strdup(vnc_auth_name(vd)); if (err) { qapi_free_VncServerInfo(info); @@ -263,13 +262,10 @@ static void vnc_client_cache_auth(VncState *client) if (client->tls) { client->info->x509_dname = qcrypto_tls_session_get_peer_name(client->tls); - client->info->has_x509_dname = - client->info->x509_dname != NULL; } #ifdef CONFIG_VNC_SASL if (client->sasl.conn && client->sasl.username) { - client->info->has_sasl_username = true; client->info->sasl_username = g_strdup(client->sasl.username); } #endif @@ -341,11 +337,9 @@ static VncClientInfo *qmp_query_vnc_client(const VncState *client) if (client->tls) { info->x509_dname = qcrypto_tls_session_get_peer_name(client->tls); - info->has_x509_dname = info->x509_dname != NULL; } #ifdef CONFIG_VNC_SASL if (client->sasl.conn && client->sasl.username) { - info->has_sasl_username = true; info->sasl_username = g_strdup(client->sasl.username); } #endif @@ -426,11 +420,8 @@ VncInfo *qmp_query_vnc(Error **errp) abort(); } - info->has_host = true; - info->has_service = true; info->has_family = true; - info->has_auth = true; info->auth = g_strdup(vnc_auth_name(vd)); } @@ -568,7 +559,6 @@ VncInfo2List *qmp_query_vnc_servers(Error **errp) if (vd->dcl.con) { dev = DEVICE(object_property_get_link(OBJECT(vd->dcl.con), "device", &error_abort)); - info->has_display = true; info->display = g_strdup(dev->id); } for (i = 0; vd->listener != NULL && i < vd->listener->nsioc; i++) { diff --git a/util/async.c b/util/async.c index 63434ddae4..14d63b3091 100644 --- a/util/async.c +++ b/util/async.c @@ -27,6 +27,7 @@ #include "qapi/error.h" #include "block/aio.h" #include "block/thread-pool.h" +#include "block/graph-lock.h" #include 
"qemu/main-loop.h" #include "qemu/atomic.h" #include "qemu/rcu_queue.h" @@ -376,6 +377,7 @@ aio_ctx_finalize(GSource *source) qemu_rec_mutex_destroy(&ctx->lock); qemu_lockcnt_destroy(&ctx->list_lock); timerlistgroup_deinit(&ctx->tlg); + unregister_aiocontext(ctx); aio_context_destroy(ctx); } @@ -574,6 +576,8 @@ AioContext *aio_context_new(Error **errp) ctx->thread_pool_min = 0; ctx->thread_pool_max = THREAD_POOL_MAX_THREADS_DEFAULT; + register_aiocontext(ctx); + return ctx; fail: g_source_destroy(&ctx->source); diff --git a/util/oslib-win32.c b/util/oslib-win32.c index a67cb3822e..07ade41800 100644 --- a/util/oslib-win32.c +++ b/util/oslib-win32.c @@ -24,10 +24,6 @@ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. - * - * The implementation of g_poll (functions poll_rest, g_poll) at the end of - * this file are based on code from GNOME glib-2 and use a different license, - * see the license comment there. */ #include "qemu/osdep.h" diff --git a/util/qemu-config.c b/util/qemu-config.c index 433488aa56..d63f27438d 100644 --- a/util/qemu-config.c +++ b/util/qemu-config.c @@ -8,6 +8,7 @@ #include "qemu/error-report.h" #include "qemu/option.h" #include "qemu/config-file.h" +#include "hw/boards.h" static QemuOptsList *vm_config_groups[48]; static QemuOptsList *drive_config_groups[5]; @@ -80,14 +81,8 @@ static CommandLineParameterInfoList *query_option_descs(const QemuOptDesc *desc) break; } - if (desc[i].help) { - info->has_help = true; - info->help = g_strdup(desc[i].help); - } - if (desc[i].def_value_str) { - info->has_q_default = true; - info->q_default = g_strdup(desc[i].def_value_str); - } + info->help = g_strdup(desc[i].help); + info->q_default = g_strdup(desc[i].def_value_str); QAPI_LIST_PREPEND(param_list, info); } @@ -149,100 +144,82 @@ static CommandLineParameterInfoList *get_drive_infolist(void) return head; } -/* restore machine options that are now machine's properties */ -static QemuOptsList machine_opts = { - .merge_lists = true, - .head = QTAILQ_HEAD_INITIALIZER(machine_opts.head), - .desc = { - { - .name = "type", - .type = QEMU_OPT_STRING, - .help = "emulated machine" - },{ - .name = "accel", - .type = QEMU_OPT_STRING, - .help = "accelerator list", - },{ - .name = "kernel_irqchip", - .type = QEMU_OPT_BOOL, - .help = "use KVM in-kernel irqchip", - },{ - .name = "kvm_shadow_mem", - .type = QEMU_OPT_SIZE, - .help = "KVM shadow MMU size", - },{ - .name = "kernel", - .type = QEMU_OPT_STRING, - .help = "Linux kernel image file", - },{ - .name = "initrd", - .type = QEMU_OPT_STRING, - .help = "Linux initial ramdisk file", - },{ - .name = "append", - .type = QEMU_OPT_STRING, - .help = "Linux kernel command line", - },{ - .name = "dtb", - .type = QEMU_OPT_STRING, - .help = "Linux kernel device tree file", - },{ - .name = "dumpdtb", - .type = QEMU_OPT_STRING, - .help = "Dump current dtb to a file and quit", - },{ - .name = "phandle_start", - .type = QEMU_OPT_NUMBER, - .help = "The first phandle ID we may generate dynamically", - },{ - .name = "dt_compatible", - .type = QEMU_OPT_STRING, - .help = "Overrides the \"compatible\" property of the dt root node", - },{ - .name = "dump-guest-core", - .type = QEMU_OPT_BOOL, - .help = "Include guest memory in a core dump", - },{ - .name = "mem-merge", - .type = QEMU_OPT_BOOL, - .help = "enable/disable memory merge support", - },{ - .name = "usb", - .type = QEMU_OPT_BOOL, - .help = "Set on/off to enable/disable usb", - },{ - 
.name = "firmware", - .type = QEMU_OPT_STRING, - .help = "firmware image", - },{ - .name = "iommu", - .type = QEMU_OPT_BOOL, - .help = "Set on/off to enable/disable Intel IOMMU (VT-d)", - },{ - .name = "suppress-vmdesc", - .type = QEMU_OPT_BOOL, - .help = "Set on to disable self-describing migration", - },{ - .name = "aes-key-wrap", - .type = QEMU_OPT_BOOL, - .help = "enable/disable AES key wrapping using the CPACF wrapping key", - },{ - .name = "dea-key-wrap", - .type = QEMU_OPT_BOOL, - .help = "enable/disable DEA key wrapping using the CPACF wrapping key", - },{ - .name = "loadparm", - .type = QEMU_OPT_STRING, - .help = "Up to 8 chars in set of [A-Za-z0-9. ](lower case chars" - " converted to upper case) to pass to machine" - " loader, boot manager, and guest kernel", - }, - { /* End of list */ } +static CommandLineParameterInfo *objprop_to_cmdline_prop(ObjectProperty *prop) +{ + CommandLineParameterInfo *info; + + info = g_malloc0(sizeof(*info)); + info->name = g_strdup(prop->name); + + if (g_str_equal(prop->type, "bool") || g_str_equal(prop->type, "OnOffAuto")) { + info->type = COMMAND_LINE_PARAMETER_TYPE_BOOLEAN; + } else if (g_str_equal(prop->type, "int")) { + info->type = COMMAND_LINE_PARAMETER_TYPE_NUMBER; + } else if (g_str_equal(prop->type, "size")) { + info->type = COMMAND_LINE_PARAMETER_TYPE_SIZE; + } else { + info->type = COMMAND_LINE_PARAMETER_TYPE_STRING; + } + + if (prop->description) { + info->help = g_strdup(prop->description); + } + + return info; +} + +static CommandLineParameterInfoList *query_all_machine_properties(void) +{ + CommandLineParameterInfoList *params = NULL, *clpiter; + CommandLineParameterInfo *info; + GSList *machines, *curr_mach; + ObjectPropertyIterator op_iter; + ObjectProperty *prop; + bool is_new; + + machines = object_class_get_list(TYPE_MACHINE, false); + assert(machines); + + /* Loop over all machine classes */ + for (curr_mach = machines; curr_mach; curr_mach = curr_mach->next) { + object_class_property_iter_init(&op_iter, curr_mach->data); + /* ... 
and over the properties of each machine: */ + while ((prop = object_property_iter_next(&op_iter))) { + if (!prop->set) { + continue; + } + /* + * Check whether the property has already been put into the list + * (via another machine class) + */ + is_new = true; + for (clpiter = params; clpiter != NULL; clpiter = clpiter->next) { + if (g_str_equal(clpiter->value->name, prop->name)) { + is_new = false; + break; + } + } + /* If it hasn't been added before, add it now to the list */ + if (is_new) { + info = objprop_to_cmdline_prop(prop); + QAPI_LIST_PREPEND(params, info); + } + } } -}; -CommandLineOptionInfoList *qmp_query_command_line_options(bool has_option, - const char *option, + g_slist_free(machines); + + /* Add entry for the "type" parameter */ + info = g_malloc0(sizeof(*info)); + info->name = g_strdup("type"); + info->type = COMMAND_LINE_PARAMETER_TYPE_STRING; + info->help = g_strdup("machine type"); + QAPI_LIST_PREPEND(params, info); + + return params; +} + +CommandLineOptionInfoList *qmp_query_command_line_options(const char *option, Error **errp) { CommandLineOptionInfoList *conf_list = NULL; @@ -250,7 +227,7 @@ CommandLineOptionInfoList *qmp_query_command_line_options(bool has_option, int i; for (i = 0; vm_config_groups[i] != NULL; i++) { - if (!has_option || !strcmp(option, vm_config_groups[i]->name)) { + if (!option || !strcmp(option, vm_config_groups[i]->name)) { info = g_malloc0(sizeof(*info)); info->option = g_strdup(vm_config_groups[i]->name); if (!strcmp("drive", vm_config_groups[i]->name)) { @@ -263,10 +240,10 @@ CommandLineOptionInfoList *qmp_query_command_line_options(bool has_option, } } - if (!has_option || !strcmp(option, "machine")) { + if (!option || !strcmp(option, "machine")) { info = g_malloc0(sizeof(*info)); info->option = g_strdup("machine"); - info->parameters = query_option_descs(machine_opts.desc); + info->parameters = query_all_machine_properties(); QAPI_LIST_PREPEND(conf_list, info); } @@ -318,9 +295,9 @@ void qemu_add_opts(QemuOptsList *list) static int qemu_config_foreach(FILE *fp, QEMUConfigCB *cb, void *opaque, const char *fname, Error **errp) { + ERRP_GUARD(); char line[1024], prev_group[64], group[64], arg[64], value[1024]; Location loc; - Error *local_err = NULL; QDict *qdict = NULL; int res = -EINVAL, lno = 0; int count = 0; @@ -348,10 +325,9 @@ static int qemu_config_foreach(FILE *fp, QEMUConfigCB *cb, void *opaque, } if (qdict != prev) { if (prev) { - cb(prev_group, prev, opaque, &local_err); + cb(prev_group, prev, opaque, errp); qobject_unref(prev); - if (local_err) { - error_propagate(errp, local_err); + if (*errp) { goto out; } } @@ -423,12 +399,12 @@ int qemu_read_config_file(const char *filename, QEMUConfigCB *cb, Error **errp) return ret; } -static void config_parse_qdict_section(QDict *options, QemuOptsList *opts, +static bool config_parse_qdict_section(QDict *options, QemuOptsList *opts, Error **errp) { QemuOpts *subopts; - QDict *subqdict; - QList *list = NULL; + g_autoptr(QDict) subqdict = NULL; + g_autoptr(QList) list = NULL; size_t orig_size, enum_size; char *prefix; @@ -437,23 +413,23 @@ static void config_parse_qdict_section(QDict *options, QemuOptsList *opts, g_free(prefix); orig_size = qdict_size(subqdict); if (!orig_size) { - goto out; + return true; } subopts = qemu_opts_create(opts, NULL, 0, errp); if (!subopts) { - goto out; + return false; } if (!qemu_opts_absorb_qdict(subopts, subqdict, errp)) { - goto out; + return false; } enum_size = qdict_size(subqdict); if (enum_size < orig_size && enum_size) { error_setg(errp, "Unknown 
option '%s' for [%s]", qdict_first(subqdict)->key, opts->name); - goto out; + return false; } if (enum_size) { @@ -468,7 +444,7 @@ static void config_parse_qdict_section(QDict *options, QemuOptsList *opts, if (qdict_size(subqdict)) { error_setg(errp, "Unused option '%s' for [%s]", qdict_first(subqdict)->key, opts->name); - goto out; + return false; } QLIST_FOREACH_ENTRY(list, list_entry) { @@ -478,46 +454,43 @@ static void config_parse_qdict_section(QDict *options, QemuOptsList *opts, if (!section) { error_setg(errp, "[%s] section (index %u) does not consist of " "keys", opts->name, i); - goto out; + return false; } opt_name = g_strdup_printf("%s.%u", opts->name, i++); subopts = qemu_opts_create(opts, opt_name, 1, errp); g_free(opt_name); if (!subopts) { - goto out; + return false; } if (!qemu_opts_absorb_qdict(subopts, section, errp)) { qemu_opts_del(subopts); - goto out; + return false; } if (qdict_size(section)) { error_setg(errp, "[%s] section doesn't support the option '%s'", opts->name, qdict_first(section)->key); qemu_opts_del(subopts); - goto out; + return false; } } } -out: - qobject_unref(subqdict); - qobject_unref(list); + return true; } -void qemu_config_parse_qdict(QDict *options, QemuOptsList **lists, +bool qemu_config_parse_qdict(QDict *options, QemuOptsList **lists, Error **errp) { int i; - Error *local_err = NULL; for (i = 0; lists[i]; i++) { - config_parse_qdict_section(options, lists[i], &local_err); - if (local_err) { - error_propagate(errp, local_err); - return; + if (!config_parse_qdict_section(options, lists[i], errp)) { + return false; } } + + return true; } diff --git a/util/qemu-sockets.c b/util/qemu-sockets.c index d185245023..6538859b87 100644 --- a/util/qemu-sockets.c +++ b/util/qemu-sockets.c @@ -210,7 +210,8 @@ static int inet_listen_saddr(InetSocketAddress *saddr, int num, Error **errp) { - struct addrinfo ai,*res,*e; + ERRP_GUARD(); + struct addrinfo ai, *res, *e; char port[33]; char uaddr[INET6_ADDRSTRLEN+1]; char uport[33]; @@ -218,7 +219,6 @@ static int inet_listen_saddr(InetSocketAddress *saddr, int slisten = -1; int saved_errno = 0; bool socket_created = false; - Error *err = NULL; if (saddr->keep_alive) { error_setg(errp, "keep-alive option is not supported for passive " @@ -231,11 +231,9 @@ static int inet_listen_saddr(InetSocketAddress *saddr, if (saddr->has_numeric && saddr->numeric) { ai.ai_flags |= AI_NUMERICHOST | AI_NUMERICSERV; } - ai.ai_family = inet_ai_family_from_address(saddr, &err); ai.ai_socktype = SOCK_STREAM; - - if (err) { - error_propagate(errp, err); + ai.ai_family = inet_ai_family_from_address(saddr, errp); + if (*errp) { return -1; } @@ -392,9 +390,9 @@ static int inet_connect_addr(const InetSocketAddress *saddr, static struct addrinfo *inet_parse_connect_saddr(InetSocketAddress *saddr, Error **errp) { + ERRP_GUARD(); struct addrinfo ai, *res; int rc; - Error *err = NULL; static int useV4Mapped = 1; memset(&ai, 0, sizeof(ai)); @@ -403,11 +401,9 @@ static struct addrinfo *inet_parse_connect_saddr(InetSocketAddress *saddr, if (qatomic_read(&useV4Mapped)) { ai.ai_flags |= AI_V4MAPPED; } - ai.ai_family = inet_ai_family_from_address(saddr, &err); ai.ai_socktype = SOCK_STREAM; - - if (err) { - error_propagate(errp, err); + ai.ai_family = inet_ai_family_from_address(saddr, errp); + if (*errp) { return NULL; } @@ -499,20 +495,18 @@ static int inet_dgram_saddr(InetSocketAddress *sraddr, InetSocketAddress *sladdr, Error **errp) { + ERRP_GUARD(); struct addrinfo ai, *peer = NULL, *local = NULL; const char *addr; const char *port; int sock = 
-1, rc; - Error *err = NULL; /* lookup peer addr */ memset(&ai,0, sizeof(ai)); ai.ai_flags = AI_CANONNAME | AI_V4MAPPED | AI_ADDRCONFIG; - ai.ai_family = inet_ai_family_from_address(sraddr, &err); ai.ai_socktype = SOCK_DGRAM; - - if (err) { - error_propagate(errp, err); + ai.ai_family = inet_ai_family_from_address(sraddr, errp); + if (*errp) { goto err; } diff --git a/util/thread-context.c b/util/thread-context.c index 4138245332..2bc7883b9e 100644 --- a/util/thread-context.c +++ b/util/thread-context.c @@ -90,16 +90,13 @@ static void thread_context_set_cpu_affinity(Object *obj, Visitor *v, uint16List *l, *host_cpus = NULL; unsigned long *bitmap = NULL; int nbits = 0, ret; - Error *err = NULL; if (tc->init_cpu_bitmap) { error_setg(errp, "Mixing CPU and node affinity not supported"); return; } - visit_type_uint16List(v, name, &host_cpus, &err); - if (err) { - error_propagate(errp, err); + if (!visit_type_uint16List(v, name, &host_cpus, errp)) { return; } @@ -178,7 +175,6 @@ static void thread_context_set_node_affinity(Object *obj, Visitor *v, uint16List *l, *host_nodes = NULL; unsigned long *bitmap = NULL; struct bitmask *tmp_cpus; - Error *err = NULL; int ret, i; if (tc->init_cpu_bitmap) { @@ -186,9 +182,7 @@ static void thread_context_set_node_affinity(Object *obj, Visitor *v, return; } - visit_type_uint16List(v, name, &host_nodes, &err); - if (err) { - error_propagate(errp, err); + if (!visit_type_uint16List(v, name, &host_nodes, errp)) { return; } diff --git a/util/vfio-helpers.c b/util/vfio-helpers.c index 0d1520caac..7a84b1d806 100644 --- a/util/vfio-helpers.c +++ b/util/vfio-helpers.c @@ -271,7 +271,7 @@ static void collect_usable_iova_ranges(QEMUVFIOState *s, void *buf) if (!cap->next) { return; } - cap = (struct vfio_info_cap_header *)(buf + cap->next); + cap = buf + cap->next; } cap_iova_range = (struct vfio_iommu_type1_info_cap_iova_range *)cap; |