author     Peter Maydell <peter.maydell@linaro.org>   2020-05-04 20:35:59 +0100
committer  Peter Maydell <peter.maydell@linaro.org>   2020-05-04 20:35:59 +0100
commit     5c7c46fea9f745956fcc7bea5c3af8380a599ee4 (patch)
tree       430973a6470b5f1a7718715d04401b03c4ef201b
parent     5375af3cd7b8adcc10c18d8083b7be63976c9645 (diff)
parent     08b689aa6b521964b8275dd7a2564aefa5d68129 (diff)
Merge remote-tracking branch 'remotes/stefanha/tags/block-pull-request' into staging
Pull request
v2:
* Fixed stray slirp submodule change [Peter]
Fixes for the lock guard macros, code conversions to the lock guard macros, and
support for selecting fuzzer targets with argv[0].
# gpg: Signature made Mon 04 May 2020 16:11:11 BST
# gpg: using RSA key 8695A8BFD3F97CDAAC35775A9CA4ABB381AB73C8
# gpg: Good signature from "Stefan Hajnoczi <stefanha@redhat.com>" [full]
# gpg: aka "Stefan Hajnoczi <stefanha@gmail.com>" [full]
# Primary key fingerprint: 8695 A8BF D3F9 7CDA AC35 775A 9CA4 ABB3 81AB 73C8
* remotes/stefanha/tags/block-pull-request:
lockable: Replace locks with lock guard macros
lockable: replaced locks with lock guard macros where appropriate
lockable: fix __COUNTER__ macro to be referenced properly
fuzz: select fuzz target using executable name
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
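
Every conversion in this series follows the same pattern: a qemu_mutex_lock()/qemu_mutex_unlock() pair becomes QEMU_LOCK_GUARD() when the mutex stays held until the end of the enclosing scope, or WITH_QEMU_LOCK_GUARD() when only a block is protected, and the goto-based unlock paths disappear because the guard drops the mutex automatically. A minimal sketch of the two forms, assuming QEMU's "qemu/lockable.h"; the Dev structure and both functions are invented for illustration only:

    #include "qemu/osdep.h"
    #include "qemu/thread.h"
    #include "qemu/lockable.h"   /* QEMU_LOCK_GUARD, WITH_QEMU_LOCK_GUARD */

    /* Hypothetical state, for illustration only. */
    typedef struct Dev {
        QemuMutex lock;          /* initialised elsewhere with qemu_mutex_init() */
        int requests;
        bool broken;
    } Dev;

    /* Mutex held until the function returns: QEMU_LOCK_GUARD. */
    static int dev_submit(Dev *d)
    {
        QEMU_LOCK_GUARD(&d->lock);
        if (d->broken) {
            return -EIO;         /* unlocked automatically on this early return */
        }
        d->requests++;
        return 0;                /* ...and on this one */
    }

    /* Mutex held only for a block: WITH_QEMU_LOCK_GUARD. */
    static void dev_reset(Dev *d)
    {
        WITH_QEMU_LOCK_GUARD(&d->lock) {
            d->requests = 0;
            d->broken = false;
        }                        /* unlocked when the block is left */
        /* code that does not need the mutex continues here */
    }

The nfs.c and rdma_backend.c hunks below, for example, use the WITH_QEMU_LOCK_GUARD() form because code after the critical section still runs; iscsi.c, migration.c and most of the other files hold the lock to the end of the enclosing scope and use QEMU_LOCK_GUARD().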
-rw-r--r--  block/iscsi.c            |  7
-rw-r--r--  block/nfs.c              | 51
-rw-r--r--  cpus-common.c            | 14
-rw-r--r--  hw/display/qxl.c         | 43
-rw-r--r--  hw/hyperv/hyperv.c       | 15
-rw-r--r--  hw/rdma/rdma_backend.c   | 50
-rw-r--r--  hw/rdma/rdma_rm.c        |  3
-rw-r--r--  hw/vfio/platform.c       |  5
-rw-r--r--  include/qemu/lockable.h  |  7
-rw-r--r--  include/qemu/rcu.h       |  2
-rw-r--r--  migration/migration.c    |  3
-rw-r--r--  migration/multifd.c      |  8
-rw-r--r--  migration/ram.c          |  3
-rw-r--r--  monitor/misc.c           |  4
-rw-r--r--  tests/qtest/fuzz/fuzz.c  | 19
-rw-r--r--  ui/spice-display.c       | 14
-rw-r--r--  util/log.c               |  4
-rw-r--r--  util/qemu-timer.c        | 17
-rw-r--r--  util/rcu.c               |  8
-rw-r--r--  util/thread-pool.c       |  3
-rw-r--r--  util/vfio-helpers.c      |  5
21 files changed, 132 insertions, 153 deletions
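
The tests/qtest/fuzz/fuzz.c change in this set lets the fuzz target be chosen from the executable name: when argv[0] contains "-target-", the text after that marker names the target, otherwise the first argument is still parsed as --fuzz-target=NAME. A small standalone sketch of that argv[0] convention follows; pick_target() and the program around it are illustrative only, not QEMU's fuzzer code:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Pick a target name from argv[0] ("...-target-NAME") or from a
     * "--fuzz-target=NAME" argument, mirroring the convention in the
     * patch below. */
    static const char *pick_target(int argc, char **argv)
    {
        const char *name = strstr(argv[0], "-target-");

        if (name) {                        /* executable name selects the target */
            return name + strlen("-target-");
        }
        if (argc > 1 && strstr(argv[1], "--fuzz-target=")) {
            return argv[1] + strlen("--fuzz-target=");
        }
        return NULL;                       /* caller prints usage and exits */
    }

    int main(int argc, char **argv)
    {
        const char *target = pick_target(argc, argv);

        if (!target) {
            fprintf(stderr, "usage: %s --fuzz-target=NAME\n", argv[0]);
            return EXIT_FAILURE;
        }
        printf("selected fuzz target: %s\n", target);
        return EXIT_SUCCESS;
    }

With this convention a copy or symlink of the fuzzer binary whose name ends in -target-NAME selects that target with no extra arguments, which is what the usage() line added in the patch points at.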
diff --git a/block/iscsi.c b/block/iscsi.c
index 914a1de9fb..a8b76979d8 100644
--- a/block/iscsi.c
+++ b/block/iscsi.c
@@ -1394,20 +1394,17 @@ static void iscsi_nop_timed_event(void *opaque)
 {
     IscsiLun *iscsilun = opaque;
 
-    qemu_mutex_lock(&iscsilun->mutex);
+    QEMU_LOCK_GUARD(&iscsilun->mutex);
     if (iscsi_get_nops_in_flight(iscsilun->iscsi) >= MAX_NOP_FAILURES) {
         error_report("iSCSI: NOP timeout. Reconnecting...");
         iscsilun->request_timed_out = true;
     } else if (iscsi_nop_out_async(iscsilun->iscsi, NULL, NULL, 0, NULL) != 0) {
         error_report("iSCSI: failed to sent NOP-Out. Disabling NOP messages.");
-        goto out;
+        return;
     }
 
     timer_mod(iscsilun->nop_timer,
               qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + NOP_INTERVAL);
     iscsi_set_events(iscsilun);
-
-out:
-    qemu_mutex_unlock(&iscsilun->mutex);
 }
 
 static void iscsi_readcapacity_sync(IscsiLun *iscsilun, Error **errp)
diff --git a/block/nfs.c b/block/nfs.c
index 2393fbfe6b..b86ba5bfa1 100644
--- a/block/nfs.c
+++ b/block/nfs.c
@@ -273,15 +273,14 @@ static int coroutine_fn nfs_co_preadv(BlockDriverState *bs, uint64_t offset,
     nfs_co_init_task(bs, &task);
     task.iov = iov;
 
-    qemu_mutex_lock(&client->mutex);
-    if (nfs_pread_async(client->context, client->fh,
-                        offset, bytes, nfs_co_generic_cb, &task) != 0) {
-        qemu_mutex_unlock(&client->mutex);
-        return -ENOMEM;
-    }
+    WITH_QEMU_LOCK_GUARD(&client->mutex) {
+        if (nfs_pread_async(client->context, client->fh,
+                            offset, bytes, nfs_co_generic_cb, &task) != 0) {
+            return -ENOMEM;
+        }
 
-    nfs_set_events(client);
-    qemu_mutex_unlock(&client->mutex);
+        nfs_set_events(client);
+    }
     while (!task.complete) {
         qemu_coroutine_yield();
     }
@@ -320,19 +319,18 @@ static int coroutine_fn nfs_co_pwritev(BlockDriverState *bs, uint64_t offset,
         buf = iov->iov[0].iov_base;
     }
 
-    qemu_mutex_lock(&client->mutex);
-    if (nfs_pwrite_async(client->context, client->fh,
-                         offset, bytes, buf,
-                         nfs_co_generic_cb, &task) != 0) {
-        qemu_mutex_unlock(&client->mutex);
-        if (my_buffer) {
-            g_free(buf);
+    WITH_QEMU_LOCK_GUARD(&client->mutex) {
+        if (nfs_pwrite_async(client->context, client->fh,
+                             offset, bytes, buf,
+                             nfs_co_generic_cb, &task) != 0) {
+            if (my_buffer) {
+                g_free(buf);
+            }
+            return -ENOMEM;
         }
-        return -ENOMEM;
-    }
 
-    nfs_set_events(client);
-    qemu_mutex_unlock(&client->mutex);
+        nfs_set_events(client);
+    }
     while (!task.complete) {
         qemu_coroutine_yield();
     }
@@ -355,15 +353,14 @@ static int coroutine_fn nfs_co_flush(BlockDriverState *bs)
 
     nfs_co_init_task(bs, &task);
 
-    qemu_mutex_lock(&client->mutex);
-    if (nfs_fsync_async(client->context, client->fh, nfs_co_generic_cb,
-                        &task) != 0) {
-        qemu_mutex_unlock(&client->mutex);
-        return -ENOMEM;
-    }
+    WITH_QEMU_LOCK_GUARD(&client->mutex) {
+        if (nfs_fsync_async(client->context, client->fh, nfs_co_generic_cb,
+                            &task) != 0) {
+            return -ENOMEM;
+        }
 
-    nfs_set_events(client);
-    qemu_mutex_unlock(&client->mutex);
+        nfs_set_events(client);
+    }
     while (!task.complete) {
         qemu_coroutine_yield();
     }
diff --git a/cpus-common.c b/cpus-common.c
index eaf590cb38..55d5df8923 100644
--- a/cpus-common.c
+++ b/cpus-common.c
@@ -22,6 +22,7 @@
 #include "exec/cpu-common.h"
 #include "hw/core/cpu.h"
 #include "sysemu/cpus.h"
+#include "qemu/lockable.h"
 
 static QemuMutex qemu_cpu_list_lock;
 static QemuCond exclusive_cond;
@@ -71,7 +72,7 @@ static int cpu_get_free_index(void)
 
 void cpu_list_add(CPUState *cpu)
 {
-    qemu_mutex_lock(&qemu_cpu_list_lock);
+    QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
     if (cpu->cpu_index == UNASSIGNED_CPU_INDEX) {
         cpu->cpu_index = cpu_get_free_index();
         assert(cpu->cpu_index != UNASSIGNED_CPU_INDEX);
@@ -79,15 +80,13 @@ void cpu_list_add(CPUState *cpu)
         assert(!cpu_index_auto_assigned);
     }
     QTAILQ_INSERT_TAIL_RCU(&cpus, cpu, node);
-    qemu_mutex_unlock(&qemu_cpu_list_lock);
 }
 
 void cpu_list_remove(CPUState *cpu)
 {
-    qemu_mutex_lock(&qemu_cpu_list_lock);
+    QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
     if (!QTAILQ_IN_USE(cpu, node)) {
         /* there is nothing to undo since cpu_exec_init() hasn't been called */
-        qemu_mutex_unlock(&qemu_cpu_list_lock);
         return;
     }
 
@@ -95,7 +94,6 @@ void cpu_list_remove(CPUState *cpu)
 
     QTAILQ_REMOVE_RCU(&cpus, cpu, node);
     cpu->cpu_index = UNASSIGNED_CPU_INDEX;
-    qemu_mutex_unlock(&qemu_cpu_list_lock);
 }
 
 struct qemu_work_item {
@@ -237,7 +235,7 @@ void cpu_exec_start(CPUState *cpu)
      * see cpu->running == true, and it will kick the CPU.
      */
     if (unlikely(atomic_read(&pending_cpus))) {
-        qemu_mutex_lock(&qemu_cpu_list_lock);
+        QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
         if (!cpu->has_waiter) {
            /* Not counted in pending_cpus, let the exclusive item
             * run.  Since we have the lock, just set cpu->running to true
@@ -252,7 +250,6 @@
             * waiter at cpu_exec_end.
             */
         }
-        qemu_mutex_unlock(&qemu_cpu_list_lock);
     }
 }
 
@@ -280,7 +277,7 @@ void cpu_exec_end(CPUState *cpu)
      * next cpu_exec_start.
      */
     if (unlikely(atomic_read(&pending_cpus))) {
-        qemu_mutex_lock(&qemu_cpu_list_lock);
+        QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
         if (cpu->has_waiter) {
             cpu->has_waiter = false;
             atomic_set(&pending_cpus, pending_cpus - 1);
@@ -288,7 +285,6 @@
                 qemu_cond_signal(&exclusive_cond);
             }
         }
-        qemu_mutex_unlock(&qemu_cpu_list_lock);
     }
 }
diff --git a/hw/display/qxl.c b/hw/display/qxl.c
index 227da69a50..d5627119ec 100644
--- a/hw/display/qxl.c
+++ b/hw/display/qxl.c
@@ -478,18 +478,19 @@ static int qxl_track_command(PCIQXLDevice *qxl, struct QXLCommandExt *ext)
                               cmd->u.surface_create.stride);
             return 1;
         }
-        qemu_mutex_lock(&qxl->track_lock);
-        if (cmd->type == QXL_SURFACE_CMD_CREATE) {
-            qxl->guest_surfaces.cmds[id] = ext->cmd.data;
-            qxl->guest_surfaces.count++;
-            if (qxl->guest_surfaces.max < qxl->guest_surfaces.count)
-                qxl->guest_surfaces.max = qxl->guest_surfaces.count;
-        }
-        if (cmd->type == QXL_SURFACE_CMD_DESTROY) {
-            qxl->guest_surfaces.cmds[id] = 0;
-            qxl->guest_surfaces.count--;
+        WITH_QEMU_LOCK_GUARD(&qxl->track_lock) {
+            if (cmd->type == QXL_SURFACE_CMD_CREATE) {
+                qxl->guest_surfaces.cmds[id] = ext->cmd.data;
+                qxl->guest_surfaces.count++;
+                if (qxl->guest_surfaces.max < qxl->guest_surfaces.count) {
+                    qxl->guest_surfaces.max = qxl->guest_surfaces.count;
+                }
+            }
+            if (cmd->type == QXL_SURFACE_CMD_DESTROY) {
+                qxl->guest_surfaces.cmds[id] = 0;
+                qxl->guest_surfaces.count--;
+            }
         }
-        qemu_mutex_unlock(&qxl->track_lock);
         break;
     }
     case QXL_CMD_CURSOR:
@@ -958,10 +959,9 @@ static void interface_update_area_complete(QXLInstance *sin,
     int i;
     int qxl_i;
 
-    qemu_mutex_lock(&qxl->ssd.lock);
+    QEMU_LOCK_GUARD(&qxl->ssd.lock);
     if (surface_id != 0 || !num_updated_rects ||
         !qxl->render_update_cookie_num) {
-        qemu_mutex_unlock(&qxl->ssd.lock);
         return;
     }
     trace_qxl_interface_update_area_complete(qxl->id, surface_id, dirty->left,
@@ -980,7 +980,6 @@ static void interface_update_area_complete(QXLInstance *sin,
         * Don't bother copying or scheduling the bh since we will flip
         * the whole area anyway on completion of the update_area async call
         */
-        qemu_mutex_unlock(&qxl->ssd.lock);
         return;
     }
     qxl_i = qxl->num_dirty_rects;
@@ -991,7 +990,6 @@ static void interface_update_area_complete(QXLInstance *sin,
     trace_qxl_interface_update_area_complete_schedule_bh(qxl->id,
                                                          qxl->num_dirty_rects);
     qemu_bh_schedule(qxl->update_area_bh);
-    qemu_mutex_unlock(&qxl->ssd.lock);
 }
 
 /* called from spice server thread context only */
@@ -1694,15 +1692,14 @@ static void ioport_write(void *opaque, hwaddr addr,
     case QXL_IO_MONITORS_CONFIG_ASYNC:
async_common:
         async = QXL_ASYNC;
-        qemu_mutex_lock(&d->async_lock);
-        if (d->current_async != QXL_UNDEFINED_IO) {
-            qxl_set_guest_bug(d, "%d async started before last (%d) complete",
-                              io_port, d->current_async);
-            qemu_mutex_unlock(&d->async_lock);
-            return;
+        WITH_QEMU_LOCK_GUARD(&d->async_lock) {
+            if (d->current_async != QXL_UNDEFINED_IO) {
+                qxl_set_guest_bug(d, "%d async started before last (%d) complete",
+                                  io_port, d->current_async);
+                return;
+            }
+            d->current_async = orig_io_port;
         }
-        d->current_async = orig_io_port;
-        qemu_mutex_unlock(&d->async_lock);
         break;
     default:
         break;
diff --git a/hw/hyperv/hyperv.c b/hw/hyperv/hyperv.c
index 8ca3706f5b..4ddafe1de1 100644
--- a/hw/hyperv/hyperv.c
+++ b/hw/hyperv/hyperv.c
@@ -15,6 +15,7 @@
 #include "sysemu/kvm.h"
 #include "qemu/bitops.h"
 #include "qemu/error-report.h"
+#include "qemu/lockable.h"
 #include "qemu/queue.h"
 #include "qemu/rcu.h"
 #include "qemu/rcu_queue.h"
@@ -491,7 +492,7 @@ int hyperv_set_msg_handler(uint32_t conn_id, HvMsgHandler handler, void *data)
     int ret;
     MsgHandler *mh;
 
-    qemu_mutex_lock(&handlers_mutex);
+    QEMU_LOCK_GUARD(&handlers_mutex);
     QLIST_FOREACH(mh, &msg_handlers, link) {
         if (mh->conn_id == conn_id) {
             if (handler) {
@@ -501,7 +502,7 @@ int hyperv_set_msg_handler(uint32_t conn_id, HvMsgHandler handler, void *data)
                 g_free_rcu(mh, rcu);
                 ret = 0;
             }
-            goto unlock;
+            return ret;
         }
     }
 
@@ -515,8 +516,7 @@ int hyperv_set_msg_handler(uint32_t conn_id, HvMsgHandler handler, void *data)
     } else {
         ret = -ENOENT;
     }
-unlock:
-    qemu_mutex_unlock(&handlers_mutex);
+
     return ret;
 }
 
@@ -565,7 +565,7 @@ static int set_event_flag_handler(uint32_t conn_id, EventNotifier *notifier)
     int ret;
     EventFlagHandler *handler;
 
-    qemu_mutex_lock(&handlers_mutex);
+    QEMU_LOCK_GUARD(&handlers_mutex);
     QLIST_FOREACH(handler, &event_flag_handlers, link) {
         if (handler->conn_id == conn_id) {
             if (notifier) {
@@ -575,7 +575,7 @@ static int set_event_flag_handler(uint32_t conn_id, EventNotifier *notifier)
                 g_free_rcu(handler, rcu);
                 ret = 0;
             }
-            goto unlock;
+            return ret;
         }
     }
 
@@ -588,8 +588,7 @@ static int set_event_flag_handler(uint32_t conn_id, EventNotifier *notifier)
     } else {
         ret = -ENOENT;
     }
-unlock:
-    qemu_mutex_unlock(&handlers_mutex);
+
     return ret;
 }
diff --git a/hw/rdma/rdma_backend.c b/hw/rdma/rdma_backend.c
index 3dd39fe1a7..db7e5c8be5 100644
--- a/hw/rdma/rdma_backend.c
+++ b/hw/rdma/rdma_backend.c
@@ -95,36 +95,36 @@ static int rdma_poll_cq(RdmaDeviceResources *rdma_dev_res, struct ibv_cq *ibcq)
     struct ibv_wc wc[2];
     RdmaProtectedGSList *cqe_ctx_list;
 
-    qemu_mutex_lock(&rdma_dev_res->lock);
-    do {
-        ne = ibv_poll_cq(ibcq, ARRAY_SIZE(wc), wc);
+    WITH_QEMU_LOCK_GUARD(&rdma_dev_res->lock) {
+        do {
+            ne = ibv_poll_cq(ibcq, ARRAY_SIZE(wc), wc);
 
-        trace_rdma_poll_cq(ne, ibcq);
+            trace_rdma_poll_cq(ne, ibcq);
 
-        for (i = 0; i < ne; i++) {
-            bctx = rdma_rm_get_cqe_ctx(rdma_dev_res, wc[i].wr_id);
-            if (unlikely(!bctx)) {
-                rdma_error_report("No matching ctx for req %"PRId64,
-                                  wc[i].wr_id);
-                continue;
-            }
+            for (i = 0; i < ne; i++) {
+                bctx = rdma_rm_get_cqe_ctx(rdma_dev_res, wc[i].wr_id);
+                if (unlikely(!bctx)) {
+                    rdma_error_report("No matching ctx for req %"PRId64,
+                                      wc[i].wr_id);
+                    continue;
+                }
 
-            comp_handler(bctx->up_ctx, &wc[i]);
+                comp_handler(bctx->up_ctx, &wc[i]);
 
-            if (bctx->backend_qp) {
-                cqe_ctx_list = &bctx->backend_qp->cqe_ctx_list;
-            } else {
-                cqe_ctx_list = &bctx->backend_srq->cqe_ctx_list;
-            }
+                if (bctx->backend_qp) {
+                    cqe_ctx_list = &bctx->backend_qp->cqe_ctx_list;
+                } else {
+                    cqe_ctx_list = &bctx->backend_srq->cqe_ctx_list;
+                }
 
-            rdma_protected_gslist_remove_int32(cqe_ctx_list, wc[i].wr_id);
-            rdma_rm_dealloc_cqe_ctx(rdma_dev_res, wc[i].wr_id);
-            g_free(bctx);
-        }
-        total_ne += ne;
-    } while (ne > 0);
-    atomic_sub(&rdma_dev_res->stats.missing_cqe, total_ne);
-    qemu_mutex_unlock(&rdma_dev_res->lock);
+                rdma_protected_gslist_remove_int32(cqe_ctx_list, wc[i].wr_id);
+                rdma_rm_dealloc_cqe_ctx(rdma_dev_res, wc[i].wr_id);
+                g_free(bctx);
+            }
+            total_ne += ne;
+        } while (ne > 0);
+        atomic_sub(&rdma_dev_res->stats.missing_cqe, total_ne);
+    }
 
     if (ne < 0) {
         rdma_error_report("ibv_poll_cq fail, rc=%d, errno=%d", ne, errno);
diff --git a/hw/rdma/rdma_rm.c b/hw/rdma/rdma_rm.c
index 7e9ea283c9..60957f88db 100644
--- a/hw/rdma/rdma_rm.c
+++ b/hw/rdma/rdma_rm.c
@@ -147,14 +147,13 @@ static inline void rdma_res_tbl_dealloc(RdmaRmResTbl *tbl, uint32_t handle)
 {
     trace_rdma_res_tbl_dealloc(tbl->name, handle);
 
-    qemu_mutex_lock(&tbl->lock);
+    QEMU_LOCK_GUARD(&tbl->lock);
 
     if (handle < tbl->tbl_sz) {
         clear_bit(handle, tbl->bitmap);
         tbl->used--;
     }
 
-    qemu_mutex_unlock(&tbl->lock);
 }
 
 int rdma_rm_alloc_pd(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev,
diff --git a/hw/vfio/platform.c b/hw/vfio/platform.c
index 6b2952c034..ac2cefc9b1 100644
--- a/hw/vfio/platform.c
+++ b/hw/vfio/platform.c
@@ -22,6 +22,7 @@
 #include "hw/vfio/vfio-platform.h"
 #include "migration/vmstate.h"
 #include "qemu/error-report.h"
+#include "qemu/lockable.h"
 #include "qemu/main-loop.h"
 #include "qemu/module.h"
 #include "qemu/range.h"
@@ -216,7 +217,7 @@ static void vfio_intp_interrupt(VFIOINTp *intp)
     VFIOPlatformDevice *vdev = intp->vdev;
     bool delay_handling = false;
 
-    qemu_mutex_lock(&vdev->intp_mutex);
+    QEMU_LOCK_GUARD(&vdev->intp_mutex);
     if (intp->state == VFIO_IRQ_INACTIVE) {
         QLIST_FOREACH(tmp, &vdev->intp_list, next) {
             if (tmp->state == VFIO_IRQ_ACTIVE ||
@@ -236,7 +237,6 @@ static void vfio_intp_interrupt(VFIOINTp *intp)
         QSIMPLEQ_INSERT_TAIL(&vdev->pending_intp_queue,
                              intp, pqnext);
         ret = event_notifier_test_and_clear(intp->interrupt);
-        qemu_mutex_unlock(&vdev->intp_mutex);
         return;
     }
 
@@ -266,7 +266,6 @@ static void vfio_intp_interrupt(VFIOINTp *intp)
                   qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
                       vdev->mmap_timeout);
     }
-    qemu_mutex_unlock(&vdev->intp_mutex);
 }
 
 /**
diff --git a/include/qemu/lockable.h b/include/qemu/lockable.h
index 1aeb2cb1a6..b620023141 100644
--- a/include/qemu/lockable.h
+++ b/include/qemu/lockable.h
@@ -152,7 +152,7 @@ G_DEFINE_AUTOPTR_CLEANUP_FUNC(QemuLockable, qemu_lockable_auto_unlock)
  *   }
  */
 #define WITH_QEMU_LOCK_GUARD(x) \
-    WITH_QEMU_LOCK_GUARD_((x), qemu_lockable_auto##__COUNTER__)
+    WITH_QEMU_LOCK_GUARD_((x), glue(qemu_lockable_auto, __COUNTER__))
 
 /**
  * QEMU_LOCK_GUARD - Lock an object until the end of the scope
@@ -169,8 +169,9 @@
  *   return; <-- mutex is automatically unlocked
  *   }
  */
-#define QEMU_LOCK_GUARD(x) \
-    g_autoptr(QemuLockable) qemu_lockable_auto##__COUNTER__ = \
+#define QEMU_LOCK_GUARD(x) \
+    g_autoptr(QemuLockable) \
+    glue(qemu_lockable_auto, __COUNTER__) G_GNUC_UNUSED = \
         qemu_lockable_auto_lock(QEMU_MAKE_LOCKABLE((x)))
 
 #endif
diff --git a/include/qemu/rcu.h b/include/qemu/rcu.h
index 9c82683e37..570aa603eb 100644
--- a/include/qemu/rcu.h
+++ b/include/qemu/rcu.h
@@ -170,7 +170,7 @@ static inline void rcu_read_auto_unlock(RCUReadAuto *r)
 G_DEFINE_AUTOPTR_CLEANUP_FUNC(RCUReadAuto, rcu_read_auto_unlock)
 
 #define WITH_RCU_READ_LOCK_GUARD() \
-    WITH_RCU_READ_LOCK_GUARD_(_rcu_read_auto##__COUNTER__)
+    WITH_RCU_READ_LOCK_GUARD_(glue(_rcu_read_auto, __COUNTER__))
 
 #define WITH_RCU_READ_LOCK_GUARD_(var) \
     for (g_autoptr(RCUReadAuto) var = rcu_read_auto_lock(); \
diff --git a/migration/migration.c b/migration/migration.c
index 187ac0410c..177cce9e95 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -1653,11 +1653,10 @@ static void migrate_fd_cleanup_bh(void *opaque)
 
 void migrate_set_error(MigrationState *s, const Error *error)
 {
-    qemu_mutex_lock(&s->error_mutex);
+    QEMU_LOCK_GUARD(&s->error_mutex);
     if (!s->error) {
         s->error = error_copy(error);
     }
-    qemu_mutex_unlock(&s->error_mutex);
 }
 
 void migrate_fd_error(MigrationState *s, const Error *error)
diff --git a/migration/multifd.c b/migration/multifd.c
index cb6a4a3ab8..9123c111a3 100644
--- a/migration/multifd.c
+++ b/migration/multifd.c
@@ -894,11 +894,11 @@ void multifd_recv_sync_main(void)
     for (i = 0; i < migrate_multifd_channels(); i++) {
         MultiFDRecvParams *p = &multifd_recv_state->params[i];
 
-        qemu_mutex_lock(&p->mutex);
-        if (multifd_recv_state->packet_num < p->packet_num) {
-            multifd_recv_state->packet_num = p->packet_num;
+        WITH_QEMU_LOCK_GUARD(&p->mutex) {
+            if (multifd_recv_state->packet_num < p->packet_num) {
+                multifd_recv_state->packet_num = p->packet_num;
+            }
         }
-        qemu_mutex_unlock(&p->mutex);
         trace_multifd_recv_sync_main_signal(p->id);
         qemu_sem_post(&p->sem_sync);
     }
diff --git a/migration/ram.c b/migration/ram.c
index 04f13feb2e..580ec24522 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -1369,7 +1369,7 @@ static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset)
         return NULL;
     }
 
-    qemu_mutex_lock(&rs->src_page_req_mutex);
+    QEMU_LOCK_GUARD(&rs->src_page_req_mutex);
     if (!QSIMPLEQ_EMPTY(&rs->src_page_requests)) {
         struct RAMSrcPageRequest *entry =
                                 QSIMPLEQ_FIRST(&rs->src_page_requests);
@@ -1386,7 +1386,6 @@ static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset)
             migration_consume_urgent_request();
         }
     }
-    qemu_mutex_unlock(&rs->src_page_req_mutex);
 
     return block;
 }
diff --git a/monitor/misc.c b/monitor/misc.c
index 6c45fa490f..9723b466cd 100644
--- a/monitor/misc.c
+++ b/monitor/misc.c
@@ -1473,7 +1473,7 @@ AddfdInfo *monitor_fdset_add_fd(int fd, bool has_fdset_id, int64_t fdset_id,
     MonFdsetFd *mon_fdset_fd;
     AddfdInfo *fdinfo;
 
-    qemu_mutex_lock(&mon_fdsets_lock);
+    QEMU_LOCK_GUARD(&mon_fdsets_lock);
     if (has_fdset_id) {
         QLIST_FOREACH(mon_fdset, &mon_fdsets, next) {
             /* Break if match found or match impossible due to ordering by ID */
@@ -1494,7 +1494,6 @@ AddfdInfo *monitor_fdset_add_fd(int fd, bool has_fdset_id, int64_t fdset_id,
             if (fdset_id < 0) {
                 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "fdset-id",
                            "a non-negative value");
-                qemu_mutex_unlock(&mon_fdsets_lock);
                 return NULL;
             }
             /* Use specified fdset ID */
@@ -1545,7 +1544,6 @@ AddfdInfo *monitor_fdset_add_fd(int fd, bool has_fdset_id, int64_t fdset_id,
 
     fdinfo->fdset_id = mon_fdset->id;
     fdinfo->fd = mon_fdset_fd->fd;
-    qemu_mutex_unlock(&mon_fdsets_lock);
 
     return fdinfo;
 }
diff --git a/tests/qtest/fuzz/fuzz.c b/tests/qtest/fuzz/fuzz.c
index 0d78ac8d36..f5c923852e 100644
--- a/tests/qtest/fuzz/fuzz.c
+++ b/tests/qtest/fuzz/fuzz.c
@@ -91,6 +91,7 @@ static void usage(char *path)
         printf(" * %s : %s\n", tmp->target->name,
                 tmp->target->description);
     }
+    printf("Alternatively, add -target-FUZZ_TARGET to the executable name\n");
     exit(0);
 }
 
@@ -143,18 +144,20 @@ int LLVMFuzzerInitialize(int *argc, char ***argv, char ***envp)
     module_call_init(MODULE_INIT_QOM);
     module_call_init(MODULE_INIT_LIBQOS);
 
-    if (*argc <= 1) {
+    target_name = strstr(**argv, "-target-");
+    if (target_name) {        /* The binary name specifies the target */
+        target_name += strlen("-target-");
+    } else if (*argc > 1) {   /* The target is specified as an argument */
+        target_name = (*argv)[1];
+        if (!strstr(target_name, "--fuzz-target=")) {
+            usage(**argv);
+        }
+        target_name += strlen("--fuzz-target=");
+    } else {
         usage(**argv);
     }
 
     /* Identify the fuzz target */
-    target_name = (*argv)[1];
-    if (!strstr(target_name, "--fuzz-target=")) {
-        usage(**argv);
-    }
-
-    target_name += strlen("--fuzz-target=");
-
     fuzz_target = fuzz_get_target(target_name);
     if (!fuzz_target) {
         usage(**argv);
diff --git a/ui/spice-display.c b/ui/spice-display.c
index 6babe24909..19632fdf6c 100644
--- a/ui/spice-display.c
+++ b/ui/spice-display.c
@@ -18,6 +18,7 @@
 #include "qemu/osdep.h"
 #include "ui/qemu-spice.h"
 #include "qemu/timer.h"
+#include "qemu/lockable.h"
 #include "qemu/main-loop.h"
 #include "qemu/option.h"
 #include "qemu/queue.h"
@@ -483,12 +484,12 @@ void qemu_spice_display_refresh(SimpleSpiceDisplay *ssd)
 {
     graphic_hw_update(ssd->dcl.con);
 
-    qemu_mutex_lock(&ssd->lock);
-    if (QTAILQ_EMPTY(&ssd->updates) && ssd->ds) {
-        qemu_spice_create_update(ssd);
-        ssd->notify++;
+    WITH_QEMU_LOCK_GUARD(&ssd->lock) {
+        if (QTAILQ_EMPTY(&ssd->updates) && ssd->ds) {
+            qemu_spice_create_update(ssd);
+            ssd->notify++;
+        }
     }
-    qemu_mutex_unlock(&ssd->lock);
 
     trace_qemu_spice_display_refresh(ssd->qxl.id, ssd->notify);
     if (ssd->notify) {
@@ -580,7 +581,7 @@ static int interface_get_cursor_command(QXLInstance *sin, QXLCommandExt *ext)
     SimpleSpiceDisplay *ssd = container_of(sin, SimpleSpiceDisplay, qxl);
     int ret;
 
-    qemu_mutex_lock(&ssd->lock);
+    QEMU_LOCK_GUARD(&ssd->lock);
     if (ssd->ptr_define) {
         *ext = ssd->ptr_define->ext;
         ssd->ptr_define = NULL;
@@ -592,7 +593,6 @@ static int interface_get_cursor_command(QXLInstance *sin, QXLCommandExt *ext)
     } else {
         ret = false;
     }
-    qemu_mutex_unlock(&ssd->lock);
 
     return ret;
 }
diff --git a/util/log.c b/util/log.c
index 2da6cb31dc..bdb3d712e8 100644
--- a/util/log.c
+++ b/util/log.c
@@ -25,6 +25,7 @@
 #include "qemu/cutils.h"
 #include "trace/control.h"
 #include "qemu/thread.h"
+#include "qemu/lockable.h"
 
 static char *logfilename;
 static QemuMutex qemu_logfile_mutex;
@@ -94,7 +95,7 @@ void qemu_set_log(int log_flags)
     if (qemu_loglevel && (!is_daemonized() || logfilename)) {
         need_to_open_file = true;
     }
-    qemu_mutex_lock(&qemu_logfile_mutex);
+    QEMU_LOCK_GUARD(&qemu_logfile_mutex);
     if (qemu_logfile && !need_to_open_file) {
         logfile = qemu_logfile;
         atomic_rcu_set(&qemu_logfile, NULL);
@@ -136,7 +137,6 @@ void qemu_set_log(int log_flags)
         }
         atomic_rcu_set(&qemu_logfile, logfile);
     }
-    qemu_mutex_unlock(&qemu_logfile_mutex);
 }
 
 void qemu_log_needs_buffers(void)
diff --git a/util/qemu-timer.c b/util/qemu-timer.c
index d548d3c1ad..b6575a2cd5 100644
--- a/util/qemu-timer.c
+++ b/util/qemu-timer.c
@@ -459,17 +459,16 @@ void timer_mod_anticipate_ns(QEMUTimer *ts, int64_t expire_time)
     QEMUTimerList *timer_list = ts->timer_list;
     bool rearm;
 
-    qemu_mutex_lock(&timer_list->active_timers_lock);
-    if (ts->expire_time == -1 || ts->expire_time > expire_time) {
-        if (ts->expire_time != -1) {
-            timer_del_locked(timer_list, ts);
+    WITH_QEMU_LOCK_GUARD(&timer_list->active_timers_lock) {
+        if (ts->expire_time == -1 || ts->expire_time > expire_time) {
+            if (ts->expire_time != -1) {
+                timer_del_locked(timer_list, ts);
+            }
+            rearm = timer_mod_ns_locked(timer_list, ts, expire_time);
+        } else {
+            rearm = false;
         }
-        rearm = timer_mod_ns_locked(timer_list, ts, expire_time);
-    } else {
-        rearm = false;
     }
-    qemu_mutex_unlock(&timer_list->active_timers_lock);
-
     if (rearm) {
         timerlist_rearm(timer_list);
     }
diff --git a/util/rcu.c b/util/rcu.c
index 177a675619..60a37f72c3 100644
--- a/util/rcu.c
+++ b/util/rcu.c
@@ -31,6 +31,7 @@
 #include "qemu/atomic.h"
 #include "qemu/thread.h"
 #include "qemu/main-loop.h"
+#include "qemu/lockable.h"
 #if defined(CONFIG_MALLOC_TRIM)
 #include <malloc.h>
 #endif
@@ -141,14 +142,14 @@ static void wait_for_readers(void)
 
 void synchronize_rcu(void)
 {
-    qemu_mutex_lock(&rcu_sync_lock);
+    QEMU_LOCK_GUARD(&rcu_sync_lock);
 
     /* Write RCU-protected pointers before reading p_rcu_reader->ctr.
      * Pairs with smp_mb_placeholder() in rcu_read_lock().
      */
     smp_mb_global();
 
-    qemu_mutex_lock(&rcu_registry_lock);
+    QEMU_LOCK_GUARD(&rcu_registry_lock);
     if (!QLIST_EMPTY(&registry)) {
         /* In either case, the atomic_mb_set below blocks stores that free
          * old RCU-protected pointers.
@@ -169,9 +170,6 @@ void synchronize_rcu(void)
 
         wait_for_readers();
     }
-
-    qemu_mutex_unlock(&rcu_registry_lock);
-    qemu_mutex_unlock(&rcu_sync_lock);
 }
diff --git a/util/thread-pool.c b/util/thread-pool.c
index 4ed9b89ab2..d763cea505 100644
--- a/util/thread-pool.c
+++ b/util/thread-pool.c
@@ -210,7 +210,7 @@ static void thread_pool_cancel(BlockAIOCB *acb)
 
     trace_thread_pool_cancel(elem, elem->common.opaque);
 
-    qemu_mutex_lock(&pool->lock);
+    QEMU_LOCK_GUARD(&pool->lock);
     if (elem->state == THREAD_QUEUED &&
         /* No thread has yet started working on elem. we can try to "steal"
          * the item from the worker if we can get a signal from the
@@ -225,7 +225,6 @@ static void thread_pool_cancel(BlockAIOCB *acb)
         elem->ret = -ECANCELED;
     }
 
-    qemu_mutex_unlock(&pool->lock);
 }
 
 static AioContext *thread_pool_get_aio_context(BlockAIOCB *acb)
diff --git a/util/vfio-helpers.c b/util/vfio-helpers.c
index ddd9a96e76..e399e330e2 100644
--- a/util/vfio-helpers.c
+++ b/util/vfio-helpers.c
@@ -21,6 +21,7 @@
 #include "standard-headers/linux/pci_regs.h"
 #include "qemu/event_notifier.h"
 #include "qemu/vfio-helpers.h"
+#include "qemu/lockable.h"
 #include "trace.h"
 
 #define QEMU_VFIO_DEBUG 0
@@ -667,14 +668,12 @@ int qemu_vfio_dma_reset_temporary(QEMUVFIOState *s)
         .size = QEMU_VFIO_IOVA_MAX - s->high_water_mark,
     };
     trace_qemu_vfio_dma_reset_temporary(s);
-    qemu_mutex_lock(&s->lock);
+    QEMU_LOCK_GUARD(&s->lock);
     if (ioctl(s->container, VFIO_IOMMU_UNMAP_DMA, &unmap)) {
         error_report("VFIO_UNMAP_DMA failed: %s", strerror(errno));
-        qemu_mutex_unlock(&s->lock);
         return -errno;
     }
     s->high_water_mark = QEMU_VFIO_IOVA_MAX;
-    qemu_mutex_unlock(&s->lock);
     return 0;
 }