author     Peter Maydell <peter.maydell@linaro.org>  2020-05-04 20:35:59 +0100
committer  Peter Maydell <peter.maydell@linaro.org>  2020-05-04 20:35:59 +0100
commit     5c7c46fea9f745956fcc7bea5c3af8380a599ee4 (patch)
tree       430973a6470b5f1a7718715d04401b03c4ef201b /hw
parent     5375af3cd7b8adcc10c18d8083b7be63976c9645 (diff)
parent     08b689aa6b521964b8275dd7a2564aefa5d68129 (diff)
Merge remote-tracking branch 'remotes/stefanha/tags/block-pull-request' into staging
Pull request

v2:
 * Fixed stray slirp submodule change [Peter]

Fixes for the lock guard macros, code conversions to the lock guard macros,
and support for selecting fuzzer targets with argv[0].

# gpg: Signature made Mon 04 May 2020 16:11:11 BST
# gpg:                using RSA key 8695A8BFD3F97CDAAC35775A9CA4ABB381AB73C8
# gpg: Good signature from "Stefan Hajnoczi <stefanha@redhat.com>" [full]
# gpg:                 aka "Stefan Hajnoczi <stefanha@gmail.com>" [full]
# Primary key fingerprint: 8695 A8BF D3F9 7CDA AC35 775A 9CA4 ABB3 81AB 73C8

* remotes/stefanha/tags/block-pull-request:
  lockable: Replace locks with lock guard macros
  lockable: replaced locks with lock guard macros where appropriate
  lockable: fix __COUNTER__ macro to be referenced properly
  fuzz: select fuzz target using executable name

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
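A note on the mechanism, since most of this pull is conversions to these macros: QEMU_LOCK_GUARD(m) locks m immediately and arranges for it to be unlocked automatically on every exit from the enclosing scope, which is why the hunks below can drop the explicit unlock calls in front of early returns. What follows is a minimal standalone sketch of the same technique using a plain pthread mutex and the GCC/Clang cleanup attribute; the names (LOCK_GUARD, lock_guard_release, table_dealloc) are illustrative only, not QEMU API. The real macros live in include/qemu/lockable.h and work with any QemuLockable.

#include <pthread.h>
#include <stdio.h>

/* Cleanup handler: receives a pointer to the guard variable. */
static void lock_guard_release(pthread_mutex_t **g)
{
    pthread_mutex_unlock(*g);
}

/* Lock now; unlock automatically on every exit from the enclosing scope.
 * (The real macro pastes __COUNTER__ into the variable name so that two
 * guards can coexist in one function -- see the example further down.) */
#define LOCK_GUARD(m)                                            \
    __attribute__((cleanup(lock_guard_release)))                 \
    pthread_mutex_t *lock_guard__ = (pthread_mutex_lock(m), (m))

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned table_used = 1;

/* Mirrors the shape of rdma_res_tbl_dealloc() after the conversion. */
static int table_dealloc(unsigned handle, unsigned table_size)
{
    LOCK_GUARD(&table_lock);
    if (handle >= table_size) {
        return -1;   /* early return: the guard still unlocks */
    }
    table_used--;
    return 0;        /* normal return: unlocked here too */
}

int main(void)
{
    printf("rc=%d used=%u\n", table_dealloc(0, 8), table_used);
    return 0;
}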
Diffstat (limited to 'hw')
-rw-r--r--  hw/display/qxl.c       | 43
-rw-r--r--  hw/hyperv/hyperv.c     | 15
-rw-r--r--  hw/rdma/rdma_backend.c | 50
-rw-r--r--  hw/rdma/rdma_rm.c      |  3
-rw-r--r--  hw/vfio/platform.c     |  5
5 files changed, 55 insertions(+), 61 deletions(-)
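The commit "lockable: fix __COUNTER__ macro to be referenced properly" addresses a classic preprocessor pitfall: the ## operator pastes its operands before expanding them, so pasting __COUNTER__ directly yields the same literal identifier every time, and two guards in one function collide. A small self-contained illustration (macro and variable names here are hypothetical, not QEMU's):

#include <stdio.h>

/* Broken: expands to the literal identifier guard___COUNTER__ every time. */
#define DECL_BAD()   int guard_##__COUNTER__ = 0

/* Fixed: one level of indirection lets __COUNTER__ expand to 0, 1, 2, ...
 * before the paste happens. */
#define PASTE_(a, b) a##b
#define PASTE(a, b)  PASTE_(a, b)
#define DECL_OK(v)   int PASTE(guard_, __COUNTER__) = (v)

int main(void)
{
    DECL_OK(10);  /* first expansion in this file: declares guard_0 */
    DECL_OK(20);  /* declares guard_1 -- distinct names, no clash */
    printf("%d %d\n", guard_0, guard_1);

    /* Two DECL_BAD() uses in one scope would both declare
     * guard___COUNTER__ and fail with a redefinition error. */
    return 0;
}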
diff --git a/hw/display/qxl.c b/hw/display/qxl.c
index 227da69a50..d5627119ec 100644
--- a/hw/display/qxl.c
+++ b/hw/display/qxl.c
@@ -478,18 +478,19 @@ static int qxl_track_command(PCIQXLDevice *qxl, struct QXLCommandExt *ext)
cmd->u.surface_create.stride);
return 1;
}
- qemu_mutex_lock(&qxl->track_lock);
- if (cmd->type == QXL_SURFACE_CMD_CREATE) {
- qxl->guest_surfaces.cmds[id] = ext->cmd.data;
- qxl->guest_surfaces.count++;
- if (qxl->guest_surfaces.max < qxl->guest_surfaces.count)
- qxl->guest_surfaces.max = qxl->guest_surfaces.count;
- }
- if (cmd->type == QXL_SURFACE_CMD_DESTROY) {
- qxl->guest_surfaces.cmds[id] = 0;
- qxl->guest_surfaces.count--;
+ WITH_QEMU_LOCK_GUARD(&qxl->track_lock) {
+ if (cmd->type == QXL_SURFACE_CMD_CREATE) {
+ qxl->guest_surfaces.cmds[id] = ext->cmd.data;
+ qxl->guest_surfaces.count++;
+ if (qxl->guest_surfaces.max < qxl->guest_surfaces.count) {
+ qxl->guest_surfaces.max = qxl->guest_surfaces.count;
+ }
+ }
+ if (cmd->type == QXL_SURFACE_CMD_DESTROY) {
+ qxl->guest_surfaces.cmds[id] = 0;
+ qxl->guest_surfaces.count--;
+ }
}
- qemu_mutex_unlock(&qxl->track_lock);
break;
}
case QXL_CMD_CURSOR:
@@ -958,10 +959,9 @@ static void interface_update_area_complete(QXLInstance *sin,
int i;
int qxl_i;
- qemu_mutex_lock(&qxl->ssd.lock);
+ QEMU_LOCK_GUARD(&qxl->ssd.lock);
if (surface_id != 0 || !num_updated_rects ||
!qxl->render_update_cookie_num) {
- qemu_mutex_unlock(&qxl->ssd.lock);
return;
}
trace_qxl_interface_update_area_complete(qxl->id, surface_id, dirty->left,
@@ -980,7 +980,6 @@ static void interface_update_area_complete(QXLInstance *sin,
* Don't bother copying or scheduling the bh since we will flip
* the whole area anyway on completion of the update_area async call
*/
- qemu_mutex_unlock(&qxl->ssd.lock);
return;
}
qxl_i = qxl->num_dirty_rects;
@@ -991,7 +990,6 @@ static void interface_update_area_complete(QXLInstance *sin,
trace_qxl_interface_update_area_complete_schedule_bh(qxl->id,
qxl->num_dirty_rects);
qemu_bh_schedule(qxl->update_area_bh);
- qemu_mutex_unlock(&qxl->ssd.lock);
}
/* called from spice server thread context only */
@@ -1694,15 +1692,14 @@ static void ioport_write(void *opaque, hwaddr addr,
case QXL_IO_MONITORS_CONFIG_ASYNC:
async_common:
async = QXL_ASYNC;
- qemu_mutex_lock(&d->async_lock);
- if (d->current_async != QXL_UNDEFINED_IO) {
- qxl_set_guest_bug(d, "%d async started before last (%d) complete",
- io_port, d->current_async);
- qemu_mutex_unlock(&d->async_lock);
- return;
+ WITH_QEMU_LOCK_GUARD(&d->async_lock) {
+ if (d->current_async != QXL_UNDEFINED_IO) {
+ qxl_set_guest_bug(d, "%d async started before last (%d) complete",
+ io_port, d->current_async);
+ return;
+ }
+ d->current_async = orig_io_port;
}
- d->current_async = orig_io_port;
- qemu_mutex_unlock(&d->async_lock);
break;
default:
break;
diff --git a/hw/hyperv/hyperv.c b/hw/hyperv/hyperv.c
index 8ca3706f5b..4ddafe1de1 100644
--- a/hw/hyperv/hyperv.c
+++ b/hw/hyperv/hyperv.c
@@ -15,6 +15,7 @@
#include "sysemu/kvm.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
+#include "qemu/lockable.h"
#include "qemu/queue.h"
#include "qemu/rcu.h"
#include "qemu/rcu_queue.h"
@@ -491,7 +492,7 @@ int hyperv_set_msg_handler(uint32_t conn_id, HvMsgHandler handler, void *data)
int ret;
MsgHandler *mh;
- qemu_mutex_lock(&handlers_mutex);
+ QEMU_LOCK_GUARD(&handlers_mutex);
QLIST_FOREACH(mh, &msg_handlers, link) {
if (mh->conn_id == conn_id) {
if (handler) {
@@ -501,7 +502,7 @@ int hyperv_set_msg_handler(uint32_t conn_id, HvMsgHandler handler, void *data)
g_free_rcu(mh, rcu);
ret = 0;
}
- goto unlock;
+ return ret;
}
}
@@ -515,8 +516,7 @@ int hyperv_set_msg_handler(uint32_t conn_id, HvMsgHandler handler, void *data)
} else {
ret = -ENOENT;
}
-unlock:
- qemu_mutex_unlock(&handlers_mutex);
+
return ret;
}
@@ -565,7 +565,7 @@ static int set_event_flag_handler(uint32_t conn_id, EventNotifier *notifier)
int ret;
EventFlagHandler *handler;
- qemu_mutex_lock(&handlers_mutex);
+ QEMU_LOCK_GUARD(&handlers_mutex);
QLIST_FOREACH(handler, &event_flag_handlers, link) {
if (handler->conn_id == conn_id) {
if (notifier) {
@@ -575,7 +575,7 @@ static int set_event_flag_handler(uint32_t conn_id, EventNotifier *notifier)
g_free_rcu(handler, rcu);
ret = 0;
}
- goto unlock;
+ return ret;
}
}
@@ -588,8 +588,7 @@ static int set_event_flag_handler(uint32_t conn_id, EventNotifier *notifier)
} else {
ret = -ENOENT;
}
-unlock:
- qemu_mutex_unlock(&handlers_mutex);
+
return ret;
}
diff --git a/hw/rdma/rdma_backend.c b/hw/rdma/rdma_backend.c
index 3dd39fe1a7..db7e5c8be5 100644
--- a/hw/rdma/rdma_backend.c
+++ b/hw/rdma/rdma_backend.c
@@ -95,36 +95,36 @@ static int rdma_poll_cq(RdmaDeviceResources *rdma_dev_res, struct ibv_cq *ibcq)
struct ibv_wc wc[2];
RdmaProtectedGSList *cqe_ctx_list;
- qemu_mutex_lock(&rdma_dev_res->lock);
- do {
- ne = ibv_poll_cq(ibcq, ARRAY_SIZE(wc), wc);
+ WITH_QEMU_LOCK_GUARD(&rdma_dev_res->lock) {
+ do {
+ ne = ibv_poll_cq(ibcq, ARRAY_SIZE(wc), wc);
- trace_rdma_poll_cq(ne, ibcq);
+ trace_rdma_poll_cq(ne, ibcq);
- for (i = 0; i < ne; i++) {
- bctx = rdma_rm_get_cqe_ctx(rdma_dev_res, wc[i].wr_id);
- if (unlikely(!bctx)) {
- rdma_error_report("No matching ctx for req %"PRId64,
- wc[i].wr_id);
- continue;
- }
+ for (i = 0; i < ne; i++) {
+ bctx = rdma_rm_get_cqe_ctx(rdma_dev_res, wc[i].wr_id);
+ if (unlikely(!bctx)) {
+ rdma_error_report("No matching ctx for req %"PRId64,
+ wc[i].wr_id);
+ continue;
+ }
- comp_handler(bctx->up_ctx, &wc[i]);
+ comp_handler(bctx->up_ctx, &wc[i]);
- if (bctx->backend_qp) {
- cqe_ctx_list = &bctx->backend_qp->cqe_ctx_list;
- } else {
- cqe_ctx_list = &bctx->backend_srq->cqe_ctx_list;
- }
+ if (bctx->backend_qp) {
+ cqe_ctx_list = &bctx->backend_qp->cqe_ctx_list;
+ } else {
+ cqe_ctx_list = &bctx->backend_srq->cqe_ctx_list;
+ }
- rdma_protected_gslist_remove_int32(cqe_ctx_list, wc[i].wr_id);
- rdma_rm_dealloc_cqe_ctx(rdma_dev_res, wc[i].wr_id);
- g_free(bctx);
- }
- total_ne += ne;
- } while (ne > 0);
- atomic_sub(&rdma_dev_res->stats.missing_cqe, total_ne);
- qemu_mutex_unlock(&rdma_dev_res->lock);
+ rdma_protected_gslist_remove_int32(cqe_ctx_list, wc[i].wr_id);
+ rdma_rm_dealloc_cqe_ctx(rdma_dev_res, wc[i].wr_id);
+ g_free(bctx);
+ }
+ total_ne += ne;
+ } while (ne > 0);
+ atomic_sub(&rdma_dev_res->stats.missing_cqe, total_ne);
+ }
if (ne < 0) {
rdma_error_report("ibv_poll_cq fail, rc=%d, errno=%d", ne, errno);
diff --git a/hw/rdma/rdma_rm.c b/hw/rdma/rdma_rm.c
index 7e9ea283c9..60957f88db 100644
--- a/hw/rdma/rdma_rm.c
+++ b/hw/rdma/rdma_rm.c
@@ -147,14 +147,13 @@ static inline void rdma_res_tbl_dealloc(RdmaRmResTbl *tbl, uint32_t handle)
{
trace_rdma_res_tbl_dealloc(tbl->name, handle);
- qemu_mutex_lock(&tbl->lock);
+ QEMU_LOCK_GUARD(&tbl->lock);
if (handle < tbl->tbl_sz) {
clear_bit(handle, tbl->bitmap);
tbl->used--;
}
- qemu_mutex_unlock(&tbl->lock);
}
int rdma_rm_alloc_pd(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev,
diff --git a/hw/vfio/platform.c b/hw/vfio/platform.c
index 6b2952c034..ac2cefc9b1 100644
--- a/hw/vfio/platform.c
+++ b/hw/vfio/platform.c
@@ -22,6 +22,7 @@
#include "hw/vfio/vfio-platform.h"
#include "migration/vmstate.h"
#include "qemu/error-report.h"
+#include "qemu/lockable.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qemu/range.h"
@@ -216,7 +217,7 @@ static void vfio_intp_interrupt(VFIOINTp *intp)
VFIOPlatformDevice *vdev = intp->vdev;
bool delay_handling = false;
- qemu_mutex_lock(&vdev->intp_mutex);
+ QEMU_LOCK_GUARD(&vdev->intp_mutex);
if (intp->state == VFIO_IRQ_INACTIVE) {
QLIST_FOREACH(tmp, &vdev->intp_list, next) {
if (tmp->state == VFIO_IRQ_ACTIVE ||
@@ -236,7 +237,6 @@ static void vfio_intp_interrupt(VFIOINTp *intp)
QSIMPLEQ_INSERT_TAIL(&vdev->pending_intp_queue,
intp, pqnext);
ret = event_notifier_test_and_clear(intp->interrupt);
- qemu_mutex_unlock(&vdev->intp_mutex);
return;
}
@@ -266,7 +266,6 @@ static void vfio_intp_interrupt(VFIOINTp *intp)
qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
vdev->mmap_timeout);
}
- qemu_mutex_unlock(&vdev->intp_mutex);
}
/**
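Finally, the pull's fourth commit, "fuzz: select fuzz target using executable name", picks the fuzz target from argv[0] so a single binary, or a set of symlinks to it, can dispatch to several targets. QEMU's actual fuzzer code differs; the following is a standalone sketch of the dispatch idea with hypothetical target names:

#include <stdio.h>
#include <string.h>

typedef void (*target_fn)(void);

static void fuzz_virtio(void) { puts("running virtio target"); }
static void fuzz_i440fx(void) { puts("running i440fx target"); }

static const struct { const char *name; target_fn run; } targets[] = {
    { "qemu-fuzz-virtio", fuzz_virtio },
    { "qemu-fuzz-i440fx", fuzz_i440fx },
};

int main(int argc, char **argv)
{
    (void)argc;
    /* Use the basename of argv[0] so a symlink's name selects the target. */
    const char *slash = strrchr(argv[0], '/');
    const char *name = slash ? slash + 1 : argv[0];

    for (size_t i = 0; i < sizeof(targets) / sizeof(targets[0]); i++) {
        if (strcmp(name, targets[i].name) == 0) {
            targets[i].run();
            return 0;
        }
    }
    fprintf(stderr, "unknown target '%s'\n", name);
    return 1;
}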