author     Fam Zheng <famz@redhat.com>            2017-07-04 20:23:25 +0800
committer  Paolo Bonzini <pbonzini@redhat.com>    2017-07-04 14:39:28 +0200
commit     c096358e747e88fc7364e40e3c354ee0bb683960
tree       241c11c021450913505a090cf2cf5629c1488a41  /util/qemu-thread-posix.c
parent     025bdeab3c163aee9604a60b2332a5fcbcc00f8d
qemu-thread: Assert locks are initialized before use
Not all platforms check whether a lock is initialized before it is
used; in particular, Linux seems to be more permissive than OSX.

Check the initialization state explicitly in our code to catch such
bugs earlier.
Signed-off-by: Fam Zheng <famz@redhat.com>
Message-Id: <20170704122325.25634-1-famz@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
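
The pattern the patch applies is easiest to see in isolation. Below is a
minimal standalone sketch of the same guard-flag idea (hypothetical names,
not QEMU code): init sets a flag, destroy clears it, and every operation
asserts on it, so use-before-init and use-after-destroy abort
deterministically on every platform instead of only on the stricter ones.
Note the check is best-effort: it relies on the flag reading false in
freshly zeroed memory.

#include <assert.h>
#include <pthread.h>
#include <stdbool.h>

typedef struct {
    pthread_mutex_t lock;
    bool initialized;            /* set by init, cleared by destroy */
} GuardedMutex;

static void guarded_mutex_init(GuardedMutex *m)
{
    int err = pthread_mutex_init(&m->lock, NULL);
    assert(err == 0);
    m->initialized = true;
}

static void guarded_mutex_lock(GuardedMutex *m)
{
    assert(m->initialized);      /* catches use-before-init */
    int err = pthread_mutex_lock(&m->lock);
    assert(err == 0);
}

static void guarded_mutex_unlock(GuardedMutex *m)
{
    assert(m->initialized);
    int err = pthread_mutex_unlock(&m->lock);
    assert(err == 0);
}

static void guarded_mutex_destroy(GuardedMutex *m)
{
    assert(m->initialized);      /* catches double-destroy */
    m->initialized = false;      /* cleared first, as in the patch */
    int err = pthread_mutex_destroy(&m->lock);
    assert(err == 0);
}

Clearing the flag before calling pthread_mutex_destroy() mirrors the
ordering in the patch: a use that races with destruction then trips the
assert rather than operating on an already-destroyed mutex.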
Diffstat (limited to 'util/qemu-thread-posix.c')
-rw-r--r--  util/qemu-thread-posix.c  27
1 file changed, 27 insertions(+), 0 deletions(-)
diff --git a/util/qemu-thread-posix.c b/util/qemu-thread-posix.c
index eacd99e497..4e95d272dc 100644
--- a/util/qemu-thread-posix.c
+++ b/util/qemu-thread-posix.c
@@ -43,12 +43,15 @@ void qemu_mutex_init(QemuMutex *mutex)
     err = pthread_mutex_init(&mutex->lock, NULL);
     if (err)
         error_exit(err, __func__);
+    mutex->initialized = true;
 }
 
 void qemu_mutex_destroy(QemuMutex *mutex)
 {
     int err;
 
+    assert(mutex->initialized);
+    mutex->initialized = false;
     err = pthread_mutex_destroy(&mutex->lock);
     if (err)
         error_exit(err, __func__);
@@ -58,6 +61,7 @@ void qemu_mutex_lock(QemuMutex *mutex)
 {
     int err;
 
+    assert(mutex->initialized);
     err = pthread_mutex_lock(&mutex->lock);
     if (err)
         error_exit(err, __func__);
@@ -69,6 +73,7 @@ int qemu_mutex_trylock(QemuMutex *mutex)
 {
     int err;
 
+    assert(mutex->initialized);
     err = pthread_mutex_trylock(&mutex->lock);
     if (err == 0) {
         trace_qemu_mutex_locked(mutex);
@@ -84,6 +89,7 @@ void qemu_mutex_unlock(QemuMutex *mutex)
 {
     int err;
 
+    assert(mutex->initialized);
     trace_qemu_mutex_unlocked(mutex);
     err = pthread_mutex_unlock(&mutex->lock);
     if (err)
@@ -102,6 +108,7 @@ void qemu_rec_mutex_init(QemuRecMutex *mutex)
     if (err) {
         error_exit(err, __func__);
     }
+    mutex->initialized = true;
 }
 
 void qemu_cond_init(QemuCond *cond)
@@ -111,12 +118,15 @@ void qemu_cond_init(QemuCond *cond)
     err = pthread_cond_init(&cond->cond, NULL);
     if (err)
         error_exit(err, __func__);
+    cond->initialized = true;
 }
 
 void qemu_cond_destroy(QemuCond *cond)
 {
     int err;
 
+    assert(cond->initialized);
+    cond->initialized = false;
     err = pthread_cond_destroy(&cond->cond);
     if (err)
         error_exit(err, __func__);
@@ -126,6 +136,7 @@ void qemu_cond_signal(QemuCond *cond)
 {
     int err;
 
+    assert(cond->initialized);
     err = pthread_cond_signal(&cond->cond);
     if (err)
         error_exit(err, __func__);
@@ -135,6 +146,7 @@ void qemu_cond_broadcast(QemuCond *cond)
 {
     int err;
 
+    assert(cond->initialized);
     err = pthread_cond_broadcast(&cond->cond);
     if (err)
         error_exit(err, __func__);
@@ -144,6 +156,7 @@ void qemu_cond_wait(QemuCond *cond, QemuMutex *mutex)
 {
     int err;
 
+    assert(cond->initialized);
     trace_qemu_mutex_unlocked(mutex);
     err = pthread_cond_wait(&cond->cond, &mutex->lock);
     trace_qemu_mutex_locked(mutex);
@@ -174,12 +187,15 @@ void qemu_sem_init(QemuSemaphore *sem, int init)
         error_exit(errno, __func__);
     }
 #endif
+    sem->initialized = true;
 }
 
 void qemu_sem_destroy(QemuSemaphore *sem)
 {
     int rc;
 
+    assert(sem->initialized);
+    sem->initialized = false;
 #if defined(__APPLE__) || defined(__NetBSD__)
     rc = pthread_cond_destroy(&sem->cond);
     if (rc < 0) {
@@ -201,6 +217,7 @@ void qemu_sem_post(QemuSemaphore *sem)
 {
     int rc;
 
+    assert(sem->initialized);
 #if defined(__APPLE__) || defined(__NetBSD__)
     pthread_mutex_lock(&sem->lock);
     if (sem->count == UINT_MAX) {
@@ -238,6 +255,7 @@ int qemu_sem_timedwait(QemuSemaphore *sem, int ms)
     int rc;
     struct timespec ts;
 
+    assert(sem->initialized);
 #if defined(__APPLE__) || defined(__NetBSD__)
     rc = 0;
     compute_abs_deadline(&ts, ms);
@@ -285,6 +303,7 @@ void qemu_sem_wait(QemuSemaphore *sem)
 {
     int rc;
 
+    assert(sem->initialized);
 #if defined(__APPLE__) || defined(__NetBSD__)
     pthread_mutex_lock(&sem->lock);
     while (sem->count == 0) {
@@ -310,6 +329,7 @@ void qemu_sem_wait(QemuSemaphore *sem)
 #else
 static inline void qemu_futex_wake(QemuEvent *ev, int n)
 {
+    assert(ev->initialized);
     pthread_mutex_lock(&ev->lock);
     if (n == 1) {
         pthread_cond_signal(&ev->cond);
@@ -321,6 +341,7 @@ static inline void qemu_futex_wake(QemuEvent *ev, int n)
 
 static inline void qemu_futex_wait(QemuEvent *ev, unsigned val)
 {
+    assert(ev->initialized);
     pthread_mutex_lock(&ev->lock);
     if (ev->value == val) {
         pthread_cond_wait(&ev->cond, &ev->lock);
@@ -355,10 +376,13 @@ void qemu_event_init(QemuEvent *ev, bool init)
 #endif
 
     ev->value = (init ? EV_SET : EV_FREE);
+    ev->initialized = true;
 }
 
 void qemu_event_destroy(QemuEvent *ev)
 {
+    assert(ev->initialized);
+    ev->initialized = false;
 #ifndef __linux__
     pthread_mutex_destroy(&ev->lock);
     pthread_cond_destroy(&ev->cond);
@@ -370,6 +394,7 @@ void qemu_event_set(QemuEvent *ev)
     /* qemu_event_set has release semantics, but because it *loads*
      * ev->value we need a full memory barrier here.
      */
+    assert(ev->initialized);
     smp_mb();
     if (atomic_read(&ev->value) != EV_SET) {
         if (atomic_xchg(&ev->value, EV_SET) == EV_BUSY) {
@@ -383,6 +408,7 @@ void qemu_event_reset(QemuEvent *ev)
 {
     unsigned value;
 
+    assert(ev->initialized);
     value = atomic_read(&ev->value);
     smp_mb_acquire();
     if (value == EV_SET) {
@@ -398,6 +424,7 @@ void qemu_event_wait(QemuEvent *ev)
 {
     unsigned value;
 
+    assert(ev->initialized);
     value = atomic_read(&ev->value);
     smp_mb_acquire();
     if (value != EV_SET) {
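
For context, this is the failure mode the assertions surface. The snippet
below is a hypothetical caller, not code from the QEMU tree: a zero-filled
pthread_mutex_t happens to look like a validly initialized mutex on Linux,
so locking it "works" there, while stricter platforms such as OSX reject
it. With the guard flag, the assert fires everywhere.

#include "qemu/osdep.h"
#include "qemu/thread.h"             /* QemuMutex, qemu_mutex_*() */

static void broken_path(void)
{
    /* Zero-allocated, but never passed to qemu_mutex_init(). */
    QemuMutex *m = g_new0(QemuMutex, 1);

    /* Before this patch: pthread_mutex_lock() on the zeroed mutex
     * tends to succeed silently on Linux and only error_exit()s on
     * stricter platforms.  After it: assert(mutex->initialized)
     * aborts on every platform, pointing at the missing init. */
    qemu_mutex_lock(m);
    qemu_mutex_unlock(m);
    g_free(m);
}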