Diffstat (limited to 'util/qemu-thread-win32.c')
-rw-r--r--  util/qemu-thread-win32.c  136
1 file changed, 15 insertions, 121 deletions
diff --git a/util/qemu-thread-win32.c b/util/qemu-thread-win32.c
index 29c3e4dd85..59befd5202 100644
--- a/util/qemu-thread-win32.c
+++ b/util/qemu-thread-win32.c
@@ -10,6 +10,11 @@
* See the COPYING file in the top-level directory.
*
*/
+
+#ifndef _WIN32_WINNT
+#define _WIN32_WINNT 0x0600
+#endif
+
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/thread.h"
@@ -39,44 +44,30 @@ static void error_exit(int err, const char *msg)
void qemu_mutex_init(QemuMutex *mutex)
{
- mutex->owner = 0;
- InitializeCriticalSection(&mutex->lock);
+ InitializeSRWLock(&mutex->lock);
}
void qemu_mutex_destroy(QemuMutex *mutex)
{
- assert(mutex->owner == 0);
- DeleteCriticalSection(&mutex->lock);
+ InitializeSRWLock(&mutex->lock);
}
void qemu_mutex_lock(QemuMutex *mutex)
{
- EnterCriticalSection(&mutex->lock);
-
- /* Win32 CRITICAL_SECTIONs are recursive. Assert that we're not
- * using them as such.
- */
- assert(mutex->owner == 0);
- mutex->owner = GetCurrentThreadId();
+ AcquireSRWLockExclusive(&mutex->lock);
}
int qemu_mutex_trylock(QemuMutex *mutex)
{
int owned;
- owned = TryEnterCriticalSection(&mutex->lock);
- if (owned) {
- assert(mutex->owner == 0);
- mutex->owner = GetCurrentThreadId();
- }
+ owned = TryAcquireSRWLockExclusive(&mutex->lock);
return !owned;
}
void qemu_mutex_unlock(QemuMutex *mutex)
{
- assert(mutex->owner == GetCurrentThreadId());
- mutex->owner = 0;
- LeaveCriticalSection(&mutex->lock);
+ ReleaseSRWLockExclusive(&mutex->lock);
}
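
The mutex side of the patch replaces the CRITICAL_SECTION, and the hand-rolled owner field that asserted against recursive locking, with an exclusive-mode SRW lock: SRW locks are never recursive, need no DeleteCriticalSection-style teardown (destroy simply re-initializes the lock), and TryAcquireSRWLockExclusive() returns nonzero on success, hence the !owned to keep the pthread-style zero-on-success return. A minimal standalone sketch of the same pattern, outside QEMU:

    /* Toy example of the exclusive SRW lock pattern used above (not QEMU code). */
    #include <windows.h>

    static SRWLOCK counter_lock = SRWLOCK_INIT;
    static int counter;

    static void bump(void)
    {
        AcquireSRWLockExclusive(&counter_lock);    /* blocks; not recursive */
        counter++;
        ReleaseSRWLockExclusive(&counter_lock);
    }

    static int try_bump(void)
    {
        if (!TryAcquireSRWLockExclusive(&counter_lock)) {
            return -1;                             /* lock was busy */
        }
        counter++;
        ReleaseSRWLockExclusive(&counter_lock);
        return 0;
    }
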
void qemu_rec_mutex_init(QemuRecMutex *mutex)
@@ -107,124 +98,27 @@ void qemu_rec_mutex_unlock(QemuRecMutex *mutex)
void qemu_cond_init(QemuCond *cond)
{
memset(cond, 0, sizeof(*cond));
-
- cond->sema = CreateSemaphore(NULL, 0, LONG_MAX, NULL);
- if (!cond->sema) {
- error_exit(GetLastError(), __func__);
- }
- cond->continue_event = CreateEvent(NULL, /* security */
- FALSE, /* auto-reset */
- FALSE, /* not signaled */
- NULL); /* name */
- if (!cond->continue_event) {
- error_exit(GetLastError(), __func__);
- }
+ InitializeConditionVariable(&cond->var);
}
void qemu_cond_destroy(QemuCond *cond)
{
- BOOL result;
- result = CloseHandle(cond->continue_event);
- if (!result) {
- error_exit(GetLastError(), __func__);
- }
- cond->continue_event = 0;
- result = CloseHandle(cond->sema);
- if (!result) {
- error_exit(GetLastError(), __func__);
- }
- cond->sema = 0;
+ InitializeConditionVariable(&cond->var);
}
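
As with the SRW lock, a native condition variable owns no kernel handles, so there is nothing to close: qemu_cond_destroy() can simply re-initialize the object, which is why its body matches qemu_cond_init(). A statically allocated variable can also use the CONDITION_VARIABLE_INIT initializer instead of calling InitializeConditionVariable(), as in this toy illustration:

    /* Toy illustration, not part of this patch: static initialization
     * instead of explicit InitializeConditionVariable()/InitializeSRWLock() calls. */
    #include <windows.h>

    static CONDITION_VARIABLE example_cv = CONDITION_VARIABLE_INIT;
    static SRWLOCK example_lock = SRWLOCK_INIT;
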
void qemu_cond_signal(QemuCond *cond)
{
- DWORD result;
-
- /*
- * Signal only when there are waiters. cond->waiters is
- * incremented by pthread_cond_wait under the external lock,
- * so we are safe about that.
- */
- if (cond->waiters == 0) {
- return;
- }
-
- /*
- * Waiting threads decrement it outside the external lock, but
- * only if another thread is executing pthread_cond_broadcast and
- * has the mutex. So, it also cannot be decremented concurrently
- * with this particular access.
- */
- cond->target = cond->waiters - 1;
- result = SignalObjectAndWait(cond->sema, cond->continue_event,
- INFINITE, FALSE);
- if (result == WAIT_ABANDONED || result == WAIT_FAILED) {
- error_exit(GetLastError(), __func__);
- }
+ WakeConditionVariable(&cond->var);
}
void qemu_cond_broadcast(QemuCond *cond)
{
- BOOLEAN result;
- /*
- * As in pthread_cond_signal, access to cond->waiters and
- * cond->target is locked via the external mutex.
- */
- if (cond->waiters == 0) {
- return;
- }
-
- cond->target = 0;
- result = ReleaseSemaphore(cond->sema, cond->waiters, NULL);
- if (!result) {
- error_exit(GetLastError(), __func__);
- }
-
- /*
- * At this point all waiters continue. Each one takes its
- * slice of the semaphore. Now it's our turn to wait: Since
- * the external mutex is held, no thread can leave cond_wait,
- * yet. For this reason, we can be sure that no thread gets
- * a chance to eat *more* than one slice. OTOH, it means
- * that the last waiter must send us a wake-up.
- */
- WaitForSingleObject(cond->continue_event, INFINITE);
+ WakeAllConditionVariable(&cond->var);
}
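
The removed comments describe the old semaphore-plus-event handshake: signal and broadcast had to track cond->waiters and cond->target under the external mutex so that each waiter consumed exactly one semaphore slice and the signalling thread waited for the last waiter before continuing. With native condition variables that bookkeeping is done by the OS, so both calls collapse to a single wake. A hedged sketch of how a producer might use the new wrappers (work_lock, work_cond and work_pending are hypothetical names, not from this patch):

    /* Illustrative producer built on the wrappers above: update the predicate
     * under the mutex, then wake one waiter (or all, via qemu_cond_broadcast). */
    #include "qemu/osdep.h"
    #include "qemu/thread.h"

    static QemuMutex work_lock;
    static QemuCond work_cond;
    static bool work_pending;

    static void post_work(void)
    {
        qemu_mutex_lock(&work_lock);
        work_pending = true;
        qemu_mutex_unlock(&work_lock);
        qemu_cond_signal(&work_cond);
    }
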
void qemu_cond_wait(QemuCond *cond, QemuMutex *mutex)
{
- /*
- * This access is protected under the mutex.
- */
- cond->waiters++;
-
- /*
- * Unlock external mutex and wait for signal.
- * NOTE: we've held mutex locked long enough to increment
- * waiters count above, so there's no problem with
- * leaving mutex unlocked before we wait on semaphore.
- */
- qemu_mutex_unlock(mutex);
- WaitForSingleObject(cond->sema, INFINITE);
-
- /* Now waiters must rendez-vous with the signaling thread and
- * let it continue. For cond_broadcast this has heavy contention
- * and triggers thundering herd. So goes life.
- *
- * Decrease waiters count. The mutex is not taken, so we have
- * to do this atomically.
- *
- * All waiters contend for the mutex at the end of this function
- * until the signaling thread relinquishes it. To ensure
- * each waiter consumes exactly one slice of the semaphore,
- * the signaling thread stops until it is told by the last
- * waiter that it can go on.
- */
- if (InterlockedDecrement(&cond->waiters) == cond->target) {
- SetEvent(cond->continue_event);
- }
-
- qemu_mutex_lock(mutex);
+ SleepConditionVariableSRW(&cond->var, &mutex->lock, INFINITE, 0);
}
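
SleepConditionVariableSRW() atomically releases the SRW lock, blocks, and reacquires the lock before returning; the final 0 flag says the lock is held in exclusive mode. This preserves pthread_cond_wait() semantics, and like its pthread counterpart the wait may wake spuriously, so callers still re-check their predicate in a loop. A consumer matching the hypothetical producer sketched above:

    /* Illustrative consumer: always re-test the predicate in a loop, since
     * condition-variable waits can return without a matching signal. */
    static void wait_for_work(void)
    {
        qemu_mutex_lock(&work_lock);
        while (!work_pending) {
            qemu_cond_wait(&work_cond, &work_lock);
        }
        work_pending = false;
        qemu_mutex_unlock(&work_lock);
    }
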
void qemu_sem_init(QemuSemaphore *sem, int init)