author     Paolo Bonzini <pbonzini@redhat.com>    2016-09-19 11:27:46 +0200
committer  Paolo Bonzini <pbonzini@redhat.com>    2016-10-24 11:30:56 +0200
commit     e11131b02584ddea93deef1caad8e4f945dd7340 (patch)
tree       9c984ecb69f30561b5da12d4a3b16deecc94f4c5
parent     374293ca6fb060f0302e5ca76ddccbc2bbb075c9 (diff)
rcu: simplify memory barriers
Thanks to the acquire semantics of qemu_event_reset and qemu_event_wait,
some memory barriers can be removed.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-rw-r--r--  util/rcu.c | 11 +++++------
1 file changed, 5 insertions(+), 6 deletions(-)
diff --git a/util/rcu.c b/util/rcu.c
index bceb3e4720..9adc5e4a36 100644
--- a/util/rcu.c
+++ b/util/rcu.c
@@ -82,14 +82,16 @@ static void wait_for_readers(void)
         /* Instead of using atomic_mb_set for index->waiting, and
          * atomic_mb_read for index->ctr, memory barriers are placed
          * manually since writes to different threads are independent.
-         * atomic_mb_set has a smp_wmb before...
+         * qemu_event_reset has acquire semantics, so no memory barrier
+         * is needed here.
          */
-        smp_wmb();
         QLIST_FOREACH(index, &registry, node) {
             atomic_set(&index->waiting, true);
         }
 
-        /* ... and a smp_mb after.  */
+        /* Here, order the stores to index->waiting before the
+         * loads of index->ctr.
+         */
         smp_mb();
 
         QLIST_FOREACH_SAFE(index, &registry, node, tmp) {
@@ -104,9 +106,6 @@ static void wait_for_readers(void)
             }
         }
 
-        /* atomic_mb_read has smp_rmb after.  */
-        smp_rmb();
-
         if (QLIST_EMPTY(&registry)) {
             break;
         }
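
The reasoning in the commit message can be illustrated with a standalone sketch. The program below is not QEMU code: event_reset/event_wait/event_set, waiting and ctr are hypothetical stand-ins that only model the acquire/release semantics of qemu_event_reset/qemu_event_wait/qemu_event_set and the per-reader index->waiting/index->ctr fields. It shows why the acquire operations make the dropped smp_wmb()/smp_rmb() redundant, and why the remaining smp_mb() cannot be dropped: acquire/release ordering never orders an earlier store before a later load, so the store to waiting and the load of ctr still need a full barrier.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool event_flag = false;  /* models the QemuEvent state       */
static atomic_bool waiting    = false;  /* models index->waiting            */
static atomic_int  ctr        = 1;      /* models index->ctr, 1 = reader is
                                           still inside a critical section  */

/* Acquire semantics, like qemu_event_reset(): the stores that follow cannot
 * be reordered before it, which is why no separate smp_wmb() is needed
 * before the stores to waiting. */
static void event_reset(void)
{
    atomic_exchange_explicit(&event_flag, false, memory_order_acquire);
}

/* Acquire semantics, like qemu_event_wait(): loads issued after the wait
 * returns are ordered after it, which the patch relies on in place of the
 * dropped smp_rmb(). */
static void event_wait(void)
{
    while (!atomic_load_explicit(&event_flag, memory_order_acquire)) {
        /* busy-wait; the real QemuEvent blocks in the kernel */
    }
}

/* Release semantics, like qemu_event_set(). */
static void event_set(void)
{
    atomic_store_explicit(&event_flag, true, memory_order_release);
}

/* Reader: leave the critical section, then wake the writer if it announced
 * that it is waiting. */
static void *reader(void *arg)
{
    (void)arg;
    atomic_store_explicit(&ctr, 0, memory_order_relaxed);
    atomic_thread_fence(memory_order_seq_cst);   /* pairs with the writer's
                                                    full barrier            */
    if (atomic_load_explicit(&waiting, memory_order_relaxed)) {
        atomic_store_explicit(&waiting, false, memory_order_relaxed);
        event_set();
    }
    return NULL;
}

/* Writer: the shape of wait_for_readers() after the patch. */
int main(void)
{
    pthread_t tid;

    event_reset();                               /* acquire                 */
    atomic_store_explicit(&waiting, true, memory_order_relaxed);

    /* The one barrier the patch keeps: order the store to waiting before
     * the load of ctr.  Only a full barrier can give store-to-load order. */
    atomic_thread_fence(memory_order_seq_cst);

    pthread_create(&tid, NULL, reader, NULL);

    if (atomic_load_explicit(&ctr, memory_order_relaxed) != 0) {
        event_wait();                            /* acquire                 */
    }
    printf("reader is quiescent\n");

    pthread_join(tid, NULL);
    return 0;
}

The two full fences pair up in the classic store-buffering way: either the writer's load of ctr sees the reader's store of 0, or the reader's load of waiting sees true and signals the event. Without the full barrier both loads could observe stale values and the writer could wait forever, which is why smp_mb() survives the cleanup while smp_wmb() and smp_rmb() do not.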