Diffstat (limited to 'qemu-barrier.h')
-rw-r--r--  qemu-barrier.h | 14 ++++++++++++--
1 file changed, 12 insertions, 2 deletions
diff --git a/qemu-barrier.h b/qemu-barrier.h
index f0b842e5b2..7e11197814 100644
--- a/qemu-barrier.h
+++ b/qemu-barrier.h
@@ -7,12 +7,13 @@
 #if defined(__i386__)
 
 /*
- * Because of the strongly ordered x86 storage model, wmb() is a nop
+ * Because of the strongly ordered x86 storage model, wmb() and rmb() are nops
  * on x86(well, a compiler barrier only). Well, at least as long as
  * qemu doesn't do accesses to write-combining memory or non-temporal
  * load/stores from C code.
  */
 #define smp_wmb() barrier()
+#define smp_rmb() barrier()
 /*
  * We use GCC builtin if it's available, as that can use
  * mfence on 32 bit as well, e.g. if built with -march=pentium-m.
@@ -27,6 +28,7 @@
 #elif defined(__x86_64__)
 
 #define smp_wmb() barrier()
+#define smp_rmb() barrier()
 #define smp_mb() asm volatile("mfence" ::: "memory")
 
 #elif defined(_ARCH_PPC)
@@ -37,6 +39,13 @@
  * each other
  */
 #define smp_wmb() asm volatile("eieio" ::: "memory")
+
+#if defined(__powerpc64__)
+#define smp_rmb() asm volatile("lwsync" ::: "memory")
+#else
+#define smp_rmb() asm volatile("sync" ::: "memory")
+#endif
+
 #define smp_mb() asm volatile("sync" ::: "memory")
 
 #else
@@ -45,10 +54,11 @@
  * For (host) platforms we don't have explicit barrier definitions
  * for, we use the gcc __sync_synchronize() primitive to generate a
  * full barrier. This should be safe on all platforms, though it may
- * be overkill for wmb().
+ * be overkill for wmb() and rmb().
  */
 #define smp_wmb() __sync_synchronize()
 #define smp_mb() __sync_synchronize()
+#define smp_rmb() __sync_synchronize()
 
 #endif
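
The reason smp_rmb() has to exist alongside the existing smp_wmb() is the usual
publish/consume pairing: a writer orders its data stores before the store that
announces availability, and a reader orders the announcing load before the
dependent data loads. The sketch below is not taken from the QEMU tree; the
publish()/consume() names are hypothetical, and the macro definitions reproduce
only the x86 branch of the header (plain compiler barriers) so the file stands
alone.

    /* Minimal sketch of the smp_wmb()/smp_rmb() pairing, assuming the x86
     * definitions from qemu-barrier.h; other hosts would emit real fences. */
    #include <stdbool.h>

    #define barrier()   asm volatile("" ::: "memory")
    #define smp_wmb()   barrier()   /* x86: stores are already ordered */
    #define smp_rmb()   barrier()   /* x86: loads are already ordered  */

    static int payload;
    static volatile bool ready;

    /* Producer: make the payload visible before the flag that announces it. */
    static void publish(int value)
    {
        payload = value;
        smp_wmb();          /* order the data store before the flag store */
        ready = true;
    }

    /* Consumer: observe the flag first, then read the payload it guards. */
    static int consume(void)
    {
        while (!ready) {
            /* spin */
        }
        smp_rmb();          /* order the flag load before the data load */
        return payload;
    }

    int main(void)
    {
        publish(42);
        return consume() == 42 ? 0 : 1;
    }

On a PPC host the same pattern would compile to eieio on the write side and
lwsync (or sync on 32-bit) on the read side, which is why the patch adds
per-architecture smp_rmb() definitions rather than relying on the
__sync_synchronize() fallback everywhere.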