Diffstat (limited to 'qemu-barrier.h')
-rw-r--r--  qemu-barrier.h | 23 ++++++++++++++++++++---
 1 file changed, 20 insertions(+), 3 deletions(-)
diff --git a/qemu-barrier.h b/qemu-barrier.h
index c11bb2b59f..f0b842e5b2 100644
--- a/qemu-barrier.h
+++ b/qemu-barrier.h
@@ -4,7 +4,7 @@
 /* Compiler barrier */
 #define barrier()   asm volatile("" ::: "memory")
 
-#if defined(__i386__) || defined(__x86_64__)
+#if defined(__i386__)
 
 /*
  * Because of the strongly ordered x86 storage model, wmb() is a nop
@@ -13,15 +13,31 @@
  * load/stores from C code.
  */
 #define smp_wmb()   barrier()
+/*
+ * We use GCC builtin if it's available, as that can use
+ * mfence on 32 bit as well, e.g. if built with -march=pentium-m.
+ * However, on i386, there seem to be known bugs as recently as 4.3.
+ * */
+#if defined(__GNUC__) && __GNUC__ >= 4 && __GNUC_MINOR__ >= 4
+#define smp_mb() __sync_synchronize()
+#else
+#define smp_mb() asm volatile("lock; addl $0,0(%%esp) " ::: "memory")
+#endif
+
+#elif defined(__x86_64__)
+
+#define smp_wmb()   barrier()
+#define smp_mb() asm volatile("mfence" ::: "memory")
 
 #elif defined(_ARCH_PPC)
 
 /*
- * We use an eieio() for a wmb() on powerpc.  This assumes we don't
+ * We use an eieio() for wmb() on powerpc.  This assumes we don't
  * need to order cacheable and non-cacheable stores with respect to
  * each other
  */
 #define smp_wmb()   asm volatile("eieio" ::: "memory")
+#define smp_mb()   asm volatile("sync" ::: "memory")
 
 #else
 
@@ -29,9 +45,10 @@
 * For (host) platforms we don't have explicit barrier definitions
 * for, we use the gcc __sync_synchronize() primitive to generate a
 * full barrier.  This should be safe on all platforms, though it may
- * be overkill.
+ * be overkill for wmb().
 */
 #define smp_wmb()   __sync_synchronize()
+#define smp_mb()   __sync_synchronize()
 
 #endif
 
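The split between smp_wmb() and the new smp_mb() matters because x86 only keeps stores ordered with respect to other stores; it may still reorder a store with a *later load*. A minimal, self-contained sketch of that store/load pattern follows; it is not part of the patch, the macros are copied from the x86-64 branch above, and names such as req_pending and peer_sleeping are invented for illustration.

/* barrier-demo.c: hypothetical sketch, x86-64 only (uses mfence).
 * Build with: gcc -O2 barrier-demo.c
 */
#include <stdio.h>

#define barrier()  asm volatile("" ::: "memory")
#define smp_wmb()  barrier()                            /* x86: store/store already ordered */
#define smp_mb()   asm volatile("mfence" ::: "memory")  /* also orders store/load            */

static volatile int req_pending;   /* set by the producer          */
static volatile int peer_sleeping; /* set by the consumer          */

/* Producer: publish a request, then decide whether to wake the
 * consumer.  The store to req_pending must be globally visible
 * before the load of peer_sleeping; otherwise each side can miss
 * the other's update and the consumer sleeps forever.  Since x86
 * permits store/load reordering, the compiler-only smp_wmb() would
 * not be enough here; a full smp_mb() is required.
 */
static int publish_and_check(void)
{
    req_pending = 1;
    smp_mb();              /* order the store above against the load below */
    return peer_sleeping;  /* nonzero: the consumer needs a wakeup         */
}

int main(void)
{
    peer_sleeping = 1;
    printf("need wakeup: %d\n", publish_and_check());
    return 0;
}

On i386 the patch obtains the same store/load ordering either from __sync_synchronize() or from a locked add to the top of the stack; the locked-instruction fallback matters because mfence only exists on CPUs with SSE2.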