author     Peter Maydell <peter.maydell@linaro.org>    2016-02-09 19:34:46 +0000
committer  Peter Maydell <peter.maydell@linaro.org>    2016-02-09 19:34:46 +0000
commit     c9f19dff101e2c2cf3fa3967eceec2833e845e40 (patch)
tree       5bcc3ba8281fc7902d3c99bbbf1a7097384c711b /include
parent     f075c89f0a9cb31daf38892371d2822177505706 (diff)
parent     150dcd1aed6f9ebcf370dbb9b666e7d7c6d908e2 (diff)
Merge remote-tracking branch 'remotes/bonzini/tags/for-upstream' into staging
* switch to C11 atomics (Alex)
* Coverity fixes for IPMI (Corey), i386 (Paolo), qemu-char (Paolo)
* at long last, fail on wrong .pc files if -m32 is in use (Daniel)
* qemu-char regression fix (Daniel)
* SAS1068 device (Paolo)
* memory region docs improvements (Peter)
* target-i386 cleanups (Richard)
* qemu-nbd docs improvements (Sitsofe)
* thread-safe memory hotplug (Stefan)

# gpg: Signature made Tue 09 Feb 2016 16:09:30 GMT using RSA key ID 78C7AE83
# gpg: Good signature from "Paolo Bonzini <bonzini@gnu.org>"
# gpg:                 aka "Paolo Bonzini <pbonzini@redhat.com>"

* remotes/bonzini/tags/for-upstream: (33 commits)
  qemu-char, io: fix ordering of arguments for UDP socket creation
  MAINTAINERS: add all-match entry for qemu-devel@
  get_maintainer.pl: fall back to git if only lists are found
  target-i386: fix PSE36 mode
  docs/memory.txt: Improve list of different memory regions
  ipmi_bmc_sim: Add break to correct watchdog NMI check
  ipmi_bmc_sim: Fix off by one in check.
  ipmi: do not take/drop iothread lock
  target-i386: Deconstruct the cpu_T array
  target-i386: Tidy gen_add_A0_im
  target-i386: Rewrite leave
  target-i386: Rewrite gen_enter inline
  target-i386: Use gen_lea_v_seg in pusha/popa
  target-i386: Access segs via TCG registers
  target-i386: Use gen_lea_v_seg in stack subroutines
  target-i386: Use gen_lea_v_seg in gen_lea_modrm
  target-i386: Introduce mo_stacksize
  target-i386: Create gen_lea_v_seg
  char: fix repeated registration of tcp chardev I/O handlers
  kvm-all: trace: strerror fixup
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'include')
-rw-r--r--   include/exec/ram_addr.h    193
-rw-r--r--   include/hw/pci/pci_ids.h     1
-rw-r--r--   include/hw/scsi/scsi.h       3
-rw-r--r--   include/qemu/atomic.h      192
4 files changed, 304 insertions, 85 deletions
diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h
index 606e277092..b1413a1286 100644
--- a/include/exec/ram_addr.h
+++ b/include/exec/ram_addr.h
@@ -49,13 +49,43 @@ static inline void *ramblock_ptr(RAMBlock *block, ram_addr_t offset)
return (char *)block->host + offset;
}
+/* The dirty memory bitmap is split into fixed-size blocks to allow growth
+ * under RCU. The bitmap for a block can be accessed as follows:
+ *
+ * rcu_read_lock();
+ *
+ * DirtyMemoryBlocks *blocks =
+ * atomic_rcu_read(&ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION]);
+ *
+ * ram_addr_t idx = (addr >> TARGET_PAGE_BITS) / DIRTY_MEMORY_BLOCK_SIZE;
+ * unsigned long *block = blocks->blocks[idx];
+ * ...access block bitmap...
+ *
+ * rcu_read_unlock();
+ *
+ * Remember to check for the end of the block when accessing a range of
+ * addresses. Move on to the next block if you reach the end.
+ *
+ * Organization into blocks allows dirty memory to grow (but not shrink) under
+ * RCU. When adding new RAMBlocks requires the dirty memory to grow, a new
+ * DirtyMemoryBlocks array is allocated with pointers to existing blocks kept
+ * the same. Other threads can safely access existing blocks while dirty
+ * memory is being grown. When no threads are using the old DirtyMemoryBlocks
+ * anymore it is freed by RCU (but the underlying blocks stay because they are
+ * pointed to from the new DirtyMemoryBlocks).
+ */
+#define DIRTY_MEMORY_BLOCK_SIZE ((ram_addr_t)256 * 1024 * 8)
+typedef struct {
+ struct rcu_head rcu;
+ unsigned long *blocks[];
+} DirtyMemoryBlocks;
+
typedef struct RAMList {
QemuMutex mutex;
- /* Protected by the iothread lock. */
- unsigned long *dirty_memory[DIRTY_MEMORY_NUM];
RAMBlock *mru_block;
/* RCU-enabled, writes protected by the ramlist lock. */
QLIST_HEAD(, RAMBlock) blocks;
+ DirtyMemoryBlocks *dirty_memory[DIRTY_MEMORY_NUM];
uint32_t version;
} RAMList;
extern RAMList ram_list;
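
For readers following the new comment, a minimal sketch of the read path it describes, written outside the diff context (it assumes the headers ram_addr.h already pulls in; the helper name page_is_migration_dirty is illustrative, not from the patch):

    static inline bool page_is_migration_dirty(ram_addr_t addr)
    {
        ram_addr_t page = addr >> TARGET_PAGE_BITS;
        unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        DirtyMemoryBlocks *blocks;
        bool dirty;

        rcu_read_lock();
        /* The array of block pointers may be replaced concurrently when RAM
         * grows, so load it once with atomic_rcu_read() and use that copy for
         * the whole access.
         */
        blocks = atomic_rcu_read(&ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION]);
        dirty = test_bit(offset, blocks->blocks[idx]);
        rcu_read_unlock();

        return dirty;
    }

This is essentially the shape of the accessors the following hunks introduce, reduced to a single page and a single dirty-memory client.
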
@@ -89,30 +119,70 @@ static inline bool cpu_physical_memory_get_dirty(ram_addr_t start,
ram_addr_t length,
unsigned client)
{
- unsigned long end, page, next;
+ DirtyMemoryBlocks *blocks;
+ unsigned long end, page;
+ bool dirty = false;
assert(client < DIRTY_MEMORY_NUM);
end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
page = start >> TARGET_PAGE_BITS;
- next = find_next_bit(ram_list.dirty_memory[client], end, page);
- return next < end;
+ rcu_read_lock();
+
+ blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
+
+ while (page < end) {
+ unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
+ unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
+ unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);
+
+ if (find_next_bit(blocks->blocks[idx], offset + num, offset) < offset + num) {
+ dirty = true;
+ break;
+ }
+
+ page += num;
+ }
+
+ rcu_read_unlock();
+
+ return dirty;
}
static inline bool cpu_physical_memory_all_dirty(ram_addr_t start,
ram_addr_t length,
unsigned client)
{
- unsigned long end, page, next;
+ DirtyMemoryBlocks *blocks;
+ unsigned long end, page;
+ bool dirty = true;
assert(client < DIRTY_MEMORY_NUM);
end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
page = start >> TARGET_PAGE_BITS;
- next = find_next_zero_bit(ram_list.dirty_memory[client], end, page);
- return next >= end;
+ rcu_read_lock();
+
+ blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
+
+ while (page < end) {
+ unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
+ unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
+ unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);
+
+ if (find_next_zero_bit(blocks->blocks[idx], offset + num, offset) < offset + num) {
+ dirty = false;
+ break;
+ }
+
+ page += num;
+ }
+
+ rcu_read_unlock();
+
+ return dirty;
}
static inline bool cpu_physical_memory_get_dirty_flag(ram_addr_t addr,
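
A worked example of how the clamping of num keeps each bitmap call inside one block: DIRTY_MEMORY_BLOCK_SIZE is 256 * 1024 * 8 = 2097152 bits, so a single block tracks 2^21 target pages (8 GiB of guest RAM with 4 KiB target pages) in 256 KiB of bitmap memory. A query starting at page 2097000 with a length of 300 pages runs two iterations: first idx = 0, offset = 2097000, num = MIN(300, 152) = 152, then idx = 1, offset = 0, num = 148. No single find_next_bit()/find_next_zero_bit() call ever crosses a block boundary.
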
@@ -154,28 +224,68 @@ static inline uint8_t cpu_physical_memory_range_includes_clean(ram_addr_t start,
static inline void cpu_physical_memory_set_dirty_flag(ram_addr_t addr,
unsigned client)
{
+ unsigned long page, idx, offset;
+ DirtyMemoryBlocks *blocks;
+
assert(client < DIRTY_MEMORY_NUM);
- set_bit_atomic(addr >> TARGET_PAGE_BITS, ram_list.dirty_memory[client]);
+
+ page = addr >> TARGET_PAGE_BITS;
+ idx = page / DIRTY_MEMORY_BLOCK_SIZE;
+ offset = page % DIRTY_MEMORY_BLOCK_SIZE;
+
+ rcu_read_lock();
+
+ blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
+
+ set_bit_atomic(offset, blocks->blocks[idx]);
+
+ rcu_read_unlock();
}
static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
ram_addr_t length,
uint8_t mask)
{
+ DirtyMemoryBlocks *blocks[DIRTY_MEMORY_NUM];
unsigned long end, page;
- unsigned long **d = ram_list.dirty_memory;
+ int i;
+
+ if (!mask && !xen_enabled()) {
+ return;
+ }
end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
page = start >> TARGET_PAGE_BITS;
- if (likely(mask & (1 << DIRTY_MEMORY_MIGRATION))) {
- bitmap_set_atomic(d[DIRTY_MEMORY_MIGRATION], page, end - page);
- }
- if (unlikely(mask & (1 << DIRTY_MEMORY_VGA))) {
- bitmap_set_atomic(d[DIRTY_MEMORY_VGA], page, end - page);
+
+ rcu_read_lock();
+
+ for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
+ blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i]);
}
- if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
- bitmap_set_atomic(d[DIRTY_MEMORY_CODE], page, end - page);
+
+ while (page < end) {
+ unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
+ unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
+ unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);
+
+ if (likely(mask & (1 << DIRTY_MEMORY_MIGRATION))) {
+ bitmap_set_atomic(blocks[DIRTY_MEMORY_MIGRATION]->blocks[idx],
+ offset, num);
+ }
+ if (unlikely(mask & (1 << DIRTY_MEMORY_VGA))) {
+ bitmap_set_atomic(blocks[DIRTY_MEMORY_VGA]->blocks[idx],
+ offset, num);
+ }
+ if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
+ bitmap_set_atomic(blocks[DIRTY_MEMORY_CODE]->blocks[idx],
+ offset, num);
+ }
+
+ page += num;
}
+
+ rcu_read_unlock();
+
xen_modified_memory(start, length);
}
@@ -195,21 +305,41 @@ static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
/* start address is aligned at the start of a word? */
if ((((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) &&
(hpratio == 1)) {
+ unsigned long **blocks[DIRTY_MEMORY_NUM];
+ unsigned long idx;
+ unsigned long offset;
long k;
long nr = BITS_TO_LONGS(pages);
+ idx = (start >> TARGET_PAGE_BITS) / DIRTY_MEMORY_BLOCK_SIZE;
+ offset = BIT_WORD((start >> TARGET_PAGE_BITS) %
+ DIRTY_MEMORY_BLOCK_SIZE);
+
+ rcu_read_lock();
+
+ for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
+ blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i])->blocks;
+ }
+
for (k = 0; k < nr; k++) {
if (bitmap[k]) {
unsigned long temp = leul_to_cpu(bitmap[k]);
- unsigned long **d = ram_list.dirty_memory;
- atomic_or(&d[DIRTY_MEMORY_MIGRATION][page + k], temp);
- atomic_or(&d[DIRTY_MEMORY_VGA][page + k], temp);
+ atomic_or(&blocks[DIRTY_MEMORY_MIGRATION][idx][offset], temp);
+ atomic_or(&blocks[DIRTY_MEMORY_VGA][idx][offset], temp);
if (tcg_enabled()) {
- atomic_or(&d[DIRTY_MEMORY_CODE][page + k], temp);
+ atomic_or(&blocks[DIRTY_MEMORY_CODE][idx][offset], temp);
}
}
+
+ if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
+ offset = 0;
+ idx++;
+ }
}
+
+ rcu_read_unlock();
+
xen_modified_memory(start, pages << TARGET_PAGE_BITS);
} else {
uint8_t clients = tcg_enabled() ? DIRTY_CLIENTS_ALL : DIRTY_CLIENTS_NOCODE;
@@ -261,18 +391,33 @@ uint64_t cpu_physical_memory_sync_dirty_bitmap(unsigned long *dest,
if (((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) {
int k;
int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
- unsigned long *src = ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION];
+ unsigned long * const *src;
+ unsigned long idx = (page * BITS_PER_LONG) / DIRTY_MEMORY_BLOCK_SIZE;
+ unsigned long offset = BIT_WORD((page * BITS_PER_LONG) %
+ DIRTY_MEMORY_BLOCK_SIZE);
+
+ rcu_read_lock();
+
+ src = atomic_rcu_read(
+ &ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION])->blocks;
for (k = page; k < page + nr; k++) {
- if (src[k]) {
- unsigned long bits = atomic_xchg(&src[k], 0);
+ if (src[idx][offset]) {
+ unsigned long bits = atomic_xchg(&src[idx][offset], 0);
unsigned long new_dirty;
new_dirty = ~dest[k];
dest[k] |= bits;
new_dirty &= bits;
num_dirty += ctpopl(new_dirty);
}
+
+ if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
+ offset = 0;
+ idx++;
+ }
}
+
+ rcu_read_unlock();
} else {
for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
if (cpu_physical_memory_test_and_clear_dirty(
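
The growth path that the header comment refers to is not part of this include-only diffstat; it lives on the exec.c side of the series. Sketched from that description and heavily simplified (function name, page-count arguments and exact shape are illustrative, not taken from the patch), it looks roughly like this:

    static void grow_dirty_memory(ram_addr_t old_num_pages, ram_addr_t new_num_pages)
    {
        ram_addr_t old_num_blocks = DIV_ROUND_UP(old_num_pages, DIRTY_MEMORY_BLOCK_SIZE);
        ram_addr_t new_num_blocks = DIV_ROUND_UP(new_num_pages, DIRTY_MEMORY_BLOCK_SIZE);
        int i;

        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
            DirtyMemoryBlocks *old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]);
            DirtyMemoryBlocks *new_blocks =
                g_malloc(sizeof(*new_blocks) +
                         sizeof(new_blocks->blocks[0]) * new_num_blocks);
            ram_addr_t j;

            /* Existing block pointers are carried over unchanged, so readers
             * holding the old array keep seeing valid bitmaps.
             */
            if (old_blocks) {
                memcpy(new_blocks->blocks, old_blocks->blocks,
                       old_num_blocks * sizeof(old_blocks->blocks[0]));
            }
            /* New address range gets freshly allocated, zeroed bitmaps. */
            for (j = old_num_blocks; j < new_num_blocks; j++) {
                new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
            }

            atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);
            if (old_blocks) {
                /* The old array is reclaimed only after current readers finish. */
                g_free_rcu(old_blocks, rcu);
            }
        }
    }

This is why the comment says dirty memory can grow but not shrink: only the array of pointers is replaced, and the underlying bitmaps stay in place.
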
diff --git a/include/hw/pci/pci_ids.h b/include/hw/pci/pci_ids.h
index d98e6c915d..db85afa03e 100644
--- a/include/hw/pci/pci_ids.h
+++ b/include/hw/pci/pci_ids.h
@@ -64,6 +64,7 @@
#define PCI_VENDOR_ID_LSI_LOGIC 0x1000
#define PCI_DEVICE_ID_LSI_53C810 0x0001
#define PCI_DEVICE_ID_LSI_53C895A 0x0012
+#define PCI_DEVICE_ID_LSI_SAS1068 0x0054
#define PCI_DEVICE_ID_LSI_SAS1078 0x0060
#define PCI_DEVICE_ID_LSI_SAS0079 0x0079
diff --git a/include/hw/scsi/scsi.h b/include/hw/scsi/scsi.h
index 1915a7342e..29052f81a5 100644
--- a/include/hw/scsi/scsi.h
+++ b/include/hw/scsi/scsi.h
@@ -108,6 +108,8 @@ struct SCSIDevice
int blocksize;
int type;
uint64_t max_lba;
+ uint64_t wwn;
+ uint64_t port_wwn;
};
extern const VMStateDescription vmstate_scsi_device;
@@ -271,6 +273,7 @@ void scsi_device_purge_requests(SCSIDevice *sdev, SCSISense sense);
void scsi_device_set_ua(SCSIDevice *sdev, SCSISense sense);
void scsi_device_report_change(SCSIDevice *dev, SCSISense sense);
void scsi_device_unit_attention_reported(SCSIDevice *dev);
+void scsi_generic_read_device_identification(SCSIDevice *dev);
int scsi_device_get_sense(SCSIDevice *dev, uint8_t *buf, int len, bool fixed);
SCSIDevice *scsi_device_find(SCSIBus *bus, int channel, int target, int lun);
diff --git a/include/qemu/atomic.h b/include/qemu/atomic.h
index bd2c075343..05b447c728 100644
--- a/include/qemu/atomic.h
+++ b/include/qemu/atomic.h
@@ -8,6 +8,8 @@
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*
+ * See docs/atomics.txt for discussion about the guarantees each
+ * atomic primitive is meant to provide.
*/
#ifndef __QEMU_ATOMIC_H
@@ -15,12 +17,130 @@
#include "qemu/compiler.h"
-/* For C11 atomic ops */
/* Compiler barrier */
#define barrier() ({ asm volatile("" ::: "memory"); (void)0; })
-#ifndef __ATOMIC_RELAXED
+#ifdef __ATOMIC_RELAXED
+/* For C11 atomic ops */
+
+/* Manual memory barriers
+ *
+ *__atomic_thread_fence does not include a compiler barrier; instead,
+ * the barrier is part of __atomic_load/__atomic_store's "volatile-like"
+ * semantics. If smp_wmb() is a no-op, absence of the barrier means that
+ * the compiler is free to reorder stores on each side of the barrier.
+ * Add one here, and similarly in smp_rmb() and smp_read_barrier_depends().
+ */
+
+#define smp_mb() ({ barrier(); __atomic_thread_fence(__ATOMIC_SEQ_CST); barrier(); })
+#define smp_wmb() ({ barrier(); __atomic_thread_fence(__ATOMIC_RELEASE); barrier(); })
+#define smp_rmb() ({ barrier(); __atomic_thread_fence(__ATOMIC_ACQUIRE); barrier(); })
+
+#define smp_read_barrier_depends() ({ barrier(); __atomic_thread_fence(__ATOMIC_CONSUME); barrier(); })
+
+/* Weak atomic operations prevent the compiler moving other
+ * loads/stores past the atomic operation load/store. However there is
+ * no explicit memory barrier for the processor.
+ */
+#define atomic_read(ptr) \
+ ({ \
+ typeof(*ptr) _val; \
+ __atomic_load(ptr, &_val, __ATOMIC_RELAXED); \
+ _val; \
+ })
+
+#define atomic_set(ptr, i) do { \
+ typeof(*ptr) _val = (i); \
+ __atomic_store(ptr, &_val, __ATOMIC_RELAXED); \
+} while(0)
+
+/* Atomic RCU operations imply weak memory barriers */
+
+#define atomic_rcu_read(ptr) \
+ ({ \
+ typeof(*ptr) _val; \
+ __atomic_load(ptr, &_val, __ATOMIC_CONSUME); \
+ _val; \
+ })
+
+#define atomic_rcu_set(ptr, i) do { \
+ typeof(*ptr) _val = (i); \
+ __atomic_store(ptr, &_val, __ATOMIC_RELEASE); \
+} while(0)
+
+/* atomic_mb_read/set semantics map to Java volatile variables. They are
+ * less expensive on some platforms (notably POWER & ARMv7) than fully
+ * sequentially consistent operations.
+ *
+ * As long as they are used as paired operations they are safe to
+ * use. See docs/atomics.txt for more discussion.
+ */
+
+#if defined(_ARCH_PPC)
+#define atomic_mb_read(ptr) \
+ ({ \
+ typeof(*ptr) _val; \
+ __atomic_load(ptr, &_val, __ATOMIC_RELAXED); \
+ smp_rmb(); \
+ _val; \
+ })
+
+#define atomic_mb_set(ptr, i) do { \
+ typeof(*ptr) _val = (i); \
+ smp_wmb(); \
+ __atomic_store(ptr, &_val, __ATOMIC_RELAXED); \
+ smp_mb(); \
+} while(0)
+#else
+#define atomic_mb_read(ptr) \
+ ({ \
+ typeof(*ptr) _val; \
+ __atomic_load(ptr, &_val, __ATOMIC_SEQ_CST); \
+ _val; \
+ })
+
+#define atomic_mb_set(ptr, i) do { \
+ typeof(*ptr) _val = (i); \
+ __atomic_store(ptr, &_val, __ATOMIC_SEQ_CST); \
+} while(0)
+#endif
+
+
+/* All the remaining operations are fully sequentially consistent */
+
+#define atomic_xchg(ptr, i) ({ \
+ typeof(*ptr) _new = (i), _old; \
+ __atomic_exchange(ptr, &_new, &_old, __ATOMIC_SEQ_CST); \
+ _old; \
+})
+
+/* Returns the eventual value, failed or not */
+#define atomic_cmpxchg(ptr, old, new) \
+ ({ \
+ typeof(*ptr) _old = (old), _new = (new); \
+ __atomic_compare_exchange(ptr, &_old, &_new, false, \
+ __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); \
+ _old; \
+ })
+
+/* Provide shorter names for GCC atomic builtins, return old value */
+#define atomic_fetch_inc(ptr) __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST)
+#define atomic_fetch_dec(ptr) __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST)
+#define atomic_fetch_add(ptr, n) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST)
+#define atomic_fetch_sub(ptr, n) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST)
+#define atomic_fetch_and(ptr, n) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST)
+#define atomic_fetch_or(ptr, n) __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST)
+
+/* And even shorter names that return void. */
+#define atomic_inc(ptr) ((void) __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST))
+#define atomic_dec(ptr) ((void) __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST))
+#define atomic_add(ptr, n) ((void) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST))
+#define atomic_sub(ptr, n) ((void) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST))
+#define atomic_and(ptr, n) ((void) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST))
+#define atomic_or(ptr, n) ((void) __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST))
+
+#else /* __ATOMIC_RELAXED */
/*
* We use GCC builtin if it's available, as that can use mfence on
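
The C11 definitions added above are meant to be used in pairs. A minimal sketch of the publish/consume pairing of atomic_rcu_set() and atomic_rcu_read() (the names Snapshot, current_snapshot, publish and read_len are illustrative; reclaiming the old object would additionally need call_rcu/synchronize_rcu, which is outside this sketch):

    typedef struct {
        int len;
        int data[16];
    } Snapshot;

    static Snapshot *current_snapshot;

    /* Writer: fill in the structure completely, then publish the pointer.
     * __ATOMIC_RELEASE keeps the initializing stores before the pointer store.
     */
    static void publish(Snapshot *s)
    {
        atomic_rcu_set(&current_snapshot, s);
    }

    /* Reader: __ATOMIC_CONSUME keeps dereferences of the pointer after the
     * pointer load, so a non-NULL snapshot is always seen fully initialized.
     */
    static int read_len(void)
    {
        Snapshot *s = atomic_rcu_read(&current_snapshot);

        return s ? s->len : 0;
    }
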
@@ -85,8 +205,6 @@
#endif /* _ARCH_PPC */
-#endif /* C11 atomics */
-
/*
* For (host) platforms we don't have explicit barrier definitions
* for, we use the gcc __sync_synchronize() primitive to generate a
@@ -98,42 +216,22 @@
#endif
#ifndef smp_wmb
-#ifdef __ATOMIC_RELEASE
-/* __atomic_thread_fence does not include a compiler barrier; instead,
- * the barrier is part of __atomic_load/__atomic_store's "volatile-like"
- * semantics. If smp_wmb() is a no-op, absence of the barrier means that
- * the compiler is free to reorder stores on each side of the barrier.
- * Add one here, and similarly in smp_rmb() and smp_read_barrier_depends().
- */
-#define smp_wmb() ({ barrier(); __atomic_thread_fence(__ATOMIC_RELEASE); barrier(); })
-#else
#define smp_wmb() __sync_synchronize()
#endif
-#endif
#ifndef smp_rmb
-#ifdef __ATOMIC_ACQUIRE
-#define smp_rmb() ({ barrier(); __atomic_thread_fence(__ATOMIC_ACQUIRE); barrier(); })
-#else
#define smp_rmb() __sync_synchronize()
#endif
-#endif
#ifndef smp_read_barrier_depends
-#ifdef __ATOMIC_CONSUME
-#define smp_read_barrier_depends() ({ barrier(); __atomic_thread_fence(__ATOMIC_CONSUME); barrier(); })
-#else
#define smp_read_barrier_depends() barrier()
#endif
-#endif
-#ifndef atomic_read
+/* These will only be atomic if the processor does the fetch or store
+ * in a single issue memory operation
+ */
#define atomic_read(ptr) (*(__typeof__(*ptr) volatile*) (ptr))
-#endif
-
-#ifndef atomic_set
#define atomic_set(ptr, i) ((*(__typeof__(*ptr) volatile*) (ptr)) = (i))
-#endif
/**
* atomic_rcu_read - reads a RCU-protected pointer to a local variable
@@ -146,30 +244,18 @@
* Inserts memory barriers on architectures that require them (currently only
* Alpha) and documents which pointers are protected by RCU.
*
- * Unless the __ATOMIC_CONSUME memory order is available, atomic_rcu_read also
- * includes a compiler barrier to ensure that value-speculative optimizations
- * (e.g. VSS: Value Speculation Scheduling) does not perform the data read
- * before the pointer read by speculating the value of the pointer. On new
- * enough compilers, atomic_load takes care of such concern about
- * dependency-breaking optimizations.
+ * atomic_rcu_read also includes a compiler barrier to ensure that
+ * value-speculative optimizations (e.g. VSS: Value Speculation
+ * Scheduling) do not perform the data read before the pointer read
+ * by speculating the value of the pointer.
*
* Should match atomic_rcu_set(), atomic_xchg(), atomic_cmpxchg().
*/
-#ifndef atomic_rcu_read
-#ifdef __ATOMIC_CONSUME
-#define atomic_rcu_read(ptr) ({ \
- typeof(*ptr) _val; \
- __atomic_load(ptr, &_val, __ATOMIC_CONSUME); \
- _val; \
-})
-#else
#define atomic_rcu_read(ptr) ({ \
typeof(*ptr) _val = atomic_read(ptr); \
smp_read_barrier_depends(); \
_val; \
})
-#endif
-#endif
/**
* atomic_rcu_set - assigns (publicizes) a pointer to a new data structure
@@ -182,19 +268,10 @@
*
* Should match atomic_rcu_read().
*/
-#ifndef atomic_rcu_set
-#ifdef __ATOMIC_RELEASE
-#define atomic_rcu_set(ptr, i) do { \
- typeof(*ptr) _val = (i); \
- __atomic_store(ptr, &_val, __ATOMIC_RELEASE); \
-} while(0)
-#else
#define atomic_rcu_set(ptr, i) do { \
smp_wmb(); \
atomic_set(ptr, i); \
} while (0)
-#endif
-#endif
/* These have the same semantics as Java volatile variables.
* See http://gee.cs.oswego.edu/dl/jmm/cookbook.html:
@@ -218,13 +295,11 @@
* (see docs/atomics.txt), and I'm not sure that __ATOMIC_ACQ_REL is enough.
* Just always use the barriers manually by the rules above.
*/
-#ifndef atomic_mb_read
#define atomic_mb_read(ptr) ({ \
typeof(*ptr) _val = atomic_read(ptr); \
smp_rmb(); \
_val; \
})
-#endif
#ifndef atomic_mb_set
#define atomic_mb_set(ptr, i) do { \
@@ -237,12 +312,6 @@
#ifndef atomic_xchg
#if defined(__clang__)
#define atomic_xchg(ptr, i) __sync_swap(ptr, i)
-#elif defined(__ATOMIC_SEQ_CST)
-#define atomic_xchg(ptr, i) ({ \
- typeof(*ptr) _new = (i), _old; \
- __atomic_exchange(ptr, &_new, &_old, __ATOMIC_SEQ_CST); \
- _old; \
-})
#else
/* __sync_lock_test_and_set() is documented to be an acquire barrier only. */
#define atomic_xchg(ptr, i) (smp_mb(), __sync_lock_test_and_set(ptr, i))
@@ -266,4 +335,5 @@
#define atomic_and(ptr, n) ((void) __sync_fetch_and_and(ptr, n))
#define atomic_or(ptr, n) ((void) __sync_fetch_and_or(ptr, n))
-#endif
+#endif /* __ATOMIC_RELAXED */
+#endif /* __QEMU_ATOMIC_H */
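
Finally, a sketch of the paired use that the "Java volatile" comments in both halves of the header describe, valid with either the C11 or the legacy definitions (the flag and value names are illustrative):

    static int request_value;
    static int request_ready;

    /* Producer: the plain store to request_value is ordered before the
     * atomic_mb_set() of the flag.
     */
    static void post_request(int v)
    {
        request_value = v;
        atomic_mb_set(&request_ready, 1);
    }

    /* Consumer: if atomic_mb_read() observes the flag as set, the paired
     * barriers guarantee the store to request_value is visible as well.
     */
    static int poll_request(void)
    {
        if (atomic_mb_read(&request_ready)) {
            return request_value;
        }
        return -1;
    }
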