Diffstat (limited to 'include')
 include/exec/exec-all.h |  2 ++
 include/exec/memory.h   |  2 --
 include/exec/tb-hash.h  | 12 ++++++++++++
 include/hw/mips/mips.h  |  1 +
 include/qemu/atomic.h   | 34 ++++++++++++++++++++++++++--------
 5 files changed, 41 insertions(+), 10 deletions(-)
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index bcde1e6a14..87ae10bcc9 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -368,6 +368,8 @@ struct TranslationBlock {
void tb_free(TranslationBlock *tb);
void tb_flush(CPUState *cpu);
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
+TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
+ target_ulong cs_base, uint32_t flags);
#if defined(USE_DIRECT_JUMP)
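
The new tb_htable_lookup() declaration is the slow-path counterpart of the
per-CPU jump cache: callers try the direct-mapped cache first and fall back
to the global hash table on a miss. A minimal sketch of such a caller,
assuming a CPUState tb_jmp_cache[] array indexed by tb_jmp_cache_hash_func()
as elsewhere in the tree (lookup_tb itself is a hypothetical name):

static TranslationBlock *lookup_tb(CPUState *cpu, target_ulong pc,
                                   target_ulong cs_base, uint32_t flags)
{
    /* Fast path: per-CPU direct-mapped jump cache. */
    TranslationBlock *tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];

    if (tb == NULL || tb->pc != pc || tb->cs_base != cs_base
        || tb->flags != flags) {
        /* Miss: consult the global translation-block hash table. */
        tb = tb_htable_lookup(cpu, pc, cs_base, flags);
    }
    return tb;
}
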
diff --git a/include/exec/memory.h b/include/exec/memory.h
index bfdc685f24..80e605a96a 100644
--- a/include/exec/memory.h
+++ b/include/exec/memory.h
@@ -17,9 +17,7 @@
#ifndef CONFIG_USER_ONLY
#include "exec/cpu-common.h"
-#ifndef CONFIG_USER_ONLY
#include "exec/hwaddr.h"
-#endif
#include "exec/memattrs.h"
#include "exec/ramlist.h"
#include "qemu/queue.h"
diff --git a/include/exec/tb-hash.h b/include/exec/tb-hash.h
index 2c27490cb8..b1fe2d0161 100644
--- a/include/exec/tb-hash.h
+++ b/include/exec/tb-hash.h
@@ -22,6 +22,8 @@
#include "exec/tb-hash-xx.h"
+#ifdef CONFIG_SOFTMMU
+
/* Only the bottom TB_JMP_PAGE_BITS of the jump cache hash bits vary for
addresses on the same page. The top bits are the same. This allows
TLB invalidation to quickly clear a subset of the hash table. */
@@ -45,6 +47,16 @@ static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
| (tmp & TB_JMP_ADDR_MASK));
}
+#else
+
+/* In user-mode we can get better hashing because we do not have a TLB */
+static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
+{
+ return (pc ^ (pc >> TB_JMP_CACHE_BITS)) & (TB_JMP_CACHE_SIZE - 1);
+}
+
+#endif /* CONFIG_SOFTMMU */
+
static inline
uint32_t tb_hash_func(tb_page_addr_t phys_pc, target_ulong pc, uint32_t flags)
{
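
For reference, the two variants trade different properties: the softmmu hash
keeps the top bits of the index stable for all addresses on a page, so TLB
invalidation can clear one page's worth of jump-cache entries at a stroke,
while the new user-mode hash simply folds the PC down to the table size. A
self-contained sketch, assuming the tree's usual constants (TB_JMP_CACHE_BITS
of 12; a TARGET_PAGE_BITS of 12 is typical but target-dependent):

#include <stdio.h>
#include <stdint.h>

/* Assumed values; in QEMU they come from cpu-defs.h and the target. */
#define TARGET_PAGE_BITS  12
#define TB_JMP_CACHE_BITS 12
#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
#define TB_JMP_PAGE_BITS  (TB_JMP_CACHE_BITS / 2)
#define TB_JMP_PAGE_SIZE  (1 << TB_JMP_PAGE_BITS)
#define TB_JMP_ADDR_MASK  (TB_JMP_PAGE_SIZE - 1)
#define TB_JMP_PAGE_MASK  (TB_JMP_CACHE_SIZE - TB_JMP_PAGE_SIZE)

/* Softmmu variant: only the low TB_JMP_PAGE_BITS of the result vary
 * for addresses within one page. */
static unsigned int hash_softmmu(uint64_t pc)
{
    uint64_t tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
    return (((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK)
            | (tmp & TB_JMP_ADDR_MASK));
}

/* User-mode variant from this patch: no TLB, so just fold the PC. */
static unsigned int hash_user(uint64_t pc)
{
    return (pc ^ (pc >> TB_JMP_CACHE_BITS)) & (TB_JMP_CACHE_SIZE - 1);
}

int main(void)
{
    /* Same page: the softmmu indices share their top bits. */
    printf("softmmu: %#05x %#05x\n",
           hash_softmmu(0x401000), hash_softmmu(0x401040));
    printf("user:    %#05x %#05x\n",
           hash_user(0x401000), hash_user(0x401040));
    return 0;
}
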
diff --git a/include/hw/mips/mips.h b/include/hw/mips/mips.h
index e0065ce808..16412dc150 100644
--- a/include/hw/mips/mips.h
+++ b/include/hw/mips/mips.h
@@ -6,6 +6,7 @@
#define INITRD_PAGE_MASK (~((1 << 16) - 1))
#include "exec/memory.h"
+#include "hw/irq.h"
/* gt64xxx.c */
PCIBus *gt64120_register(qemu_irq *pic);
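
The added include makes the header self-contained: prototypes below it, such
as gt64120_register(), take qemu_irq parameters, and that typedef lives in
hw/irq.h. Reduced to the essential dependency (types stubbed for
illustration, not actual QEMU code):

/* From hw/irq.h: the opaque IRQ handle used throughout hw/. */
typedef struct IRQState *qemu_irq;

typedef struct PCIBus PCIBus;   /* stub for this sketch */

/* Without hw/irq.h, this prototype compiles only if some earlier
 * header happened to pull the typedef in. */
PCIBus *gt64120_register(qemu_irq *pic);
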
diff --git a/include/qemu/atomic.h b/include/qemu/atomic.h
index 878fa0700d..e07c7972ab 100644
--- a/include/qemu/atomic.h
+++ b/include/qemu/atomic.h
@@ -88,6 +88,24 @@
#define smp_read_barrier_depends() barrier()
#endif
+/* Sanity check that the size of an atomic operation isn't "overly large".
+ * Despite the fact that e.g. i686 has 64-bit atomic operations, we do not
+ * want to use them because we ought not need them, and this gives us a
+ * bit of sanity checking for the code other 32-bit hosts must build.
+ *
+ * That said, we have a problem on 64-bit ILP32 hosts in that in order to
+ * sync with TCG_OVERSIZED_GUEST, this must match TCG_TARGET_REG_BITS.
+ * We'd prefer not to pull in everything else TCG related, so handle
+ * those few cases by hand.
+ *
+ * Note that x32 is fully detected with __x86_64__ + _ILP32, and that for
+ * Sparc we always force the use of sparcv9 in configure.
+ */
+#if defined(__x86_64__) || defined(__sparc__)
+# define ATOMIC_REG_SIZE 8
+#else
+# define ATOMIC_REG_SIZE sizeof(void *)
+#endif
/* Weak atomic operations prevent the compiler moving other
* loads/stores past the atomic operation load/store. However there is
@@ -104,7 +122,7 @@
#define atomic_read(ptr) \
({ \
- QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
+ QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
atomic_read__nocheck(ptr); \
})
@@ -112,7 +130,7 @@
__atomic_store_n(ptr, i, __ATOMIC_RELAXED)
#define atomic_set(ptr, i) do { \
- QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
+ QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
atomic_set__nocheck(ptr, i); \
} while(0)
@@ -130,27 +148,27 @@
#define atomic_rcu_read(ptr) \
({ \
- QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
+ QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
typeof_strip_qual(*ptr) _val; \
atomic_rcu_read__nocheck(ptr, &_val); \
_val; \
})
#define atomic_rcu_set(ptr, i) do { \
- QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
+ QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
__atomic_store_n(ptr, i, __ATOMIC_RELEASE); \
} while(0)
#define atomic_load_acquire(ptr) \
({ \
- QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
+ QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
typeof_strip_qual(*ptr) _val; \
__atomic_load(ptr, &_val, __ATOMIC_ACQUIRE); \
_val; \
})
#define atomic_store_release(ptr, i) do { \
- QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
+ QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
__atomic_store_n(ptr, i, __ATOMIC_RELEASE); \
} while(0)
@@ -162,7 +180,7 @@
})
#define atomic_xchg(ptr, i) ({ \
- QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
+ QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
atomic_xchg__nocheck(ptr, i); \
})
@@ -175,7 +193,7 @@
})
#define atomic_cmpxchg(ptr, old, new) ({ \
- QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
+ QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
atomic_cmpxchg__nocheck(ptr, old, new); \
})
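
The net effect of replacing sizeof(void *) with ATOMIC_REG_SIZE in these
checks is that 64-bit-ISA, 32-bit-pointer hosts (x32, sparcv9) may use
8-byte atomics, matching TCG_TARGET_REG_BITS, while genuine 32-bit hosts
still fail at compile time. A self-contained sketch of the mechanism, using
a simplified stand-in for QEMU_BUILD_BUG_ON (the real macro lives in
qemu/compiler.h):

#include <stdint.h>

/* Simplified stand-in: forces a compile error when cond is true by
 * giving an array a negative size. */
#define BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

#if defined(__x86_64__) || defined(__sparc__)
# define ATOMIC_REG_SIZE 8              /* matches TCG_TARGET_REG_BITS */
#else
# define ATOMIC_REG_SIZE sizeof(void *)
#endif

#define atomic_read(ptr) \
    (BUILD_BUG_ON(sizeof(*(ptr)) > ATOMIC_REG_SIZE), \
     __atomic_load_n((ptr), __ATOMIC_RELAXED))

int main(void)
{
    uint64_t counter = 0;

    /* On x32 this now compiles: ATOMIC_REG_SIZE is 8 even though
     * sizeof(void *) is 4. On a plain 32-bit host it still fails,
     * which is the intended sanity check. */
    return (int)atomic_read(&counter);
}
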