author     Aurelien Jarno <aurelien@aurel32.net>    2013-09-03 01:35:43 +0200
committer  Aurelien Jarno <aurelien@aurel32.net>    2013-09-03 01:35:43 +0200
commit     545825d4cda03ea292b7788b3401b99860efe8bc (patch)
tree       22983d4bcd8f48fb38561d241ac5e2d7e51e5a22 /include
parent     32f3bd6d4d6d6f835cbc2b9241fe8c32d2898d73 (diff)
parent     6fb5874590589585cdcad4ca2431d9d8d4d491b1 (diff)
Merge branch 'tcg-next' of git://github.com/rth7680/qemu
* 'tcg-next' of git://github.com/rth7680/qemu: (29 commits)
  tcg-i386: Make use of zero-extended memory helper routines
  tcg: Introduce zero and sign-extended versions of load helpers
  exec: Split softmmu_defs.h
  target: Include softmmu_exec.h where forgotten
  exec: Rename USUFFIX to LSUFFIX
  tcg-i386: Don't perform GETPC adjustment in TCG code
  exec: Reorganize the GETRA/GETPC macros
  configure: Allow x32 as a host
  tcg-i386: Adjust tcg_out_tlb_load for x32
  tcg-i386: Use intptr_t appropriately
  tcg: Fix jit debug for x32
  tcg: Use appropriate types in tcg_reg_alloc_call
  tcg: Change tcg_out_ld/st offset to intptr_t
  tcg: Change tcg_gen_exit_tb argument to uintptr_t
  tcg: Use uintptr_t in TCGHelperInfo
  tcg: Change relocation offsets to intptr_t
  tcg: Change memory offsets to intptr_t
  tcg: Change frame pointer offsets to intptr_t
  tcg: Define TCG_ptr properly
  tcg: Define TCG_TYPE_PTR properly
  ...
Diffstat (limited to 'include')
-rw-r--r--   include/exec/exec-all.h          89
-rw-r--r--   include/exec/gen-icount.h         4
-rw-r--r--   include/exec/softmmu_defs.h      49
-rw-r--r--   include/exec/softmmu_exec.h       3
-rw-r--r--   include/exec/softmmu_template.h  88
5 files changed, 113 insertions, 120 deletions
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index ffb69a4c70..beb41491b4 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -295,47 +295,42 @@ static inline void tb_add_jump(TranslationBlock *tb, int n,
}
}
-/* The return address may point to the start of the next instruction.
- Subtracting one gets us the call instruction itself. */
+/* GETRA is the true target of the return instruction that we'll execute,
+ defined here for simplicity of defining the follow-up macros. */
#if defined(CONFIG_TCG_INTERPRETER)
extern uintptr_t tci_tb_ptr;
-# define GETPC() tci_tb_ptr
-#elif defined(__s390__) && !defined(__s390x__)
-# define GETPC() \
- (((uintptr_t)__builtin_return_address(0) & 0x7fffffffUL) - 1)
-#elif defined(__arm__)
-/* Thumb return addresses have the low bit set, so we need to subtract two.
- This is still safe in ARM mode because instructions are 4 bytes. */
-# define GETPC() ((uintptr_t)__builtin_return_address(0) - 2)
+# define GETRA() tci_tb_ptr
+#else
+# define GETRA() \
+ ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
+#endif
+
+/* The true return address will often point to a host insn that is part of
+ the next translated guest insn. Adjust the address backward to point to
+ the middle of the call insn. Subtracting one would do the job except for
+ several compressed mode architectures (arm, mips) which set the low bit
+ to indicate the compressed mode; subtracting two works around that. It
+ is also the case that there are no host isas that contain a call insn
+ smaller than 4 bytes, so we don't worry about special-casing this. */
+#if defined(CONFIG_TCG_INTERPRETER)
+# define GETPC_ADJ 0
#else
-# define GETPC() ((uintptr_t)__builtin_return_address(0) - 1)
+# define GETPC_ADJ 2
#endif
+#define GETPC() (GETRA() - GETPC_ADJ)
+
+/* The LDST optimization splits code generation into fast and slow paths.
+ In some implementations, we pass the "logical" return address manually;
+ in others, we must infer the logical return from the true return. */
#if defined(CONFIG_QEMU_LDST_OPTIMIZATION) && defined(CONFIG_SOFTMMU)
-/* qemu_ld/st optimization split code generation to fast and slow path, thus,
- it needs special handling for an MMU helper which is called from the slow
- path, to get the fast path's pc without any additional argument.
- It uses a tricky solution which embeds the fast path pc into the slow path.
-
- Code flow in slow path:
- (1) pre-process
- (2) call MMU helper
- (3) jump to (5)
- (4) fast path information (implementation specific)
- (5) post-process (e.g. stack adjust)
- (6) jump to corresponding code of the next of fast path
- */
-# if defined(__i386__) || defined(__x86_64__)
-# define GETPC_EXT() GETPC()
-# elif defined (_ARCH_PPC) && !defined (_ARCH_PPC64)
-# define GETRA() ((uintptr_t)__builtin_return_address(0))
-# define GETPC_LDST() ((uintptr_t) ((*(int32_t *)(GETRA() - 4)) - 1))
+# if defined (_ARCH_PPC) && !defined (_ARCH_PPC64)
+# define GETRA_LDST(RA) (*(int32_t *)((RA) - 4))
# elif defined(__arm__)
/* We define two insns between the return address and the branch back to
straight-line. Find and decode that branch insn. */
-# define GETRA() ((uintptr_t)__builtin_return_address(0))
-# define GETPC_LDST() tcg_getpc_ldst(GETRA())
-static inline uintptr_t tcg_getpc_ldst(uintptr_t ra)
+# define GETRA_LDST(RA) tcg_getra_ldst(RA)
+static inline uintptr_t tcg_getra_ldst(uintptr_t ra)
{
int32_t b;
ra += 8; /* skip the two insns */
@@ -343,33 +338,32 @@ static inline uintptr_t tcg_getpc_ldst(uintptr_t ra)
b = (b << 8) >> (8 - 2); /* extract the displacement */
ra += 8; /* branches are relative to pc+8 */
ra += b; /* apply the displacement */
- ra -= 4; /* return a pointer into the current opcode,
- not the start of the next opcode */
return ra;
}
# elif defined(__aarch64__)
-# define GETRA() ((uintptr_t)__builtin_return_address(0))
-# define GETPC_LDST() tcg_getpc_ldst(GETRA())
-static inline uintptr_t tcg_getpc_ldst(uintptr_t ra)
+# define GETRA_LDST(RA) tcg_getra_ldst(RA)
+static inline uintptr_t tcg_getra_ldst(uintptr_t ra)
{
int32_t b;
ra += 4; /* skip one instruction */
b = *(int32_t *)ra; /* load the branch insn */
b = (b << 6) >> (6 - 2); /* extract the displacement */
ra += b; /* apply the displacement */
- ra -= 4; /* return a pointer into the current opcode,
- not the start of the next opcode */
return ra;
}
-# else
-# error "CONFIG_QEMU_LDST_OPTIMIZATION needs GETPC_LDST() implementation!"
# endif
+#endif /* CONFIG_QEMU_LDST_OPTIMIZATION */
+
+/* ??? Delete these once they are no longer used. */
bool is_tcg_gen_code(uintptr_t pc_ptr);
-# ifndef GETPC_EXT
-# define GETPC_EXT() (is_tcg_gen_code(GETRA()) ? GETPC_LDST() : GETPC())
-# endif
+#ifdef GETRA_LDST
+# define GETRA_EXT() tcg_getra_ext(GETRA())
+static inline uintptr_t tcg_getra_ext(uintptr_t ra)
+{
+ return is_tcg_gen_code(ra) ? GETRA_LDST(ra) : ra;
+}
#else
-# define GETPC_EXT() GETPC()
+# define GETRA_EXT() GETRA()
#endif
#if !defined(CONFIG_USER_ONLY)
@@ -383,7 +377,10 @@ bool io_mem_write(struct MemoryRegion *mr, hwaddr addr,
void tlb_fill(CPUArchState *env1, target_ulong addr, int is_write, int mmu_idx,
uintptr_t retaddr);
-#include "exec/softmmu_defs.h"
+uint8_t helper_ldb_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
+uint16_t helper_ldw_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
+uint32_t helper_ldl_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
+uint64_t helper_ldq_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
#define ACCESS_TYPE (NB_MMU_MODES + 1)
#define MEMSUFFIX _code
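For orientation (a sketch, not part of the patch): on a normal non-interpreter host the new macros expand roughly as follows, with GETRA_EXT() only rewriting the address when a GETRA_LDST implementation exists for the host.

    GETRA()     -> (uintptr_t)__builtin_extract_return_addr(
                       __builtin_return_address(0))
    GETPC_ADJ   -> 2
    GETPC()     -> GETRA() - 2      /* lands inside the call insn itself */
    GETRA_EXT() -> is_tcg_gen_code(GETRA()) ? GETRA_LDST(GETRA()) : GETRA()
                   /* or plain GETRA() when no GETRA_LDST is defined */

    /* A CONFIG_TCG_INTERPRETER build uses tci_tb_ptr for GETRA() and
       GETPC_ADJ == 0, so there GETPC() == GETRA().  Typical (hypothetical)
       use at the top of a slow-path helper: */
    uintptr_t retaddr = GETPC();    /* host pc to report on a TLB fault */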
diff --git a/include/exec/gen-icount.h b/include/exec/gen-icount.h
index 4fc7b2981d..39a6b61e4f 100644
--- a/include/exec/gen-icount.h
+++ b/include/exec/gen-icount.h
@@ -39,12 +39,12 @@ static inline void gen_tb_start(void)
static void gen_tb_end(TranslationBlock *tb, int num_insns)
{
gen_set_label(exitreq_label);
- tcg_gen_exit_tb((tcg_target_long)tb + TB_EXIT_REQUESTED);
+ tcg_gen_exit_tb((uintptr_t)tb + TB_EXIT_REQUESTED);
if (use_icount) {
*icount_arg = num_insns;
gen_set_label(icount_label);
- tcg_gen_exit_tb((tcg_target_long)tb + TB_EXIT_ICOUNT_EXPIRED);
+ tcg_gen_exit_tb((uintptr_t)tb + TB_EXIT_ICOUNT_EXPIRED);
}
}
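A side note on the type change (illustrative, not from the patch): uintptr_t is the integer type C99 guarantees can round-trip a host pointer, while tcg_target_long tracks TCG_TARGET_REG_BITS and need not match the host pointer width -- the x32 host enabled elsewhere in this series has 32-bit pointers but 64-bit TCG registers. A minimal sketch of the property being relied on, with a hypothetical pointer p:

    void *p = some_tb;             /* hypothetical: any host pointer        */
    uintptr_t u = (uintptr_t)p;    /* lossless conversion (C99 7.18.1.4)    */
    assert((void *)u == p);        /* the TB_EXIT_* code added above rides
                                      in the low bits and is masked back off
                                      by the caller that handles the exit   */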
diff --git a/include/exec/softmmu_defs.h b/include/exec/softmmu_defs.h
deleted file mode 100644
index e55e7178c6..0000000000
--- a/include/exec/softmmu_defs.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Software MMU support
- *
- * Declare helpers used by TCG for qemu_ld/st ops.
- *
- * Used by softmmu_exec.h, TCG targets and exec-all.h.
- *
- */
-#ifndef SOFTMMU_DEFS_H
-#define SOFTMMU_DEFS_H
-
-uint8_t helper_ret_ldb_mmu(CPUArchState *env, target_ulong addr,
- int mmu_idx, uintptr_t retaddr);
-uint16_t helper_ret_ldw_mmu(CPUArchState *env, target_ulong addr,
- int mmu_idx, uintptr_t retaddr);
-uint32_t helper_ret_ldl_mmu(CPUArchState *env, target_ulong addr,
- int mmu_idx, uintptr_t retaddr);
-uint64_t helper_ret_ldq_mmu(CPUArchState *env, target_ulong addr,
- int mmu_idx, uintptr_t retaddr);
-
-void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
- int mmu_idx, uintptr_t retaddr);
-void helper_ret_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
- int mmu_idx, uintptr_t retaddr);
-void helper_ret_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
- int mmu_idx, uintptr_t retaddr);
-void helper_ret_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
- int mmu_idx, uintptr_t retaddr);
-
-uint8_t helper_ldb_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
-uint16_t helper_ldw_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
-uint32_t helper_ldl_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
-uint64_t helper_ldq_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
-
-void helper_stb_mmu(CPUArchState *env, target_ulong addr,
- uint8_t val, int mmu_idx);
-void helper_stw_mmu(CPUArchState *env, target_ulong addr,
- uint16_t val, int mmu_idx);
-void helper_stl_mmu(CPUArchState *env, target_ulong addr,
- uint32_t val, int mmu_idx);
-void helper_stq_mmu(CPUArchState *env, target_ulong addr,
- uint64_t val, int mmu_idx);
-
-uint8_t helper_ldb_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
-uint16_t helper_ldw_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
-uint32_t helper_ldl_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
-uint64_t helper_ldq_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
-
-#endif /* SOFTMMU_DEFS_H */
diff --git a/include/exec/softmmu_exec.h b/include/exec/softmmu_exec.h
index 3e4e886a30..6fde154527 100644
--- a/include/exec/softmmu_exec.h
+++ b/include/exec/softmmu_exec.h
@@ -19,7 +19,8 @@
#define ldul_executive ldl_executive
#define ldul_supervisor ldl_supervisor
-#include "exec/softmmu_defs.h"
+/* The memory helpers for tcg-generated code need tcg_target_long etc. */
+#include "tcg.h"
#define ACCESS_TYPE 0
#define MEMSUFFIX MMU_MODE0_SUFFIX
diff --git a/include/exec/softmmu_template.h b/include/exec/softmmu_template.h
index eaca9e1035..5bbc56afd5 100644
--- a/include/exec/softmmu_template.h
+++ b/include/exec/softmmu_template.h
@@ -28,24 +28,40 @@
#if DATA_SIZE == 8
#define SUFFIX q
-#define USUFFIX q
-#define DATA_TYPE uint64_t
+#define LSUFFIX q
+#define SDATA_TYPE int64_t
#elif DATA_SIZE == 4
#define SUFFIX l
-#define USUFFIX l
-#define DATA_TYPE uint32_t
+#define LSUFFIX l
+#define SDATA_TYPE int32_t
#elif DATA_SIZE == 2
#define SUFFIX w
-#define USUFFIX uw
-#define DATA_TYPE uint16_t
+#define LSUFFIX uw
+#define SDATA_TYPE int16_t
#elif DATA_SIZE == 1
#define SUFFIX b
-#define USUFFIX ub
-#define DATA_TYPE uint8_t
+#define LSUFFIX ub
+#define SDATA_TYPE int8_t
#else
#error unsupported data size
#endif
+#define DATA_TYPE glue(u, SDATA_TYPE)
+
+/* For the benefit of TCG generated code, we want to avoid the complication
+ of ABI-specific return type promotion and always return a value extended
+ to the register size of the host. This is tcg_target_long, except in the
+ case of a 32-bit host and 64-bit data, and for that we always have
+ uint64_t. Don't bother with this widened value for SOFTMMU_CODE_ACCESS. */
+#if defined(SOFTMMU_CODE_ACCESS) || DATA_SIZE == 8
+# define WORD_TYPE DATA_TYPE
+# define USUFFIX SUFFIX
+#else
+# define WORD_TYPE tcg_target_ulong
+# define USUFFIX glue(u, SUFFIX)
+# define SSUFFIX glue(s, SUFFIX)
+#endif
+
#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE 2
#define ADDR_READ addr_code
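To make the new layering concrete, here is roughly what one instantiation resolves to -- assuming DATA_SIZE == 2, a 64-bit host, no SOFTMMU_CODE_ACCESS, and MMUSUFFIX defined as _mmu by the including file (all of these are assumptions for the example):

    SUFFIX    -> w          LSUFFIX   -> uw                SDATA_TYPE -> int16_t
    DATA_TYPE -> uint16_t   WORD_TYPE -> tcg_target_ulong
    USUFFIX   -> uw         SSUFFIX   -> sw

    /* so the helpers emitted further down come out approximately as: */
    tcg_target_ulong helper_ret_lduw_mmu(CPUArchState *env, target_ulong addr,
                                         int mmu_idx, uintptr_t retaddr);
    tcg_target_ulong helper_ret_ldsw_mmu(CPUArchState *env, target_ulong addr,
                                         int mmu_idx, uintptr_t retaddr);
    uint16_t helper_ldw_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);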
@@ -77,15 +93,18 @@ static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
#ifdef SOFTMMU_CODE_ACCESS
static
#endif
-DATA_TYPE
-glue(glue(helper_ret_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env,
- target_ulong addr, int mmu_idx,
- uintptr_t retaddr)
+WORD_TYPE
+glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)(CPUArchState *env,
+ target_ulong addr, int mmu_idx,
+ uintptr_t retaddr)
{
int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
uintptr_t haddr;
+ /* Adjust the given return address. */
+ retaddr -= GETPC_ADJ;
+
/* If the TLB entry is for a different page, reload and try again. */
if ((addr & TARGET_PAGE_MASK)
!= (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
@@ -121,10 +140,12 @@ glue(glue(helper_ret_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env,
#endif
addr1 = addr & ~(DATA_SIZE - 1);
addr2 = addr1 + DATA_SIZE;
- res1 = glue(glue(helper_ret_ld, SUFFIX), MMUSUFFIX)(env, addr1,
- mmu_idx, retaddr);
- res2 = glue(glue(helper_ret_ld, SUFFIX), MMUSUFFIX)(env, addr2,
- mmu_idx, retaddr);
+ /* Note the adjustment at the beginning of the function.
+ Undo that for the recursion. */
+ res1 = glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
+ (env, addr1, mmu_idx, retaddr + GETPC_ADJ);
+ res2 = glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
+ (env, addr2, mmu_idx, retaddr + GETPC_ADJ);
shift = (addr & (DATA_SIZE - 1)) * 8;
#ifdef TARGET_WORDS_BIGENDIAN
res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
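A worked example of the split above (numbers purely illustrative): with DATA_SIZE == 4, a big-endian target, and addr misaligned by one byte,

    addr1 = addr & ~3;          /* aligned word holding bytes B0 B1 B2 B3  */
    addr2 = addr1 + 4;          /* next word,            bytes B4 B5 B6 B7 */
    shift = (addr & 3) * 8;     /* == 8 */
    res   = (res1 << 8) | (res2 >> 24);   /* == B1 B2 B3 B4, the big-endian
                                             word that starts at addr      */

The recursive calls add GETPC_ADJ back because the callee immediately subtracts it again at its top.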
@@ -142,19 +163,33 @@ glue(glue(helper_ret_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env,
#endif
haddr = addr + env->tlb_table[mmu_idx][index].addend;
- return glue(glue(ld, USUFFIX), _raw)((uint8_t *)haddr);
+ /* Note that ldl_raw is defined with type "int". */
+ return (DATA_TYPE) glue(glue(ld, LSUFFIX), _raw)((uint8_t *)haddr);
}
DATA_TYPE
glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
int mmu_idx)
{
- return glue(glue(helper_ret_ld, SUFFIX), MMUSUFFIX)(env, addr, mmu_idx,
- GETPC_EXT());
+ return glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)(env, addr, mmu_idx,
+ GETRA_EXT());
}
#ifndef SOFTMMU_CODE_ACCESS
+/* Provide signed versions of the load routines as well. We can of course
+ avoid this for 64-bit data, or for 32-bit data on 32-bit host. */
+#if DATA_SIZE * 8 < TCG_TARGET_REG_BITS
+WORD_TYPE
+glue(glue(helper_ret_ld, SSUFFIX), MMUSUFFIX)(CPUArchState *env,
+ target_ulong addr, int mmu_idx,
+ uintptr_t retaddr)
+{
+ return (SDATA_TYPE) glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
+ (env, addr, mmu_idx, retaddr);
+}
+#endif
+
static inline void glue(io_write, SUFFIX)(CPUArchState *env,
hwaddr physaddr,
DATA_TYPE val,
@@ -182,6 +217,9 @@ glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
uintptr_t haddr;
+ /* Adjust the given return address. */
+ retaddr -= GETPC_ADJ;
+
/* If the TLB entry is for a different page, reload and try again. */
if ((addr & TARGET_PAGE_MASK)
!= (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
@@ -223,8 +261,10 @@ glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
#else
uint8_t val8 = val >> (i * 8);
#endif
+ /* Note the adjustment at the beginning of the function.
+ Undo that for the recursion. */
glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
- mmu_idx, retaddr);
+ mmu_idx, retaddr + GETPC_ADJ);
}
return;
}
@@ -245,7 +285,7 @@ glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
DATA_TYPE val, int mmu_idx)
{
glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)(env, addr, val, mmu_idx,
- GETPC_EXT());
+ GETRA_EXT());
}
#endif /* !defined(SOFTMMU_CODE_ACCESS) */
@@ -254,6 +294,10 @@ glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
#undef SHIFT
#undef DATA_TYPE
#undef SUFFIX
-#undef USUFFIX
+#undef LSUFFIX
#undef DATA_SIZE
#undef ADDR_READ
+#undef WORD_TYPE
+#undef SDATA_TYPE
+#undef USUFFIX
+#undef SSUFFIX
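For context (not part of the patch): softmmu_template.h is a multiple-inclusion template, with SHIFT and MMUSUFFIX supplied by the including file and cleaned up by the #undef block above. A rough, hypothetical sketch of how the four access sizes get instantiated -- the real include sites live in the per-target softmmu code and may differ in detail:

    #define MMUSUFFIX _mmu

    #define SHIFT 0                     /* DATA_SIZE == 1: ldb/stb helpers */
    #include "exec/softmmu_template.h"

    #define SHIFT 1                     /* DATA_SIZE == 2: ldw/stw helpers */
    #include "exec/softmmu_template.h"

    #define SHIFT 2                     /* DATA_SIZE == 4: ldl/stl helpers */
    #include "exec/softmmu_template.h"

    #define SHIFT 3                     /* DATA_SIZE == 8: ldq/stq helpers */
    #include "exec/softmmu_template.h"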