-rw-r--r--  cpu-all.h              |  9
-rw-r--r--  exec-all.h             |  2
-rw-r--r--  exec.c                 |  4
-rw-r--r--  softmmu_defs.h         | 28
-rw-r--r--  softmmu_header.h       | 60
-rw-r--r--  softmmu_template.h     | 84
-rw-r--r--  tcg/arm/tcg-target.c   | 53
-rw-r--r--  tcg/hppa/tcg-target.c  | 44
-rw-r--r--  tcg/i386/tcg-target.c  | 57
-rw-r--r--  tcg/ia64/tcg-target.c  | 46
-rw-r--r--  tcg/mips/tcg-target.c  | 44
-rw-r--r--  tcg/ppc/tcg-target.c   | 45
-rw-r--r--  tcg/ppc64/tcg-target.c | 44
-rw-r--r--  tcg/s390/tcg-target.c  | 44
-rw-r--r--  tcg/sparc/tcg-target.c | 50
-rw-r--r--  tcg/tci/tcg-target.c   |  6
16 files changed, 576 insertions, 44 deletions
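
In outline: the softmmu load/store helpers grow an optional explicit CPU-state parameter, selected at build time by CONFIG_TCG_PASS_AREG0, instead of reaching for the fixed global env register (AREG0). The two signature families, as declared in softmmu_defs.h below (byte case shown; the w/l/q variants follow the same pattern):

    /* legacy: env reaches the helper through the reserved AREG0 global */
    uint8_t __ldb_mmu(target_ulong addr, int mmu_idx);
    void __stb_mmu(target_ulong addr, uint8_t val, int mmu_idx);

    /* CONFIG_TCG_PASS_AREG0: env is an ordinary first argument */
    uint8_t helper_ldb_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
    void helper_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
                        int mmu_idx);

Each TCG backend therefore has to marshal one extra argument before calling a helper, which is what the repetitive per-target hunks in the second half of this diff do.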
diff --git a/cpu-all.h b/cpu-all.h
--- a/cpu-all.h
+++ b/cpu-all.h
@@ -259,12 +259,21 @@ extern unsigned long reserved_va;
 #define stfl(p, v) stfl_raw(p, v)
 #define stfq(p, v) stfq_raw(p, v)
 
+#ifndef CONFIG_TCG_PASS_AREG0
 #define ldub_code(p) ldub_raw(p)
 #define ldsb_code(p) ldsb_raw(p)
 #define lduw_code(p) lduw_raw(p)
 #define ldsw_code(p) ldsw_raw(p)
 #define ldl_code(p) ldl_raw(p)
 #define ldq_code(p) ldq_raw(p)
+#else
+#define cpu_ldub_code(env1, p) ldub_raw(p)
+#define cpu_ldsb_code(env1, p) ldsb_raw(p)
+#define cpu_lduw_code(env1, p) lduw_raw(p)
+#define cpu_ldsw_code(env1, p) ldsw_raw(p)
+#define cpu_ldl_code(env1, p) ldl_raw(p)
+#define cpu_ldq_code(env1, p) ldq_raw(p)
+#endif
 
 #define ldub_kernel(p) ldub_raw(p)
 #define ldsb_kernel(p) ldsb_raw(p)
diff --git a/exec-all.h b/exec-all.h
index 8fca67b117..93a5b22c1c 100644
--- a/exec-all.h
+++ b/exec-all.h
@@ -312,7 +312,9 @@ void tlb_fill(CPUArchState *env1, target_ulong addr, int is_write, int mmu_idx,
 
 #define ACCESS_TYPE (NB_MMU_MODES + 1)
 #define MEMSUFFIX _code
+#ifndef CONFIG_TCG_PASS_AREG0
 #define env cpu_single_env
+#endif
 
 #define DATA_SIZE 1
 #include "softmmu_header.h"
diff --git a/exec.c b/exec.c
--- a/exec.c
+++ b/exec.c
@@ -4595,7 +4595,11 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
     mmu_idx = cpu_mmu_index(env1);
     if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                  (addr & TARGET_PAGE_MASK))) {
+#ifdef CONFIG_TCG_PASS_AREG0
+        cpu_ldub_code(env1, addr);
+#else
         ldub_code(addr);
+#endif
     }
     pd = env1->iotlb[mmu_idx][page_index] & ~TARGET_PAGE_MASK;
     mr = iotlb_to_region(pd);
diff --git a/softmmu_defs.h b/softmmu_defs.h
index d47d30d711..8d59f9d4f8 100644
--- a/softmmu_defs.h
+++ b/softmmu_defs.h
@@ -9,6 +9,7 @@
 #ifndef SOFTMMU_DEFS_H
 #define SOFTMMU_DEFS_H
 
+#ifndef CONFIG_TCG_PASS_AREG0
 uint8_t __ldb_mmu(target_ulong addr, int mmu_idx);
 void __stb_mmu(target_ulong addr, uint8_t val, int mmu_idx);
 uint16_t __ldw_mmu(target_ulong addr, int mmu_idx);
@@ -26,5 +27,32 @@ uint32_t __ldl_cmmu(target_ulong addr, int mmu_idx);
 void __stl_cmmu(target_ulong addr, uint32_t val, int mmu_idx);
 uint64_t __ldq_cmmu(target_ulong addr, int mmu_idx);
 void __stq_cmmu(target_ulong addr, uint64_t val, int mmu_idx);
+#else
+uint8_t helper_ldb_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
+void helper_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
+                    int mmu_idx);
+uint16_t helper_ldw_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
+void helper_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
+                    int mmu_idx);
+uint32_t helper_ldl_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
+void helper_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
+                    int mmu_idx);
+uint64_t helper_ldq_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
+void helper_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
+                    int mmu_idx);
+
+uint8_t helper_ldb_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
+void helper_stb_cmmu(CPUArchState *env, target_ulong addr, uint8_t val,
+int mmu_idx);
+uint16_t helper_ldw_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
+void helper_stw_cmmu(CPUArchState *env, target_ulong addr, uint16_t val,
+                     int mmu_idx);
+uint32_t helper_ldl_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
+void helper_stl_cmmu(CPUArchState *env, target_ulong addr, uint32_t val,
+                     int mmu_idx);
+uint64_t helper_ldq_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
+void helper_stq_cmmu(CPUArchState *env, target_ulong addr, uint64_t val,
+                     int mmu_idx);
+#endif
 
 #endif
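
On the caller side the change is mechanical: each access site gains an env argument, as in the exec.c hunk above. A minimal sketch of a guest-instruction fetch site, with hypothetical variable names (insn, pc):

    #ifdef CONFIG_TCG_PASS_AREG0
        insn = cpu_ldub_code(env, pc);   /* env passed explicitly */
    #else
        insn = ldub_code(pc);            /* env implicit via AREG0 */
    #endif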
diff --git a/softmmu_header.h b/softmmu_header.h
index 818d7b662e..6b72093a0c 100644
--- a/softmmu_header.h
+++ b/softmmu_header.h
@@ -78,9 +78,23 @@
 #define ADDR_READ addr_read
 #endif
 
+#ifndef CONFIG_TCG_PASS_AREG0
+#define ENV_PARAM
+#define ENV_VAR
+#define CPU_PREFIX
+#define HELPER_PREFIX __
+#else
+#define ENV_PARAM CPUArchState *env,
+#define ENV_VAR env,
+#define CPU_PREFIX cpu_
+#define HELPER_PREFIX helper_
+#endif
+
 /* generic load/store macros */
 
-static inline RES_TYPE glue(glue(ld, USUFFIX), MEMSUFFIX)(target_ulong ptr)
+static inline RES_TYPE
+glue(glue(glue(CPU_PREFIX, ld), USUFFIX), MEMSUFFIX)(ENV_PARAM
+                                                     target_ulong ptr)
 {
     int page_index;
     RES_TYPE res;
@@ -93,7 +107,9 @@ static inline RES_TYPE glue(glue(ld, USUFFIX), MEMSUFFIX)(target_ulong ptr)
     mmu_idx = CPU_MMU_INDEX;
     if (unlikely(env->tlb_table[mmu_idx][page_index].ADDR_READ !=
                  (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
-        res = glue(glue(__ld, SUFFIX), MMUSUFFIX)(addr, mmu_idx);
+        res = glue(glue(glue(HELPER_PREFIX, ld), SUFFIX), MMUSUFFIX)(ENV_VAR
+                                                                     addr,
+                                                                     mmu_idx);
     } else {
         physaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
         res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)physaddr);
@@ -102,7 +118,9 @@ static inline RES_TYPE glue(glue(ld, USUFFIX), MEMSUFFIX)(target_ulong ptr)
 }
 
 #if DATA_SIZE <= 2
-static inline int glue(glue(lds, SUFFIX), MEMSUFFIX)(target_ulong ptr)
+static inline int
+glue(glue(glue(CPU_PREFIX, lds), SUFFIX), MEMSUFFIX)(ENV_PARAM
+                                                     target_ulong ptr)
 {
     int res, page_index;
     target_ulong addr;
@@ -114,7 +132,8 @@ static inline int glue(glue(lds, SUFFIX), MEMSUFFIX)(target_ulong ptr)
     mmu_idx = CPU_MMU_INDEX;
     if (unlikely(env->tlb_table[mmu_idx][page_index].ADDR_READ !=
                  (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
-        res = (DATA_STYPE)glue(glue(__ld, SUFFIX), MMUSUFFIX)(addr, mmu_idx);
+        res = (DATA_STYPE)glue(glue(glue(HELPER_PREFIX, ld), SUFFIX),
+                               MMUSUFFIX)(ENV_VAR addr, mmu_idx);
     } else {
         physaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
         res = glue(glue(lds, SUFFIX), _raw)((uint8_t *)physaddr);
@@ -127,7 +146,9 @@ static inline int glue(glue(lds, SUFFIX), MEMSUFFIX)(target_ulong ptr)
 
 /* generic store macro */
 
-static inline void glue(glue(st, SUFFIX), MEMSUFFIX)(target_ulong ptr, RES_TYPE v)
+static inline void
+glue(glue(glue(CPU_PREFIX, st), SUFFIX), MEMSUFFIX)(ENV_PARAM target_ulong ptr,
+                                                    RES_TYPE v)
 {
     int page_index;
     target_ulong addr;
@@ -139,7 +160,8 @@ static inline void glue(glue(st, SUFFIX), MEMSUFFIX)(target_ulong ptr, RES_TYPE
     mmu_idx = CPU_MMU_INDEX;
     if (unlikely(env->tlb_table[mmu_idx][page_index].addr_write !=
                  (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
-        glue(glue(__st, SUFFIX), MMUSUFFIX)(addr, v, mmu_idx);
+        glue(glue(glue(HELPER_PREFIX, st), SUFFIX), MMUSUFFIX)(ENV_VAR addr, v,
+                                                               mmu_idx);
     } else {
         physaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
         glue(glue(st, SUFFIX), _raw)((uint8_t *)physaddr, v);
@@ -151,46 +173,52 @@ static inline void glue(glue(st, SUFFIX), MEMSUFFIX)(target_ulong ptr, RES_TYPE
 #if ACCESS_TYPE != (NB_MMU_MODES + 1)
 
 #if DATA_SIZE == 8
-static inline float64 glue(ldfq, MEMSUFFIX)(target_ulong ptr)
+static inline float64 glue(glue(CPU_PREFIX, ldfq), MEMSUFFIX)(ENV_PARAM
+                                                              target_ulong ptr)
 {
     union {
         float64 d;
         uint64_t i;
     } u;
-    u.i = glue(ldq, MEMSUFFIX)(ptr);
+    u.i = glue(glue(CPU_PREFIX, ldq), MEMSUFFIX)(ENV_VAR ptr);
     return u.d;
 }
 
-static inline void glue(stfq, MEMSUFFIX)(target_ulong ptr, float64 v)
+static inline void glue(glue(CPU_PREFIX, stfq), MEMSUFFIX)(ENV_PARAM
+                                                           target_ulong ptr,
+                                                           float64 v)
 {
     union {
         float64 d;
         uint64_t i;
     } u;
     u.d = v;
-    glue(stq, MEMSUFFIX)(ptr, u.i);
+    glue(glue(CPU_PREFIX, stq), MEMSUFFIX)(ENV_VAR ptr, u.i);
 }
 #endif /* DATA_SIZE == 8 */
 
 #if DATA_SIZE == 4
-static inline float32 glue(ldfl, MEMSUFFIX)(target_ulong ptr)
+static inline float32 glue(glue(CPU_PREFIX, ldfl), MEMSUFFIX)(ENV_PARAM
+                                                              target_ulong ptr)
 {
     union {
         float32 f;
         uint32_t i;
     } u;
-    u.i = glue(ldl, MEMSUFFIX)(ptr);
+    u.i = glue(glue(CPU_PREFIX, ldl), MEMSUFFIX)(ENV_VAR ptr);
     return u.f;
 }
 
-static inline void glue(stfl, MEMSUFFIX)(target_ulong ptr, float32 v)
+static inline void glue(glue(CPU_PREFIX, stfl), MEMSUFFIX)(ENV_PARAM
+                                                           target_ulong ptr,
+                                                           float32 v)
 {
     union {
         float32 f;
         uint32_t i;
     } u;
     u.f = v;
-    glue(stl, MEMSUFFIX)(ptr, u.i);
+    glue(glue(CPU_PREFIX, stl), MEMSUFFIX)(ENV_VAR ptr, u.i);
 }
 #endif /* DATA_SIZE == 4 */
 
@@ -205,3 +233,7 @@ static inline void glue(stfl, MEMSUFFIX)(target_ulong ptr, float32 v)
 #undef CPU_MMU_INDEX
 #undef MMUSUFFIX
 #undef ADDR_READ
+#undef ENV_PARAM
+#undef ENV_VAR
+#undef CPU_PREFIX
+#undef HELPER_PREFIX
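
For reference, the glue() chains concatenate CPU_PREFIX + operation + size suffix + MEMSUFFIX into a single identifier. With CONFIG_TCG_PASS_AREG0, DATA_SIZE == 1 and MEMSUFFIX == _code, the inline loader above expands roughly as follows (reconstructed for illustration; the body lines not visible in the hunks come from the unchanged parts of softmmu_header.h):

    static inline uint32_t cpu_ldub_code(CPUArchState *env, target_ulong ptr)
    {
        int page_index, mmu_idx;
        uint32_t res;                    /* RES_TYPE */
        target_ulong addr = ptr;
        unsigned long physaddr;

        page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        mmu_idx = cpu_mmu_index(env);    /* CPU_MMU_INDEX for code access */
        if (unlikely(env->tlb_table[mmu_idx][page_index].addr_code !=
                     (addr & TARGET_PAGE_MASK))) {   /* DATA_SIZE - 1 == 0 */
            res = helper_ldb_cmmu(env, addr, mmu_idx);   /* TLB miss */
        } else {
            physaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
            res = ldub_raw((uint8_t *)physaddr);         /* TLB hit */
        }
        return res;
    }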
diff --git a/softmmu_template.h b/softmmu_template.h
index d633bb5558..afcab1e6a9 100644
--- a/softmmu_template.h
+++ b/softmmu_template.h
@@ -54,10 +54,24 @@
 #define ADDR_READ addr_read
 #endif
 
-static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
+#ifndef CONFIG_TCG_PASS_AREG0
+#define ENV_PARAM
+#define ENV_VAR
+#define CPU_PREFIX
+#define HELPER_PREFIX __
+#else
+#define ENV_PARAM CPUArchState *env,
+#define ENV_VAR env,
+#define CPU_PREFIX cpu_
+#define HELPER_PREFIX helper_
+#endif
+
+static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(ENV_PARAM
+                                                        target_ulong addr,
                                                         int mmu_idx,
                                                         void *retaddr);
-static inline DATA_TYPE glue(io_read, SUFFIX)(target_phys_addr_t physaddr,
+static inline DATA_TYPE glue(io_read, SUFFIX)(ENV_PARAM
+                                              target_phys_addr_t physaddr,
                                               target_ulong addr,
                                               void *retaddr)
 {
@@ -89,7 +103,10 @@ static inline DATA_TYPE glue(io_read, SUFFIX)(target_phys_addr_t physaddr,
 }
 
 /* handle all cases except unaligned access which span two pages */
-DATA_TYPE glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr, int mmu_idx)
+DATA_TYPE
+glue(glue(glue(HELPER_PREFIX, ld), SUFFIX), MMUSUFFIX)(ENV_PARAM
+                                                       target_ulong addr,
+                                                       int mmu_idx)
 {
     DATA_TYPE res;
     int index;
@@ -110,22 +127,22 @@ DATA_TYPE glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr, int mmu_idx)
             goto do_unaligned_access;
         retaddr = GETPC();
         ioaddr = env->iotlb[mmu_idx][index];
-        res = glue(io_read, SUFFIX)(ioaddr, addr, retaddr);
+        res = glue(io_read, SUFFIX)(ENV_VAR ioaddr, addr, retaddr);
     } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
         /* slow unaligned access (it spans two pages or IO) */
     do_unaligned_access:
         retaddr = GETPC();
 #ifdef ALIGNED_ONLY
-        do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
+        do_unaligned_access(ENV_VAR addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
 #endif
-        res = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr,
+        res = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(ENV_VAR addr,
                                                      mmu_idx, retaddr);
     } else {
         /* unaligned/aligned access in the same page */
 #ifdef ALIGNED_ONLY
         if ((addr & (DATA_SIZE - 1)) != 0) {
             retaddr = GETPC();
-            do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
+            do_unaligned_access(ENV_VAR addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
         }
 #endif
         addend = env->tlb_table[mmu_idx][index].addend;
@@ -136,7 +153,7 @@ DATA_TYPE glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr, int mmu_idx)
         retaddr = GETPC();
 #ifdef ALIGNED_ONLY
         if ((addr & (DATA_SIZE - 1)) != 0)
-            do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
+            do_unaligned_access(ENV_VAR addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
 #endif
         tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
         goto redo;
@@ -145,9 +162,11 @@ DATA_TYPE glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr, int mmu_idx)
 }
 
 /* handle all unaligned cases */
-static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
-                                                        int mmu_idx,
-                                                        void *retaddr)
+static DATA_TYPE
+glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(ENV_PARAM
+                                       target_ulong addr,
+                                       int mmu_idx,
+                                       void *retaddr)
 {
     DATA_TYPE res, res1, res2;
     int index, shift;
@@ -164,15 +183,15 @@ static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
         if ((addr & (DATA_SIZE - 1)) != 0)
             goto do_unaligned_access;
         ioaddr = env->iotlb[mmu_idx][index];
-        res = glue(io_read, SUFFIX)(ioaddr, addr, retaddr);
+        res = glue(io_read, SUFFIX)(ENV_VAR ioaddr, addr, retaddr);
     } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
     do_unaligned_access:
         /* slow unaligned access (it spans two pages) */
         addr1 = addr & ~(DATA_SIZE - 1);
         addr2 = addr1 + DATA_SIZE;
-        res1 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr1,
+        res1 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(ENV_VAR addr1,
                                                       mmu_idx, retaddr);
-        res2 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr2,
+        res2 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(ENV_VAR addr2,
                                                       mmu_idx, retaddr);
         shift = (addr & (DATA_SIZE - 1)) * 8;
 #ifdef TARGET_WORDS_BIGENDIAN
@@ -196,12 +215,14 @@ static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
 
 #ifndef SOFTMMU_CODE_ACCESS
 
-static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
+static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(ENV_PARAM
+                                                   target_ulong addr,
                                                    DATA_TYPE val,
                                                    int mmu_idx,
                                                    void *retaddr);
-static inline void glue(io_write, SUFFIX)(target_phys_addr_t physaddr,
+static inline void glue(io_write, SUFFIX)(ENV_PARAM
+                                          target_phys_addr_t physaddr,
                                           DATA_TYPE val,
                                           target_ulong addr,
                                           void *retaddr)
 {
@@ -231,8 +252,10 @@ static inline void glue(io_write, SUFFIX)(target_phys_addr_t physaddr,
 #endif /* SHIFT > 2 */
 }
 
-void glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr, DATA_TYPE val,
-                                         int mmu_idx)
+void glue(glue(glue(HELPER_PREFIX, st), SUFFIX), MMUSUFFIX)(ENV_PARAM
+                                                            target_ulong addr,
+                                                            DATA_TYPE val,
+                                                            int mmu_idx)
 {
     target_phys_addr_t ioaddr;
     unsigned long addend;
@@ -250,21 +273,21 @@ void glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr, DATA_TYPE val,
             goto do_unaligned_access;
         retaddr = GETPC();
         ioaddr = env->iotlb[mmu_idx][index];
-        glue(io_write, SUFFIX)(ioaddr, val, addr, retaddr);
+        glue(io_write, SUFFIX)(ENV_VAR ioaddr, val, addr, retaddr);
     } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
     do_unaligned_access:
         retaddr = GETPC();
 #ifdef ALIGNED_ONLY
-        do_unaligned_access(addr, 1, mmu_idx, retaddr);
+        do_unaligned_access(ENV_VAR addr, 1, mmu_idx, retaddr);
 #endif
-        glue(glue(slow_st, SUFFIX), MMUSUFFIX)(addr, val,
+        glue(glue(slow_st, SUFFIX), MMUSUFFIX)(ENV_VAR addr, val,
                                                mmu_idx, retaddr);
     } else {
         /* aligned/unaligned access in the same page */
 #ifdef ALIGNED_ONLY
         if ((addr & (DATA_SIZE - 1)) != 0) {
             retaddr = GETPC();
-            do_unaligned_access(addr, 1, mmu_idx, retaddr);
+            do_unaligned_access(ENV_VAR addr, 1, mmu_idx, retaddr);
         }
 #endif
         addend = env->tlb_table[mmu_idx][index].addend;
@@ -275,7 +298,7 @@ void glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr, DATA_TYPE val,
         retaddr = GETPC();
 #ifdef ALIGNED_ONLY
         if ((addr & (DATA_SIZE - 1)) != 0)
-            do_unaligned_access(addr, 1, mmu_idx, retaddr);
+            do_unaligned_access(ENV_VAR addr, 1, mmu_idx, retaddr);
 #endif
         tlb_fill(env, addr, 1, mmu_idx, retaddr);
         goto redo;
@@ -283,7 +306,8 @@ void glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr, DATA_TYPE val,
 }
 
 /* handles all unaligned cases */
-static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
+static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(ENV_PARAM
+                                                   target_ulong addr,
                                                    DATA_TYPE val,
                                                    int mmu_idx,
                                                    void *retaddr)
@@ -302,7 +326,7 @@ static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
         if ((addr & (DATA_SIZE - 1)) != 0)
             goto do_unaligned_access;
         ioaddr = env->iotlb[mmu_idx][index];
-        glue(io_write, SUFFIX)(ioaddr, val, addr, retaddr);
+        glue(io_write, SUFFIX)(ENV_VAR ioaddr, val, addr, retaddr);
     } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
     do_unaligned_access:
         /* XXX: not efficient, but simple */
@@ -310,10 +334,12 @@ static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
            previous page from the TLB cache.  */
         for(i = DATA_SIZE - 1; i >= 0; i--) {
 #ifdef TARGET_WORDS_BIGENDIAN
-            glue(slow_stb, MMUSUFFIX)(addr + i, val >> (((DATA_SIZE - 1) * 8) - (i * 8)),
+            glue(slow_stb, MMUSUFFIX)(ENV_VAR addr + i,
+                                      val >> (((DATA_SIZE - 1) * 8) - (i * 8)),
                                       mmu_idx, retaddr);
 #else
-            glue(slow_stb, MMUSUFFIX)(addr + i, val >> (i * 8),
+            glue(slow_stb, MMUSUFFIX)(ENV_VAR addr + i,
+                                      val >> (i * 8),
                                       mmu_idx, retaddr);
 #endif
         }
@@ -338,3 +364,7 @@ static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
 #undef USUFFIX
 #undef DATA_SIZE
 #undef ADDR_READ
+#undef ENV_PARAM
+#undef ENV_VAR
+#undef CPU_PREFIX
+#undef HELPER_PREFIX
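
The same ENV_PARAM/HELPER_PREFIX machinery renames the out-of-line helpers defined by this template, and env is now threaded explicitly through io_readb/slow_ldb_mmu and the store counterparts instead of being taken from a global. For SHIFT == 0 (byte access, data side) the entry point becomes, schematically (header expansion only; the TLB-lookup body is the one shown in the hunks above):

    #ifndef CONFIG_TCG_PASS_AREG0
    uint8_t __ldb_mmu(target_ulong addr, int mmu_idx)
    #else
    uint8_t helper_ldb_mmu(CPUArchState *env, target_ulong addr, int mmu_idx)
    #endif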
diff --git a/tcg/arm/tcg-target.c b/tcg/arm/tcg-target.c
index 5af21b3f5d..4d59a63855 100644
--- a/tcg/arm/tcg-target.c
+++ b/tcg/arm/tcg-target.c
@@ -929,6 +929,27 @@ static inline void tcg_out_goto_label(TCGContext *s, int cond, int label_index)
 
 #include "../../softmmu_defs.h"
 
+#ifdef CONFIG_TCG_PASS_AREG0
+/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
+   int mmu_idx) */
+static const void * const qemu_ld_helpers[4] = {
+    helper_ldb_mmu,
+    helper_ldw_mmu,
+    helper_ldl_mmu,
+    helper_ldq_mmu,
+};
+
+/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
+   uintxx_t val, int mmu_idx) */
+static const void * const qemu_st_helpers[4] = {
+    helper_stb_mmu,
+    helper_stw_mmu,
+    helper_stl_mmu,
+    helper_stq_mmu,
+};
+#else
+/* legacy helper signature: __ld_mmu(target_ulong addr, int
+   mmu_idx) */
 static void *qemu_ld_helpers[4] = {
     __ldb_mmu,
     __ldw_mmu,
@@ -936,6 +957,8 @@ static void *qemu_ld_helpers[4] = {
     __ldq_mmu,
 };
 
+/* legacy helper signature: __st_mmu(target_ulong addr, uintxx_t val,
+   int mmu_idx) */
 static void *qemu_st_helpers[4] = {
     __stb_mmu,
     __stw_mmu,
@@ -943,6 +966,7 @@ static void *qemu_st_helpers[4] = {
     __stq_mmu,
 };
 #endif
+#endif
 
 #define TLB_SHIFT (CPU_TLB_ENTRY_BITS + CPU_TLB_BITS)
 
@@ -1075,6 +1099,19 @@ static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
                     TCG_REG_R1, 0, addr_reg2, SHIFT_IMM_LSL(0));
     tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R2, 0, mem_index);
 # endif
+#ifdef CONFIG_TCG_PASS_AREG0
+    /* XXX/FIXME: suboptimal and incorrect for 64 bit */
+    tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
+                    tcg_target_call_iarg_regs[2], 0,
+                    tcg_target_call_iarg_regs[1], SHIFT_IMM_LSL(0));
+    tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
+                    tcg_target_call_iarg_regs[1], 0,
+                    tcg_target_call_iarg_regs[0], SHIFT_IMM_LSL(0));
+
+    tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
+                    tcg_target_call_iarg_regs[0], 0, TCG_AREG0,
+                    SHIFT_IMM_LSL(0));
+#endif
     tcg_out_call(s, (tcg_target_long) qemu_ld_helpers[s_bits]);
 
     switch (opc) {
@@ -1341,6 +1378,22 @@ static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
     }
 # endif
 
+#ifdef CONFIG_TCG_PASS_AREG0
+    /* XXX/FIXME: suboptimal and incorrect for 64 bit */
+    tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
+                    tcg_target_call_iarg_regs[3], 0,
+                    tcg_target_call_iarg_regs[2], SHIFT_IMM_LSL(0));
+    tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
+                    tcg_target_call_iarg_regs[2], 0,
+                    tcg_target_call_iarg_regs[1], SHIFT_IMM_LSL(0));
+    tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
+                    tcg_target_call_iarg_regs[1], 0,
+                    tcg_target_call_iarg_regs[0], SHIFT_IMM_LSL(0));
+
+    tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
+                    tcg_target_call_iarg_regs[0], 0, TCG_AREG0,
+                    SHIFT_IMM_LSL(0));
+#endif
     tcg_out_call(s, (tcg_target_long) qemu_st_helpers[s_bits]);
     if (opc == 3)
         tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R13, TCG_REG_R13, 0x10);
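
The pattern just seen in the ARM backend repeats in every backend below: the arguments already marshalled for the legacy call are shifted up one argument register, highest first so nothing is clobbered, and env lands in argument register 0. A hypothetical C model of what the emitted moves accomplish (illustration only, not QEMU code):

    /* before: reg[0..nargs-1] hold addr, (val,) mem_index
       after:  reg[0] holds env, reg[1..nargs] hold the old arguments */
    static void rotate_in_env(uintptr_t *reg, int nargs, uintptr_t env)
    {
        for (int i = nargs; i > 0; i--) {
            reg[i] = reg[i - 1];   /* highest target first, as in the movs */
        }
        reg[0] = env;
    }

The "XXX/FIXME: suboptimal" comments are the patch author's own: a mov chain per call is wasteful, and the ARM variant does not handle 64-bit value pairs on the 32-bit host ("incorrect for 64 bit").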
diff --git a/tcg/hppa/tcg-target.c b/tcg/hppa/tcg-target.c
index c5a3730a2b..e579ef06a1 100644
--- a/tcg/hppa/tcg-target.c
+++ b/tcg/hppa/tcg-target.c
@@ -882,6 +882,27 @@ static void tcg_out_setcond2(TCGContext *s, int cond, TCGArg ret,
 #if defined(CONFIG_SOFTMMU)
 #include "../../softmmu_defs.h"
 
+#ifdef CONFIG_TCG_PASS_AREG0
+/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
+   int mmu_idx) */
+static const void * const qemu_ld_helpers[4] = {
+    helper_ldb_mmu,
+    helper_ldw_mmu,
+    helper_ldl_mmu,
+    helper_ldq_mmu,
+};
+
+/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
+   uintxx_t val, int mmu_idx) */
+static const void * const qemu_st_helpers[4] = {
+    helper_stb_mmu,
+    helper_stw_mmu,
+    helper_stl_mmu,
+    helper_stq_mmu,
+};
+#else
+/* legacy helper signature: __ld_mmu(target_ulong addr, int
+   mmu_idx) */
 static void *qemu_ld_helpers[4] = {
     __ldb_mmu,
     __ldw_mmu,
@@ -889,12 +910,15 @@ static void *qemu_ld_helpers[4] = {
     __ldq_mmu,
 };
 
+/* legacy helper signature: __st_mmu(target_ulong addr, uintxx_t val,
+   int mmu_idx) */
 static void *qemu_st_helpers[4] = {
     __stb_mmu,
     __stw_mmu,
     __stl_mmu,
     __stq_mmu,
 };
+#endif
 
 /* Load and compare a TLB entry, and branch if TLB miss.  OFFSET is set to
    the offset of the first ADDR_READ or ADDR_WRITE member of the appropriate
@@ -1061,6 +1085,15 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
     }
     tcg_out_movi(s, TCG_TYPE_I32, argreg, mem_index);
 
+#ifdef CONFIG_TCG_PASS_AREG0
+    /* XXX/FIXME: suboptimal */
+    tcg_out_mov(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[2],
+                tcg_target_call_iarg_regs[1]);
+    tcg_out_mov(s, TCG_TYPE_TL, tcg_target_call_iarg_regs[1],
+                tcg_target_call_iarg_regs[0]);
+    tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0],
+                TCG_AREG0);
+#endif
     tcg_out_call(s, qemu_ld_helpers[opc & 3]);
 
     switch (opc) {
@@ -1212,6 +1245,17 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
         tcg_abort();
     }
 
+#ifdef CONFIG_TCG_PASS_AREG0
+    /* XXX/FIXME: suboptimal */
+    tcg_out_mov(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[3],
+                tcg_target_call_iarg_regs[2]);
+    tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[2],
+                tcg_target_call_iarg_regs[1]);
+    tcg_out_mov(s, TCG_TYPE_TL, tcg_target_call_iarg_regs[1],
+                tcg_target_call_iarg_regs[0]);
+    tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0],
+                TCG_AREG0);
+#endif
     tcg_out_call(s, qemu_st_helpers[opc]);
 
     /* label2: */
diff --git a/tcg/i386/tcg-target.c b/tcg/i386/tcg-target.c
index c4e940da5b..43a51a1c54 100644
--- a/tcg/i386/tcg-target.c
+++ b/tcg/i386/tcg-target.c
@@ -178,6 +178,9 @@ static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
             tcg_regset_set32(ct->u.regs, 0, 0xffff);
             tcg_regset_reset_reg(ct->u.regs, TCG_REG_RSI);
             tcg_regset_reset_reg(ct->u.regs, TCG_REG_RDI);
+#ifdef CONFIG_TCG_PASS_AREG0
+            tcg_regset_reset_reg(ct->u.regs, TCG_REG_RDX);
+#endif
         } else {
             tcg_regset_set32(ct->u.regs, 0, 0xff);
             tcg_regset_reset_reg(ct->u.regs, TCG_REG_EAX);
@@ -957,6 +960,27 @@ static void tcg_out_jmp(TCGContext *s, tcg_target_long dest)
 
 #include "../../softmmu_defs.h"
 
+#ifdef CONFIG_TCG_PASS_AREG0
+/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
+   int mmu_idx) */
+static const void *qemu_ld_helpers[4] = {
+    helper_ldb_mmu,
+    helper_ldw_mmu,
+    helper_ldl_mmu,
+    helper_ldq_mmu,
+};
+
+/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
+   uintxx_t val, int mmu_idx) */
+static const void *qemu_st_helpers[4] = {
+    helper_stb_mmu,
+    helper_stw_mmu,
+    helper_stl_mmu,
+    helper_stq_mmu,
+};
+#else
+/* legacy helper signature: __ld_mmu(target_ulong addr, int
+   mmu_idx) */
 static void *qemu_ld_helpers[4] = {
     __ldb_mmu,
     __ldw_mmu,
@@ -964,12 +988,15 @@ static void *qemu_ld_helpers[4] = {
     __ldq_mmu,
 };
 
+/* legacy helper signature: __st_mmu(target_ulong addr, uintxx_t val,
+   int mmu_idx) */
 static void *qemu_st_helpers[4] = {
     __stb_mmu,
     __stw_mmu,
     __stl_mmu,
     __stq_mmu,
 };
+#endif
 
 /* Perform the TLB load and compare.
 
@@ -1188,11 +1215,26 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
         }
         tcg_out_push(s, args[addrlo_idx]);
         stack_adjust += 4;
+#ifdef CONFIG_TCG_PASS_AREG0
+        tcg_out_push(s, TCG_AREG0);
+        stack_adjust += 4;
+#endif
 #else
     /* The first argument is already loaded with addrlo.  */
     arg_idx = 1;
     tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[arg_idx],
                  mem_index);
+#ifdef CONFIG_TCG_PASS_AREG0
+    /* XXX/FIXME: suboptimal */
+    tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[3],
+                tcg_target_call_iarg_regs[2]);
+    tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[2],
+                tcg_target_call_iarg_regs[1]);
+    tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[1],
+                tcg_target_call_iarg_regs[0]);
+    tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[0],
+                TCG_AREG0);
+#endif
 #endif
 
     tcg_out_calli(s, (tcg_target_long)qemu_ld_helpers[s_bits]);
@@ -1386,11 +1428,26 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args,
         }
         tcg_out_push(s, args[addrlo_idx]);
         stack_adjust += 4;
+#ifdef CONFIG_TCG_PASS_AREG0
+        tcg_out_push(s, TCG_AREG0);
+        stack_adjust += 4;
+#endif
 #else
     tcg_out_mov(s, (opc == 3 ? TCG_TYPE_I64 : TCG_TYPE_I32),
                 TCG_REG_RSI, data_reg);
     tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_RDX, mem_index);
     stack_adjust = 0;
+#ifdef CONFIG_TCG_PASS_AREG0
+    /* XXX/FIXME: suboptimal */
+    tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[3],
+                tcg_target_call_iarg_regs[2]);
+    tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[2],
+                tcg_target_call_iarg_regs[1]);
+    tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[1],
+                tcg_target_call_iarg_regs[0]);
+    tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[0],
+                TCG_AREG0);
+#endif
 #endif
 
     tcg_out_calli(s, (tcg_target_long)qemu_st_helpers[s_bits]);
diff --git a/tcg/ia64/tcg-target.c b/tcg/ia64/tcg-target.c
index f90252a443..e02dacc84f 100644
--- a/tcg/ia64/tcg-target.c
+++ b/tcg/ia64/tcg-target.c
@@ -1452,12 +1452,25 @@ static inline void tcg_out_qemu_tlb(TCGContext *s, TCGArg addr_reg,
                    TCG_REG_P7, TCG_REG_R3, TCG_REG_R57));
 }
 
+#ifdef CONFIG_TCG_PASS_AREG0
+/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
+   int mmu_idx) */
+static const void * const qemu_ld_helpers[4] = {
+    helper_ldb_mmu,
+    helper_ldw_mmu,
+    helper_ldl_mmu,
+    helper_ldq_mmu,
+};
+#else
+/* legacy helper signature: __ld_mmu(target_ulong addr, int
+   mmu_idx) */
 static void *qemu_ld_helpers[4] = {
     __ldb_mmu,
     __ldw_mmu,
     __ldl_mmu,
     __ldq_mmu,
 };
+#endif
 
 static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
 {
@@ -1517,6 +1530,15 @@ static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
                    tcg_opc_m1 (TCG_REG_P7, OPC_LD8_M1, TCG_REG_R1, TCG_REG_R2),
                    tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0));
     }
+#ifdef CONFIG_TCG_PASS_AREG0
+    /* XXX/FIXME: suboptimal */
+    tcg_out_mov(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[2],
+                tcg_target_call_iarg_regs[1]);
+    tcg_out_mov(s, TCG_TYPE_TL, tcg_target_call_iarg_regs[1],
+                tcg_target_call_iarg_regs[0]);
+    tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0],
+                TCG_AREG0);
+#endif
     if (!bswap || s_bits == 0) {
         tcg_out_bundle(s, miB,
                        tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
@@ -1547,12 +1569,25 @@ static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
     }
 }
 
+#ifdef CONFIG_TCG_PASS_AREG0
+/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
+   uintxx_t val, int mmu_idx) */
+static const void * const qemu_st_helpers[4] = {
+    helper_stb_mmu,
+    helper_stw_mmu,
+    helper_stl_mmu,
+    helper_stq_mmu,
+};
+#else
+/* legacy helper signature: __st_mmu(target_ulong addr, uintxx_t val,
+   int mmu_idx) */
 static void *qemu_st_helpers[4] = {
     __stb_mmu,
     __stw_mmu,
     __stl_mmu,
     __stq_mmu,
 };
+#endif
 
 static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
 {
@@ -1622,6 +1657,17 @@ static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
         data_reg = TCG_REG_R2;
     }
 
+#ifdef CONFIG_TCG_PASS_AREG0
+    /* XXX/FIXME: suboptimal */
+    tcg_out_mov(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[3],
+                tcg_target_call_iarg_regs[2]);
+    tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[2],
+                tcg_target_call_iarg_regs[1]);
+    tcg_out_mov(s, TCG_TYPE_TL, tcg_target_call_iarg_regs[1],
+                tcg_target_call_iarg_regs[0]);
+    tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0],
+                TCG_AREG0);
+#endif
     tcg_out_bundle(s, miB,
                    tcg_opc_m4 (TCG_REG_P6, opc_st_m4[opc],
                                data_reg, TCG_REG_R3),
diff --git a/tcg/mips/tcg-target.c b/tcg/mips/tcg-target.c
index c6aa5bced5..393ba07f25 100644
--- a/tcg/mips/tcg-target.c
+++ b/tcg/mips/tcg-target.c
@@ -750,6 +750,27 @@ static void tcg_out_setcond2(TCGContext *s, TCGCond cond, int ret,
 
 #include "../../softmmu_defs.h"
 
+#ifdef CONFIG_TCG_PASS_AREG0
+/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
+   int mmu_idx) */
+static const void * const qemu_ld_helpers[4] = {
+    helper_ldb_mmu,
+    helper_ldw_mmu,
+    helper_ldl_mmu,
+    helper_ldq_mmu,
+};
+
+/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
+   uintxx_t val, int mmu_idx) */
+static const void * const qemu_st_helpers[4] = {
+    helper_stb_mmu,
+    helper_stw_mmu,
+    helper_stl_mmu,
+    helper_stq_mmu,
+};
+#else
+/* legacy helper signature: __ld_mmu(target_ulong addr, int
+   mmu_idx) */
 static void *qemu_ld_helpers[4] = {
     __ldb_mmu,
     __ldw_mmu,
@@ -757,6 +778,8 @@ static void *qemu_ld_helpers[4] = {
     __ldq_mmu,
 };
 
+/* legacy helper signature: __st_mmu(target_ulong addr, uintxx_t val,
+   int mmu_idx) */
 static void *qemu_st_helpers[4] = {
     __stb_mmu,
     __stw_mmu,
@@ -764,6 +787,7 @@ static void *qemu_st_helpers[4] = {
     __stq_mmu,
 };
 #endif
+#endif
 
 static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
                             int opc)
@@ -858,6 +882,15 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
 # endif
     tcg_out_movi(s, TCG_TYPE_I32, sp_args++, mem_index);
     tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_T9, (tcg_target_long)qemu_ld_helpers[s_bits]);
+#ifdef CONFIG_TCG_PASS_AREG0
+    /* XXX/FIXME: suboptimal and incorrect for 64 on 32 bit */
+    tcg_out_mov(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[2],
+                tcg_target_call_iarg_regs[1]);
+    tcg_out_mov(s, TCG_TYPE_TL, tcg_target_call_iarg_regs[1],
+                tcg_target_call_iarg_regs[0]);
+    tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0],
+                TCG_AREG0);
+#endif
    tcg_out_opc_reg(s, OPC_JALR, TCG_REG_RA, TCG_REG_T9, 0);
    tcg_out_nop(s);
 
@@ -1069,6 +1102,17 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args,
     }
     tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_T9,
                  (tcg_target_long)qemu_st_helpers[s_bits]);
+#ifdef CONFIG_TCG_PASS_AREG0
+    /* XXX/FIXME: suboptimal and incorrect for 64 on 32 bit */
+    tcg_out_mov(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[3],
+                tcg_target_call_iarg_regs[2]);
+    tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[2],
+                tcg_target_call_iarg_regs[1]);
+    tcg_out_mov(s, TCG_TYPE_TL, tcg_target_call_iarg_regs[1],
+                tcg_target_call_iarg_regs[0]);
+    tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0],
+                TCG_AREG0);
+#endif
     tcg_out_opc_reg(s, OPC_JALR, TCG_REG_RA, TCG_REG_T9, 0);
     tcg_out_nop(s);
diff --git a/tcg/ppc/tcg-target.c b/tcg/ppc/tcg-target.c
index 6a34cab5f9..b0aa914798 100644
--- a/tcg/ppc/tcg-target.c
+++ b/tcg/ppc/tcg-target.c
@@ -508,6 +508,27 @@ static void tcg_out_call (TCGContext *s, tcg_target_long arg, int const_arg)
 
 #include "../../softmmu_defs.h"
 
+#ifdef CONFIG_TCG_PASS_AREG0
+/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
+   int mmu_idx) */
+static const void * const qemu_ld_helpers[4] = {
+    helper_ldb_mmu,
+    helper_ldw_mmu,
+    helper_ldl_mmu,
+    helper_ldq_mmu,
+};
+
+/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
+   uintxx_t val, int mmu_idx) */
+static const void * const qemu_st_helpers[4] = {
+    helper_stb_mmu,
+    helper_stw_mmu,
+    helper_stl_mmu,
+    helper_stq_mmu,
+};
+#else
+/* legacy helper signature: __ld_mmu(target_ulong addr, int
+   mmu_idx) */
 static void *qemu_ld_helpers[4] = {
     __ldb_mmu,
     __ldw_mmu,
@@ -515,6 +536,8 @@ static void *qemu_ld_helpers[4] = {
     __ldq_mmu,
 };
 
+/* legacy helper signature: __ld_mmu(target_ulong addr, int
+   mmu_idx) */
 static void *qemu_st_helpers[4] = {
     __stb_mmu,
     __stw_mmu,
@@ -522,6 +545,7 @@ static void *qemu_st_helpers[4] = {
     __stq_mmu,
 };
 #endif
+#endif
 
 static void tcg_out_qemu_ld (TCGContext *s, const TCGArg *args, int opc)
 {
@@ -598,6 +622,16 @@ static void tcg_out_qemu_ld (TCGContext *s, const TCGArg *args, int opc)
     tcg_out_movi (s, TCG_TYPE_I32, 5, mem_index);
 #endif
 
+#ifdef CONFIG_TCG_PASS_AREG0
+    /* XXX/FIXME: suboptimal */
+    tcg_out_mov(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[2],
+                tcg_target_call_iarg_regs[1]);
+    tcg_out_mov(s, TCG_TYPE_TL, tcg_target_call_iarg_regs[1],
+                tcg_target_call_iarg_regs[0]);
+    tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0],
+                TCG_AREG0);
+#endif
+
     tcg_out_call (s, (tcg_target_long) qemu_ld_helpers[s_bits], 1);
     switch (opc) {
     case 0|4:
@@ -829,6 +863,17 @@ static void tcg_out_qemu_st (TCGContext *s, const TCGArg *args, int opc)
     ir++;
 
     tcg_out_movi (s, TCG_TYPE_I32, ir, mem_index);
+#ifdef CONFIG_TCG_PASS_AREG0
+    /* XXX/FIXME: suboptimal */
+    tcg_out_mov(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[3],
+                tcg_target_call_iarg_regs[2]);
+    tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[2],
+                tcg_target_call_iarg_regs[1]);
+    tcg_out_mov(s, TCG_TYPE_TL, tcg_target_call_iarg_regs[1],
+                tcg_target_call_iarg_regs[0]);
+    tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0],
+                TCG_AREG0);
+#endif
     tcg_out_call (s, (tcg_target_long) qemu_st_helpers[opc], 1);
     label2_ptr = s->code_ptr;
     tcg_out32 (s, B);
diff --git a/tcg/ppc64/tcg-target.c b/tcg/ppc64/tcg-target.c
index 7f723b5c2c..409a1ac1ce 100644
--- a/tcg/ppc64/tcg-target.c
+++ b/tcg/ppc64/tcg-target.c
@@ -552,6 +552,27 @@ static void tcg_out_ldsta (TCGContext *s, int ret, int addr,
 
 #include "../../softmmu_defs.h"
 
+#ifdef CONFIG_TCG_PASS_AREG0
+/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
+   int mmu_idx) */
+static const void * const qemu_ld_helpers[4] = {
+    helper_ldb_mmu,
+    helper_ldw_mmu,
+    helper_ldl_mmu,
+    helper_ldq_mmu,
+};
+
+/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
+   uintxx_t val, int mmu_idx) */
+static const void * const qemu_st_helpers[4] = {
+    helper_stb_mmu,
+    helper_stw_mmu,
+    helper_stl_mmu,
+    helper_stq_mmu,
+};
+#else
+/* legacy helper signature: __ld_mmu(target_ulong addr, int
+   mmu_idx) */
 static void *qemu_ld_helpers[4] = {
     __ldb_mmu,
     __ldw_mmu,
@@ -559,12 +580,15 @@ static void *qemu_ld_helpers[4] = {
     __ldq_mmu,
 };
 
+/* legacy helper signature: __st_mmu(target_ulong addr, uintxx_t val,
+   int mmu_idx) */
 static void *qemu_st_helpers[4] = {
     __stb_mmu,
     __stw_mmu,
     __stl_mmu,
     __stq_mmu,
 };
+#endif
 
 static void tcg_out_tlb_read (TCGContext *s, int r0, int r1, int r2,
                               int addr_reg, int s_bits, int offset)
@@ -648,6 +672,15 @@ static void tcg_out_qemu_ld (TCGContext *s, const TCGArg *args, int opc)
     tcg_out_mov (s, TCG_TYPE_I64, 3, addr_reg);
     tcg_out_movi (s, TCG_TYPE_I64, 4, mem_index);
 
+#ifdef CONFIG_TCG_PASS_AREG0
+    /* XXX/FIXME: suboptimal */
+    tcg_out_mov(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[2],
+                tcg_target_call_iarg_regs[1]);
+    tcg_out_mov(s, TCG_TYPE_TL, tcg_target_call_iarg_regs[1],
+                tcg_target_call_iarg_regs[0]);
+    tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0],
+                TCG_AREG0);
+#endif
     tcg_out_call (s, (tcg_target_long) qemu_ld_helpers[s_bits], 1);
 
     switch (opc) {
@@ -796,6 +829,17 @@ static void tcg_out_qemu_st (TCGContext *s, const TCGArg *args, int opc)
     tcg_out_rld (s, RLDICL, 4, data_reg, 0, 64 - (1 << (3 + opc)));
     tcg_out_movi (s, TCG_TYPE_I64, 5, mem_index);
 
+#ifdef CONFIG_TCG_PASS_AREG0
+    /* XXX/FIXME: suboptimal */
+    tcg_out_mov(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[3],
+                tcg_target_call_iarg_regs[2]);
+    tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[2],
+                tcg_target_call_iarg_regs[1]);
+    tcg_out_mov(s, TCG_TYPE_TL, tcg_target_call_iarg_regs[1],
+                tcg_target_call_iarg_regs[0]);
+    tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0],
+                TCG_AREG0);
+#endif
     tcg_out_call (s, (tcg_target_long) qemu_st_helpers[opc], 1);
 
     label2_ptr = s->code_ptr;
diff --git a/tcg/s390/tcg-target.c b/tcg/s390/tcg-target.c
index 47ffcc1f51..04662c15fd 100644
--- a/tcg/s390/tcg-target.c
+++ b/tcg/s390/tcg-target.c
@@ -301,6 +301,27 @@ static const uint8_t tcg_cond_to_ltr_cond[10] = {
 
 #include "../../softmmu_defs.h"
 
+#ifdef CONFIG_TCG_PASS_AREG0
+/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
+   int mmu_idx) */
+static const void * const qemu_ld_helpers[4] = {
+    helper_ldb_mmu,
+    helper_ldw_mmu,
+    helper_ldl_mmu,
+    helper_ldq_mmu,
+};
+
+/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
+   uintxx_t val, int mmu_idx) */
+static const void * const qemu_st_helpers[4] = {
+    helper_stb_mmu,
+    helper_stw_mmu,
+    helper_stl_mmu,
+    helper_stq_mmu,
+};
+#else
+/* legacy helper signature: __ld_mmu(target_ulong addr, int
+   mmu_idx) */
 static void *qemu_ld_helpers[4] = {
     __ldb_mmu,
     __ldw_mmu,
@@ -308,6 +329,8 @@ static void *qemu_ld_helpers[4] = {
     __ldq_mmu,
 };
 
+/* legacy helper signature: __st_mmu(target_ulong addr, uintxx_t val,
+   int mmu_idx) */
 static void *qemu_st_helpers[4] = {
     __stb_mmu,
     __stw_mmu,
@@ -315,6 +338,7 @@ static void *qemu_st_helpers[4] = {
     __stq_mmu,
 };
 #endif
+#endif
 
 static uint8_t *tb_ret_addr;
 
@@ -1483,9 +1507,29 @@ static void tcg_prepare_qemu_ldst(TCGContext* s, TCGReg data_reg,
             tcg_abort();
         }
         tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R4, mem_index);
+#ifdef CONFIG_TCG_PASS_AREG0
+        /* XXX/FIXME: suboptimal */
+        tcg_out_mov(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[2],
+                    tcg_target_call_iarg_regs[1]);
+        tcg_out_mov(s, TCG_TYPE_TL, tcg_target_call_iarg_regs[1],
+                    tcg_target_call_iarg_regs[0]);
+        tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0],
+                    TCG_AREG0);
+#endif
         tgen_calli(s, (tcg_target_ulong)qemu_st_helpers[s_bits]);
     } else {
         tcg_out_movi(s, TCG_TYPE_I32, arg1, mem_index);
+#ifdef CONFIG_TCG_PASS_AREG0
+        /* XXX/FIXME: suboptimal */
+        tcg_out_mov(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[3],
+                    tcg_target_call_iarg_regs[2]);
+        tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[2],
+                    tcg_target_call_iarg_regs[1]);
+        tcg_out_mov(s, TCG_TYPE_TL, tcg_target_call_iarg_regs[1],
+                    tcg_target_call_iarg_regs[0]);
+        tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0],
+                    TCG_AREG0);
+#endif
         tgen_calli(s, (tcg_target_ulong)qemu_ld_helpers[s_bits]);
 
         /* sign extension */
diff --git a/tcg/sparc/tcg-target.c b/tcg/sparc/tcg-target.c
index b287122df5..80f0818679 100644
--- a/tcg/sparc/tcg-target.c
+++ b/tcg/sparc/tcg-target.c
@@ -59,6 +59,12 @@ static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
 };
 #endif
 
+#ifdef CONFIG_TCG_PASS_AREG0
+#define ARG_OFFSET 1
+#else
+#define ARG_OFFSET 0
+#endif
+
 static const int tcg_target_reg_alloc_order[] = {
     TCG_REG_L0,
     TCG_REG_L1,
@@ -86,9 +92,9 @@ static const int tcg_target_call_iarg_regs[6] = {
 
 static const int tcg_target_call_oarg_regs[] = {
     TCG_REG_O0,
-#if TCG_TARGET_REG_BITS == 32
-    TCG_REG_O1
-#endif
+    TCG_REG_O1,
+    TCG_REG_O2,
+    TCG_REG_O3,
 };
 
 static inline int check_fit_tl(tcg_target_long val, unsigned int bits)
@@ -155,6 +161,9 @@ static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
         tcg_regset_reset_reg(ct->u.regs, TCG_REG_O0);
         tcg_regset_reset_reg(ct->u.regs, TCG_REG_O1);
         tcg_regset_reset_reg(ct->u.regs, TCG_REG_O2);
+#ifdef CONFIG_TCG_PASS_AREG0
+        tcg_regset_reset_reg(ct->u.regs, TCG_REG_O3);
+#endif
         break;
     case 'I':
         ct->ct |= TCG_CT_CONST_S11;
@@ -706,6 +715,27 @@ static void tcg_target_qemu_prologue(TCGContext *s)
 
 #include "../../softmmu_defs.h"
 
+#ifdef CONFIG_TCG_PASS_AREG0
+/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
+   int mmu_idx) */
+static const void * const qemu_ld_helpers[4] = {
+    helper_ldb_mmu,
+    helper_ldw_mmu,
+    helper_ldl_mmu,
+    helper_ldq_mmu,
+};
+
+/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
+   uintxx_t val, int mmu_idx) */
+static const void * const qemu_st_helpers[4] = {
+    helper_stb_mmu,
+    helper_stw_mmu,
+    helper_stl_mmu,
+    helper_stq_mmu,
+};
+#else
+/* legacy helper signature: __ld_mmu(target_ulong addr, int
+   mmu_idx) */
 static const void * const qemu_ld_helpers[4] = {
     __ldb_mmu,
     __ldw_mmu,
@@ -713,6 +743,8 @@ static const void * const qemu_ld_helpers[4] = {
     __ldq_mmu,
 };
 
+/* legacy helper signature: __st_mmu(target_ulong addr, uintxx_t val,
+   int mmu_idx) */
 static const void * const qemu_st_helpers[4] = {
     __stb_mmu,
     __stw_mmu,
@@ -720,6 +752,7 @@ static const void * const qemu_st_helpers[4] = {
     __stq_mmu,
 };
 #endif
+#endif
 
 #if TARGET_LONG_BITS == 32
 #define TARGET_LD_OP LDUW
@@ -801,6 +834,17 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
 
     /* mov */
     tcg_out_movi(s, TCG_TYPE_I32, arg1, mem_index);
+#ifdef CONFIG_TCG_PASS_AREG0
+    /* XXX/FIXME: suboptimal */
+    tcg_out_mov(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[3],
+                tcg_target_call_iarg_regs[2]);
+    tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[2],
+                tcg_target_call_iarg_regs[1]);
+    tcg_out_mov(s, TCG_TYPE_TL, tcg_target_call_iarg_regs[1],
+                tcg_target_call_iarg_regs[0]);
+    tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0],
+                TCG_AREG0);
+#endif
 
     /* XXX: move that code at the end of the TB */
     /* qemu_ld_helper[s_bits](arg0, arg1) */
diff --git a/tcg/tci/tcg-target.c b/tcg/tci/tcg-target.c
index bd85073662..453f1875e2 100644
--- a/tcg/tci/tcg-target.c
+++ b/tcg/tci/tcg-target.c
@@ -798,6 +798,9 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
     case INDEX_op_qemu_st8:
     case INDEX_op_qemu_st16:
     case INDEX_op_qemu_st32:
+#ifdef CONFIG_TCG_PASS_AREG0
+        tcg_out_r(s, TCG_AREG0);
+#endif
         tcg_out_r(s, *args++);
         tcg_out_r(s, *args++);
 #if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
@@ -808,6 +811,9 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
 #endif
         break;
     case INDEX_op_qemu_st64:
+#ifdef CONFIG_TCG_PASS_AREG0
+        tcg_out_r(s, TCG_AREG0);
+#endif
        tcg_out_r(s, *args++);
 #if TCG_TARGET_REG_BITS == 32
        tcg_out_r(s, *args++);