-rw-r--r--  target-arm/exec.h      |   8
-rw-r--r--  target-arm/helper.c    |  14
-rw-r--r--  target-arm/helpers.h   |  13
-rw-r--r--  target-arm/op_helper.c |  13
-rw-r--r--  target-arm/op_mem.h    |  58
-rw-r--r--  target-arm/op_neon.h   |  67
-rw-r--r--  target-arm/translate.c | 311
7 files changed, 204 insertions, 280 deletions
diff --git a/target-arm/exec.h b/target-arm/exec.h
index af1818171a..bd4910d114 100644
--- a/target-arm/exec.h
+++ b/target-arm/exec.h
@@ -23,7 +23,6 @@
 register struct CPUARMState *env asm(AREG0);
 register uint32_t T0 asm(AREG1);
 register uint32_t T1 asm(AREG2);
-register uint32_t T2 asm(AREG3);
 
 #define M0 env->iwmmxt.val
 
@@ -59,15 +58,8 @@ static inline int cpu_halted(CPUState *env) {
 #include "softmmu_exec.h"
 #endif
 
-/* In op_helper.c */
-
-void helper_mark_exclusive(CPUARMState *, uint32_t addr);
-int helper_test_exclusive(CPUARMState *, uint32_t addr);
-void helper_clrex(CPUARMState *env);
-
 void cpu_loop_exit(void);
 
 void raise_exception(int);
-void helper_neon_tbl(int rn, int maxindex);
 
 uint32_t helper_neon_mul_p8(uint32_t op1, uint32_t op2);
diff --git a/target-arm/helper.c b/target-arm/helper.c
index 6438882913..48cd6c8925 100644
--- a/target-arm/helper.c
+++ b/target-arm/helper.c
@@ -432,7 +432,7 @@ static void flush_mmon(uint32_t addr)
 }
 
 /* Mark an address for exclusive access.  */
-void helper_mark_exclusive(CPUState *env, uint32_t addr)
+void HELPER(mark_exclusive)(CPUState *env, uint32_t addr)
 {
     if (!env->mmon_entry)
         allocate_mmon_state(env);
@@ -443,7 +443,7 @@ void helper_mark_exclusive(CPUState *env, uint32_t addr)
 
 /* Test if an exclusive address is still exclusive.  Returns zero
    if the address is still exclusive.  */
-int helper_test_exclusive(CPUState *env, uint32_t addr)
+uint32_t HELPER(test_exclusive)(CPUState *env, uint32_t addr)
 {
     int res;
 
@@ -457,7 +457,7 @@ int helper_test_exclusive(CPUState *env, uint32_t addr)
     return res;
 }
 
-void helper_clrex(CPUState *env)
+void HELPER(clrex)(CPUState *env)
 {
     if (!(env->mmon_entry && env->mmon_entry->addr))
         return;
@@ -1176,17 +1176,17 @@ target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
 
 /* Not really implemented.  Need to figure out a sane way of doing this.
    Maybe add generic watchpoint support and use that.  */
-void helper_mark_exclusive(CPUState *env, uint32_t addr)
+void HELPER(mark_exclusive)(CPUState *env, uint32_t addr)
 {
     env->mmon_addr = addr;
 }
 
-int helper_test_exclusive(CPUState *env, uint32_t addr)
+uint32_t HELPER(test_exclusive)(CPUState *env, uint32_t addr)
 {
     return (env->mmon_addr != addr);
}
 
-void helper_clrex(CPUState *env)
+void HELPER(clrex)(CPUState *env)
 {
     env->mmon_addr = -1;
 }
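The HELPER(x) wrapper used in these new definitions is QEMU's name-pasting macro (helpers.h below defines it as glue(helper_,x)), so the same name feeds both the helper definition and the generated gen_helper_* emitter. A minimal standalone sketch of that token pasting, assuming the usual two-level glue macros:

    /* Two-level paste so macro arguments are expanded first. */
    #define xglue(x, y) x ## y
    #define glue(x, y)  xglue(x, y)
    #define HELPER(name) glue(helper_, name)

    /* "void HELPER(clrex)(CPUState *env)" therefore defines helper_clrex,
       the symbol that a generated gen_helper_clrex() call resolves to. */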
@@ -2496,6 +2496,8 @@ float32 HELPER(rsqrts_f32)(float32 a, float32 b, CPUState *env)
     return float32_sub(three, float32_mul(a, b, s), s);
 }
 
+/* NEON helpers.  */
+
 /* TODO: The architecture specifies the value that the estimate functions
    should return.  We return the exact reciprocal/root instead.  */
 float32 HELPER(recpe_f32)(float32 a, CPUState *env)
diff --git a/target-arm/helpers.h b/target-arm/helpers.h
index 08eb590114..8ebd25fae8 100644
--- a/target-arm/helpers.h
+++ b/target-arm/helpers.h
@@ -51,6 +51,13 @@ static inline void gen_helper_##name(TCGv ret, \
 { \
     tcg_gen_helper_1_3(helper_##name, ret, arg1, arg2, arg3); \
 }
+#define DEF_HELPER_1_4(name, ret, args) \
+DEF_HELPER(name, ret, args) \
+static inline void gen_helper_##name(TCGv ret, \
+    TCGv arg1, TCGv arg2, TCGv arg3, TCGv arg4) \
+{ \
+    tcg_gen_helper_1_4(helper_##name, ret, arg1, arg2, arg3, arg4); \
+}
 #else /* !GEN_HELPER */
 #define DEF_HELPER_0_0 DEF_HELPER
 #define DEF_HELPER_0_1 DEF_HELPER
@@ -60,6 +67,7 @@ static inline void gen_helper_##name(TCGv ret, \
 #define DEF_HELPER_1_1 DEF_HELPER
 #define DEF_HELPER_1_2 DEF_HELPER
 #define DEF_HELPER_1_3 DEF_HELPER
+#define DEF_HELPER_1_4 DEF_HELPER
 #define HELPER(x) glue(helper_,x)
 #endif
 
@@ -130,6 +138,10 @@ DEF_HELPER_1_2(get_cp, uint32_t, (CPUState *, uint32_t))
 DEF_HELPER_1_2(get_r13_banked, uint32_t, (CPUState *, uint32_t))
 DEF_HELPER_0_3(set_r13_banked, void, (CPUState *, uint32_t, uint32_t))
 
+DEF_HELPER_0_2(mark_exclusive, void, (CPUState *, uint32_t))
+DEF_HELPER_1_2(test_exclusive, uint32_t, (CPUState *, uint32_t))
+DEF_HELPER_0_1(clrex, void, (CPUState *))
+
 DEF_HELPER_1_1(get_user_reg, uint32_t, (uint32_t))
 DEF_HELPER_0_2(set_user_reg, void, (uint32_t, uint32_t))
 
@@ -195,6 +207,7 @@ DEF_HELPER_1_2(recpe_f32, float32, (float32, CPUState *))
 DEF_HELPER_1_2(rsqrte_f32, float32, (float32, CPUState *))
 DEF_HELPER_1_2(recpe_u32, uint32_t, (uint32_t, CPUState *))
 DEF_HELPER_1_2(rsqrte_u32, uint32_t, (uint32_t, CPUState *))
+DEF_HELPER_1_4(neon_tbl, uint32_t, (uint32_t, uint32_t, uint32_t, uint32_t))
 
 DEF_HELPER_1_2(add_cc, uint32_t, (uint32_t, uint32_t))
 DEF_HELPER_1_2(adc_cc, uint32_t, (uint32_t, uint32_t))
diff --git a/target-arm/op_helper.c b/target-arm/op_helper.c
index 79b3f1a456..d1ce3a657c 100644
--- a/target-arm/op_helper.c
+++ b/target-arm/op_helper.c
@@ -40,27 +40,26 @@ void cpu_unlock(void)
     spin_unlock(&global_cpu_lock);
 }
 
-void helper_neon_tbl(int rn, int maxindex)
+uint32_t HELPER(neon_tbl)(uint32_t ireg, uint32_t def,
+                          uint32_t rn, uint32_t maxindex)
 {
     uint32_t val;
-    uint32_t mask;
     uint32_t tmp;
     int index;
     int shift;
     uint64_t *table;
     table = (uint64_t *)&env->vfp.regs[rn];
     val = 0;
-    mask = 0;
     for (shift = 0; shift < 32; shift += 8) {
-        index = (T1 >> shift) & 0xff;
-        if (index <= maxindex) {
+        index = (ireg >> shift) & 0xff;
+        if (index < maxindex) {
             tmp = (table[index >> 3] >> (index & 7)) & 0xff;
             val |= tmp << shift;
         } else {
-            val |= T0 & (0xff << shift);
+            val |= def & (0xff << shift);
         }
     }
-    T0 = val;
+    return val;
 }
 
 #if !defined(CONFIG_USER_ONLY)
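The converted table-lookup helper now receives the index word, the default word (zero for VTBL, the old destination for VTBX), the table register number and the table length as explicit arguments and returns its result, instead of reading and writing the T0/T1 globals. A standalone C model of the per-byte selection, with the table flattened to a plain byte array (illustrative names only, no CPU state):

    #include <stdint.h>

    /* For each byte lane of ireg: if it indexes into the table, fetch that
       table byte; otherwise keep the corresponding byte of def (the VTBX
       case -- for VTBL the caller passes def = 0). */
    static uint32_t neon_tbl_model(const uint8_t *table, uint32_t maxindex,
                                   uint32_t ireg, uint32_t def)
    {
        uint32_t val = 0;
        for (int shift = 0; shift < 32; shift += 8) {
            uint32_t index = (ireg >> shift) & 0xff;
            if (index < maxindex)
                val |= (uint32_t)table[index] << shift;
            else
                val |= def & (0xffu << shift);
        }
        return val;
    }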
diff --git a/target-arm/op_mem.h b/target-arm/op_mem.h
index 519544d0ef..daaab51f86 100644
--- a/target-arm/op_mem.h
+++ b/target-arm/op_mem.h
@@ -1,63 +1,5 @@
 /* ARM memory operations.  */
 
-/* Load-locked, store exclusive.  */
-#define EXCLUSIVE_OP(suffix, ldsuffix) \
-void OPPROTO glue(op_ld##suffix##ex,MEMSUFFIX)(void) \
-{ \
-    cpu_lock(); \
-    helper_mark_exclusive(env, T1); \
-    T0 = glue(ld##ldsuffix,MEMSUFFIX)(T1); \
-    cpu_unlock(); \
-    FORCE_RET(); \
-} \
- \
-void OPPROTO glue(op_st##suffix##ex,MEMSUFFIX)(void) \
-{ \
-    int failed; \
-    cpu_lock(); \
-    failed = helper_test_exclusive(env, T1); \
-    /* ??? Is it safe to hold the cpu lock over a store?  */ \
-    if (!failed) { \
-        glue(st##suffix,MEMSUFFIX)(T1, T0); \
-    } \
-    T0 = failed; \
-    cpu_unlock(); \
-    FORCE_RET(); \
-}
-
-EXCLUSIVE_OP(b, ub)
-EXCLUSIVE_OP(w, uw)
-EXCLUSIVE_OP(l, l)
-
-#undef EXCLUSIVE_OP
-
-/* Load exclusive T0:T1 from address T1.  */
-void OPPROTO glue(op_ldqex,MEMSUFFIX)(void)
-{
-    cpu_lock();
-    helper_mark_exclusive(env, T1);
-    T0 = glue(ldl,MEMSUFFIX)(T1);
-    T1 = glue(ldl,MEMSUFFIX)((T1 + 4));
-    cpu_unlock();
-    FORCE_RET();
-}
-
-/* Store exclusive T0:T2 to address T1.  */
-void OPPROTO glue(op_stqex,MEMSUFFIX)(void)
-{
-    int failed;
-    cpu_lock();
-    failed = helper_test_exclusive(env, T1);
-    /* ??? Is it safe to hold the cpu lock over a store?  */
-    if (!failed) {
-        glue(stl,MEMSUFFIX)(T1, T0);
-        glue(stl,MEMSUFFIX)((T1 + 4), T2);
-    }
-    T0 = failed;
-    cpu_unlock();
-    FORCE_RET();
-}
-
 /* iwMMXt load/store.  Address is in T1 */
 #define MMX_MEM_OP(name, ldname) \
 void OPPROTO glue(op_iwmmxt_ld##name,MEMSUFFIX)(void) \
diff --git a/target-arm/op_neon.h b/target-arm/op_neon.h
index 095165ede5..df3b7cb2a6 100644
--- a/target-arm/op_neon.h
+++ b/target-arm/op_neon.h
@@ -47,11 +47,6 @@ NEON_OP(getreg_T1)
     T1 = *(uint32_t *)((char *) env + PARAM1);
 }
 
-NEON_OP(getreg_T2)
-{
-    T2 = *(uint32_t *)((char *) env + PARAM1);
-}
-
 NEON_OP(setreg_T0)
 {
     *(uint32_t *)((char *) env + PARAM1) = T0;
@@ -62,11 +57,6 @@ NEON_OP(setreg_T1)
     *(uint32_t *)((char *) env + PARAM1) = T1;
 }
 
-NEON_OP(setreg_T2)
-{
-    *(uint32_t *)((char *) env + PARAM1) = T2;
-}
-
 #define NEON_TYPE1(name, type) \
 typedef struct \
 { \
@@ -293,28 +283,6 @@ NEON_OP(hsub_u32)
     FORCE_RET();
 }
 
-/* ??? bsl, bif and bit are all the same op, just with the oparands in a
-   differnet order.  It's currently easier to have 3 differnt ops than
-   rearange the operands.  */
-
-/* Bitwise Select.  */
-NEON_OP(bsl)
-{
-    T0 = (T0 & T2) | (T1 & ~T2);
-}
-
-/* Bitwise Insert If True.  */
-NEON_OP(bit)
-{
-    T0 = (T0 & T1) | (T2 & ~T1);
-}
-
-/* Bitwise Insert If False.  */
-NEON_OP(bif)
-{
-    T0 = (T2 & T1) | (T0 & ~T1);
-}
-
 #define NEON_USAT(dest, src1, src2, type) do { \
     uint32_t tmp = (uint32_t)src1 + (uint32_t)src2; \
     if (tmp != (type)tmp) { \
@@ -423,7 +391,7 @@ NEON_VOP(shl_u32, neon_u32, 1)
 
 NEON_OP(shl_u64)
 {
-    int8_t shift = T2;
+    int8_t shift = env->vfp.scratch[0];
     uint64_t val = T0 | ((uint64_t)T1 << 32);
     if (shift < 0) {
         val >>= -shift;
@@ -437,7 +405,7 @@ NEON_OP(shl_u64)
 
 NEON_OP(shl_s64)
 {
-    int8_t shift = T2;
+    int8_t shift = env->vfp.scratch[0];
     int64_t val = T0 | ((uint64_t)T1 << 32);
     if (shift < 0) {
         val >>= -shift;
@@ -468,7 +436,7 @@ NEON_VOP(rshl_u32, neon_u32, 1)
 
 NEON_OP(rshl_u64)
 {
-    int8_t shift = T2;
+    int8_t shift = env->vfp.scratch[0];
     uint64_t val = T0 | ((uint64_t)T1 << 32);
     if (shift < 0) {
         val = (val + ((uint64_t)1 << (-1 - shift))) >> -shift;
@@ -483,7 +451,7 @@ NEON_OP(rshl_u64)
 
 NEON_OP(rshl_s64)
 {
-    int8_t shift = T2;
+    int8_t shift = env->vfp.scratch[0];
     int64_t val = T0 | ((uint64_t)T1 << 32);
     if (shift < 0) {
         val = (val + ((int64_t)1 << (-1 - shift))) >> -shift;
@@ -514,7 +482,7 @@ NEON_VOP(qshl_s32, neon_s32, 1)
 
 NEON_OP(qshl_s64)
 {
-    int8_t shift = T2;
+    int8_t shift = env->vfp.scratch[0];
     int64_t val = T0 | ((uint64_t)T1 << 32);
     if (shift < 0) {
         val >>= -shift;
@@ -550,7 +518,7 @@ NEON_VOP(qshl_u32, neon_u32, 1)
 
 NEON_OP(qshl_u64)
 {
-    int8_t shift = T2;
+    int8_t shift = env->vfp.scratch[0];
     uint64_t val = T0 | ((uint64_t)T1 << 32);
     if (shift < 0) {
         val >>= -shift;
@@ -1713,12 +1681,6 @@ NEON_OP(zip_u16)
     FORCE_RET();
 }
 
-/* Table lookup.  This accessed the register file directly.  */
-NEON_OP(tbl)
-{
-    helper_neon_tbl(PARAM1, PARAM2);
-}
-
 NEON_OP(dup_u8)
 {
     T0 = (T0 >> PARAM1) & 0xff;
@@ -1726,20 +1688,3 @@ NEON_OP(dup_u8)
     T0 |= T0 << 16;
     FORCE_RET();
 }
-
-/* Helpers for element load/store.  */
-NEON_OP(insert_elt)
-{
-    int shift = PARAM1;
-    uint32_t mask = PARAM2;
-    T2 = (T2 & mask) | (T0 << shift);
-    FORCE_RET();
-}
-
-NEON_OP(extract_elt)
-{
-    int shift = PARAM1;
-    uint32_t mask = PARAM2;
-    T0 = (T2 & mask) >> shift;
-    FORCE_RET();
-}
diff --git a/target-arm/translate.c b/target-arm/translate.c
index 0220d6ac53..a4b1df504b 100644
--- a/target-arm/translate.c
+++ b/target-arm/translate.c
@@ -78,7 +78,7 @@ extern int loglevel;
 
 static TCGv cpu_env;
 /* FIXME:  These should be removed.  */
-static TCGv cpu_T[3];
+static TCGv cpu_T[2];
 static TCGv cpu_F0s, cpu_F1s, cpu_F0d, cpu_F1d;
 
 /* initialize TCG globals.  */
@@ -88,7 +88,6 @@ void arm_translate_init(void)
 
     cpu_T[0] = tcg_global_reg_new(TCG_TYPE_I32, TCG_AREG1, "T0");
     cpu_T[1] = tcg_global_reg_new(TCG_TYPE_I32, TCG_AREG2, "T1");
-    cpu_T[2] = tcg_global_reg_new(TCG_TYPE_I32, TCG_AREG3, "T2");
 }
 
 /* The code generator doesn't like lots of temporaries, so maintain our own
@@ -188,13 +187,9 @@ static void store_reg(DisasContext *s, int reg, TCGv var)
 
 /* Basic operations.  */
 #define gen_op_movl_T0_T1() tcg_gen_mov_i32(cpu_T[0], cpu_T[1])
-#define gen_op_movl_T0_T2() tcg_gen_mov_i32(cpu_T[0], cpu_T[2])
 #define gen_op_movl_T1_T0() tcg_gen_mov_i32(cpu_T[1], cpu_T[0])
-#define gen_op_movl_T1_T2() tcg_gen_mov_i32(cpu_T[1], cpu_T[2])
-#define gen_op_movl_T2_T0() tcg_gen_mov_i32(cpu_T[2], cpu_T[0])
 #define gen_op_movl_T0_im(im) tcg_gen_movi_i32(cpu_T[0], im)
 #define gen_op_movl_T1_im(im) tcg_gen_movi_i32(cpu_T[1], im)
-#define gen_op_movl_T2_im(im) tcg_gen_movi_i32(cpu_T[2], im)
 
 #define gen_op_addl_T1_im(im) tcg_gen_addi_i32(cpu_T[1], cpu_T[1], im)
 #define gen_op_addl_T0_T1() tcg_gen_add_i32(cpu_T[0], cpu_T[0], cpu_T[1])
@@ -310,9 +305,9 @@ static void gen_sbfx(TCGv var, int shift, int width)
 /* Bitfield insertion.  Insert val into base.  Clobbers base and val.  */
 static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
 {
-    tcg_gen_shli_i32(val, val, shift);
     tcg_gen_andi_i32(val, val, mask);
-    tcg_gen_andi_i32(base, base, ~mask);
+    tcg_gen_shli_i32(val, val, shift);
+    tcg_gen_andi_i32(base, base, ~(mask << shift));
     tcg_gen_or_i32(dest, base, val);
 }
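gen_bfi now takes mask as an unshifted field mask: the value is masked, then shifted, and the destination field is cleared with ~(mask << shift); previously the caller had to pre-shift the mask, which is why the call sites updated later in this patch pass plain masks such as 0xff. A standalone C model of the corrected insertion (illustrative only):

    #include <assert.h>
    #include <stdint.h>

    /* Insert the low bits of val (selected by mask) into base at bit
       position shift; mask is the field mask *before* shifting. */
    static uint32_t bfi_model(uint32_t base, uint32_t val,
                              int shift, uint32_t mask)
    {
        val &= mask;                  /* keep only the field bits */
        val <<= shift;                /* move them into position */
        base &= ~(mask << shift);     /* clear the destination field */
        return base | val;
    }

    int main(void)
    {
        /* Insert byte 0xAB into bits [15:8] of 0x11223344. */
        assert(bfi_model(0x11223344u, 0xABu, 8, 0xffu) == 0x1122AB44u);
        return 0;
    }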
@@ -460,6 +455,13 @@ static inline void tcg_gen_not_i32(TCGv t0, TCGv t1)
 
 /* T0 &= ~T1.  Clobbers T1.  */
 /* FIXME: Implement bic natively.  */
+static inline void tcg_gen_bic_i32(TCGv dest, TCGv t0, TCGv t1)
+{
+    TCGv tmp = new_tmp();
+    tcg_gen_not_i32(tmp, t1);
+    tcg_gen_and_i32(dest, t0, tmp);
+    dead_tmp(tmp);
+}
 static inline void gen_op_bicl_T0_T1(void)
 {
     gen_op_notl_T1();
@@ -1167,6 +1169,19 @@ neon_reg_offset (int reg, int n)
 #define NEON_GET_REG(T, reg, n) gen_op_neon_getreg_##T(neon_reg_offset(reg, n))
 #define NEON_SET_REG(T, reg, n) gen_op_neon_setreg_##T(neon_reg_offset(reg, n))
 
+static TCGv neon_load_reg(int reg, int pass)
+{
+    TCGv tmp = new_tmp();
+    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
+    return tmp;
+}
+
+static void neon_store_reg(int reg, int pass, TCGv var)
+{
+    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
+    dead_tmp(var);
+}
+
 #define tcg_gen_ld_f32 tcg_gen_ld_i32
 #define tcg_gen_ld_f64 tcg_gen_ld_i64
 #define tcg_gen_st_f32 tcg_gen_st_i32
@@ -2500,19 +2515,14 @@ static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
             /* VMOV */
             switch (size) {
             case 0:
-                NEON_GET_REG(T2, rn, pass);
-                gen_op_movl_T1_im(0xff);
-                gen_op_andl_T0_T1();
-                gen_op_neon_insert_elt(offset, ~(0xff << offset));
-                NEON_SET_REG(T2, rn, pass);
+                tmp = neon_load_reg(rn, pass);
+                gen_bfi(tmp, tmp, cpu_T[0], offset, 0xff);
+                neon_store_reg(rn, pass, tmp);
                 break;
             case 1:
-                NEON_GET_REG(T2, rn, pass);
-                gen_op_movl_T1_im(0xffff);
-                gen_op_andl_T0_T1();
-                bank_mask = offset ? 0xffff : 0xffff0000;
-                gen_op_neon_insert_elt(offset, bank_mask);
-                NEON_SET_REG(T2, rn, pass);
+                tmp = neon_load_reg(rn, pass);
+                gen_bfi(tmp, tmp, cpu_T[0], offset, 0xffff);
+                neon_store_reg(rn, pass, tmp);
                 break;
             case 2:
                 NEON_SET_REG(T0, rn, pass);
@@ -3480,9 +3490,9 @@ static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
     int pass;
     int load;
     int shift;
-    uint32_t mask;
    int n;
    TCGv tmp;
+    TCGv tmp2;
 
     if (!vfp_enabled(env))
         return 1;
@@ -3525,60 +3535,47 @@ static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
                } else if (size == 1) {
                    if (load) {
                        tmp = gen_ld16u(cpu_T[1], IS_USER(s));
-                        tcg_gen_mov_i32(cpu_T[0], tmp);
-                        dead_tmp(tmp);
                        gen_op_addl_T1_im(stride);
-                        gen_op_movl_T2_T0();
-                        tmp = gen_ld16u(cpu_T[1], IS_USER(s));
-                        tcg_gen_mov_i32(cpu_T[0], tmp);
-                        dead_tmp(tmp);
+                        tmp2 = gen_ld16u(cpu_T[1], IS_USER(s));
                        gen_op_addl_T1_im(stride);
-                        gen_op_neon_insert_elt(16, 0xffff);
-                        NEON_SET_REG(T2, rd, pass);
+                        gen_bfi(tmp, tmp, tmp2, 16, 0xffff);
+                        dead_tmp(tmp2);
+                        neon_store_reg(rd, pass, tmp);
                    } else {
-                        NEON_GET_REG(T2, rd, pass);
-                        gen_op_movl_T0_T2();
-                        tmp = new_tmp();
-                        tcg_gen_mov_i32(tmp, cpu_T[0]);
+                        tmp = neon_load_reg(rd, pass);
+                        tmp2 = new_tmp();
+                        tcg_gen_shri_i32(tmp2, tmp, 16);
                        gen_st16(tmp, cpu_T[1], IS_USER(s));
                        gen_op_addl_T1_im(stride);
-                        gen_op_neon_extract_elt(16, 0xffff0000);
-                        tmp = new_tmp();
-                        tcg_gen_mov_i32(tmp, cpu_T[0]);
-                        gen_st16(tmp, cpu_T[1], IS_USER(s));
+                        gen_st16(tmp2, cpu_T[1], IS_USER(s));
                        gen_op_addl_T1_im(stride);
                    }
                } else /* size == 0 */ {
                    if (load) {
-                        mask = 0xff;
                        for (n = 0; n < 4; n++) {
                            tmp = gen_ld8u(cpu_T[1], IS_USER(s));
-                            tcg_gen_mov_i32(cpu_T[0], tmp);
-                            dead_tmp(tmp);
                            gen_op_addl_T1_im(stride);
                            if (n == 0) {
-                                gen_op_movl_T2_T0();
+                                tmp2 = tmp;
                            } else {
-                                gen_op_neon_insert_elt(n * 8, ~mask);
+                                gen_bfi(tmp2, tmp2, tmp, n * 8, 0xff);
+                                dead_tmp(tmp);
                            }
-                            mask <<= 8;
                        }
-                        NEON_SET_REG(T2, rd, pass);
+                        neon_store_reg(rd, pass, tmp2);
                    } else {
-                        NEON_GET_REG(T2, rd, pass);
-                        mask = 0xff;
+                        tmp2 = neon_load_reg(rd, pass);
                        for (n = 0; n < 4; n++) {
+                            tmp = new_tmp();
                            if (n == 0) {
-                                gen_op_movl_T0_T2();
+                                tcg_gen_mov_i32(tmp, tmp2);
                            } else {
-                                gen_op_neon_extract_elt(n * 8, mask);
+                                tcg_gen_shri_i32(tmp, tmp2, n * 8);
                            }
-                            tmp = new_tmp();
-                            tcg_gen_mov_i32(tmp, cpu_T[0]);
                            gen_st8(tmp, cpu_T[1], IS_USER(s));
                            gen_op_addl_T1_im(stride);
-                            mask <<= 8;
                        }
+                        dead_tmp(tmp2);
                    }
                }
            }
@@ -3629,17 +3626,14 @@ static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
        switch (size) {
        case 0:
            shift = ((insn >> 5) & 3) * 8;
-            mask = 0xff << shift;
            stride = 1;
            break;
        case 1:
            shift = ((insn >> 6) & 1) * 16;
-            mask = shift ? 0xffff0000 : 0xffff;
            stride = (insn & (1 << 5)) ? 2 : 1;
            break;
        case 2:
            shift = 0;
-            mask = 0xffffffff;
            stride = (insn & (1 << 6)) ? 2 : 1;
            break;
        default:
@@ -3649,9 +3643,6 @@ static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
        gen_movl_T1_reg(s, rn);
        for (reg = 0; reg < nregs; reg++) {
            if (load) {
-                if (size != 2) {
-                    NEON_GET_REG(T2, rd, pass);
-                }
                switch (size) {
                case 0:
                    tmp = gen_ld8u(cpu_T[1], IS_USER(s));
@@ -3663,23 +3654,16 @@ static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
                    tmp = gen_ld32(cpu_T[1], IS_USER(s));
                    break;
                }
-                tcg_gen_mov_i32(cpu_T[0], tmp);
-                dead_tmp(tmp);
                if (size != 2) {
-                    gen_op_neon_insert_elt(shift, ~mask);
-                    NEON_SET_REG(T0, rd, pass);
-                } else {
-                    NEON_SET_REG(T0, rd, pass);
+                    tmp2 = neon_load_reg(rd, pass);
+                    gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
+                    dead_tmp(tmp2);
                }
+                neon_store_reg(rd, pass, tmp);
            } else { /* Store */
-                if (size == 2) {
-                    NEON_GET_REG(T0, rd, pass);
-                } else {
-                    NEON_GET_REG(T2, rd, pass);
-                    gen_op_neon_extract_elt(shift, mask);
-                }
-                tmp = new_tmp();
-                tcg_gen_mov_i32(tmp, cpu_T[0]);
+                tmp = neon_load_reg(rd, pass);
+                if (shift)
+                    tcg_gen_shri_i32(tmp, tmp, shift);
                switch (size) {
                case 0:
                    gen_st8(tmp, cpu_T[1], IS_USER(s));
@@ -3715,6 +3699,14 @@ static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
     return 0;
 }
 
+/* Bitwise select.  dest = c ? t : f.  Clobbers T and F.  */
+static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
+{
+    tcg_gen_and_i32(t, t, c);
+    tcg_gen_bic_i32(f, f, c);
+    tcg_gen_or_i32(dest, t, f);
+}
+
 /* Translate a NEON data processing instruction.  Return nonzero if the
    instruction is invalid.
    In general we process vectors in 32-bit chunks.  This means we can reuse
@@ -3735,6 +3727,9 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
     int u;
     int n;
     uint32_t imm;
+    TCGv tmp;
+    TCGv tmp2;
+    TCGv tmp3;
 
     if (!vfp_enabled(env))
         return 1;
@@ -3875,16 +3870,19 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                gen_op_xorl_T0_T1();
                break;
            case 5: /* VBSL */
-                NEON_GET_REG(T2, rd, pass);
-                gen_op_neon_bsl();
+                tmp = neon_load_reg(rd, pass);
+                gen_neon_bsl(cpu_T[0], cpu_T[0], cpu_T[1], tmp);
+                dead_tmp(tmp);
                break;
            case 6: /* VBIT */
-                NEON_GET_REG(T2, rd, pass);
-                gen_op_neon_bit();
+                tmp = neon_load_reg(rd, pass);
+                gen_neon_bsl(cpu_T[0], cpu_T[0], tmp, cpu_T[1]);
+                dead_tmp(tmp);
                break;
            case 7: /* VBIF */
-                NEON_GET_REG(T2, rd, pass);
-                gen_op_neon_bif();
+                tmp = neon_load_reg(rd, pass);
+                gen_neon_bsl(cpu_T[0], tmp, cpu_T[0], cpu_T[1]);
+                dead_tmp(tmp);
                break;
            }
            break;
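The three former ops bsl, bit and bif collapse into the single gen_neon_bsl primitive, dest = (t & c) | (f & ~c), with the operand order permuted per instruction. A standalone C model of the select and the three arrangements used above (rd/rn/rm here are plain values, not registers):

    #include <stdint.h>

    /* dest = c ? t : f, decided independently for every bit. */
    static uint32_t bsl_model(uint32_t t, uint32_t f, uint32_t c)
    {
        return (t & c) | (f & ~c);
    }

    static uint32_t vbsl(uint32_t rd, uint32_t rn, uint32_t rm)
    {
        return bsl_model(rn, rm, rd);   /* rd supplies the select mask */
    }

    static uint32_t vbit(uint32_t rd, uint32_t rn, uint32_t rm)
    {
        return bsl_model(rn, rd, rm);   /* insert rn bits where rm is set */
    }

    static uint32_t vbif(uint32_t rd, uint32_t rn, uint32_t rm)
    {
        return bsl_model(rd, rn, rm);   /* insert rn bits where rm is clear */
    }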
@@ -4190,8 +4188,6 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
               element size in bits.  */
            if (op <= 4)
                shift = shift - (1 << (size + 3));
-            else
-                shift++;
            if (size == 3) {
                count = q + 1;
            } else {
@@ -4276,9 +4272,10 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                default:
                    abort();
                }
-                NEON_GET_REG(T1, rd, pass);
-                gen_op_movl_T2_im(imm);
-                gen_op_neon_bsl();
+                tmp = neon_load_reg(rd, pass);
+                tcg_gen_andi_i32(cpu_T[0], cpu_T[0], imm);
+                tcg_gen_andi_i32(tmp, tmp, ~imm);
+                tcg_gen_or_i32(cpu_T[0], cpu_T[0], tmp);
            }
            if (size == 3) {
                NEON_SET_REG(T0, rd, pass * 2);
@@ -4519,24 +4516,26 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
            /* Avoid overlapping operands.  Wide source operands are
               always aligned so will never overlap with wide
               destinations in problematic ways.  */
-            if (rd == rm) {
-                NEON_GET_REG(T2, rm, 1);
-            } else if (rd == rn) {
-                NEON_GET_REG(T2, rn, 1);
+            if (rd == rm && !src2_wide) {
+                NEON_GET_REG(T0, rm, 1);
+                gen_neon_movl_scratch_T0(2);
+            } else if (rd == rn && !src1_wide) {
+                NEON_GET_REG(T0, rn, 1);
+                gen_neon_movl_scratch_T0(2);
            }
            for (pass = 0; pass < 2; pass++) {
                /* Load the second operand into env->vfp.scratch.
                   Also widen narrow operands.  */
-                if (pass == 1 && rd == rm) {
-                    if (prewiden) {
-                        gen_op_movl_T0_T2();
-                    } else {
-                        gen_op_movl_T1_T2();
-                    }
+                if (src2_wide) {
+                    NEON_GET_REG(T0, rm, pass * 2);
+                    NEON_GET_REG(T1, rm, pass * 2 + 1);
                } else {
-                    if (src2_wide) {
-                        NEON_GET_REG(T0, rm, pass * 2);
-                        NEON_GET_REG(T1, rm, pass * 2 + 1);
+                    if (pass == 1 && rd == rm) {
+                        if (prewiden) {
+                            gen_neon_movl_T0_scratch(2);
+                        } else {
+                            gen_neon_movl_T1_scratch(2);
+                        }
                    } else {
                        if (prewiden) {
                            NEON_GET_REG(T0, rm, pass);
@@ -4554,12 +4553,12 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
            }
 
            /* Load the first operand.  */
-            if (pass == 1 && rd == rn) {
-                gen_op_movl_T0_T2();
+            if (src1_wide) {
+                NEON_GET_REG(T0, rn, pass * 2);
+                NEON_GET_REG(T1, rn, pass * 2 + 1);
            } else {
-                if (src1_wide) {
-                    NEON_GET_REG(T0, rn, pass * 2);
-                    NEON_GET_REG(T1, rn, pass * 2 + 1);
+                if (pass == 1 && rd == rn) {
+                    gen_neon_movl_T0_scratch(2);
                } else {
                    NEON_GET_REG(T0, rn, pass);
                }
@@ -4696,10 +4695,10 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
            case 12: /* VQDMULH scalar */
            case 13: /* VQRDMULH scalar */
                gen_neon_get_scalar(size, rm);
-                gen_op_movl_T2_T0();
+                gen_neon_movl_scratch_T0(0);
                for (pass = 0; pass < (u ? 4 : 2); pass++) {
                    if (pass != 0)
-                        gen_op_movl_T0_T2();
+                        gen_neon_movl_T0_scratch(0);
                    NEON_GET_REG(T1, rn, pass);
                    if (op == 12) {
                        if (size == 1) {
@@ -4764,10 +4763,10 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                    gen_neon_movl_scratch_T0(2);
                }
                gen_neon_get_scalar(size, rm);
-                gen_op_movl_T2_T0();
+                gen_neon_movl_scratch_T0(3);
                for (pass = 0; pass < 2; pass++) {
                    if (pass != 0) {
-                        gen_op_movl_T0_T2();
+                        gen_neon_movl_T0_scratch(3);
                    }
                    if (pass != 0 && rd == rn) {
                        gen_neon_movl_T1_scratch(2);
@@ -5025,11 +5024,12 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
            if (q)
                return 1;
            if (rm == rd) {
-                NEON_GET_REG(T2, rm, 1);
+                NEON_GET_REG(T0, rm, 1);
+                gen_neon_movl_scratch_T0(0);
            }
            for (pass = 0; pass < 2; pass++) {
                if (pass == 1 && rm == rd) {
-                    gen_op_movl_T0_T2();
+                    gen_neon_movl_T0_scratch(0);
                } else {
                    NEON_GET_REG(T0, rm, pass);
                }
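With the third global T2 gone, values that must survive across passes or generated control flow are spilled to numbered per-CPU scratch slots (gen_neon_movl_scratch_T0 / gen_neon_movl_T0_scratch above, and env->vfp.scratch[0] in op_neon.h). A toy C model of the idiom, assuming the slots are a small uint32_t array in the CPU state (hypothetical types, illustrative only):

    #include <stdint.h>

    typedef struct {
        uint32_t scratch[4];   /* assumption: stands in for env->vfp.scratch */
    } CPUStateModel;

    /* Spill: roughly what gen_neon_movl_scratch_T0(n) does with T0. */
    static void movl_scratch(CPUStateModel *env, int n, uint32_t t0)
    {
        env->scratch[n] = t0;
    }

    /* Reload: roughly what gen_neon_movl_T0_scratch(n) does. */
    static uint32_t movl_from_scratch(CPUStateModel *env, int n)
    {
        return env->scratch[n];
    }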
@@ -5253,23 +5253,26 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
        } else if ((insn & (1 << 10)) == 0) {
            /* VTBL, VTBX.  */
            n = (insn >> 5) & 0x18;
-            NEON_GET_REG(T1, rm, 0);
            if (insn & (1 << 6)) {
-                NEON_GET_REG(T0, rd, 0);
+                tmp = neon_load_reg(rd, 0);
            } else {
-                gen_op_movl_T0_im(0);
+                tmp = new_tmp();
+                tcg_gen_movi_i32(tmp, 0);
            }
-            gen_op_neon_tbl(rn, n);
-            gen_op_movl_T2_T0();
-            NEON_GET_REG(T1, rm, 1);
+            tmp2 = neon_load_reg(rm, 0);
+            gen_helper_neon_tbl(tmp2, tmp2, tmp, tcg_const_i32(rn),
+                                tcg_const_i32(n));
            if (insn & (1 << 6)) {
-                NEON_GET_REG(T0, rd, 0);
+                tmp = neon_load_reg(rd, 1);
            } else {
-                gen_op_movl_T0_im(0);
+                tmp = new_tmp();
+                tcg_gen_movi_i32(tmp, 0);
            }
-            gen_op_neon_tbl(rn, n);
-            NEON_SET_REG(T2, rd, 0);
-            NEON_SET_REG(T0, rd, 1);
+            tmp3 = neon_load_reg(rm, 1);
+            gen_helper_neon_tbl(tmp3, tmp3, tmp, tcg_const_i32(rn),
+                                tcg_const_i32(n));
+            neon_store_reg(rd, 0, tmp2);
+            neon_store_reg(rd, 1, tmp3);
        } else if ((insn & 0x380) == 0) {
            /* VDUP */
            if (insn & (1 << 19)) {
@@ -5430,7 +5433,7 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
        switch ((insn >> 4) & 0xf) {
        case 1: /* clrex */
            ARCH(6K);
-            gen_op_clrex();
+            gen_helper_clrex(cpu_env);
            return;
        case 4: /* dsb */
        case 5: /* dmb */
@@ -5977,13 +5980,19 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
                        /* load/store exclusive */
                        gen_movl_T1_reg(s, rn);
                        if (insn & (1 << 20)) {
-                            gen_ldst(ldlex, s);
+                            gen_helper_mark_exclusive(cpu_env, cpu_T[1]);
+                            tmp = gen_ld32(addr, IS_USER(s));
+                            store_reg(s, rd, tmp);
                        } else {
+                            int label = gen_new_label();
                            rm = insn & 0xf;
-                            gen_movl_T0_reg(s, rm);
-                            gen_ldst(stlex, s);
+                            gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
+                            tcg_gen_brcond_i32(TCG_COND_NE, cpu_T[0],
+                                               tcg_const_i32(0), label);
+                            tmp = load_reg(s,rm);
+                            gen_st32(tmp, cpu_T[1], IS_USER(s));
+                            gen_movl_reg_T0(s, rd);
                        }
-                        gen_movl_reg_T0(s, rd);
                    } else {
                        /* SWP instruction */
                        rm = (insn) & 0xf;
@@ -6287,8 +6296,7 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
                }
                if (i != 32) {
                    tmp2 = load_reg(s, rd);
-                    gen_bfi(tmp, tmp2, tmp,
-                            shift, ((1u << i) - 1) << shift);
+                    gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
                    dead_tmp(tmp2);
                }
                store_reg(s, rd, tmp);
@@ -6720,14 +6728,21 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
            }
        } else if ((insn & (1 << 23)) == 0) {
            /* Load/store exclusive word.  */
-            gen_movl_T0_reg(s, rd);
            gen_movl_T1_reg(s, rn);
            if (insn & (1 << 20)) {
-                gen_ldst(ldlex, s);
+                gen_helper_mark_exclusive(cpu_env, cpu_T[1]);
+                tmp = gen_ld32(addr, IS_USER(s));
+                store_reg(s, rd, tmp);
            } else {
-                gen_ldst(stlex, s);
+                int label = gen_new_label();
+                gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
+                tcg_gen_brcond_i32(TCG_COND_NE, cpu_T[0],
+                                   tcg_const_i32(0), label);
+                tmp = load_reg(s, rs);
+                gen_st32(tmp, cpu_T[1], IS_USER(s));
+                gen_set_label(label);
+                gen_movl_reg_T0(s, rd);
            }
-            gen_movl_reg_T0(s, rd);
        } else if ((insn & (1 << 6)) == 0) {
            /* Table Branch.  */
            if (rn == 15) {
@@ -6753,40 +6768,57 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
            store_reg(s, 15, tmp);
        } else {
            /* Load/store exclusive byte/halfword/doubleword.  */
+            /* ??? These are not really atomic.  However we know
+               we never have multiple CPUs running in parallel,
+               so it is good enough.  */
            op = (insn >> 4) & 0x3;
+            /* Must use a global reg for the address because we have
+               a conditional branch in the store instruction.  */
            gen_movl_T1_reg(s, rn);
+            addr = cpu_T[1];
            if (insn & (1 << 20)) {
+                gen_helper_mark_exclusive(cpu_env, addr);
                switch (op) {
                case 0:
-                    gen_ldst(ldbex, s);
+                    tmp = gen_ld8u(addr, IS_USER(s));
                    break;
                case 1:
-                    gen_ldst(ldwex, s);
+                    tmp = gen_ld16u(addr, IS_USER(s));
                    break;
                case 3:
-                    gen_ldst(ldqex, s);
-                    gen_movl_reg_T1(s, rd);
+                    tmp = gen_ld32(addr, IS_USER(s));
+                    tcg_gen_addi_i32(addr, addr, 4);
+                    tmp2 = gen_ld32(addr, IS_USER(s));
+                    store_reg(s, rd, tmp2);
                    break;
                default:
                    goto illegal_op;
                }
-                gen_movl_reg_T0(s, rs);
+                store_reg(s, rs, tmp);
            } else {
-                gen_movl_T0_reg(s, rs);
+                int label = gen_new_label();
+                /* Must use a global that is not killed by the branch.  */
+                gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
+                tcg_gen_brcond_i32(TCG_COND_NE, cpu_T[0], tcg_const_i32(0),
+                                   label);
+                tmp = load_reg(s, rs);
                switch (op) {
                case 0:
-                    gen_ldst(stbex, s);
+                    gen_st8(tmp, addr, IS_USER(s));
                    break;
                case 1:
-                    gen_ldst(stwex, s);
+                    gen_st16(tmp, addr, IS_USER(s));
                    break;
                case 3:
-                    gen_movl_T2_reg(s, rd);
-                    gen_ldst(stqex, s);
+                    gen_st32(tmp, addr, IS_USER(s));
+                    tcg_gen_addi_i32(addr, addr, 4);
+                    tmp = load_reg(s, rd);
+                    gen_st32(tmp, addr, IS_USER(s));
                    break;
                default:
                    goto illegal_op;
                }
+                gen_set_label(label);
                gen_movl_reg_T0(s, rm);
            }
        }
@@ -7271,7 +7303,7 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
            op = (insn >> 4) & 0xf;
            switch (op) {
            case 2: /* clrex */
-                gen_op_clrex();
+                gen_helper_clrex(cpu_env);
                break;
            case 4: /* dsb */
            case 5: /* dmb */
@@ -7369,8 +7401,7 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
            imm = imm + 1 - shift;
            if (imm != 32) {
                tmp2 = load_reg(s, rd);
-                gen_bfi(tmp, tmp2, tmp,
-                        shift, ((1u << imm) - 1) << shift);
+                gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
                dead_tmp(tmp2);
            }
            break;
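The converted store-exclusive sequence tests the monitor in a helper, branches over the store when the test fails, and writes the helper's result (0 on success, nonzero on failure) back to the result register. A minimal C model of those semantics (hypothetical standalone code, word-sized case only, not the QEMU API):

    #include <stdint.h>

    static uint32_t mmon_addr = (uint32_t)-1;   /* monitored address, or -1 */

    static void mark_exclusive(uint32_t addr)   /* the LDREX side */
    {
        mmon_addr = addr;
    }

    static uint32_t test_exclusive(uint32_t addr)   /* 0 = still exclusive */
    {
        return mmon_addr != addr;
    }

    /* STREX: perform the store only when the monitor still covers addr;
       the result is 0 on success and nonzero on failure, matching the
       brcond-over-the-store sequence generated above. */
    static uint32_t strex_word(uint32_t addr, uint32_t value, uint32_t *mem)
    {
        uint32_t failed = test_exclusive(addr);
        if (!failed)
            mem[addr / 4] = value;   /* the branch skips this on failure */
        return failed;
    }

    int main(void)
    {
        uint32_t mem[4] = {0};
        mark_exclusive(8);                   /* LDREX from address 8 */
        return (int)strex_word(8, 42, mem);  /* succeeds: returns 0 */
    }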