author     Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>  2011-04-04 17:38:44 +0400
committer  Aurelien Jarno <aurelien@aurel32.net>           2011-04-10 00:53:21 +0200
commit     be5e7a76010bd14d09f74504ed6368782e701888
tree       70418830723cfd7422f72d5c04d04d48e278f9f1
parent     4b4a72e55660abf7efe85aca78762dcfea5519ad
arm: basic support for ARMv4/ARMv4T emulation
Currently target-arm/ assumes at least an ARMv5 core. Add support for
handling ARMv4/ARMv4T as well. This changes the following instructions:
BX (v4T and later)
BKPT, BLX, CDP2, CLZ, LDC2, LDRD, MCRR, MCRR2, MRC2, MRRC, MRRC2,
PLD, QADD, QDADD, QDSUB, QSUB, STRD, SMLAxy, SMLALxy, SMLAWy,
SMULxy, SMULWy, STC2 (v5 and later)
All instructions that are "v5TE and later" are also bound to just v5, as
that's how it was before.
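As a rough, self-contained sketch of that mapping (illustrative only; the real
checks are the ARCH()/ENABLE_ARCH_* tests in target-arm/translate.c shown in
the diff below, and the struct and helper here are stand-ins rather than QEMU
code):

    #include <stdio.h>

    /* Hypothetical model of the new feature bits; the names mirror QEMU's
     * enum arm_features, but nothing below is the real implementation. */
    enum { FEAT_V4T, FEAT_V5 };

    struct model { const char *name; unsigned feats; };

    static int has(const struct model *m, int f) { return (m->feats >> f) & 1; }

    static void report(const struct model *m)
    {
        /* BX needs the V4T bit; CLZ (and BKPT, BLX, ...) need V5; the
         * "v5TE and later" group (LDRD, PLD, QADD, ...) is keyed to the
         * same V5 bit, exactly as described above. */
        printf("%-8s BX:%-6s CLZ:%-6s LDRD:%s\n", m->name,
               has(m, FEAT_V4T) ? "ok" : "UNDEF",
               has(m, FEAT_V5)  ? "ok" : "UNDEF",
               has(m, FEAT_V5)  ? "ok" : "UNDEF");
    }

    int main(void)
    {
        struct model v4t_core = { "v4T",    1u << FEAT_V4T };
        struct model arm926   = { "ARM926", (1u << FEAT_V4T) | (1u << FEAT_V5) };
        report(&v4t_core);
        report(&arm926);
        return 0;
    }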
This patch does _not_ include disabling cp15 access or the base-updated
data abort model (both of which will be required to emulate chips based on
an ARM7TDMI), because:
* no ARM7TDMI chips are currently emulated (or planned)
* those features aren't strictly necessary for my purposes (SA-1 core
emulation).
All v5 models are handled as if they were v5T. Internally we still have a
check for whether the model is v5(T) or v5TE, but as all emulated cores are
v5TE, those two cases are simply aliased (for now).
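A minimal sketch of that aliasing, under the assumption that arm_feature()
and CPUARMState are reduced to trivial stubs here (the real definitions live
in target-arm/cpu.h, and the real macros are added to translate.c in the diff
below):

    #include <stdio.h>

    enum { ARM_FEATURE_V4T, ARM_FEATURE_V5 };

    typedef struct { unsigned features; } CPUARMState;   /* stub, not QEMU's */

    static int arm_feature(CPUARMState *env, int feature)
    {
        return (env->features & (1u << feature)) != 0;
    }

    /* v5 and v5TE are deliberately the same test for now; a core that is
     * v5 but not v5TE would need its own feature bit before the two
     * checks could ever diverge. */
    #define ENABLE_ARCH_4T  arm_feature(env, ARM_FEATURE_V4T)
    #define ENABLE_ARCH_5   arm_feature(env, ARM_FEATURE_V5)
    #define ENABLE_ARCH_5TE arm_feature(env, ARM_FEATURE_V5)

    int main(void)
    {
        CPUARMState cpu = { (1u << ARM_FEATURE_V4T) | (1u << ARM_FEATURE_V5) };
        CPUARMState *env = &cpu;        /* the macros expect a local named env */

        printf("4T:%d 5:%d 5TE:%d\n",
               ENABLE_ARCH_4T, ENABLE_ARCH_5, ENABLE_ARCH_5TE);
        return 0;
    }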
This patch is heavily based on a patch by Filip Navara <filip.navara@gmail.com>,
which in turn is based on work by Ulrich Hecht <uli@suse.de> and Vincent
Sanders <vince@kyllikki.org>.
Signed-off-by: Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Aurelien Jarno <aurelien@aurel32.net>
-rw-r--r--  target-arm/cpu.h       |  4
-rw-r--r--  target-arm/helper.c    | 29
-rw-r--r--  target-arm/translate.c | 59
3 files changed, 80 insertions, 12 deletions
diff --git a/target-arm/cpu.h b/target-arm/cpu.h
index 1ae7982c7f..e247a7ade0 100644
--- a/target-arm/cpu.h
+++ b/target-arm/cpu.h
@@ -360,7 +360,9 @@ enum arm_features {
     ARM_FEATURE_M, /* Microcontroller profile.  */
     ARM_FEATURE_OMAPCP, /* OMAP specific CP15 ops handling.  */
     ARM_FEATURE_THUMB2EE,
-    ARM_FEATURE_V7MP    /* v7 Multiprocessing Extensions */
+    ARM_FEATURE_V7MP,    /* v7 Multiprocessing Extensions */
+    ARM_FEATURE_V4T,
+    ARM_FEATURE_V5,
 };
 
 static inline int arm_feature(CPUARMState *env, int feature)
diff --git a/target-arm/helper.c b/target-arm/helper.c
index 6788a4c383..ce9a9d8fd2 100644
--- a/target-arm/helper.c
+++ b/target-arm/helper.c
@@ -48,17 +48,23 @@ static void cpu_reset_model_id(CPUARMState *env, uint32_t id)
     env->cp15.c0_cpuid = id;
     switch (id) {
     case ARM_CPUID_ARM926:
+        set_feature(env, ARM_FEATURE_V4T);
+        set_feature(env, ARM_FEATURE_V5);
         set_feature(env, ARM_FEATURE_VFP);
         env->vfp.xregs[ARM_VFP_FPSID] = 0x41011090;
         env->cp15.c0_cachetype = 0x1dd20d2;
         env->cp15.c1_sys = 0x00090078;
         break;
     case ARM_CPUID_ARM946:
+        set_feature(env, ARM_FEATURE_V4T);
+        set_feature(env, ARM_FEATURE_V5);
         set_feature(env, ARM_FEATURE_MPU);
         env->cp15.c0_cachetype = 0x0f004006;
         env->cp15.c1_sys = 0x00000078;
         break;
     case ARM_CPUID_ARM1026:
+        set_feature(env, ARM_FEATURE_V4T);
+        set_feature(env, ARM_FEATURE_V5);
         set_feature(env, ARM_FEATURE_VFP);
         set_feature(env, ARM_FEATURE_AUXCR);
         env->vfp.xregs[ARM_VFP_FPSID] = 0x410110a0;
@@ -67,6 +73,8 @@ static void cpu_reset_model_id(CPUARMState *env, uint32_t id)
         break;
     case ARM_CPUID_ARM1136_R2:
     case ARM_CPUID_ARM1136:
+        set_feature(env, ARM_FEATURE_V4T);
+        set_feature(env, ARM_FEATURE_V5);
         set_feature(env, ARM_FEATURE_V6);
         set_feature(env, ARM_FEATURE_VFP);
         set_feature(env, ARM_FEATURE_AUXCR);
@@ -79,6 +87,8 @@ static void cpu_reset_model_id(CPUARMState *env, uint32_t id)
         env->cp15.c1_sys = 0x00050078;
         break;
     case ARM_CPUID_ARM11MPCORE:
+        set_feature(env, ARM_FEATURE_V4T);
+        set_feature(env, ARM_FEATURE_V5);
         set_feature(env, ARM_FEATURE_V6);
         set_feature(env, ARM_FEATURE_V6K);
         set_feature(env, ARM_FEATURE_VFP);
@@ -91,6 +101,8 @@ static void cpu_reset_model_id(CPUARMState *env, uint32_t id)
         env->cp15.c0_cachetype = 0x1dd20d2;
         break;
     case ARM_CPUID_CORTEXA8:
+        set_feature(env, ARM_FEATURE_V4T);
+        set_feature(env, ARM_FEATURE_V5);
         set_feature(env, ARM_FEATURE_V6);
         set_feature(env, ARM_FEATURE_V6K);
         set_feature(env, ARM_FEATURE_V7);
@@ -113,6 +125,8 @@ static void cpu_reset_model_id(CPUARMState *env, uint32_t id)
         env->cp15.c1_sys = 0x00c50078;
         break;
     case ARM_CPUID_CORTEXA9:
+        set_feature(env, ARM_FEATURE_V4T);
+        set_feature(env, ARM_FEATURE_V5);
         set_feature(env, ARM_FEATURE_V6);
         set_feature(env, ARM_FEATURE_V6K);
         set_feature(env, ARM_FEATURE_V7);
@@ -140,6 +154,8 @@ static void cpu_reset_model_id(CPUARMState *env, uint32_t id)
         env->cp15.c1_sys = 0x00c50078;
         break;
     case ARM_CPUID_CORTEXM3:
+        set_feature(env, ARM_FEATURE_V4T);
+        set_feature(env, ARM_FEATURE_V5);
         set_feature(env, ARM_FEATURE_V6);
         set_feature(env, ARM_FEATURE_THUMB2);
         set_feature(env, ARM_FEATURE_V7);
@@ -147,6 +163,8 @@ static void cpu_reset_model_id(CPUARMState *env, uint32_t id)
         set_feature(env, ARM_FEATURE_DIV);
         break;
     case ARM_CPUID_ANY: /* For userspace emulation.  */
+        set_feature(env, ARM_FEATURE_V4T);
+        set_feature(env, ARM_FEATURE_V5);
         set_feature(env, ARM_FEATURE_V6);
         set_feature(env, ARM_FEATURE_V6K);
         set_feature(env, ARM_FEATURE_V7);
@@ -161,6 +179,7 @@ static void cpu_reset_model_id(CPUARMState *env, uint32_t id)
         break;
     case ARM_CPUID_TI915T:
     case ARM_CPUID_TI925T:
+        set_feature(env, ARM_FEATURE_V4T);
         set_feature(env, ARM_FEATURE_OMAPCP);
         env->cp15.c0_cpuid = ARM_CPUID_TI925T; /* Depends on wiring.  */
         env->cp15.c0_cachetype = 0x5109149;
@@ -173,6 +192,8 @@ static void cpu_reset_model_id(CPUARMState *env, uint32_t id)
     case ARM_CPUID_PXA260:
     case ARM_CPUID_PXA261:
     case ARM_CPUID_PXA262:
+        set_feature(env, ARM_FEATURE_V4T);
+        set_feature(env, ARM_FEATURE_V5);
         set_feature(env, ARM_FEATURE_XSCALE);
         /* JTAG_ID is ((id << 28) | 0x09265013) */
         env->cp15.c0_cachetype = 0xd172172;
@@ -184,6 +205,8 @@ static void cpu_reset_model_id(CPUARMState *env, uint32_t id)
     case ARM_CPUID_PXA270_B1:
     case ARM_CPUID_PXA270_C0:
     case ARM_CPUID_PXA270_C5:
+        set_feature(env, ARM_FEATURE_V4T);
+        set_feature(env, ARM_FEATURE_V5);
         set_feature(env, ARM_FEATURE_XSCALE);
         /* JTAG_ID is ((id << 28) | 0x09265013) */
         set_feature(env, ARM_FEATURE_IWMMXT);
@@ -856,7 +879,11 @@ void do_interrupt(CPUARMState *env)
     /* Switch to the new mode, and to the correct instruction set.  */
     env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
     env->uncached_cpsr |= mask;
-    env->thumb = (env->cp15.c1_sys & (1 << 30)) != 0;
+    /* this is a lie, as there was no c1_sys on V4T/V5, but who cares
+     * and we should just guard the thumb mode on V4 */
+    if (arm_feature(env, ARM_FEATURE_V4T)) {
+        env->thumb = (env->cp15.c1_sys & (1 << 30)) != 0;
+    }
     env->regs[14] = env->regs[15] + offset;
     env->regs[15] = addr;
     env->interrupt_request |= CPU_INTERRUPT_EXITTB;
diff --git a/target-arm/translate.c b/target-arm/translate.c
index 55d524ba13..998cfd530c 100644
--- a/target-arm/translate.c
+++ b/target-arm/translate.c
@@ -34,6 +34,10 @@
 #define GEN_HELPER 1
 #include "helpers.h"
 
+#define ENABLE_ARCH_4T    arm_feature(env, ARM_FEATURE_V4T)
+#define ENABLE_ARCH_5     arm_feature(env, ARM_FEATURE_V5)
+/* currently all emulated v5 cores are also v5TE, so don't bother */
+#define ENABLE_ARCH_5TE   arm_feature(env, ARM_FEATURE_V5)
 #define ENABLE_ARCH_5J    0
 #define ENABLE_ARCH_6     arm_feature(env, ARM_FEATURE_V6)
 #define ENABLE_ARCH_6K    arm_feature(env, ARM_FEATURE_V6K)
@@ -750,6 +754,20 @@ static inline void store_reg_bx(CPUState *env, DisasContext *s,
     }
 }
 
+/* Variant of store_reg which uses branch&exchange logic when storing
+ * to r15 in ARM architecture v5T and above. This is used for storing
+ * the results of a LDR/LDM/POP into r15, and corresponds to the cases
+ * in the ARM ARM which use the LoadWritePC() pseudocode function. */
+static inline void store_reg_from_load(CPUState *env, DisasContext *s,
+                                       int reg, TCGv var)
+{
+    if (reg == 15 && ENABLE_ARCH_5) {
+        gen_bx(s, var);
+    } else {
+        store_reg(s, reg, var);
+    }
+}
+
 static inline TCGv gen_ld8s(TCGv addr, int index)
 {
     TCGv tmp = tcg_temp_new_i32();
@@ -3436,6 +3454,10 @@ static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
 
     /* Mask out undefined bits.  */
     mask &= ~CPSR_RESERVED;
+    if (!arm_feature(env, ARM_FEATURE_V4T))
+        mask &= ~CPSR_T;
+    if (!arm_feature(env, ARM_FEATURE_V5))
+        mask &= ~CPSR_Q; /* V5TE in reality*/
     if (!arm_feature(env, ARM_FEATURE_V6))
         mask &= ~(CPSR_E | CPSR_GE);
     if (!arm_feature(env, ARM_FEATURE_THUMB2))
@@ -6127,6 +6149,12 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
         goto illegal_op;
     cond = insn >> 28;
     if (cond == 0xf){
+        /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
+         * choose to UNDEF. In ARMv5 and above the space is used
+         * for miscellaneous unconditional instructions.
+         */
+        ARCH(5);
+
         /* Unconditional instructions.  */
         if (((insn >> 25) & 7) == 1) {
             /* NEON Data processing.  */
@@ -6155,6 +6183,7 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
             }
         }
         /* Otherwise PLD; v5TE+ */
+        ARCH(5TE);
         return;
     }
     if (((insn & 0x0f70f000) == 0x0450f000) ||
@@ -6291,6 +6320,7 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
             val += (offset << 2) | ((insn >> 23) & 2) | 1;
             /* pipeline offset */
             val += 4;
+            /* protected by ARCH(5); above, near the start of uncond block */
             gen_bx_im(s, val);
             return;
         } else if ((insn & 0x0e000f00) == 0x0c000100) {
@@ -6302,6 +6332,7 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
             }
         } else if ((insn & 0x0fe00000) == 0x0c400000) {
             /* Coprocessor double register transfer.  */
+            ARCH(5TE);
         } else if ((insn & 0x0f000010) == 0x0e000010) {
             /* Additional coprocessor register transfer.  */
         } else if ((insn & 0x0ff10020) == 0x01000000) {
@@ -6402,10 +6433,12 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
         case 0x1:
             if (op1 == 1) {
                 /* branch/exchange thumb (bx).  */
+                ARCH(4T);
                 tmp = load_reg(s, rm);
                 gen_bx(s, tmp);
             } else if (op1 == 3) {
                 /* clz */
+                ARCH(5);
                 rd = (insn >> 12) & 0xf;
                 tmp = load_reg(s, rm);
                 gen_helper_clz(tmp, tmp);
@@ -6428,6 +6461,7 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
             if (op1 != 1)
                 goto illegal_op;
 
+            ARCH(5);
             /* branch link/exchange thumb (blx) */
             tmp = load_reg(s, rm);
             tmp2 = tcg_temp_new_i32();
@@ -6436,6 +6470,7 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
             gen_bx(s, tmp);
             break;
         case 0x5: /* saturating add/subtract */
+            ARCH(5TE);
             rd = (insn >> 12) & 0xf;
             rn = (insn >> 16) & 0xf;
             tmp = load_reg(s, rm);
@@ -6457,12 +6492,14 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
                 goto illegal_op;
             }
             /* bkpt */
+            ARCH(5);
             gen_exception_insn(s, 4, EXCP_BKPT);
             break;
         case 0x8: /* signed multiply */
         case 0xa:
         case 0xc:
         case 0xe:
+            ARCH(5TE);
             rs = (insn >> 8) & 0xf;
             rn = (insn >> 12) & 0xf;
             rd = (insn >> 16) & 0xf;
@@ -6858,6 +6895,7 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
                 }
                 load = 1;
             } else if (sh & 2) {
+                ARCH(5TE);
                 /* doubleword */
                 if (sh & 1) {
                     /* store */
@@ -7198,10 +7236,7 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
             }
             if (insn & (1 << 20)) {
                 /* Complete the load.  */
-                if (rd == 15)
-                    gen_bx(s, tmp);
-                else
-                    store_reg(s, rd, tmp);
+                store_reg_from_load(env, s, rd, tmp);
             }
             break;
         case 0x08:
@@ -7254,9 +7289,7 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
                 if (insn & (1 << 20)) {
                     /* load */
                     tmp = gen_ld32(addr, IS_USER(s));
-                    if (i == 15) {
-                        gen_bx(s, tmp);
-                    } else if (user) {
+                    if (user) {
                         tmp2 = tcg_const_i32(i);
                         gen_helper_set_user_reg(tmp2, tmp);
                         tcg_temp_free_i32(tmp2);
@@ -7265,7 +7298,7 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
                         loaded_var = tmp;
                         loaded_base = 1;
                     } else {
-                        store_reg(s, i, tmp);
+                        store_reg_from_load(env, s, i, tmp);
                     }
                 } else {
                     /* store */
@@ -7465,6 +7498,7 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
        16-bit instructions to get correct prefetch abort behavior.  */
     insn = insn_hw1;
     if ((insn & (1 << 12)) == 0) {
+        ARCH(5);
         /* Second half of blx.  */
         offset = ((insn & 0x7ff) << 1);
         tmp = load_reg(s, 14);
@@ -8061,6 +8095,7 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
             } else {
                 /* blx */
                 offset &= ~(uint32_t)2;
+                /* thumb2 bx, no need to check */
                 gen_bx_im(s, offset);
             }
         } else if (((insn >> 23) & 7) == 7) {
@@ -8642,11 +8677,13 @@ static void disas_thumb_insn(CPUState *env, DisasContext *s)
         case 3:/* branch [and link] exchange thumb register */
            tmp = load_reg(s, rm);
            if (insn & (1 << 7)) {
+               ARCH(5);
                val = (uint32_t)s->pc | 1;
                tmp2 = tcg_temp_new_i32();
                tcg_gen_movi_i32(tmp2, val);
                store_reg(s, 14, tmp2);
            }
+           /* already thumb, no need to check */
            gen_bx(s, tmp);
            break;
        }
@@ -9006,8 +9043,9 @@ static void disas_thumb_insn(CPUState *env, DisasContext *s)
            /* write back the new stack pointer */
            store_reg(s, 13, addr);
            /* set the new PC value */
-           if ((insn & 0x0900) == 0x0900)
-               gen_bx(s, tmp);
+           if ((insn & 0x0900) == 0x0900) {
+               store_reg_from_load(env, s, 15, tmp);
+           }
            break;
 
        case 1: case 3: case 9: case 11: /* czb */
@@ -9038,6 +9076,7 @@ static void disas_thumb_insn(CPUState *env, DisasContext *s)
            break;
 
        case 0xe: /* bkpt */
+           ARCH(5);
            gen_exception_insn(s, 2, EXCP_BKPT);
            break;