author     Paolo Bonzini <pbonzini@redhat.com>        2016-03-04 11:30:21 +0000
committer  Peter Maydell <peter.maydell@linaro.org>   2016-03-04 11:30:21 +0000
commit     e334bd3190f6c4ca12f1d40d316dc471c70009ab (patch)
tree       6228fd057f6e3d4d35c0c322d2ead9ddf74ccd7c /target-arm
parent     9886ecdf31165de2d4b8bccc1a220bd6ac8bc192 (diff)
target-arm: implement BE32 mode in system emulation
System emulation only has a little-endian target; BE32 mode is implemented
by adjusting the low bits of the address for every byte and halfword load
and store.  64-bit accesses flip the low and high words.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
[PC changes:
 * rebased against master (Jan 2016)
]
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Crosthwaite <crosthwaite.peter@gmail.com>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
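As an illustration of the scheme the message describes (not part of the patch; the helper names below are ours, not QEMU APIs), the address adjustment and the 64-bit word flip can be modelled in plain C:

```c
#include <stdint.h>

/* Standalone model of the BE32 accesses described above (illustrative
 * only).  Byte accesses XOR the address with 3, halfword accesses with 2,
 * 32-bit accesses are unchanged, and 64-bit accesses swap the low and
 * high 32-bit words of the data instead of touching the address. */
static inline uint32_t be32_adjust_addr(uint32_t addr, unsigned size)
{
    switch (size) {
    case 1:  return addr ^ 3;   /* byte */
    case 2:  return addr ^ 2;   /* halfword */
    default: return addr;       /* word and doubleword: address untouched */
    }
}

static inline uint64_t be32_flip_words(uint64_t data)
{
    return (data << 32) | (data >> 32);   /* rotate by 32, as the patch does */
}
```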
Diffstat (limited to 'target-arm')
-rw-r--r--   target-arm/cpu.h         5
-rw-r--r--   target-arm/translate.c   86
2 files changed, 73 insertions, 18 deletions
diff --git a/target-arm/cpu.h b/target-arm/cpu.h
index 279c91fefd..066ff678dc 100644
--- a/target-arm/cpu.h
+++ b/target-arm/cpu.h
@@ -2033,9 +2033,8 @@ static inline bool bswap_code(bool sctlr_b)
#endif
sctlr_b;
#else
- /* We do not implement BE32 mode for system-mode emulation, but
- * anyway it would always do little-endian accesses with
- * TARGET_WORDS_BIGENDIAN = 0.
+ /* All code access in ARM is little endian, and there are no loaders
+ * doing swaps that need to be reversed
*/
return 0;
#endif
diff --git a/target-arm/translate.c b/target-arm/translate.c
index c23ddb3bf2..25db09e9f5 100644
--- a/target-arm/translate.c
+++ b/target-arm/translate.c
@@ -911,6 +911,12 @@ static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
}
}
+#ifdef CONFIG_USER_ONLY
+#define IS_USER_ONLY 1
+#else
+#define IS_USER_ONLY 0
+#endif
+
/* Abstractions of "generate code to do a guest load/store for
* AArch32", where a vaddr is always 32 bits (and is zero
* extended if we're a 64 bit core) and data is also
@@ -920,19 +926,35 @@ static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
*/
#if TARGET_LONG_BITS == 32
-#define DO_GEN_LD(SUFF, OPC) \
+#define DO_GEN_LD(SUFF, OPC, BE32_XOR) \
static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
TCGv_i32 addr, int index) \
{ \
TCGMemOp opc = (OPC) | s->be_data; \
+ /* Not needed for user-mode BE32, where we use MO_BE instead. */ \
+ if (!IS_USER_ONLY && s->sctlr_b && BE32_XOR) { \
+ TCGv addr_be = tcg_temp_new(); \
+ tcg_gen_xori_i32(addr_be, addr, BE32_XOR); \
+ tcg_gen_qemu_ld_i32(val, addr_be, index, opc); \
+ tcg_temp_free(addr_be); \
+ return; \
+ } \
tcg_gen_qemu_ld_i32(val, addr, index, opc); \
}
-#define DO_GEN_ST(SUFF, OPC) \
+#define DO_GEN_ST(SUFF, OPC, BE32_XOR) \
static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \
TCGv_i32 addr, int index) \
{ \
TCGMemOp opc = (OPC) | s->be_data; \
+ /* Not needed for user-mode BE32, where we use MO_BE instead. */ \
+ if (!IS_USER_ONLY && s->sctlr_b && BE32_XOR) { \
+ TCGv addr_be = tcg_temp_new(); \
+ tcg_gen_xori_i32(addr_be, addr, BE32_XOR); \
+ tcg_gen_qemu_st_i32(val, addr_be, index, opc); \
+ tcg_temp_free(addr_be); \
+ return; \
+ } \
tcg_gen_qemu_st_i32(val, addr, index, opc); \
}
@@ -941,35 +963,55 @@ static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
{
TCGMemOp opc = MO_Q | s->be_data;
tcg_gen_qemu_ld_i64(val, addr, index, opc);
+ /* Not needed for user-mode BE32, where we use MO_BE instead. */
+ if (!IS_USER_ONLY && s->sctlr_b) {
+ tcg_gen_rotri_i64(val, val, 32);
+ }
}
static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
TCGv_i32 addr, int index)
{
TCGMemOp opc = MO_Q | s->be_data;
+ /* Not needed for user-mode BE32, where we use MO_BE instead. */
+ if (!IS_USER_ONLY && s->sctlr_b) {
+ TCGv_i64 tmp = tcg_temp_new_i64();
+ tcg_gen_rotri_i64(tmp, val, 32);
+ tcg_gen_qemu_st_i64(tmp, addr, index, opc);
+ tcg_temp_free_i64(tmp);
+ return;
+ }
tcg_gen_qemu_st_i64(val, addr, index, opc);
}
#else
-#define DO_GEN_LD(SUFF, OPC) \
+#define DO_GEN_LD(SUFF, OPC, BE32_XOR) \
static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
TCGv_i32 addr, int index) \
{ \
TCGMemOp opc = (OPC) | s->be_data; \
TCGv addr64 = tcg_temp_new(); \
tcg_gen_extu_i32_i64(addr64, addr); \
+ /* Not needed for user-mode BE32, where we use MO_BE instead. */ \
+ if (!IS_USER_ONLY && s->sctlr_b && BE32_XOR) { \
+ tcg_gen_xori_i64(addr64, addr64, BE32_XOR); \
+ } \
tcg_gen_qemu_ld_i32(val, addr64, index, opc); \
tcg_temp_free(addr64); \
}
-#define DO_GEN_ST(SUFF, OPC) \
+#define DO_GEN_ST(SUFF, OPC, BE32_XOR) \
static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \
TCGv_i32 addr, int index) \
{ \
TCGMemOp opc = (OPC) | s->be_data; \
TCGv addr64 = tcg_temp_new(); \
tcg_gen_extu_i32_i64(addr64, addr); \
+ /* Not needed for user-mode BE32, where we use MO_BE instead. */ \
+ if (!IS_USER_ONLY && s->sctlr_b && BE32_XOR) { \
+ tcg_gen_xori_i64(addr64, addr64, BE32_XOR); \
+ } \
tcg_gen_qemu_st_i32(val, addr64, index, opc); \
tcg_temp_free(addr64); \
}
@@ -981,6 +1023,11 @@ static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
TCGv addr64 = tcg_temp_new();
tcg_gen_extu_i32_i64(addr64, addr);
tcg_gen_qemu_ld_i64(val, addr64, index, opc);
+
+ /* Not needed for user-mode BE32, where we use MO_BE instead. */
+ if (!IS_USER_ONLY && s->sctlr_b) {
+ tcg_gen_rotri_i64(val, val, 32);
+ }
tcg_temp_free(addr64);
}
@@ -990,23 +1037,32 @@ static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
TCGMemOp opc = MO_Q | s->be_data;
TCGv addr64 = tcg_temp_new();
tcg_gen_extu_i32_i64(addr64, addr);
- tcg_gen_qemu_st_i64(val, addr64, index, opc);
+
+ /* Not needed for user-mode BE32, where we use MO_BE instead. */
+ if (!IS_USER_ONLY && s->sctlr_b) {
+ TCGv tmp = tcg_temp_new();
+ tcg_gen_rotri_i64(tmp, val, 32);
+ tcg_gen_qemu_st_i64(tmp, addr64, index, opc);
+ tcg_temp_free(tmp);
+ } else {
+ tcg_gen_qemu_st_i64(val, addr64, index, opc);
+ }
tcg_temp_free(addr64);
}
#endif
-DO_GEN_LD(8s, MO_SB)
-DO_GEN_LD(8u, MO_UB)
-DO_GEN_LD(16s, MO_SW)
-DO_GEN_LD(16u, MO_UW)
-DO_GEN_LD(32u, MO_UL)
+DO_GEN_LD(8s, MO_SB, 3)
+DO_GEN_LD(8u, MO_UB, 3)
+DO_GEN_LD(16s, MO_SW, 2)
+DO_GEN_LD(16u, MO_UW, 2)
+DO_GEN_LD(32u, MO_UL, 0)
/* 'a' variants include an alignment check */
-DO_GEN_LD(16ua, MO_UW | MO_ALIGN)
-DO_GEN_LD(32ua, MO_UL | MO_ALIGN)
-DO_GEN_ST(8, MO_UB)
-DO_GEN_ST(16, MO_UW)
-DO_GEN_ST(32, MO_UL)
+DO_GEN_LD(16ua, MO_UW | MO_ALIGN, 2)
+DO_GEN_LD(32ua, MO_UL | MO_ALIGN, 0)
+DO_GEN_ST(8, MO_UB, 3)
+DO_GEN_ST(16, MO_UW, 2)
+DO_GEN_ST(32, MO_UL, 0)
static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
{
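The BE32_XOR values passed to DO_GEN_LD/DO_GEN_ST at the bottom of the diff (3 for bytes, 2 for halfwords, 0 for words) follow from the word-invariant big-endian layout. A small standalone check, not part of the patch and assuming a little-endian host, sketches why XOR-ing the address reproduces BE32 semantics on top of little-endian storage:

```c
#include <assert.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
    /* One 64-bit word of guest memory as a big-endian machine lays it out. */
    const uint8_t be_mem[8] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08 };

    /* The same data as the little-endian system emulation stores it:
     * within each 32-bit word the bytes are reversed (word-invariant view). */
    const uint8_t le_mem[8] = { 0x04, 0x03, 0x02, 0x01, 0x08, 0x07, 0x06, 0x05 };

    for (uint32_t a = 0; a < 8; a++) {
        /* Byte access: a little-endian load at a ^ 3 sees the BE byte at a. */
        assert(le_mem[a ^ 3] == be_mem[a]);
    }
    for (uint32_t a = 0; a < 8; a += 2) {
        /* Halfword access: flip bit 1 of the address, then read little-endian
         * (the memcpy-and-compare assumes a little-endian host). */
        uint16_t le_half;
        memcpy(&le_half, &le_mem[a ^ 2], sizeof(le_half));
        uint16_t be_half = (uint16_t)((be_mem[a] << 8) | be_mem[a + 1]);
        assert(le_half == be_half);
    }
    return 0;
}
```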