author     Richard Henderson <richard.henderson@linaro.org>    2021-04-19 13:22:40 -0700
committer  Peter Maydell <peter.maydell@linaro.org>            2021-04-30 11:16:50 +0100
commit     abe66294e1d4899b312c296e93abcd3b88f2492e (patch)
tree       28c043818e8e50172fadd023f3fd9220eebc530a
parent     9565ac4cc7e1d1aaccf3d8c6aed423b776e7995f (diff)
target/arm: Adjust gen_aa32_{ld, st}_i64 for align+endianness
Adjust the interface to match what has been done to the TCGv_i32
load/store functions.

This is less obvious, because at present the only user of these
functions, trans_VLDST_multiple, also wants to manipulate the
endianness to speed up loading multiple bytes.  Thus we retain an
"internal" interface which is identical to the current
gen_aa32_{ld,st}_i64 interface.

The "new" interface will gain users as we remove the legacy
interfaces, gen_aa32_ld64 and gen_aa32_st64.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-15-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
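Below is a minimal sketch (not part of this patch) of how a caller is expected to
use the adjusted interface, assuming the declarations added to
target/arm/translate.c in the diff below; the helper name
example_ldst_doubleword is hypothetical:

/*
 * With the adjusted interface the caller passes only the bare MemOp
 * (e.g. MO_Q); gen_aa32_ld_i64()/gen_aa32_st_i64() run it through
 * finalize_memop(), which folds in the CPU's data endianness and any
 * alignment requirement, before calling the "internal" helpers.
 * Only callers such as trans_VLDST_multiple, which build the endianness
 * into the MemOp themselves, use the _internal_ variants directly.
 */
static void example_ldst_doubleword(DisasContext *s, TCGv_i64 val,
                                    TCGv_i32 addr, int mem_idx, bool load)
{
    if (load) {
        gen_aa32_ld_i64(s, val, addr, mem_idx, MO_Q);
    } else {
        gen_aa32_st_i64(s, val, addr, mem_idx, MO_Q);
    }
}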
-rw-r--r--  target/arm/translate-neon.c.inc |  6
-rw-r--r--  target/arm/translate.c          | 78
2 files changed, 49 insertions(+), 35 deletions(-)
diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc
index c82aa1412e..18d9042130 100644
--- a/target/arm/translate-neon.c.inc
+++ b/target/arm/translate-neon.c.inc
@@ -494,11 +494,13 @@ static bool trans_VLDST_multiple(DisasContext *s, arg_VLDST_multiple *a)
int tt = a->vd + reg + spacing * xs;
if (a->l) {
- gen_aa32_ld_i64(s, tmp64, addr, mmu_idx, endian | size);
+ gen_aa32_ld_internal_i64(s, tmp64, addr, mmu_idx,
+ endian | size);
neon_store_element64(tt, n, size, tmp64);
} else {
neon_load_element64(tmp64, tt, n, size);
- gen_aa32_st_i64(s, tmp64, addr, mmu_idx, endian | size);
+ gen_aa32_st_internal_i64(s, tmp64, addr, mmu_idx,
+ endian | size);
}
tcg_gen_add_i32(addr, addr, tmp);
}
diff --git a/target/arm/translate.c b/target/arm/translate.c
index e99c0ab5cb..21b241b1ce 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -949,6 +949,37 @@ static void gen_aa32_st_internal_i32(DisasContext *s, TCGv_i32 val,
tcg_temp_free(addr);
}
+static void gen_aa32_ld_internal_i64(DisasContext *s, TCGv_i64 val,
+ TCGv_i32 a32, int index, MemOp opc)
+{
+ TCGv addr = gen_aa32_addr(s, a32, opc);
+
+ tcg_gen_qemu_ld_i64(val, addr, index, opc);
+
+ /* Not needed for user-mode BE32, where we use MO_BE instead. */
+ if (!IS_USER_ONLY && s->sctlr_b && (opc & MO_SIZE) == MO_64) {
+ tcg_gen_rotri_i64(val, val, 32);
+ }
+ tcg_temp_free(addr);
+}
+
+static void gen_aa32_st_internal_i64(DisasContext *s, TCGv_i64 val,
+ TCGv_i32 a32, int index, MemOp opc)
+{
+ TCGv addr = gen_aa32_addr(s, a32, opc);
+
+ /* Not needed for user-mode BE32, where we use MO_BE instead. */
+ if (!IS_USER_ONLY && s->sctlr_b && (opc & MO_SIZE) == MO_64) {
+ TCGv_i64 tmp = tcg_temp_new_i64();
+ tcg_gen_rotri_i64(tmp, val, 32);
+ tcg_gen_qemu_st_i64(tmp, addr, index, opc);
+ tcg_temp_free_i64(tmp);
+ } else {
+ tcg_gen_qemu_st_i64(val, addr, index, opc);
+ }
+ tcg_temp_free(addr);
+}
+
static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
int index, MemOp opc)
{
@@ -961,6 +992,18 @@ static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
gen_aa32_st_internal_i32(s, val, a32, index, finalize_memop(s, opc));
}
+static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
+ int index, MemOp opc)
+{
+ gen_aa32_ld_internal_i64(s, val, a32, index, finalize_memop(s, opc));
+}
+
+static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
+ int index, MemOp opc)
+{
+ gen_aa32_st_internal_i64(s, val, a32, index, finalize_memop(s, opc));
+}
+
#define DO_GEN_LD(SUFF, OPC) \
static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
TCGv_i32 a32, int index) \
@@ -975,47 +1018,16 @@ static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
gen_aa32_st_i32(s, val, a32, index, OPC); \
}
-static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
- int index, MemOp opc)
-{
- TCGv addr = gen_aa32_addr(s, a32, opc);
- tcg_gen_qemu_ld_i64(val, addr, index, opc);
-
- /* Not needed for user-mode BE32, where we use MO_BE instead. */
- if (!IS_USER_ONLY && s->sctlr_b && (opc & MO_SIZE) == MO_64) {
- tcg_gen_rotri_i64(val, val, 32);
- }
-
- tcg_temp_free(addr);
-}
-
static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
TCGv_i32 a32, int index)
{
- gen_aa32_ld_i64(s, val, a32, index, MO_Q | s->be_data);
-}
-
-static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
- int index, MemOp opc)
-{
- TCGv addr = gen_aa32_addr(s, a32, opc);
-
- /* Not needed for user-mode BE32, where we use MO_BE instead. */
- if (!IS_USER_ONLY && s->sctlr_b && (opc & MO_SIZE) == MO_64) {
- TCGv_i64 tmp = tcg_temp_new_i64();
- tcg_gen_rotri_i64(tmp, val, 32);
- tcg_gen_qemu_st_i64(tmp, addr, index, opc);
- tcg_temp_free_i64(tmp);
- } else {
- tcg_gen_qemu_st_i64(val, addr, index, opc);
- }
- tcg_temp_free(addr);
+ gen_aa32_ld_i64(s, val, a32, index, MO_Q);
}
static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
TCGv_i32 a32, int index)
{
- gen_aa32_st_i64(s, val, a32, index, MO_Q | s->be_data);
+ gen_aa32_st_i64(s, val, a32, index, MO_Q);
}
DO_GEN_LD(8u, MO_UB)