about | summary | refs | log | tree | commit | diff
path: root/target/arm/translate.c
diff options
context:
space:
mode:
Diffstat (limited to 'target/arm/translate.c')
-rw-r--r--  target/arm/translate.c  68
1 files changed, 38 insertions, 30 deletions
diff --git a/target/arm/translate.c b/target/arm/translate.c
index 781be1e219..7b5db15861 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -1559,6 +1559,13 @@ static inline void neon_store_reg64(TCGv_i64 var, int reg)
tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}
+static TCGv_ptr vfp_reg_ptr(bool dp, int reg)
+{
+ TCGv_ptr ret = tcg_temp_new_ptr();
+ tcg_gen_addi_ptr(ret, cpu_env, vfp_reg_offset(dp, reg));
+ return ret;
+}
+
#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
@@ -5597,6 +5604,7 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
int u;
uint32_t imm, mask;
TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
+ TCGv_ptr ptr1, ptr2, ptr3;
TCGv_i64 tmp64;
/* FIXME: this access check should not take precedence over UNDEF
@@ -5643,34 +5651,34 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
return 1;
}
- tmp = tcg_const_i32(rd);
- tmp2 = tcg_const_i32(rn);
- tmp3 = tcg_const_i32(rm);
+ ptr1 = vfp_reg_ptr(true, rd);
+ ptr2 = vfp_reg_ptr(true, rn);
+ ptr3 = vfp_reg_ptr(true, rm);
tmp4 = tcg_const_i32(size);
- gen_helper_crypto_sha1_3reg(cpu_env, tmp, tmp2, tmp3, tmp4);
+ gen_helper_crypto_sha1_3reg(ptr1, ptr2, ptr3, tmp4);
tcg_temp_free_i32(tmp4);
} else { /* SHA-256 */
if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256) || size == 3) {
return 1;
}
- tmp = tcg_const_i32(rd);
- tmp2 = tcg_const_i32(rn);
- tmp3 = tcg_const_i32(rm);
+ ptr1 = vfp_reg_ptr(true, rd);
+ ptr2 = vfp_reg_ptr(true, rn);
+ ptr3 = vfp_reg_ptr(true, rm);
switch (size) {
case 0:
- gen_helper_crypto_sha256h(cpu_env, tmp, tmp2, tmp3);
+ gen_helper_crypto_sha256h(ptr1, ptr2, ptr3);
break;
case 1:
- gen_helper_crypto_sha256h2(cpu_env, tmp, tmp2, tmp3);
+ gen_helper_crypto_sha256h2(ptr1, ptr2, ptr3);
break;
case 2:
- gen_helper_crypto_sha256su1(cpu_env, tmp, tmp2, tmp3);
+ gen_helper_crypto_sha256su1(ptr1, ptr2, ptr3);
break;
}
}
- tcg_temp_free_i32(tmp);
- tcg_temp_free_i32(tmp2);
- tcg_temp_free_i32(tmp3);
+ tcg_temp_free_ptr(ptr1);
+ tcg_temp_free_ptr(ptr2);
+ tcg_temp_free_ptr(ptr3);
return 0;
}
if (size == 3 && op != NEON_3R_LOGIC) {
@@ -7159,8 +7167,8 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
|| ((rm | rd) & 1)) {
return 1;
}
- tmp = tcg_const_i32(rd);
- tmp2 = tcg_const_i32(rm);
+ ptr1 = vfp_reg_ptr(true, rd);
+ ptr2 = vfp_reg_ptr(true, rm);
/* Bit 6 is the lowest opcode bit; it distinguishes between
* encryption (AESE/AESMC) and decryption (AESD/AESIMC)
@@ -7168,12 +7176,12 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
tmp3 = tcg_const_i32(extract32(insn, 6, 1));
if (op == NEON_2RM_AESE) {
- gen_helper_crypto_aese(cpu_env, tmp, tmp2, tmp3);
+ gen_helper_crypto_aese(ptr1, ptr2, tmp3);
} else {
- gen_helper_crypto_aesmc(cpu_env, tmp, tmp2, tmp3);
+ gen_helper_crypto_aesmc(ptr1, ptr2, tmp3);
}
- tcg_temp_free_i32(tmp);
- tcg_temp_free_i32(tmp2);
+ tcg_temp_free_ptr(ptr1);
+ tcg_temp_free_ptr(ptr2);
tcg_temp_free_i32(tmp3);
break;
case NEON_2RM_SHA1H:
@@ -7181,13 +7189,13 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
|| ((rm | rd) & 1)) {
return 1;
}
- tmp = tcg_const_i32(rd);
- tmp2 = tcg_const_i32(rm);
+ ptr1 = vfp_reg_ptr(true, rd);
+ ptr2 = vfp_reg_ptr(true, rm);
- gen_helper_crypto_sha1h(cpu_env, tmp, tmp2);
+ gen_helper_crypto_sha1h(ptr1, ptr2);
- tcg_temp_free_i32(tmp);
- tcg_temp_free_i32(tmp2);
+ tcg_temp_free_ptr(ptr1);
+ tcg_temp_free_ptr(ptr2);
break;
case NEON_2RM_SHA1SU1:
if ((rm | rd) & 1) {
@@ -7201,15 +7209,15 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
} else if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
return 1;
}
- tmp = tcg_const_i32(rd);
- tmp2 = tcg_const_i32(rm);
+ ptr1 = vfp_reg_ptr(true, rd);
+ ptr2 = vfp_reg_ptr(true, rm);
if (q) {
- gen_helper_crypto_sha256su0(cpu_env, tmp, tmp2);
+ gen_helper_crypto_sha256su0(ptr1, ptr2);
} else {
- gen_helper_crypto_sha1su1(cpu_env, tmp, tmp2);
+ gen_helper_crypto_sha1su1(ptr1, ptr2);
}
- tcg_temp_free_i32(tmp);
- tcg_temp_free_i32(tmp2);
+ tcg_temp_free_ptr(ptr1);
+ tcg_temp_free_ptr(ptr2);
break;
default:
elementwise: