Diffstat (limited to 'target')
-rw-r--r--  target/arm/gdbstub.c       | 25 +++++++++++++++++++++++++
-rw-r--r--  target/arm/hvf/hvf.c       |  7 +++++++
-rw-r--r--  target/arm/translate-a32.h |  3 +++
-rw-r--r--  target/arm/translate-sve.c | 17 ++++++++---------
-rw-r--r--  target/arm/translate.c     | 27 ++++++++-------------------
5 files changed, 51 insertions(+), 28 deletions(-)
diff --git a/target/arm/gdbstub.c b/target/arm/gdbstub.c
index e0dcb33e32..134da0d0ae 100644
--- a/target/arm/gdbstub.c
+++ b/target/arm/gdbstub.c
@@ -199,6 +199,27 @@ static int vfp_gdb_set_sysreg(CPUARMState *env, uint8_t *buf, int reg)
return 0;
}
+static int mve_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg)
+{
+ switch (reg) {
+ case 0:
+ return gdb_get_reg32(buf, env->v7m.vpr);
+ default:
+ return 0;
+ }
+}
+
+static int mve_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
+{
+ switch (reg) {
+ case 0:
+ env->v7m.vpr = ldl_p(buf);
+ return 4;
+ default:
+ return 0;
+ }
+}
+
/**
* arm_get/set_gdb_*: get/set a gdb register
* @env: the CPU state
@@ -468,6 +489,10 @@ void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
2, "arm-vfp-sysregs.xml", 0);
}
}
+ if (cpu_isar_feature(aa32_mve, cpu)) {
+ gdb_register_coprocessor(cs, mve_gdb_get_reg, mve_gdb_set_reg,
+ 1, "arm-m-profile-mve.xml", 0);
+ }
gdb_register_coprocessor(cs, arm_gdb_get_sysreg, arm_gdb_set_sysreg,
arm_gen_dynamic_sysreg_xml(cs, cs->gdb_num_regs),
"system-registers.xml", 0);
diff --git a/target/arm/hvf/hvf.c b/target/arm/hvf/hvf.c
index bff3e0cde7..0dc96560d3 100644
--- a/target/arm/hvf/hvf.c
+++ b/target/arm/hvf/hvf.c
@@ -1150,12 +1150,19 @@ int hvf_vcpu_exec(CPUState *cpu)
uint32_t sas = (syndrome >> 22) & 3;
uint32_t len = 1 << sas;
uint32_t srt = (syndrome >> 16) & 0x1f;
+ uint32_t cm = (syndrome >> 8) & 0x1;
uint64_t val = 0;
trace_hvf_data_abort(env->pc, hvf_exit->exception.virtual_address,
hvf_exit->exception.physical_address, isv,
iswrite, s1ptw, len, srt);
+ if (cm) {
+ /* We don't cache MMIO regions */
+ advance_pc = true;
+ break;
+ }
+
assert(isv);
if (iswrite) {
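The new cm flag is ISS bit 8 of the data-abort syndrome (CM in the Arm ARM
ESR_ELx layout): the abort was raised by a cache-maintenance instruction, so
there is no data to transfer, and since MMIO regions are not cached the insn
can simply be stepped over. A sketch decoding the ISS fields this exit
handler reads; the struct and function names are illustrative, not QEMU API:

    #include <stdbool.h>
    #include <stdint.h>

    typedef struct {
        bool     isv; /* bit 24: the fields below are valid */
        uint32_t len; /* 1 << SAS (bits 23:22): access size in bytes */
        uint32_t srt; /* bits 20:16: register being transferred */
        bool     cm;  /* bit 8: fault from a cache-maintenance insn */
    } DabtIss;

    static DabtIss decode_dabt_iss(uint32_t syndrome)
    {
        DabtIss iss = {
            .isv = (syndrome >> 24) & 1,
            .len = 1u << ((syndrome >> 22) & 3),
            .srt = (syndrome >> 16) & 0x1f,
            .cm  = (syndrome >> 8) & 1,
        };
        return iss;
    }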
diff --git a/target/arm/translate-a32.h b/target/arm/translate-a32.h
index 88f15df60e..17af8dc95a 100644
--- a/target/arm/translate-a32.h
+++ b/target/arm/translate-a32.h
@@ -70,6 +70,9 @@ static inline void store_cpu_offset(TCGv_i32 var, int offset)
#define store_cpu_field(var, name) \
store_cpu_offset(var, offsetof(CPUARMState, name))
+#define store_cpu_field_constant(val, name) \
+ tcg_gen_st_i32(tcg_constant_i32(val), cpu_env, offsetof(CPUARMState, name))
+
/* Create a new temporary and set it to the value of a CPU register. */
static inline TCGv_i32 load_reg(DisasContext *s, int reg)
{
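store_cpu_field_constant() exists so callers can write a known value to a
CPUARMState field without allocating a temporary: tcg_constant_i32() returns
an interned, read-only value that must never be freed. Roughly, assuming the
store_cpu_field() of this tree frees the temporary passed to it:

    /* Old pattern (removed by the translate.c hunks below). */
    TCGv_i32 tmp = tcg_const_i32(0); /* mutable temp, must be released */
    store_cpu_field(tmp, condexec_bits);

    /* New pattern: one interned constant, nothing to allocate or free. */
    store_cpu_field_constant(0, condexec_bits);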
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index bc91a64171..76b5fe9f31 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -1943,20 +1943,20 @@ static void do_sat_addsub_32(TCGv_i64 reg, TCGv_i64 val, bool u, bool d)
static void do_sat_addsub_64(TCGv_i64 reg, TCGv_i64 val, bool u, bool d)
{
TCGv_i64 t0 = tcg_temp_new_i64();
- TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t2;
if (u) {
if (d) {
tcg_gen_sub_i64(t0, reg, val);
- tcg_gen_movi_i64(t1, 0);
- tcg_gen_movcond_i64(TCG_COND_LTU, reg, reg, val, t1, t0);
+ t2 = tcg_constant_i64(0);
+ tcg_gen_movcond_i64(TCG_COND_LTU, reg, reg, val, t2, t0);
} else {
tcg_gen_add_i64(t0, reg, val);
- tcg_gen_movi_i64(t1, -1);
- tcg_gen_movcond_i64(TCG_COND_LTU, reg, t0, reg, t1, t0);
+ t2 = tcg_constant_i64(-1);
+ tcg_gen_movcond_i64(TCG_COND_LTU, reg, t0, reg, t2, t0);
}
} else {
+ TCGv_i64 t1 = tcg_temp_new_i64();
if (d) {
/* Detect signed overflow for subtraction. */
tcg_gen_xor_i64(t0, reg, val);
@@ -1966,7 +1966,7 @@ static void do_sat_addsub_64(TCGv_i64 reg, TCGv_i64 val, bool u, bool d)
/* Bound the result. */
tcg_gen_movi_i64(reg, INT64_MIN);
- t2 = tcg_const_i64(0);
+ t2 = tcg_constant_i64(0);
tcg_gen_movcond_i64(TCG_COND_LT, reg, t0, t2, reg, t1);
} else {
/* Detect signed overflow for addition. */
@@ -1977,13 +1977,12 @@ static void do_sat_addsub_64(TCGv_i64 reg, TCGv_i64 val, bool u, bool d)
/* Bound the result. */
tcg_gen_movi_i64(t1, INT64_MAX);
- t2 = tcg_const_i64(0);
+ t2 = tcg_constant_i64(0);
tcg_gen_movcond_i64(TCG_COND_LT, reg, t0, t2, t1, reg);
}
- tcg_temp_free_i64(t2);
+ tcg_temp_free_i64(t1);
}
tcg_temp_free_i64(t0);
- tcg_temp_free_i64(t1);
}
/* Similarly with a vector and a scalar operand. */
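The conversion rests on the lifetime difference between the two APIs:
tcg_const_i64() allocates a fresh mutable temporary the caller must free,
while tcg_constant_i64() returns an interned, read-only value that must never
be freed or written. That is why t1 could move into the signed branch and
only t0/t1 are freed at the end. A minimal sketch of the idiom;
clamp_nonneg() is an illustrative helper, not from the patch:

    /* dst = (src < 0) ? 0 : src */
    static void clamp_nonneg(TCGv_i64 dst, TCGv_i64 src)
    {
        TCGv_i64 zero = tcg_constant_i64(0); /* interned: never freed */

        tcg_gen_movcond_i64(TCG_COND_LT, dst, src, zero, zero, src);
    }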
diff --git a/target/arm/translate.c b/target/arm/translate.c
index d6af5b1b03..98f5925928 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -364,8 +364,7 @@ void clear_eci_state(DisasContext *s)
* multiple insn executes.
*/
if (s->eci) {
- TCGv_i32 tmp = tcg_const_i32(0);
- store_cpu_field(tmp, condexec_bits);
+ store_cpu_field_constant(0, condexec_bits);
s->eci = 0;
}
}
@@ -389,13 +388,12 @@ static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
void gen_rev16(TCGv_i32 dest, TCGv_i32 var)
{
TCGv_i32 tmp = tcg_temp_new_i32();
- TCGv_i32 mask = tcg_const_i32(0x00ff00ff);
+ TCGv_i32 mask = tcg_constant_i32(0x00ff00ff);
tcg_gen_shri_i32(tmp, var, 8);
tcg_gen_and_i32(tmp, tmp, mask);
tcg_gen_and_i32(var, var, mask);
tcg_gen_shli_i32(var, var, 8);
tcg_gen_or_i32(dest, var, tmp);
- tcg_temp_free_i32(mask);
tcg_temp_free_i32(tmp);
}
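gen_rev16() swaps the two bytes inside each 16-bit half of the word; with the
mask now interned, only the scratch temp still needs freeing. A host-side
reference for the computation (our sketch, not QEMU code):

    #include <stdint.h>

    static uint32_t rev16_ref(uint32_t x)
    {
        const uint32_t mask = 0x00ff00ff;
        /* High byte of each half moves down, low byte moves up. */
        return ((x >> 8) & mask) | ((x & mask) << 8);
    }
    /* rev16_ref(0xAABBCCDD) == 0xBBAADDCC */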
@@ -740,9 +738,8 @@ void gen_set_condexec(DisasContext *s)
{
if (s->condexec_mask) {
uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
- TCGv_i32 tmp = tcg_temp_new_i32();
- tcg_gen_movi_i32(tmp, val);
- store_cpu_field(tmp, condexec_bits);
+
+ store_cpu_field_constant(val, condexec_bits);
}
}
@@ -7849,10 +7846,9 @@ static bool op_smlad(DisasContext *s, arg_rrrr *a, bool m_swap, bool sub)
t3 = tcg_temp_new_i32();
tcg_gen_sari_i32(t3, t1, 31);
qf = load_cpu_field(QF);
- one = tcg_const_i32(1);
+ one = tcg_constant_i32(1);
tcg_gen_movcond_i32(TCG_COND_NE, qf, t2, t3, one, qf);
store_cpu_field(qf, QF);
- tcg_temp_free_i32(one);
tcg_temp_free_i32(t3);
tcg_temp_free_i32(t2);
}
@@ -8363,8 +8359,6 @@ static bool trans_BL(DisasContext *s, arg_i *a)
static bool trans_BLX_i(DisasContext *s, arg_BLX_i *a)
{
- TCGv_i32 tmp;
-
/*
* BLX <imm> would be useless on M-profile; the encoding space
* is used for other insns from v8.1M onward, and UNDEFs before that.
@@ -8378,8 +8372,7 @@ static bool trans_BLX_i(DisasContext *s, arg_BLX_i *a)
return false;
}
tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | s->thumb);
- tmp = tcg_const_i32(!s->thumb);
- store_cpu_field(tmp, thumb);
+ store_cpu_field_constant(!s->thumb, thumb);
gen_jmp(s, (read_pc(s) & ~3) + a->imm);
return true;
}
@@ -8678,7 +8671,6 @@ static bool trans_LCTP(DisasContext *s, arg_LCTP *a)
* doesn't cache branch information, all we need to do is reset
* FPSCR.LTPSIZE to 4.
*/
- TCGv_i32 ltpsize;
if (!dc_isar_feature(aa32_lob, s) ||
!dc_isar_feature(aa32_mve, s)) {
@@ -8689,8 +8681,7 @@ static bool trans_LCTP(DisasContext *s, arg_LCTP *a)
return true;
}
- ltpsize = tcg_const_i32(4);
- store_cpu_field(ltpsize, v7m.ltpsize);
+ store_cpu_field_constant(4, v7m.ltpsize);
return true;
}
@@ -9488,9 +9479,7 @@ static void arm_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
/* Reset the conditional execution bits immediately. This avoids
complications trying to do it at the end of the block. */
if (dc->condexec_mask || dc->condexec_cond) {
- TCGv_i32 tmp = tcg_temp_new_i32();
- tcg_gen_movi_i32(tmp, 0);
- store_cpu_field(tmp, condexec_bits);
+ store_cpu_field_constant(0, condexec_bits);
}
}