Diffstat (limited to 'target-arm')
-rw-r--r--  target-arm/cpu.h            14
-rw-r--r--  target-arm/op_helper.c      49
-rw-r--r--  target-arm/translate-a64.c  140
-rw-r--r--  target-arm/translate.c      5
-rw-r--r--  target-arm/translate.h      2
5 files changed, 180 insertions(+), 30 deletions(-)
diff --git a/target-arm/cpu.h b/target-arm/cpu.h
index afb60ee4f3..401955f825 100644
--- a/target-arm/cpu.h
+++ b/target-arm/cpu.h
@@ -93,7 +93,19 @@
#define ARM_CPU_VFIQ 3
#define NB_MMU_MODES 7
-#define TARGET_INSN_START_EXTRA_WORDS 1
+/* ARM-specific extra insn start words:
+ * 1: Conditional execution bits
+ * 2: Partial exception syndrome for data aborts
+ */
+#define TARGET_INSN_START_EXTRA_WORDS 2
+
+/* The 2nd extra word holding syndrome info for data aborts does not use
+ * the upper 6 bits nor the lower 14 bits. We mask and shift it down to
+ * help the sleb128 encoder do a better job.
+ * When restoring the CPU state, we shift it back up.
+ */
+#define ARM_INSN_START_WORD2_MASK ((1 << 26) - 1)
+#define ARM_INSN_START_WORD2_SHIFT 14
/* We currently assume float and double are IEEE single and double
precision respectively.
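
The mask and shift above keep only bits [25:14] of the data-abort syndrome before it is stored in the second extra insn-start word, and the restore path shifts the value back up. A minimal stand-alone sketch of that round trip, reusing the two constants from the hunk (the test value and helper names are illustrative, not part of the patch):

#include <assert.h>
#include <stdint.h>

#define ARM_INSN_START_WORD2_MASK  ((1 << 26) - 1)
#define ARM_INSN_START_WORD2_SHIFT 14

/* Pack: drop the upper 6 and lower 14 bits so the stored value stays small. */
static uint32_t pack_word2(uint32_t syn)
{
    return (syn & ARM_INSN_START_WORD2_MASK) >> ARM_INSN_START_WORD2_SHIFT;
}

/* Unpack: shift the stored value back into bit positions [25:14]. */
static uint32_t unpack_word2(uint32_t packed)
{
    return packed << ARM_INSN_START_WORD2_SHIFT;
}

int main(void)
{
    uint32_t syn = 0x01234000;            /* only bits [25:14] populated */

    uint32_t packed = pack_word2(syn);
    assert(packed == 0x48d);              /* small value, cheap to sleb128-encode */
    assert(unpack_word2(packed) == syn);  /* round-trips exactly */
    return 0;
}
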
diff --git a/target-arm/op_helper.c b/target-arm/op_helper.c
index 0b29b9dbf2..35912a1192 100644
--- a/target-arm/op_helper.c
+++ b/target-arm/op_helper.c
@@ -76,6 +76,43 @@ uint32_t HELPER(neon_tbl)(CPUARMState *env, uint32_t ireg, uint32_t def,
#if !defined(CONFIG_USER_ONLY)
+static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
+ unsigned int target_el,
+ bool same_el,
+ bool s1ptw, int is_write,
+ int fsc)
+{
+ uint32_t syn;
+
+ /* ISV is only set for data aborts routed to EL2 and
+ * never for stage-1 page table walks faulting on stage 2.
+ *
+ * Furthermore, ISV is only set for certain kinds of load/stores.
+ * If the template syndrome does not have ISV set, we should leave
+ * it cleared.
+ *
+ * See ARMv8 specs, D7-1974:
+ * ISS encoding for an exception from a Data Abort, the
+ * ISV field.
+ */
+ if (!(template_syn & ARM_EL_ISV) || target_el != 2 || s1ptw) {
+ syn = syn_data_abort_no_iss(same_el,
+ 0, 0, s1ptw, is_write == 1, fsc);
+ } else {
+ /* Fields: IL, ISV, SAS, SSE, SRT, SF and AR come from the template
+ * syndrome created at translation time.
+ * Now we create the runtime syndrome with the remaining fields.
+ */
+ syn = syn_data_abort_with_iss(same_el,
+ 0, 0, 0, 0, 0,
+ 0, 0, s1ptw, is_write == 1, fsc,
+ false);
+ /* Merge the runtime syndrome with the template syndrome. */
+ syn |= template_syn;
+ }
+ return syn;
+}
+
/* try to fill the TLB and return an exception if error. If retaddr is
* NULL, it means that the function was called in C code (i.e. not
* from generated code or from helper.c)
@@ -116,8 +153,8 @@ void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx,
syn = syn_insn_abort(same_el, 0, fi.s1ptw, syn);
exc = EXCP_PREFETCH_ABORT;
} else {
- syn = syn_data_abort_no_iss(same_el,
- 0, 0, fi.s1ptw, is_write == 1, syn);
+ syn = merge_syn_data_abort(env->exception.syndrome, target_el,
+ same_el, fi.s1ptw, is_write, syn);
if (is_write == 1 && arm_feature(env, ARM_FEATURE_V6)) {
fsr |= (1 << 11);
}
@@ -138,6 +175,7 @@ void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr, int is_write,
CPUARMState *env = &cpu->env;
int target_el;
bool same_el;
+ uint32_t syn;
if (retaddr) {
/* now we have a real cpu fault */
@@ -162,10 +200,9 @@ void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr, int is_write,
env->exception.fsr |= (1 << 11);
}
- raise_exception(env, EXCP_DATA_ABORT,
- syn_data_abort_no_iss(same_el,
- 0, 0, 0, is_write == 1, 0x21),
- target_el);
+ syn = merge_syn_data_abort(env->exception.syndrome, target_el,
+ same_el, 0, is_write, 0x21);
+ raise_exception(env, EXCP_DATA_ABORT, syn, target_el);
}
#endif /* !defined(CONFIG_USER_ONLY) */
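
merge_syn_data_abort() above combines the ISV-guarded fields recorded at translation time with the fields only known when the fault is taken (S1PTW, WnR, the fault status code). A stand-alone sketch of that decision, assuming the ARMv8 data-abort ISS layout (ISV at bit 24, WnR at bit 6, DFSC in bits [5:0]); the helper names are illustrative, not QEMU's:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define ISS_ISV      (1u << 24)             /* instruction syndrome valid */
#define ISS_WNR      (1u << 6)              /* write-not-read */
#define ISS_DFSC(x)  ((uint32_t)(x) & 0x3f) /* data fault status code */

/* Runtime half of the syndrome: only fields known when the fault is taken. */
static uint32_t runtime_syn(bool is_write, int fsc)
{
    return (is_write ? ISS_WNR : 0) | ISS_DFSC(fsc);
}

/* Keep ISV (and the access-description fields guarded by it) only when the
 * template provided them, the abort routes to EL2, and the fault is not on
 * a stage-1 page table walk; otherwise report a no-ISS syndrome.
 */
static uint32_t merge_syn(uint32_t template_syn, int target_el,
                          bool s1ptw, bool is_write, int fsc)
{
    uint32_t syn = runtime_syn(is_write, fsc);

    if ((template_syn & ISS_ISV) && target_el == 2 && !s1ptw) {
        syn |= template_syn;                /* ISV, SAS, SSE, SRT, SF, AR */
    }
    return syn;
}

int main(void)
{
    uint32_t tmpl = ISS_ISV | (5u << 16);   /* e.g. SRT = x5 */

    assert(merge_syn(tmpl, 2, false, true, 0x21) & ISS_ISV);
    assert(!(merge_syn(tmpl, 1, false, true, 0x21) & ISS_ISV));
    return 0;
}
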
diff --git a/target-arm/translate-a64.c b/target-arm/translate-a64.c
index ce8141a442..f5e29d20a1 100644
--- a/target-arm/translate-a64.c
+++ b/target-arm/translate-a64.c
@@ -308,6 +308,20 @@ static inline void gen_goto_tb(DisasContext *s, int n, uint64_t dest)
}
}
+static void disas_set_insn_syndrome(DisasContext *s, uint32_t syn)
+{
+ /* We don't need to save all of the syndrome, so we mask and shift
+ * out the unneeded bits to help the sleb128 encoder do a better job.
+ */
+ syn &= ARM_INSN_START_WORD2_MASK;
+ syn >>= ARM_INSN_START_WORD2_SHIFT;
+
+ /* We check and clear insn_start_idx to catch multiple updates. */
+ assert(s->insn_start_idx != 0);
+ tcg_set_insn_param(s->insn_start_idx, 2, syn);
+ s->insn_start_idx = 0;
+}
+
static void unallocated_encoding(DisasContext *s)
{
/* Unallocated and reserved encodings are uncategorized */
@@ -723,23 +737,47 @@ static void gen_adc_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
* Store from GPR register to memory.
*/
static void do_gpr_st_memidx(DisasContext *s, TCGv_i64 source,
- TCGv_i64 tcg_addr, int size, int memidx)
+ TCGv_i64 tcg_addr, int size, int memidx,
+ bool iss_valid,
+ unsigned int iss_srt,
+ bool iss_sf, bool iss_ar)
{
g_assert(size <= 3);
tcg_gen_qemu_st_i64(source, tcg_addr, memidx, s->be_data + size);
+
+ if (iss_valid) {
+ uint32_t syn;
+
+ syn = syn_data_abort_with_iss(0,
+ size,
+ false,
+ iss_srt,
+ iss_sf,
+ iss_ar,
+ 0, 0, 0, 0, 0, false);
+ disas_set_insn_syndrome(s, syn);
+ }
}
static void do_gpr_st(DisasContext *s, TCGv_i64 source,
- TCGv_i64 tcg_addr, int size)
+ TCGv_i64 tcg_addr, int size,
+ bool iss_valid,
+ unsigned int iss_srt,
+ bool iss_sf, bool iss_ar)
{
- do_gpr_st_memidx(s, source, tcg_addr, size, get_mem_index(s));
+ do_gpr_st_memidx(s, source, tcg_addr, size, get_mem_index(s),
+ iss_valid, iss_srt, iss_sf, iss_ar);
}
/*
* Load from memory to GPR register
*/
-static void do_gpr_ld_memidx(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr,
- int size, bool is_signed, bool extend, int memidx)
+static void do_gpr_ld_memidx(DisasContext *s,
+ TCGv_i64 dest, TCGv_i64 tcg_addr,
+ int size, bool is_signed,
+ bool extend, int memidx,
+ bool iss_valid, unsigned int iss_srt,
+ bool iss_sf, bool iss_ar)
{
TCGMemOp memop = s->be_data + size;
@@ -755,13 +793,30 @@ static void do_gpr_ld_memidx(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr,
g_assert(size < 3);
tcg_gen_ext32u_i64(dest, dest);
}
+
+ if (iss_valid) {
+ uint32_t syn;
+
+ syn = syn_data_abort_with_iss(0,
+ size,
+ is_signed,
+ iss_srt,
+ iss_sf,
+ iss_ar,
+ 0, 0, 0, 0, 0, false);
+ disas_set_insn_syndrome(s, syn);
+ }
}
-static void do_gpr_ld(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr,
- int size, bool is_signed, bool extend)
+static void do_gpr_ld(DisasContext *s,
+ TCGv_i64 dest, TCGv_i64 tcg_addr,
+ int size, bool is_signed, bool extend,
+ bool iss_valid, unsigned int iss_srt,
+ bool iss_sf, bool iss_ar)
{
do_gpr_ld_memidx(s, dest, tcg_addr, size, is_signed, extend,
- get_mem_index(s));
+ get_mem_index(s),
+ iss_valid, iss_srt, iss_sf, iss_ar);
}
/*
@@ -1817,6 +1872,22 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
}
#endif
+/* Compute the Sixty-Four bit (SF) register size flag. This logic is derived
+ * from the ARMv8 specs for LDR (Shared decode for all encodings).
+ */
+static bool disas_ldst_compute_iss_sf(int size, bool is_signed, int opc)
+{
+ int opc0 = extract32(opc, 0, 1);
+ int regsize;
+
+ if (is_signed) {
+ regsize = opc0 ? 32 : 64;
+ } else {
+ regsize = size == 3 ? 64 : 32;
+ }
+ return regsize == 64;
+}
+
/* C3.3.6 Load/store exclusive
*
* 31 30 29 24 23 22 21 20 16 15 14 10 9 5 4 0
@@ -1868,10 +1939,15 @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
}
} else {
TCGv_i64 tcg_rt = cpu_reg(s, rt);
+ bool iss_sf = disas_ldst_compute_iss_sf(size, false, 0);
+
+ /* Generate ISS for non-exclusive accesses including LASR. */
if (is_store) {
- do_gpr_st(s, tcg_rt, tcg_addr, size);
+ do_gpr_st(s, tcg_rt, tcg_addr, size,
+ true, rt, iss_sf, is_lasr);
} else {
- do_gpr_ld(s, tcg_rt, tcg_addr, size, false, false);
+ do_gpr_ld(s, tcg_rt, tcg_addr, size, false, false,
+ true, rt, iss_sf, is_lasr);
}
}
}
@@ -1923,7 +1999,11 @@ static void disas_ld_lit(DisasContext *s, uint32_t insn)
if (is_vector) {
do_fp_ld(s, rt, tcg_addr, size);
} else {
- do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, false);
+ /* Only unsigned 32-bit loads (opc == 0) target a 32-bit register. */
+ bool iss_sf = opc != 0;
+
+ do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, false,
+ true, rt, iss_sf, false);
}
tcg_temp_free_i64(tcg_addr);
}
@@ -2042,9 +2122,11 @@ static void disas_ldst_pair(DisasContext *s, uint32_t insn)
} else {
TCGv_i64 tcg_rt = cpu_reg(s, rt);
if (is_load) {
- do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, false);
+ do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, false,
+ false, 0, false, false);
} else {
- do_gpr_st(s, tcg_rt, tcg_addr, size);
+ do_gpr_st(s, tcg_rt, tcg_addr, size,
+ false, 0, false, false);
}
}
tcg_gen_addi_i64(tcg_addr, tcg_addr, 1 << size);
@@ -2057,9 +2139,11 @@ static void disas_ldst_pair(DisasContext *s, uint32_t insn)
} else {
TCGv_i64 tcg_rt2 = cpu_reg(s, rt2);
if (is_load) {
- do_gpr_ld(s, tcg_rt2, tcg_addr, size, is_signed, false);
+ do_gpr_ld(s, tcg_rt2, tcg_addr, size, is_signed, false,
+ false, 0, false, false);
} else {
- do_gpr_st(s, tcg_rt2, tcg_addr, size);
+ do_gpr_st(s, tcg_rt2, tcg_addr, size,
+ false, 0, false, false);
}
}
@@ -2102,6 +2186,7 @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
bool is_store = false;
bool is_extended = false;
bool is_unpriv = (idx == 2);
+ bool iss_valid = !is_vector;
bool post_index;
bool writeback;
@@ -2169,12 +2254,15 @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
} else {
TCGv_i64 tcg_rt = cpu_reg(s, rt);
int memidx = is_unpriv ? get_a64_user_mem_index(s) : get_mem_index(s);
+ bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
if (is_store) {
- do_gpr_st_memidx(s, tcg_rt, tcg_addr, size, memidx);
+ do_gpr_st_memidx(s, tcg_rt, tcg_addr, size, memidx,
+ iss_valid, rt, iss_sf, false);
} else {
do_gpr_ld_memidx(s, tcg_rt, tcg_addr, size,
- is_signed, is_extended, memidx);
+ is_signed, is_extended, memidx,
+ iss_valid, rt, iss_sf, false);
}
}
@@ -2272,10 +2360,14 @@ static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn,
}
} else {
TCGv_i64 tcg_rt = cpu_reg(s, rt);
+ bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
if (is_store) {
- do_gpr_st(s, tcg_rt, tcg_addr, size);
+ do_gpr_st(s, tcg_rt, tcg_addr, size,
+ true, rt, iss_sf, false);
} else {
- do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, is_extended);
+ do_gpr_ld(s, tcg_rt, tcg_addr, size,
+ is_signed, is_extended,
+ true, rt, iss_sf, false);
}
}
}
@@ -2352,10 +2444,13 @@ static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn,
}
} else {
TCGv_i64 tcg_rt = cpu_reg(s, rt);
+ bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
if (is_store) {
- do_gpr_st(s, tcg_rt, tcg_addr, size);
+ do_gpr_st(s, tcg_rt, tcg_addr, size,
+ true, rt, iss_sf, false);
} else {
- do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, is_extended);
+ do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, is_extended,
+ true, rt, iss_sf, false);
}
}
}
@@ -11102,7 +11197,8 @@ void gen_intermediate_code_a64(ARMCPU *cpu, TranslationBlock *tb)
tcg_clear_temp_count();
do {
- tcg_gen_insn_start(dc->pc, 0);
+ dc->insn_start_idx = tcg_op_buf_count();
+ tcg_gen_insn_start(dc->pc, 0, 0);
num_insns++;
if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
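
The SF flag computed by disas_ldst_compute_iss_sf() follows the LDR shared decode: unsigned loads target a 32-bit register unless size == 3, and signed loads target a 64-bit register unless opc<0> is set. A few representative checks against a copy of that helper (the encodings in the asserts are for illustration):

#include <assert.h>
#include <stdbool.h>

/* Copy of disas_ldst_compute_iss_sf() from the hunk above. */
static bool compute_iss_sf(int size, bool is_signed, int opc)
{
    int opc0 = opc & 1;
    int regsize;

    if (is_signed) {
        regsize = opc0 ? 32 : 64;
    } else {
        regsize = size == 3 ? 64 : 32;
    }
    return regsize == 64;
}

int main(void)
{
    assert(!compute_iss_sf(2, false, 1));  /* LDR Wt: unsigned word load */
    assert(compute_iss_sf(3, false, 1));   /* LDR Xt: unsigned doubleword load */
    assert(compute_iss_sf(2, true, 2));    /* LDRSW Xt: signed, opc<0> clear */
    assert(!compute_iss_sf(1, true, 3));   /* LDRSH Wt: signed, opc<0> set */
    return 0;
}
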
diff --git a/target-arm/translate.c b/target-arm/translate.c
index e525f1eb4e..6815bc1a79 100644
--- a/target-arm/translate.c
+++ b/target-arm/translate.c
@@ -11732,7 +11732,8 @@ void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
}
do {
tcg_gen_insn_start(dc->pc,
- (dc->condexec_cond << 4) | (dc->condexec_mask >> 1));
+ (dc->condexec_cond << 4) | (dc->condexec_mask >> 1),
+ 0);
num_insns++;
#ifdef CONFIG_USER_ONLY
@@ -12049,8 +12050,10 @@ void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
if (is_a64(env)) {
env->pc = data[0];
env->condexec_bits = 0;
+ env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
} else {
env->regs[15] = data[0];
env->condexec_bits = data[1];
+ env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
}
}
diff --git a/target-arm/translate.h b/target-arm/translate.h
index 6a18d7badc..dbd7ac83d5 100644
--- a/target-arm/translate.h
+++ b/target-arm/translate.h
@@ -59,6 +59,8 @@ typedef struct DisasContext {
bool ss_same_el;
/* Bottom two bits of XScale c15_cpar coprocessor access control reg */
int c15_cpar;
+ /* TCG op index of the current insn_start. */
+ int insn_start_idx;
#define TMP_A64_MAX 16
int tmp_a64_count;
TCGv_i64 tmp_a64[TMP_A64_MAX];
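
The insn_start_idx field added here supports a record-then-patch pattern: the translator remembers where the current insn_start op was emitted, and disas_set_insn_syndrome() later fills in its third argument once a load/store is known to produce ISS. A toy model of that pattern, with a made-up op buffer standing in for QEMU's TCG op stream (the real code patches the recorded op with tcg_set_insn_param()):

#include <assert.h>
#include <stdint.h>

/* Toy op buffer: each insn_start carries three words (pc, condexec, syndrome). */
static uint32_t op_args[64][3];
static int op_count;

static int emit_insn_start(uint32_t pc)
{
    int idx = op_count++;

    op_args[idx][0] = pc;
    op_args[idx][1] = 0;
    op_args[idx][2] = 0;                   /* syndrome slot, patched later if needed */
    return idx;
}

static void set_insn_syndrome(int *insn_start_idx, uint32_t syn)
{
    assert(*insn_start_idx != 0);          /* catch double updates */
    op_args[*insn_start_idx][2] = syn;
    *insn_start_idx = 0;
}

int main(void)
{
    emit_insn_start(0x1000);               /* slot 0, so real ops get a nonzero index */
    int idx = emit_insn_start(0x1004);     /* current instruction */

    set_insn_syndrome(&idx, 0x48d);
    assert(op_args[1][2] == 0x48d);
    return 0;
}
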