author    | Richard Henderson <richard.henderson@linaro.org> | 2020-06-25 20:31:22 -0700
committer | Peter Maydell <peter.maydell@linaro.org>         | 2020-06-26 14:31:12 +0100
commit    | 73ceeb0011b23bac8bd2c09ebe3c18d034aa69ce (patch)
tree      | 20fb1705299e950c168f267495b7b73a19762816 /target/arm/translate-a64.c
parent    | 0a405be2b8fd9506a009b10d7d2d98c394b36db6 (diff)
target/arm: Add gen_mte_checkN
Replace existing uses of check_data_tbi in translate-a64.c that
perform multiple logical memory accesses. Leave the helper blank
for now to reduce the patch size.
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200626033144.790098-25-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'target/arm/translate-a64.c')
-rw-r--r-- | target/arm/translate-a64.c | 71
1 file changed, 55 insertions, 16 deletions
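
Before the patch itself, it may help to see the size arithmetic the new interface expects: each call site passes the log2 of the per-element access size plus the total byte count of the whole sequence, and gen_mte_checkN falls back to gen_mte_check1 when the total is a single element. The standalone sketch below (plain C, independent of QEMU; the example instructions and operand sizes are my own illustration, only the formulas come from the diff) prints the per-element and total sizes that the three converted call sites would compute.

```c
#include <stdio.h>

int main(void)
{
    /* LDP/STP of two X registers: size = 3, so 8-byte elements, 2 << 3 bytes total. */
    int size = 3;
    printf("disas_ldst_pair:            esize=%d total=%d\n",
           1 << size, 2 << size);

    /* LD4 {V0.4S-V3.4S}: rpt = 1, selem = 4, is_q = 1, size = 2 (4-byte elements). */
    int rpt = 1, selem = 4, is_q = 1;
    size = 2;
    printf("disas_ldst_multiple_struct: esize=%d total=%d\n",
           1 << size, rpt * selem * (is_q ? 16 : 8));

    /* ST1 (single structure) of one D element: scale = 3, selem = 1. */
    int scale = 3;
    selem = 1;
    printf("disas_ldst_single_struct:   esize=%d total=%d\n",
           1 << scale, selem << scale);
    return 0;
}
```

This prints totals of 16, 64 and 8 bytes respectively; only the first two exceed one element, so only they would take the new gen_mte_checkN slow path, while the last would fall back to gen_mte_check1.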
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index 4d0453c895..52be0400d7 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -284,6 +284,34 @@ TCGv_i64 gen_mte_check1(DisasContext *s, TCGv_i64 addr, bool is_write,
                                  false, get_mem_index(s));
 }
 
+/*
+ * For MTE, check multiple logical sequential accesses.
+ */
+TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write,
+                        bool tag_checked, int log2_esize, int total_size)
+{
+    if (tag_checked && s->mte_active[0] && total_size != (1 << log2_esize)) {
+        TCGv_i32 tcg_desc;
+        TCGv_i64 ret;
+        int desc = 0;
+
+        desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s));
+        desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
+        desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
+        desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
+        desc = FIELD_DP32(desc, MTEDESC, ESIZE, 1 << log2_esize);
+        desc = FIELD_DP32(desc, MTEDESC, TSIZE, total_size);
+        tcg_desc = tcg_const_i32(desc);
+
+        ret = new_tmp_a64(s);
+        gen_helper_mte_checkN(ret, cpu_env, tcg_desc, addr);
+        tcg_temp_free_i32(tcg_desc);
+
+        return ret;
+    }
+    return gen_mte_check1(s, addr, is_write, tag_checked, log2_esize);
+}
+
 typedef struct DisasCompare64 {
     TCGCond cond;
     TCGv_i64 value;
@@ -2848,7 +2876,10 @@ static void disas_ldst_pair(DisasContext *s, uint32_t insn)
         }
     }
 
-    clean_addr = clean_data_tbi(s, dirty_addr);
+    clean_addr = gen_mte_checkN(s, dirty_addr, !is_load,
+                                (wback || rn != 31) && !set_tag,
+                                size, 2 << size);
+
     if (is_vector) {
         if (is_load) {
             do_fp_ld(s, rt, clean_addr, size);
@@ -3514,7 +3545,7 @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
     TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
     MemOp endian = s->be_data;
 
-    int ebytes;   /* bytes per element */
+    int total;    /* total bytes */
     int elements; /* elements per vector */
     int rpt;      /* num iterations */
     int selem;    /* structure elements */
@@ -3584,19 +3615,26 @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
         endian = MO_LE;
     }
 
-    /* Consecutive little-endian elements from a single register
+    total = rpt * selem * (is_q ? 16 : 8);
+    tcg_rn = cpu_reg_sp(s, rn);
+
+    /*
+     * Issue the MTE check vs the logical repeat count, before we
+     * promote consecutive little-endian elements below.
+     */
+    clean_addr = gen_mte_checkN(s, tcg_rn, is_store, is_postidx || rn != 31,
+                                size, total);
+
+    /*
+     * Consecutive little-endian elements from a single register
      * can be promoted to a larger little-endian operation.
      */
     if (selem == 1 && endian == MO_LE) {
         size = 3;
     }
-    ebytes = 1 << size;
-    elements = (is_q ? 16 : 8) / ebytes;
-
-    tcg_rn = cpu_reg_sp(s, rn);
-    clean_addr = clean_data_tbi(s, tcg_rn);
-    tcg_ebytes = tcg_const_i64(ebytes);
+    elements = (is_q ? 16 : 8) >> size;
+    tcg_ebytes = tcg_const_i64(1 << size);
 
     for (r = 0; r < rpt; r++) {
         int e;
         for (e = 0; e < elements; e++) {
@@ -3630,7 +3668,7 @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
 
     if (is_postidx) {
         if (rm == 31) {
-            tcg_gen_addi_i64(tcg_rn, tcg_rn, rpt * elements * selem * ebytes);
+            tcg_gen_addi_i64(tcg_rn, tcg_rn, total);
         } else {
             tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
         }
@@ -3676,7 +3714,7 @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
     int selem = (extract32(opc, 0, 1) << 1 | R) + 1;
     bool replicate = false;
     int index = is_q << 3 | S << 2 | size;
-    int ebytes, xs;
+    int xs, total;
     TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
 
     if (extract32(insn, 31, 1)) {
@@ -3730,16 +3768,17 @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
         return;
     }
 
-    ebytes = 1 << scale;
-
     if (rn == 31) {
         gen_check_sp_alignment(s);
     }
 
+    total = selem << scale;
     tcg_rn = cpu_reg_sp(s, rn);
-    clean_addr = clean_data_tbi(s, tcg_rn);
-    tcg_ebytes = tcg_const_i64(ebytes);
+    clean_addr = gen_mte_checkN(s, tcg_rn, !is_load, is_postidx || rn != 31,
+                                scale, total);
+
+    tcg_ebytes = tcg_const_i64(1 << scale);
 
     for (xs = 0; xs < selem; xs++) {
         if (replicate) {
             /* Load and replicate to all elements */
@@ -3766,7 +3805,7 @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
 
     if (is_postidx) {
         if (rm == 31) {
-            tcg_gen_addi_i64(tcg_rn, tcg_rn, selem * ebytes);
+            tcg_gen_addi_i64(tcg_rn, tcg_rn, total);
        } else {
             tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
         }