author     Richard Henderson <richard.henderson@linaro.org>   2019-03-22 22:03:39 -0700
committer  Richard Henderson <richard.henderson@linaro.org>   2019-06-10 07:03:42 -0700
commit     269bd5d8f61c6b0825ed3c6a5fe01a3ad71c3b4a (patch)
tree       b51b8c1ca4eadac1eee562a5e1671b778e022250 /tcg/aarch64
parent     5e1401969b25f676fee6b1c564441759cf967a43 (diff)
cpu: Move the softmmu tlb to CPUNegativeOffsetState
We have for some time had code within the tcg backends to handle large
positive offsets from env.  This move makes sure that need not happen.
Indeed, we are able to assert at build time that simple offsets suffice
for all hosts.

Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
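The layout trick behind that build-time assertion can be illustrated with a
small self-contained sketch.  The type and field names below (FastTLB,
ArchCPUSketch, FAST_OFS) are hypothetical stand-ins, not QEMU's; only the
shape mirrors the CPUNegativeOffsetState arrangement, with the fast-path TLB
data placed immediately before env so every field is reachable at a small
negative offset:

    #include <stddef.h>
    #include <stdint.h>

    /* Stand-in for QEMU_BUILD_BUG_ON: fail the build when X is true. */
    #define BUILD_BUG_ON(X) _Static_assert(!(X), #X)

    typedef struct {
        uintptr_t mask;             /* hypothetical fast-path fields */
        uintptr_t table;
    } FastTLB;

    typedef struct {
        FastTLB fast[4];            /* "negative offset" block ...   */
        long env;                   /* ... placed just before env    */
    } ArchCPUSketch;

    /* Offset of fast[IDX] measured from env: negative by construction. */
    #define FAST_OFS(IDX) \
        ((int)offsetof(ArchCPUSketch, fast[IDX]) - \
         (int)offsetof(ArchCPUSketch, env))

    BUILD_BUG_ON(FAST_OFS(0) > 0);     /* below env ...                 */
    BUILD_BUG_ON(FAST_OFS(0) < -512);  /* ... and within the host range */

With all TCG-visible TLB data packed this way, a backend never has to
synthesize a large positive displacement from env, which is exactly what the
aarch64 hunks below delete.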
Diffstat (limited to 'tcg/aarch64')
-rw-r--r--  tcg/aarch64/tcg-target.inc.c  |  29
1 file changed, 6 insertions(+), 23 deletions(-)
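One definition to keep in mind while reading the hunks: TLB_MASK_TABLE_OFS
comes from the generic part of this series rather than from this file.  Its
exact home is not visible in this diff, but per the commit's design it
expands to the (negative) offset of the fast-path TLB descriptor for a given
MMU index relative to env, along these lines (assumed shape, shown only for
orientation):

    /* The TLB fast data lives in the negative-offset block before env,
       so the result is <= 0 for every valid IDX. */
    #define TLB_MASK_TABLE_OFS(IDX) \
        ((int)offsetof(ArchCPU, neg.tlb.f[IDX]) - \
         (int)offsetof(ArchCPU, env))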
diff --git a/tcg/aarch64/tcg-target.inc.c b/tcg/aarch64/tcg-target.inc.c
index 90957593a3..57c297f9d7 100644
--- a/tcg/aarch64/tcg-target.inc.c
+++ b/tcg/aarch64/tcg-target.inc.c
@@ -1637,9 +1637,9 @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
label->label_ptr[0] = label_ptr;
}
-/* We expect to use a 24-bit unsigned offset from ENV. */
-QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_.f[NB_MMU_MODES - 1].table)
- > 0xffffff);
+/* We expect to use a 7-bit scaled negative offset from ENV. */
+QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
+QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -512);
/* Load and compare a TLB entry, emitting the conditional jump to the
slow path for the failure case, which will be patched later when finalizing
@@ -1649,8 +1649,9 @@ static void tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, TCGMemOp opc,
tcg_insn_unit **label_ptr, int mem_index,
bool is_read)
{
- int mask_ofs = offsetof(CPUArchState, tlb_.f[mem_index].mask);
- int table_ofs = offsetof(CPUArchState, tlb_.f[mem_index].table);
+ int fast_ofs = TLB_MASK_TABLE_OFS(mem_index);
+ int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask);
+ int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table);
unsigned a_bits = get_alignment_bits(opc);
unsigned s_bits = opc & MO_SIZE;
unsigned a_mask = (1u << a_bits) - 1;
@@ -1659,24 +1660,6 @@ static void tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, TCGMemOp opc,
TCGType mask_type;
uint64_t compare_mask;
- if (table_ofs > 0xfff) {
- int table_hi = table_ofs & ~0xfff;
- int mask_hi = mask_ofs & ~0xfff;
-
- table_base = TCG_REG_X1;
- if (mask_hi == table_hi) {
- mask_base = table_base;
- } else if (mask_hi) {
- mask_base = TCG_REG_X0;
- tcg_out_insn(s, 3401, ADDI, TCG_TYPE_I64,
- mask_base, TCG_AREG0, mask_hi);
- }
- tcg_out_insn(s, 3401, ADDI, TCG_TYPE_I64,
- table_base, TCG_AREG0, table_hi);
- mask_ofs -= mask_hi;
- table_ofs -= table_hi;
- }
-
mask_type = (TARGET_PAGE_BITS + CPU_TLB_DYN_MAX_BITS > 32
? TCG_TYPE_I64 : TCG_TYPE_I32);
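A closing note on the new bounds: the AArch64 load-pair instruction (LDP)
with a signed immediate offset encodes 7 bits scaled by the access size, so
with 64-bit registers it reaches -512..+504 bytes.  That is the "7-bit scaled
negative offset" the new comment refers to, and it is why the build asserts
pin TLB_MASK_TABLE_OFS(0) into [-512, 0].  A sketch of the corresponding fit
check (hypothetical helper, not QEMU code):

    #include <stdbool.h>

    /* True when ofs is encodable as an LDP signed offset for 64-bit
       registers: 8-byte aligned and within the scaled 7-bit range. */
    static inline bool fits_ldp_imm7_scaled8(int ofs)
    {
        return (ofs & 7) == 0 && ofs >= -512 && ofs <= 504;
    }

Because the asserts guarantee such offsets for the TLB fast array, the
deleted ADDI fix-up block (which existed only to split offsets larger than
0xfff into a base-register adjustment plus a 12-bit displacement) has no
remaining purpose.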