| author | Richard Henderson <richard.henderson@linaro.org> | 2019-03-22 22:03:39 -0700 |
|---|---|---|
| committer | Richard Henderson <richard.henderson@linaro.org> | 2019-06-10 07:03:42 -0700 |
| commit | 269bd5d8f61c6b0825ed3c6a5fe01a3ad71c3b4a (patch) | |
| tree | b51b8c1ca4eadac1eee562a5e1671b778e022250 /tcg/s390/tcg-target.inc.c | |
| parent | 5e1401969b25f676fee6b1c564441759cf967a43 (diff) | |
cpu: Move the softmmu tlb to CPUNegativeOffsetState
We have for some time had code within the tcg backends to
handle large positive offsets from env. This move makes
sure that need not happen. Indeed, we are able to assert
at build time that simple offsets suffice for all hosts.
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
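The build-time claim in the commit message is easiest to see with a small standalone model. The following is a minimal sketch only, not QEMU's actual definitions: `ExampleCPU`, `NegState`, `FastTLB`, `NB_MMU_MODES`, and `FAST_OFS` are hypothetical stand-ins. It illustrates the layout idea: placing the fast-path TLB data immediately before `env` keeps every env-relative offset small and negative, which a `_Static_assert` (standing in for `QEMU_BUILD_BUG_ON`) can verify at compile time against a 20-bit signed displacement such as s390's.

```c
/*
 * Minimal standalone sketch (illustrative names, not QEMU's definitions).
 * The fast-path TLB data sits directly *before* the env member, so its
 * env-relative offset is a small negative number that fits the host's
 * signed displacement, and that property can be checked at compile time.
 */
#include <stddef.h>
#include <stdint.h>

#define NB_MMU_MODES 4                  /* hypothetical mode count */

typedef struct FastTLB {                /* stand-in for a mask/table pair */
    uintptr_t mask;
    uintptr_t table;
} FastTLB;

typedef struct NegState {               /* state that lives below env */
    FastTLB f[NB_MMU_MODES];
} NegState;

typedef struct ExampleCPU {             /* container: NegState right before env */
    NegState neg;
    struct { int dummy; } env;
} ExampleCPU;

/* env-relative offset of the fast-TLB data for mmu index IDX (negative). */
#define FAST_OFS(IDX) \
    ((ptrdiff_t)offsetof(ExampleCPU, neg.f[IDX]) - \
     (ptrdiff_t)offsetof(ExampleCPU, env))

/* Analogue of the QEMU_BUILD_BUG_ON pair added in the diff below: the
   offset must not be positive and must fit a 20-bit signed displacement. */
_Static_assert(FAST_OFS(0) <= 0, "offset must not be positive");
_Static_assert(FAST_OFS(0) >= -(1 << 19), "offset must fit a 20-bit disp");

int main(void)
{
    return 0;
}
```

The assertions in the diff below express the same two bounds with QEMU_BUILD_BUG_ON, whose condition is inverted relative to `_Static_assert`: it breaks the build when the condition is true.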
Diffstat (limited to 'tcg/s390/tcg-target.inc.c')
| -rw-r--r-- | tcg/s390/tcg-target.inc.c | 11 |
1 file changed, 6 insertions(+), 5 deletions(-)
```diff
diff --git a/tcg/s390/tcg-target.inc.c b/tcg/s390/tcg-target.inc.c
index 4d896d0b58..fe42939d98 100644
--- a/tcg/s390/tcg-target.inc.c
+++ b/tcg/s390/tcg-target.inc.c
@@ -1538,9 +1538,9 @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGMemOp opc, TCGReg data,
 #if defined(CONFIG_SOFTMMU)
 #include "tcg-ldst.inc.c"
 
-/* We're expecting to use a 20-bit signed offset on the tlb memory ops. */
-QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_.f[NB_MMU_MODES - 1].table)
-                  > 0x7ffff);
+/* We're expecting to use a 20-bit negative offset on the tlb memory ops. */
+QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
+QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 19));
 
 /* Load and compare a TLB entry, leaving the flags set. Loads the TLB
    addend into R2. Returns a register with the santitized guest address. */
@@ -1551,8 +1551,9 @@ static TCGReg tcg_out_tlb_read(TCGContext* s, TCGReg addr_reg, TCGMemOp opc,
     unsigned a_bits = get_alignment_bits(opc);
     unsigned s_mask = (1 << s_bits) - 1;
     unsigned a_mask = (1 << a_bits) - 1;
-    int mask_off = offsetof(CPUArchState, tlb_.f[mem_index].mask);
-    int table_off = offsetof(CPUArchState, tlb_.f[mem_index].table);
+    int fast_off = TLB_MASK_TABLE_OFS(mem_index);
+    int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
+    int table_off = fast_off + offsetof(CPUTLBDescFast, table);
     int ofs, a_off;
     uint64_t tlb_mask;
 
```
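For context on why the backend wants `mask` and `table` at adjacent small offsets: the softmmu fast path derives the TLB entry address from them with one shift, one AND, and one add. The sketch below is a rough C model of that lookup under assumed constants; `PAGE_BITS`, `ENTRY_BITS`, and the `FastTLB` layout are illustrative stand-ins, not QEMU's actual values, and QEMU emits the equivalent as host instructions rather than calling a C helper.

```c
/*
 * Rough C model of the fast-path lookup enabled by the mask/table pair
 * (constants and names are illustrative, not taken from QEMU).
 */
#include <stdint.h>

#define PAGE_BITS  12   /* assumed target page size: 4 KiB */
#define ENTRY_BITS 5    /* assumed log2 of the TLB entry size */

typedef struct FastTLB {
    uintptr_t mask;     /* (number_of_entries - 1) << ENTRY_BITS */
    uintptr_t table;    /* host address of the TLB entry array */
} FastTLB;

/* Host address of the TLB entry that may map guest address addr. */
uintptr_t tlb_entry_addr(const FastTLB *f, uint64_t addr)
{
    /* Index by guest page number, pre-scaled to a byte offset by mask. */
    uintptr_t ofs = (uintptr_t)(addr >> (PAGE_BITS - ENTRY_BITS)) & f->mask;
    return f->table + ofs;
}
```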