Diffstat (limited to 'tcg')
-rw-r--r--  tcg/aarch64/tcg-target.inc.c | 10
-rw-r--r--  tcg/arm/tcg-target.inc.c     | 10
-rw-r--r--  tcg/i386/tcg-target.inc.c    |  4
-rw-r--r--  tcg/mips/tcg-target.inc.c    | 12
-rw-r--r--  tcg/ppc/tcg-target.inc.c     |  8
-rw-r--r--  tcg/riscv/tcg-target.inc.c   | 12
-rw-r--r--  tcg/s390/tcg-target.inc.c    |  8
-rw-r--r--  tcg/sparc/tcg-target.inc.c   | 12
8 files changed, 19 insertions(+), 57 deletions(-)
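The mechanical change in every backend below is the same: the per-mmu-index tlb_mask[] and tlb_table[] arrays of CPUArchState are replaced by a single embedded structure, reached as tlb_.f[mmu_idx].mask and tlb_.f[mmu_idx].table, so several ordering/proximity build-time asserts become unnecessary. A minimal sketch of the layout this diff assumes is shown below; only the member names tlb_, f, mask and table are taken from the patch, the type names and comments are assumptions.

/* Sketch only: layout implied by the offsetof() changes in this diff.
 * The member names tlb_, f, mask and table come from the patch itself;
 * the struct names here are assumptions for illustration. */
typedef struct CPUTLBDescFast {
    uintptr_t mask;        /* (n_entries - 1) << CPU_TLB_ENTRY_BITS */
    CPUTLBEntry *table;    /* TLB entries for this MMU index */
} CPUTLBDescFast;

typedef struct CPUTLB {
    CPUTLBDescFast f[NB_MMU_MODES];
    /* slow-path bookkeeping elided */
} CPUTLB;

/* CPUArchState then embeds one instance:
 *     CPUTLB tlb_;
 * which is why mask and table are now guaranteed adjacent per mmu index. */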
diff --git a/tcg/aarch64/tcg-target.inc.c b/tcg/aarch64/tcg-target.inc.c
index 9e1dad9696..90957593a3 100644
--- a/tcg/aarch64/tcg-target.inc.c
+++ b/tcg/aarch64/tcg-target.inc.c
@@ -1637,12 +1637,8 @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
label->label_ptr[0] = label_ptr;
}
-/* We expect tlb_mask to be before tlb_table. */
-QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_table) <
- offsetof(CPUArchState, tlb_mask));
-
/* We expect to use a 24-bit unsigned offset from ENV. */
-QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_table[NB_MMU_MODES - 1])
+QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_.f[NB_MMU_MODES - 1].table)
> 0xffffff);
/* Load and compare a TLB entry, emitting the conditional jump to the
@@ -1653,8 +1649,8 @@ static void tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, TCGMemOp opc,
tcg_insn_unit **label_ptr, int mem_index,
bool is_read)
{
- int mask_ofs = offsetof(CPUArchState, tlb_mask[mem_index]);
- int table_ofs = offsetof(CPUArchState, tlb_table[mem_index]);
+ int mask_ofs = offsetof(CPUArchState, tlb_.f[mem_index].mask);
+ int table_ofs = offsetof(CPUArchState, tlb_.f[mem_index].table);
unsigned a_bits = get_alignment_bits(opc);
unsigned s_bits = opc & MO_SIZE;
unsigned a_mask = (1u << a_bits) - 1;
diff --git a/tcg/arm/tcg-target.inc.c b/tcg/arm/tcg-target.inc.c
index 7316504c9d..38de6d59c7 100644
--- a/tcg/arm/tcg-target.inc.c
+++ b/tcg/arm/tcg-target.inc.c
@@ -1220,12 +1220,8 @@ static TCGReg tcg_out_arg_reg64(TCGContext *s, TCGReg argreg,
#define TLB_SHIFT (CPU_TLB_ENTRY_BITS + CPU_TLB_BITS)
-/* We expect tlb_mask to be before tlb_table. */
-QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_table) <
- offsetof(CPUArchState, tlb_mask));
-
/* We expect to use a 20-bit unsigned offset from ENV. */
-QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_table[NB_MMU_MODES - 1])
+QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_.f[NB_MMU_MODES - 1].table)
> 0xfffff);
/* Load and compare a TLB entry, leaving the flags set. Returns the register
@@ -1236,8 +1232,8 @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
{
int cmp_off = (is_load ? offsetof(CPUTLBEntry, addr_read)
: offsetof(CPUTLBEntry, addr_write));
- int mask_off = offsetof(CPUArchState, tlb_mask[mem_index]);
- int table_off = offsetof(CPUArchState, tlb_table[mem_index]);
+ int mask_off = offsetof(CPUArchState, tlb_.f[mem_index].mask);
+ int table_off = offsetof(CPUArchState, tlb_.f[mem_index].table);
TCGReg mask_base = TCG_AREG0, table_base = TCG_AREG0;
unsigned s_bits = opc & MO_SIZE;
unsigned a_bits = get_alignment_bits(opc);
diff --git a/tcg/i386/tcg-target.inc.c b/tcg/i386/tcg-target.inc.c
index c0443da4af..5f5b886c04 100644
--- a/tcg/i386/tcg-target.inc.c
+++ b/tcg/i386/tcg-target.inc.c
@@ -1730,10 +1730,10 @@ static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
tcg_out_modrm_offset(s, OPC_AND_GvEv + trexw, r0, TCG_AREG0,
- offsetof(CPUArchState, tlb_mask[mem_index]));
+ offsetof(CPUArchState, tlb_.f[mem_index].mask));
tcg_out_modrm_offset(s, OPC_ADD_GvEv + hrexw, r0, TCG_AREG0,
- offsetof(CPUArchState, tlb_table[mem_index]));
+ offsetof(CPUArchState, tlb_.f[mem_index].table));
/* If the required alignment is at least as large as the access, simply
copy the address and mask. For lesser alignments, check that we don't
diff --git a/tcg/mips/tcg-target.inc.c b/tcg/mips/tcg-target.inc.c
index 7cafd4a790..ef6633587e 100644
--- a/tcg/mips/tcg-target.inc.c
+++ b/tcg/mips/tcg-target.inc.c
@@ -1202,14 +1202,6 @@ static int tcg_out_call_iarg_reg2(TCGContext *s, int i, TCGReg al, TCGReg ah)
return i;
}
-/* We expect tlb_mask to be before tlb_table. */
-QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_table) <
- offsetof(CPUArchState, tlb_mask));
-
-/* We expect tlb_mask to be "near" tlb_table. */
-QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_table) -
- offsetof(CPUArchState, tlb_mask) >= 0x8000);
-
/*
* Perform the tlb comparison operation.
* The complete host address is placed in BASE.
@@ -1223,8 +1215,8 @@ static void tcg_out_tlb_load(TCGContext *s, TCGReg base, TCGReg addrl,
unsigned s_bits = opc & MO_SIZE;
unsigned a_bits = get_alignment_bits(opc);
int mem_index = get_mmuidx(oi);
- int mask_off = offsetof(CPUArchState, tlb_mask[mem_index]);
- int table_off = offsetof(CPUArchState, tlb_table[mem_index]);
+ int mask_off = offsetof(CPUArchState, tlb_.f[mem_index].mask);
+ int table_off = offsetof(CPUArchState, tlb_.f[mem_index].table);
int add_off = offsetof(CPUTLBEntry, addend);
int cmp_off = (is_load ? offsetof(CPUTLBEntry, addr_read)
: offsetof(CPUTLBEntry, addr_write));
diff --git a/tcg/ppc/tcg-target.inc.c b/tcg/ppc/tcg-target.inc.c
index 30c095d3d5..d69c18ac1e 100644
--- a/tcg/ppc/tcg-target.inc.c
+++ b/tcg/ppc/tcg-target.inc.c
@@ -1498,10 +1498,6 @@ static void * const qemu_st_helpers[16] = {
[MO_BEQ] = helper_be_stq_mmu,
};
-/* We expect tlb_mask to be before tlb_table. */
-QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_table) <
- offsetof(CPUArchState, tlb_mask));
-
/* Perform the TLB load and compare. Places the result of the comparison
in CR7, loads the addend of the TLB into R3, and returns the register
containing the guest address (zero-extended into R4). Clobbers R0 and R2. */
@@ -1514,8 +1510,8 @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGMemOp opc,
= (is_read
? offsetof(CPUTLBEntry, addr_read)
: offsetof(CPUTLBEntry, addr_write));
- int mask_off = offsetof(CPUArchState, tlb_mask[mem_index]);
- int table_off = offsetof(CPUArchState, tlb_table[mem_index]);
+ int mask_off = offsetof(CPUArchState, tlb_.f[mem_index].mask);
+ int table_off = offsetof(CPUArchState, tlb_.f[mem_index].table);
TCGReg mask_base = TCG_AREG0, table_base = TCG_AREG0;
unsigned s_bits = opc & MO_SIZE;
unsigned a_bits = get_alignment_bits(opc);
diff --git a/tcg/riscv/tcg-target.inc.c b/tcg/riscv/tcg-target.inc.c
index 6497a4dab2..96c33bf621 100644
--- a/tcg/riscv/tcg-target.inc.c
+++ b/tcg/riscv/tcg-target.inc.c
@@ -962,14 +962,6 @@ static void * const qemu_st_helpers[16] = {
/* We don't support oversize guests */
QEMU_BUILD_BUG_ON(TCG_TARGET_REG_BITS < TARGET_LONG_BITS);
-/* We expect tlb_mask to be before tlb_table. */
-QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_table) <
- offsetof(CPUArchState, tlb_mask));
-
-/* We expect tlb_mask to be "near" tlb_table. */
-QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_table) -
- offsetof(CPUArchState, tlb_mask) >= 0x800);
-
static void tcg_out_tlb_load(TCGContext *s, TCGReg addrl,
TCGReg addrh, TCGMemOpIdx oi,
tcg_insn_unit **label_ptr, bool is_load)
@@ -982,8 +974,8 @@ static void tcg_out_tlb_load(TCGContext *s, TCGReg addrl,
int mask_off, table_off;
TCGReg mask_base = TCG_AREG0, table_base = TCG_AREG0;
- mask_off = offsetof(CPUArchState, tlb_mask[mem_index]);
- table_off = offsetof(CPUArchState, tlb_table[mem_index]);
+ mask_off = offsetof(CPUArchState, tlb_.f[mem_index].mask);
+ table_off = offsetof(CPUArchState, tlb_.f[mem_index].table);
if (table_off > 0x7ff) {
int mask_hi = mask_off - sextreg(mask_off, 0, 12);
int table_hi = table_off - sextreg(table_off, 0, 12);
diff --git a/tcg/s390/tcg-target.inc.c b/tcg/s390/tcg-target.inc.c
index 331d51852c..4d896d0b58 100644
--- a/tcg/s390/tcg-target.inc.c
+++ b/tcg/s390/tcg-target.inc.c
@@ -1539,9 +1539,7 @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGMemOp opc, TCGReg data,
#include "tcg-ldst.inc.c"
/* We're expecting to use a 20-bit signed offset on the tlb memory ops. */
-QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_mask[NB_MMU_MODES - 1])
- > 0x7ffff);
-QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_table[NB_MMU_MODES - 1])
+QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_.f[NB_MMU_MODES - 1].table)
> 0x7ffff);
/* Load and compare a TLB entry, leaving the flags set. Loads the TLB
@@ -1553,8 +1551,8 @@ static TCGReg tcg_out_tlb_read(TCGContext* s, TCGReg addr_reg, TCGMemOp opc,
unsigned a_bits = get_alignment_bits(opc);
unsigned s_mask = (1 << s_bits) - 1;
unsigned a_mask = (1 << a_bits) - 1;
- int mask_off = offsetof(CPUArchState, tlb_mask[mem_index]);
- int table_off = offsetof(CPUArchState, tlb_table[mem_index]);
+ int mask_off = offsetof(CPUArchState, tlb_.f[mem_index].mask);
+ int table_off = offsetof(CPUArchState, tlb_.f[mem_index].table);
int ofs, a_off;
uint64_t tlb_mask;
diff --git a/tcg/sparc/tcg-target.inc.c b/tcg/sparc/tcg-target.inc.c
index 83295955a7..066cb0e892 100644
--- a/tcg/sparc/tcg-target.inc.c
+++ b/tcg/sparc/tcg-target.inc.c
@@ -1075,19 +1075,11 @@ static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
The result of the TLB comparison is in %[ix]cc. The sanitized address
is in the returned register, maybe %o0. The TLB addend is in %o1. */
-/* We expect tlb_mask to be before tlb_table. */
-QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_table) <
- offsetof(CPUArchState, tlb_mask));
-
-/* We expect tlb_mask to be "near" tlb_table. */
-QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_table) -
- offsetof(CPUArchState, tlb_mask) >= (1 << 13));
-
static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addr, int mem_index,
TCGMemOp opc, int which)
{
- int mask_off = offsetof(CPUArchState, tlb_mask[mem_index]);
- int table_off = offsetof(CPUArchState, tlb_table[mem_index]);
+ int mask_off = offsetof(CPUArchState, tlb_.f[mem_index].mask);
+ int table_off = offsetof(CPUArchState, tlb_.f[mem_index].table);
TCGReg base = TCG_AREG0;
const TCGReg r0 = TCG_REG_O0;
const TCGReg r1 = TCG_REG_O1;
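For context, the generated fast path that consumes mask_off and table_off is the same in every backend above; the i386 hunk shows it most directly (shift, AND with mask, ADD of table). An illustrative plain-C rendering of that sequence is sketched below, under the assumption that mask holds (n_entries - 1) << CPU_TLB_ENTRY_BITS; the helper name is hypothetical and not part of the patch.

/* Illustrative only: the softmmu fast-path index computation the backends
 * emit, written as plain C.  tlb_fast_entry() is a hypothetical helper. */
static inline CPUTLBEntry *tlb_fast_entry(CPUArchState *env, int mmu_idx,
                                          target_ulong addr)
{
    uintptr_t mask  = env->tlb_.f[mmu_idx].mask;
    uintptr_t table = (uintptr_t)env->tlb_.f[mmu_idx].table;
    /* Shift the page number down to a scaled entry offset, then mask to
     * the table size; mask already carries the CPU_TLB_ENTRY_BITS scaling. */
    uintptr_t ofs = (addr >> (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS)) & mask;
    return (CPUTLBEntry *)(table + ofs);
}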