| author | Sergey Sorokin <afarallax@yandex.ru> | 2016-06-23 21:16:46 +0300 |
|---|---|---|
| committer | Richard Henderson <rth@twiddle.net> | 2016-07-05 20:50:13 -0700 |
| commit | 1f00b27f17518a1bcb4cedca49eaec96a4d560bd (patch) | |
| tree | 90abe3fc60c60ce9ff0aa5c46a87c5589ff55229 /tcg/i386/tcg-target.inc.c | |
| parent | 59d7c14eeff8d2ad7f61aed86ce5a176113bc153 (diff) | |
tcg: Improve the alignment check infrastructure
Some architectures (e.g. ARMv8) need the address to be aligned to a
size larger than the size of the memory access itself. The current
costless alignment check implementation in QEMU is sufficient to
support such a check, but we need a way to specify the alignment size.
Signed-off-by: Sergey Sorokin <afarallax@yandex.ru>
Message-Id: <1466705806-679898-1-git-send-email-afarallax@yandex.ru>
Signed-off-by: Richard Henderson <rth@twiddle.net>
[rth: Assert in tcg_canonicalize_memop. Leave get_alignment_bits
available for, though unused by, user-mode. Retain logging difference
based on ALIGNED_ONLY.]
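The commit message above boils down to one helper: instead of a single aligned/unaligned flag, the memop now carries an alignment-size field, and backends query it through get_alignment_bits(). As the i386 hunk below shows, the helper returns the log2 of the required alignment when a check is needed (or 0 for a byte access, which can never be misaligned) and a negative value when an unaligned multi-byte access is allowed. The following is only a minimal sketch of such a helper; the field layout and the MO_* values are assumptions made for illustration, not the exact definitions from tcg/tcg.h.

```c
#include <assert.h>

/* Illustrative memop layout -- the bit positions and MO_* values here are
 * assumptions for this sketch, not the exact ones used by QEMU. */
enum {
    MO_SIZE   = 0x3,               /* access is 1 << (opc & MO_SIZE) bytes */
    MO_ASHIFT = 4,                 /* start of the alignment field         */
    MO_AMASK  = 0x7 << MO_ASHIFT,
    MO_UNALN  = 0,                 /* unaligned access permitted           */
    MO_ALIGN  = MO_AMASK,          /* natural alignment (== access size)   */
    /* other field values: an explicit log2 alignment, e.g. 3 -> 8 bytes   */
};

/* Return the log2 of the required alignment, or a negative value when a
 * multi-byte access may be unaligned.  A byte access returns 0: it can
 * never be misaligned, so the cheap aligned-path code is always usable. */
static inline int get_alignment_bits_sketch(int opc)
{
    int s_bits  = opc & MO_SIZE;
    int a_field = opc & MO_AMASK;

    if (a_field == MO_UNALN) {
        return s_bits == 0 ? 0 : -1;
    }
    if (a_field == MO_ALIGN) {
        return s_bits;                      /* align to the access size */
    }
    /* Explicit alignment: must be at least as large as the access itself. */
    assert((a_field >> MO_ASHIFT) >= s_bits);
    return a_field >> MO_ASHIFT;
}
```

With that return convention, the backend below can fold the page check and the alignment check into the single AND it already emits for the TLB lookup.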
Diffstat (limited to 'tcg/i386/tcg-target.inc.c')
-rw-r--r-- | tcg/i386/tcg-target.inc.c | 15
1 file changed, 9 insertions, 6 deletions
diff --git a/tcg/i386/tcg-target.inc.c b/tcg/i386/tcg-target.inc.c
index bc34535738..6f8cdca756 100644
--- a/tcg/i386/tcg-target.inc.c
+++ b/tcg/i386/tcg-target.inc.c
@@ -1202,8 +1202,8 @@ static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
     TCGType ttype = TCG_TYPE_I32;
     TCGType tlbtype = TCG_TYPE_I32;
     int trexw = 0, hrexw = 0, tlbrexw = 0;
-    int s_mask = (1 << (opc & MO_SIZE)) - 1;
-    bool aligned = (opc & MO_AMASK) == MO_ALIGN || s_mask == 0;
+    int a_bits = get_alignment_bits(opc);
+    target_ulong tlb_mask;

     if (TCG_TARGET_REG_BITS == 64) {
         if (TARGET_LONG_BITS == 64) {
@@ -1220,19 +1220,22 @@ static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
     }

     tcg_out_mov(s, tlbtype, r0, addrlo);
-    if (aligned) {
+    if (a_bits >= 0) {
+        /* A byte access or an alignment check required */
         tcg_out_mov(s, ttype, r1, addrlo);
+        tlb_mask = TARGET_PAGE_MASK | ((1 << a_bits) - 1);
     } else {
         /* For unaligned access check that we don't cross pages using
            the page address of the last byte. */
-        tcg_out_modrm_offset(s, OPC_LEA + trexw, r1, addrlo, s_mask);
+        tcg_out_modrm_offset(s, OPC_LEA + trexw, r1, addrlo,
+                             (1 << (opc & MO_SIZE)) - 1);
+        tlb_mask = TARGET_PAGE_MASK;
     }

     tcg_out_shifti(s, SHIFT_SHR + tlbrexw, r0,
                    TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
-    tgen_arithi(s, ARITH_AND + trexw, r1,
-                TARGET_PAGE_MASK | (aligned ? s_mask : 0), 0);
+    tgen_arithi(s, ARITH_AND + trexw, r1, tlb_mask, 0);
     tgen_arithi(s, ARITH_AND + tlbrexw, r0,
                 (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS, 0);
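The effect of the new tlb_mask can be checked in isolation: masking the address with TARGET_PAGE_MASK | ((1 << a_bits) - 1) preserves any low bits that violate the requested alignment, so the subsequent compare against the page-aligned TLB entry fails and execution falls through to the slow path. Below is a small standalone model of that comparison; the 4 KiB page size and the helper name are made up for the demo, and the TLB entry is simplified to a bare page address.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TARGET_PAGE_BITS 12                            /* assume 4 KiB pages */
#define TARGET_PAGE_MASK (~((uint64_t)(1u << TARGET_PAGE_BITS) - 1))

/* Model of the fast-path compare emitted by tcg_out_tlb_load: one AND with
 * tlb_mask covers both the page-number match and the alignment check. */
static bool tlb_compare_hits(uint64_t addr, uint64_t tlb_entry_addr, int a_bits)
{
    uint64_t tlb_mask = TARGET_PAGE_MASK | ((1ULL << a_bits) - 1);
    return (addr & tlb_mask) == tlb_entry_addr;
}

int main(void)
{
    uint64_t page = 0x40000000;    /* page-aligned guest address in the TLB */

    /* 8-byte-aligned access inside the page: the alignment bits are zero,
     * the masked address equals the TLB entry, the fast path is taken. */
    printf("%d\n", tlb_compare_hits(page + 0x10, page, 3));   /* prints 1 */

    /* Misaligned access: an alignment bit survives the mask, the compare
     * fails, and the generated code would branch to the slow path. */
    printf("%d\n", tlb_compare_hits(page + 0x14, page, 3));   /* prints 0 */

    return 0;
}
```

Because the TLB entry already holds a page-aligned address, setting extra low bits in the mask costs nothing on the fast path: a misaligned access simply looks like a TLB miss and is handled by the existing slow-path helper, which is the "costless" check the commit message refers to.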