author     Richard Henderson <rth@twiddle.net>  2016-07-14 12:43:06 -0700
committer  Richard Henderson <rth@twiddle.net>  2016-09-16 08:12:06 -0700
commit     85aa80813dd9f5c1f581c743e45678a3bee220f8 (patch)
tree       2a140ecb81d60cf1a593a160c0d09f88ae5a3c7d /tcg/arm
parent     ebc231d7daf1f41b23d8b6a6d1234800b86e5fe2 (diff)
tcg: Support arbitrary size + alignment
Previously we allowed fully unaligned operations, but not operations
that are aligned but with less alignment than the operation size.

In addition, arm32, ia64, mips, and sparc had been omitted from the
previous overalignment patch, which would have led to that alignment
being enforced.

Signed-off-by: Richard Henderson <rth@twiddle.net>
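For context on the check the diff below adds: a TCGMemOp packs the log2 of the
access size (masked by MO_SIZE) together with a requested alignment, which
get_alignment_bits() extracts. A minimal standalone sketch of the new mask
computation follows; the bit layout used by the stand-in helper is hypothetical
(the real definitions live in tcg/tcg.h), but the promotion of a_bits to at
least s_bits mirrors the hunk in tcg_out_tlb_read() shown further down.

    #include <stdio.h>

    /* Sketch only: in QEMU these live in tcg/tcg.h.  MO_SIZE masks out
     * the log2 of the access size; get_alignment_bits() returns the
     * log2 of the requested alignment, 0 if none was requested.  The
     * encoding used by this stand-in helper is hypothetical. */
    typedef unsigned TCGMemOp;
    #define MO_SIZE 3u

    static unsigned get_alignment_bits(TCGMemOp opc)
    {
        return (opc >> 4) & MO_SIZE;    /* hypothetical bit layout */
    }

    /* Mirror of the new check in tcg_out_tlb_read(): inline unaligned
     * accesses are unsupported, so enforce at least natural alignment,
     * plus any extra (over-)alignment the memop asks for. */
    static unsigned alignment_mask(TCGMemOp opc)
    {
        unsigned s_bits = opc & MO_SIZE;
        unsigned a_bits = get_alignment_bits(opc);

        if (a_bits < s_bits) {
            a_bits = s_bits;            /* natural alignment at minimum */
        }
        return (1u << a_bits) - 1;      /* low address bits that must be 0 */
    }

    int main(void)
    {
        printf("32-bit access, no explicit align: mask = 0x%x\n",
               alignment_mask(2));              /* s_bits = 2 -> 0x3 */
        printf("8-bit access aligned to 4 bytes: mask = 0x%x\n",
               alignment_mask(0 | (2u << 4)));  /* a_bits = 2 -> 0x3 */
        return 0;
    }

With a_bits promoted this way, a single TST against (1 << a_bits) - 1 covers
both the natural-alignment requirement and any stricter alignment the memop
requests.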
Diffstat (limited to 'tcg/arm')
-rw-r--r--  tcg/arm/tcg-target.inc.c  |  19
1 file changed, 12 insertions(+), 7 deletions(-)
diff --git a/tcg/arm/tcg-target.inc.c b/tcg/arm/tcg-target.inc.c
index 172febafdd..094f3f804d 100644
--- a/tcg/arm/tcg-target.inc.c
+++ b/tcg/arm/tcg-target.inc.c
@@ -1168,7 +1168,7 @@ QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_table[NB_MMU_MODES - 1][1])
containing the addend of the tlb entry. Clobbers R0, R1, R2, TMP. */
static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
- TCGMemOp s_bits, int mem_index, bool is_load)
+ TCGMemOp opc, int mem_index, bool is_load)
{
TCGReg base = TCG_AREG0;
int cmp_off =
@@ -1176,6 +1176,8 @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
? offsetof(CPUArchState, tlb_table[mem_index][0].addr_read)
: offsetof(CPUArchState, tlb_table[mem_index][0].addr_write));
int add_off = offsetof(CPUArchState, tlb_table[mem_index][0].addend);
+ unsigned s_bits = opc & MO_SIZE;
+ unsigned a_bits = get_alignment_bits(opc);
/* Should generate something like the following:
* shr tmp, addrlo, #TARGET_PAGE_BITS (1)
@@ -1216,10 +1218,13 @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
}
}
- /* Check alignment. */
- if (s_bits) {
- tcg_out_dat_imm(s, COND_AL, ARITH_TST,
- 0, addrlo, (1 << s_bits) - 1);
+ /* Check alignment. We don't support inline unaligned accesses,
+ but we can easily support overalignment checks. */
+ if (a_bits < s_bits) {
+ a_bits = s_bits;
+ }
+ if (a_bits) {
+ tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, (1 << a_bits) - 1);
}
/* Load the tlb addend. */
@@ -1499,7 +1504,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
#ifdef CONFIG_SOFTMMU
mem_index = get_mmuidx(oi);
- addend = tcg_out_tlb_read(s, addrlo, addrhi, opc & MO_SIZE, mem_index, 1);
+ addend = tcg_out_tlb_read(s, addrlo, addrhi, opc, mem_index, 1);
/* This is a conditional BL only to load a pointer within this opcode into LR
for the slow path. We will not be using the value for a tail call. */
@@ -1630,7 +1635,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
#ifdef CONFIG_SOFTMMU
mem_index = get_mmuidx(oi);
- addend = tcg_out_tlb_read(s, addrlo, addrhi, opc & MO_SIZE, mem_index, 0);
+ addend = tcg_out_tlb_read(s, addrlo, addrhi, opc, mem_index, 0);
tcg_out_qemu_st_index(s, COND_EQ, opc, datalo, datahi, addrlo, addend);
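On the emitted-code side, the TST instruction produced by tcg_out_dat_imm()
ANDs the address with (1 << a_bits) - 1 and sets the Z flag on a zero result,
so the EQ-conditioned fast-path load/store runs only for suitably aligned
addresses and everything else reaches the slow path. The predicate it
computes, sketched in C (names assumed for illustration):

    #include <stdbool.h>
    #include <stdint.h>

    /* What `tst addrlo, #((1 << a_bits) - 1)` tests: the Z flag (and
     * thus the EQ condition used by the load/store paths above) is set
     * exactly when the address already has the required alignment. */
    static bool is_aligned(uint32_t addr, unsigned a_bits)
    {
        return (addr & ((1u << a_bits) - 1)) == 0;
    }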