Diffstat (limited to 'tcg/riscv/tcg-target.c.inc')
-rw-r--r--  tcg/riscv/tcg-target.c.inc | 48 ++++++++++++++++++++++++++++--------------------
 1 file changed, 28 insertions(+), 20 deletions(-)
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index 19cd4507fb..415e6c6e15 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -910,12 +910,13 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, TCGReg *pbase,
 
 #ifdef CONFIG_SOFTMMU
     unsigned s_bits = opc & MO_SIZE;
+    unsigned s_mask = (1u << s_bits) - 1;
     int mem_index = get_mmuidx(oi);
     int fast_ofs = TLB_MASK_TABLE_OFS(mem_index);
     int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask);
     int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table);
-    TCGReg mask_base = TCG_AREG0, table_base = TCG_AREG0;
-    tcg_target_long compare_mask;
+    int compare_mask;
+    TCGReg addr_adj;
 
     ldst = new_ldst_label(s);
     ldst->is_ld = is_ld;
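
For reference, a minimal C sketch (not QEMU code; the constants and the helper name are illustrative stand-ins) of the TLB fast-path lookup that the SRLI/AND/ADD sequence in the next hunk emits: the guest address is shifted so the page number arrives pre-scaled by the TLB entry size, masked by CPUTLBDescFast.mask to wrap within the table, and added to CPUTLBDescFast.table to locate the candidate CPUTLBEntry.

#include <stdint.h>

/* Illustrative stand-ins for TARGET_PAGE_BITS and CPU_TLB_ENTRY_BITS. */
#define PAGE_BITS      12
#define TLB_ENTRY_BITS 5    /* log2(sizeof(CPUTLBEntry)), assumed */

/* Hypothetical helper mirroring the emitted SRLI/AND/ADD sequence. */
static uintptr_t tlb_entry_for(uintptr_t table, uintptr_t mask, uint64_t addr)
{
    /* SRLI: page number, left pre-scaled by the entry size.  */
    uintptr_t ofs = (uintptr_t)(addr >> (PAGE_BITS - TLB_ENTRY_BITS));
    /* AND: CPUTLBDescFast.mask wraps the offset into the table and
     * clears the sub-entry bits left over from the page offset.  */
    ofs &= mask;
    /* ADD: CPUTLBDescFast.table plus offset = candidate CPUTLBEntry.  */
    return table + ofs;
}
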
@@ -924,44 +925,51 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, TCGReg *pbase,
 
     QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
     QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 11));
-    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, mask_base, mask_ofs);
-    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, table_base, table_ofs);
+    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_AREG0, mask_ofs);
+    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs);
 
     tcg_out_opc_imm(s, OPC_SRLI, TCG_REG_TMP2, addr_reg,
                     TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
     tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0);
     tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1);
 
-    /* Load the tlb comparator and the addend. */
-    tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_TMP0, TCG_REG_TMP2,
-               is_ld ? offsetof(CPUTLBEntry, addr_read)
-                     : offsetof(CPUTLBEntry, addr_write));
-    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2,
-               offsetof(CPUTLBEntry, addend));
-
-    /* We don't support unaligned accesses. */
+    /*
+     * For aligned accesses, we check the first byte and include the alignment
+     * bits within the address. For unaligned access, we check that we don't
+     * cross pages using the address of the last byte of the access.
+     */
+    addr_adj = addr_reg;
     if (a_bits < s_bits) {
-        a_bits = s_bits;
+        addr_adj = TCG_REG_TMP0;
+        tcg_out_opc_imm(s, TARGET_LONG_BITS == 32 ? OPC_ADDIW : OPC_ADDI,
+                        addr_adj, addr_reg, s_mask - a_mask);
     }
-    /* Clear the non-page, non-alignment bits from the address. */
-    compare_mask = (tcg_target_long)TARGET_PAGE_MASK | a_mask;
+    compare_mask = TARGET_PAGE_MASK | a_mask;
     if (compare_mask == sextreg(compare_mask, 0, 12)) {
-        tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr_reg, compare_mask);
+        tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr_adj, compare_mask);
     } else {
         tcg_out_movi(s, TCG_TYPE_TL, TCG_REG_TMP1, compare_mask);
-        tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP1, TCG_REG_TMP1, addr_reg);
+        tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP1, TCG_REG_TMP1, addr_adj);
     }
 
+    /* Load the tlb comparator and the addend. */
+    tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_TMP0, TCG_REG_TMP2,
+               is_ld ? offsetof(CPUTLBEntry, addr_read)
+                     : offsetof(CPUTLBEntry, addr_write));
+    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2,
+               offsetof(CPUTLBEntry, addend));
+
     /* Compare masked address with the TLB entry. */
     ldst->label_ptr[0] = s->code_ptr;
     tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP0, TCG_REG_TMP1, 0);
 
     /* TLB Hit - translate address using addend. */
+    addr_adj = addr_reg;
     if (TARGET_LONG_BITS == 32) {
-        tcg_out_ext32u(s, TCG_REG_TMP0, addr_reg);
-        addr_reg = TCG_REG_TMP0;
+        addr_adj = TCG_REG_TMP0;
+        tcg_out_ext32u(s, addr_adj, addr_reg);
     }
-    tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_REG_TMP2, addr_reg);
+    tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_REG_TMP2, addr_adj);
     *pbase = TCG_REG_TMP0;
 #else
     if (a_mask) {
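
To see what the new alignment handling buys, here is a runnable sketch (plain C, not QEMU code; the 12-bit page size is an assumption for illustration) of the check added above: when a_bits < s_bits, the generated ADDI advances the address by s_mask - a_mask, so the masked compare is performed on the address of the last byte of the access, and a page-crossing access therefore fails the TLB compare and falls through to the slow path.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_BITS 12                                /* assumed page size */
#define PAGE_MASK (~(int64_t)((1 << PAGE_BITS) - 1))

int main(void)
{
    unsigned s_bits = 2;                    /* MO_32: a 4-byte access */
    unsigned a_bits = 0;                    /* byte-aligned: a_bits < s_bits */
    int64_t s_mask = (1u << s_bits) - 1;    /* 3 */
    int64_t a_mask = (1u << a_bits) - 1;    /* 0 */

    int64_t addr = 0x1ffe;                  /* 2 bytes before a page boundary */
    /* Mirrors the ADDI: step to the address of the last byte accessed. */
    int64_t addr_adj = addr + (s_mask - a_mask);
    /* Mirrors the ANDI/AND: keep the page bits plus the alignment bits. */
    int64_t compare_mask = PAGE_MASK | a_mask;

    /* 0x1000 vs 0x2000: the 4-byte access at 0x1ffe crosses into the next
     * page, so the masked value no longer matches the TLB comparator for
     * the page of addr and the slow path is taken.  */
    printf("page(first byte) = 0x%" PRIx64 "\n",
           (uint64_t)(addr & compare_mask));
    printf("page(last byte)  = 0x%" PRIx64 "\n",
           (uint64_t)(addr_adj & compare_mask));
    return 0;
}

For an access that is already sufficiently aligned (a_bits >= s_bits), addr_adj stays addr_reg and the compare keeps the alignment bits in place, so a misaligned address fails the compare directly and reaches the slow path that way.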