Diffstat (limited to 'tcg/s390/tcg-target.inc.c')
-rw-r--r--  tcg/s390/tcg-target.inc.c | 15
1 file changed, 11 insertions(+), 4 deletions(-)
diff --git a/tcg/s390/tcg-target.inc.c b/tcg/s390/tcg-target.inc.c
index e0a60e618c..5a7495b063 100644
--- a/tcg/s390/tcg-target.inc.c
+++ b/tcg/s390/tcg-target.inc.c
@@ -798,6 +798,12 @@ static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg data,
     }
 }
 
+static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
+                               TCGReg base, intptr_t ofs)
+{
+    return false;
+}
+
 /* load data from an absolute host address */
 static void tcg_out_ld_abs(TCGContext *s, TCGType type, TCGReg dest, void *abs)
 {
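
The new tcg_out_sti hook is queried by the common register allocator when it has to spill a constant temporary to its memory slot: returning true means the backend stored the constant directly, while returning false (as the s390 backend does unconditionally here) makes the caller materialize the constant in a scratch register and emit a normal tcg_out_st. A minimal sketch of how a backend with a store-immediate instruction might claim some cases follows; tcg_out_opc_store_imm is an invented helper, not an actual s390 backend function:

static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                               TCGReg base, intptr_t ofs)
{
    /* Claim only constants and offsets the (hypothetical) store-immediate
       instruction can encode; everything else takes the generic path. */
    if (val == (int16_t)val && ofs == (int32_t)ofs) {
        tcg_out_opc_store_imm(s, type, (int16_t)val, base, ofs);
        return true;    /* stored without consuming a scratch register */
    }
    return false;       /* caller falls back to load-constant + tcg_out_st */
}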
@@ -1499,18 +1505,19 @@ QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_table[NB_MMU_MODES - 1][1])
 static TCGReg tcg_out_tlb_read(TCGContext* s, TCGReg addr_reg, TCGMemOp opc,
                                int mem_index, bool is_ld)
 {
-    int s_mask = (1 << (opc & MO_SIZE)) - 1;
+    int a_bits = get_alignment_bits(opc);
     int ofs, a_off;
     uint64_t tlb_mask;
 
     /* For aligned accesses, we check the first byte and include the alignment
        bits within the address. For unaligned access, we check that we don't
        cross pages using the address of the last byte of the access. */
-    if ((opc & MO_AMASK) == MO_ALIGN || s_mask == 0) {
+    if (a_bits >= 0) {
+        /* A byte access or an alignment check required */
         a_off = 0;
-        tlb_mask = TARGET_PAGE_MASK | s_mask;
+        tlb_mask = TARGET_PAGE_MASK | ((1 << a_bits) - 1);
     } else {
-        a_off = s_mask;
+        a_off = (1 << (opc & MO_SIZE)) - 1;
         tlb_mask = TARGET_PAGE_MASK;
     }
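
To make the mask arithmetic concrete, the standalone sketch below walks both branches, assuming 4 KiB pages (PAGE_MASK stands in for TARGET_PAGE_MASK) and a 4-byte access; all values are illustrative, not taken from a QEMU build:

#include <stdint.h>
#include <stdio.h>

#define PAGE_MASK (~(uint64_t)0xfff)    /* stand-in for TARGET_PAGE_MASK */

int main(void)
{
    /* Aligned case: fold the alignment bits into the TLB compare mask, so
       a misaligned address can never equal the page-aligned TLB entry. */
    int a_bits = 2;                             /* 4-byte natural alignment */
    uint64_t tlb_mask = PAGE_MASK | ((1 << a_bits) - 1);
    uint64_t addr = 0x1002;                     /* misaligned by two bytes */
    /* Prints 0x1002: the low bits survive the mask, the compare against
       the page-aligned entry fails, and the slow path is taken. */
    printf("%#llx\n", (unsigned long long)(addr & tlb_mask));

    /* Unaligned case: bias the address by the offset of the access's last
       byte, so an access crossing a page boundary compares against the
       following page and likewise misses. */
    int a_off = (1 << 2) - 1;                   /* 3 for a 4-byte access */
    addr = 0x1ffe;                              /* last byte lands at 0x2001 */
    /* Prints 0x2000, not the entry's 0x1000: page crossing detected. */
    printf("%#llx\n", (unsigned long long)((addr + a_off) & PAGE_MASK));
    return 0;
}

Folding the alignment bits into tlb_mask is free on the fast path: the same masked compare that checks the page number now rejects misaligned addresses as well.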