-rw-r--r--  accel/tcg/cputlb.c      | 72
-rw-r--r--  include/exec/cpu-all.h  |  4
2 files changed, 46 insertions(+), 30 deletions(-)
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index eeba8c9847..028eebcb44 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -737,8 +737,7 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
         address |= TLB_INVALID_MASK;
     }
     if (attrs.byte_swap) {
-        /* Force the access through the I/O slow path.  */
-        address |= TLB_MMIO;
+        address |= TLB_BSWAP;
     }
     if (!memory_region_is_ram(section->mr) &&
         !memory_region_is_romd(section->mr)) {
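
With TLB_BSWAP, a byte-swapped page no longer has to masquerade as MMIO, so RAM-backed pages keep their fast path. A minimal sketch of the caller side (mine, not part of the patch; it assumes cpu, vaddr, paddr, prot and mmu_idx are the usual tlb_fill arguments): the target sets the byte_swap memory attribute and installs the page as usual.

/* Sketch only: assumes the usual tlb_fill() arguments are in scope. */
MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
attrs.byte_swap = true;     /* ask for TLB_BSWAP on this page */
tlb_set_page_with_attrs(cpu, vaddr, paddr, attrs, prot, mmu_idx,
                        TARGET_PAGE_SIZE);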
@@ -901,10 +900,6 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
     bool locked = false;
     MemTxResult r;
 
-    if (iotlbentry->attrs.byte_swap) {
-        op ^= MO_BSWAP;
-    }
-
     section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
     mr = section->mr;
     mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
@@ -947,10 +942,6 @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
     bool locked = false;
     MemTxResult r;
 
-    if (iotlbentry->attrs.byte_swap) {
-        op ^= MO_BSWAP;
-    }
-
     section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
     mr = section->mr;
     mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
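
io_readx() and io_writex() no longer consult attrs.byte_swap themselves; the callers fold the swap into the MemOp before the call. A hedged sketch of that adjustment (the helper name is mine; the patch open-codes the expression in load_helper and store_helper below):

/* Hypothetical helper, for illustration only. */
static inline MemOp maybe_bswap_op(MemOp op, bool need_swap)
{
    /* need_swap is 0 or 1, so the multiply yields 0 or MO_BSWAP and
     * the XOR toggles the byte-swap bit without a branch. */
    return op ^ (need_swap * MO_BSWAP);
}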
@@ -1133,8 +1124,8 @@ void *probe_access(CPUArchState *env, target_ulong addr, int size,
                              wp_access, retaddr);
     }
 
-    if (tlb_addr & (TLB_NOTDIRTY | TLB_MMIO)) {
-        /* I/O access */
+    /* Reject I/O access, or other required slow-path.  */
+    if (tlb_addr & (TLB_NOTDIRTY | TLB_MMIO | TLB_BSWAP)) {
         return NULL;
     }
 
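probe_access() keeps returning a host pointer only for pages that can be accessed directly; TLB_BSWAP now joins the flags that force a NULL return. A hypothetical caller, for illustration (the fast/slow split is the established probe_access idiom, not part of this patch):

void *p = probe_access(env, addr, size, MMU_DATA_STORE, mmu_idx, ra);
if (p) {
    memset(p, 0, size);     /* plain RAM: operate on the host pointer */
} else {
    /* NULL covers MMIO, notdirty and now byte-swapped pages: fall
     * back to helpers that honor the slow path. */
}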
@@ -1344,6 +1335,7 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
     /* Handle anything that isn't just a straight memory access.  */
     if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
         CPUIOTLBEntry *iotlbentry;
+        bool need_swap;
 
         /* For anything that is unaligned, recurse through full_load.  */
         if ((addr & (size - 1)) != 0) {
@@ -1357,17 +1349,27 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
             /* On watchpoint hit, this will longjmp out.  */
             cpu_check_watchpoint(env_cpu(env), addr, size,
                                  iotlbentry->attrs, BP_MEM_READ, retaddr);
-
-            /* The backing page may or may not require I/O.  */
-            tlb_addr &= ~TLB_WATCHPOINT;
-            if ((tlb_addr & ~TARGET_PAGE_MASK) == 0) {
-                goto do_aligned_access;
-            }
         }
 
+        need_swap = size > 1 && (tlb_addr & TLB_BSWAP);
+
         /* Handle I/O access.  */
-        return io_readx(env, iotlbentry, mmu_idx, addr,
-                        retaddr, access_type, op);
+        if (likely(tlb_addr & TLB_MMIO)) {
+            return io_readx(env, iotlbentry, mmu_idx, addr, retaddr,
+                            access_type, op ^ (need_swap * MO_BSWAP));
+        }
+
+        haddr = (void *)((uintptr_t)addr + entry->addend);
+
+        /*
+         * Keep these two load_memop separate to ensure that the compiler
+         * is able to fold the entire function to a single instruction.
+         * There is a build-time assert inside to remind you of this.  ;-)
+         */
+        if (unlikely(need_swap)) {
+            return load_memop(haddr, op ^ MO_BSWAP);
+        }
+        return load_memop(haddr, op);
     }
 
     /* Handle slow unaligned access (it spans two pages or IO).  */
@@ -1394,7 +1396,6 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
         return res & MAKE_64BIT_MASK(0, size * 8);
     }
 
- do_aligned_access:
     haddr = (void *)((uintptr_t)addr + entry->addend);
     return load_memop(haddr, op);
 }
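
The "build-time assert inside" that the new comment mentions refers to the default switch arm of load_memop(). A self-contained sketch of the pattern (load_memop_sketch and link_error_op_not_constant are my names, not QEMU's): if op fails to constant-fold, the call to the undefined extern survives optimization and the build breaks at link time.

extern uint64_t link_error_op_not_constant(void);

static inline uint64_t load_memop_sketch(const void *haddr, MemOp op)
{
    switch (op) {
    case MO_UB:
        return *(const uint8_t *)haddr;
    case MO_LEUW:
        return lduw_le_p(haddr);
    case MO_BEUW:
        return lduw_be_p(haddr);
    /* ... MO_LEUL/MO_BEUL/MO_LEQ/MO_BEQ elided ... */
    default:
        /* Only reachable if op is not a compile-time constant. */
        return link_error_op_not_constant();
    }
}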
@@ -1591,6 +1592,7 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
     /* Handle anything that isn't just a straight memory access.  */
     if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
         CPUIOTLBEntry *iotlbentry;
+        bool need_swap;
 
         /* For anything that is unaligned, recurse through byte stores.  */
         if ((addr & (size - 1)) != 0) {
@@ -1604,16 +1606,29 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
             /* On watchpoint hit, this will longjmp out.  */
             cpu_check_watchpoint(env_cpu(env), addr, size,
                                  iotlbentry->attrs, BP_MEM_WRITE, retaddr);
-
-            /* The backing page may or may not require I/O.  */
-            tlb_addr &= ~TLB_WATCHPOINT;
-            if ((tlb_addr & ~TARGET_PAGE_MASK) == 0) {
-                goto do_aligned_access;
-            }
         }
 
+        need_swap = size > 1 && (tlb_addr & TLB_BSWAP);
+
         /* Handle I/O access.  */
-        io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr, op);
+        if (likely(tlb_addr & (TLB_MMIO | TLB_NOTDIRTY))) {
+            io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr,
+                      op ^ (need_swap * MO_BSWAP));
+            return;
+        }
+
+        haddr = (void *)((uintptr_t)addr + entry->addend);
+
+        /*
+         * Keep these two store_memop separate to ensure that the compiler
+         * is able to fold the entire function to a single instruction.
+         * There is a build-time assert inside to remind you of this.  ;-)
+         */
+        if (unlikely(need_swap)) {
+            store_memop(haddr, val, op ^ MO_BSWAP);
+        } else {
+            store_memop(haddr, val, op);
+        }
         return;
     }
@@ -1682,7 +1697,6 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
         return;
     }
 
- do_aligned_access:
     haddr = (void *)((uintptr_t)addr + entry->addend);
     store_memop(haddr, val, op);
 }
diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index e0c8dc540c..d148bded35 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -335,12 +335,14 @@ CPUArchState *cpu_copy(CPUArchState *env);
 #define TLB_MMIO            (1 << (TARGET_PAGE_BITS_MIN - 3))
 /* Set if TLB entry contains a watchpoint.  */
 #define TLB_WATCHPOINT      (1 << (TARGET_PAGE_BITS_MIN - 4))
+/* Set if TLB entry requires byte swap.  */
+#define TLB_BSWAP           (1 << (TARGET_PAGE_BITS_MIN - 5))
 
 /* Use this mask to check interception with an alignment mask
  * in a TCG backend.
  */
 #define TLB_FLAGS_MASK \
-    (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO | TLB_WATCHPOINT)
+    (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO | TLB_WATCHPOINT | TLB_BSWAP)
 
 /**
  * tlb_hit_page: return true if page aligned @addr is a hit against the
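
The five flags now occupy bits TARGET_PAGE_BITS_MIN-1 down to TARGET_PAGE_BITS_MIN-5 of the TLB entry's page-aligned address, so the smallest supported page size must leave room for all of them. A hypothetical sanity check (mine, not in the patch) that would catch overflow at build time:

/* Five flag bits need at least five in-page bits on every target. */
QEMU_BUILD_BUG_ON(TARGET_PAGE_BITS_MIN < 5);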