author     Tony Nguyen <tony.nguyen@bt.com>                 2019-08-24 04:36:54 +1000
committer  Richard Henderson <richard.henderson@linaro.org> 2019-09-03 08:30:39 -0700
commit     9bf825bf3df4ebae3af51566c8088e3f1249a910 (patch)
tree       1d6d1d8a389a7da714c8021bb5f1d256c5d64987 /accel
parent     be5c4787e9a6eed12fd765d9e890f7cc6cd63220 (diff)
memory: Single byte swap along the I/O path
Now that MemOp has been pushed down into the memory API, and
callers are encoding endianness, we can collapse byte swaps
along the I/O path into the accelerator- and target-independent
adjust_endianness.
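As a minimal standalone sketch of that single adjustment point
(demo-only names such as demo_adjust_endianness and DemoEndian,
not QEMU's real adjust_endianness API): bytes are swapped only
when the endianness the caller encoded into the access disagrees
with the device's endianness.

#include <assert.h>
#include <stdint.h>

/* Demo-only endianness tag; QEMU encodes this in MemOp instead. */
typedef enum { DEMO_MO_LE, DEMO_MO_BE } DemoEndian;

static uint32_t demo_bswap32(uint32_t x)
{
    return ((x & 0x000000ffu) << 24) | ((x & 0x0000ff00u) <<  8) |
           ((x & 0x00ff0000u) >>  8) | ((x & 0xff000000u) >> 24);
}

/*
 * The single adjustment point: swap only on a mismatch between the
 * endianness the caller requested and the device's endianness.
 */
static uint32_t demo_adjust_endianness(uint32_t val, DemoEndian access,
                                       DemoEndian device)
{
    return access != device ? demo_bswap32(val) : val;
}

int main(void)
{
    /* BE access to a BE device: no swap; to an LE device: one swap. */
    assert(demo_adjust_endianness(0x11223344u, DEMO_MO_BE, DEMO_MO_BE)
           == 0x11223344u);
    assert(demo_adjust_endianness(0x11223344u, DEMO_MO_BE, DEMO_MO_LE)
           == 0x44332211u);
    return 0;
}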
Collapsing byte swaps along the I/O path also enables additional
endian-inversion logic, e.g. the SPARC64 Invert Endian TTE bit, with
redundant byte swaps cancelling out.
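A hypothetical sketch of how such an inversion source folds in (the
flag value and names below are invented for the demo, not QEMU's
MemOp encoding): the inversion toggles the endianness bit carried in
the operation instead of swapping data, so two inversions cancel by
XOR before any bswap runs.

#include <assert.h>

/* Demo-only stand-in for a byte-swap flag carried in the operation. */
enum { DEMO_MO_BSWAP = 1u << 0 };

/* An inversion source (e.g. a TTE bit) flips the flag, not the data. */
static unsigned demo_invert_endian(unsigned op)
{
    return op ^ DEMO_MO_BSWAP;
}

int main(void)
{
    unsigned op = 0;                              /* no swap requested */
    op = demo_invert_endian(demo_invert_endian(op));
    assert(op == 0);                              /* two inversions cancel */
    return 0;
}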
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Suggested-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Tony Nguyen <tony.nguyen@bt.com>
Message-Id: <911ff31af11922a9afba9b7ce128af8b8b80f316.1566466906.git.tony.nguyen@bt.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Diffstat (limited to 'accel')
-rw-r--r--  accel/tcg/cputlb.c | 42
1 file changed, 3 insertions(+), 39 deletions(-)
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 5c12eef292..3c9e634d99 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1200,38 +1200,6 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
     cpu_loop_exit_atomic(env_cpu(env), retaddr);
 }
 
-#ifdef TARGET_WORDS_BIGENDIAN
-#define NEED_BE_BSWAP 0
-#define NEED_LE_BSWAP 1
-#else
-#define NEED_BE_BSWAP 1
-#define NEED_LE_BSWAP 0
-#endif
-
-/*
- * Byte Swap Helper
- *
- * This should all dead code away depending on the build host and
- * access type.
- */
-
-static inline uint64_t handle_bswap(uint64_t val, MemOp op)
-{
-    if ((memop_big_endian(op) && NEED_BE_BSWAP) ||
-        (!memop_big_endian(op) && NEED_LE_BSWAP)) {
-        switch (op & MO_SIZE) {
-        case MO_8: return val;
-        case MO_16: return bswap16(val);
-        case MO_32: return bswap32(val);
-        case MO_64: return bswap64(val);
-        default:
-            g_assert_not_reached();
-        }
-    } else {
-        return val;
-    }
-}
-
 /*
  * Load Helpers
  *
@@ -1306,10 +1274,8 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
             }
         }
 
-        /* TODO: Merge bswap into io_readx -> memory_region_dispatch_read. */
-        res = io_readx(env, &env_tlb(env)->d[mmu_idx].iotlb[index],
-                       mmu_idx, addr, retaddr, access_type, op);
-        return handle_bswap(res, op);
+        return io_readx(env, &env_tlb(env)->d[mmu_idx].iotlb[index],
+                        mmu_idx, addr, retaddr, access_type, op);
     }
 
     /* Handle slow unaligned access (it spans two pages or IO). */
@@ -1552,10 +1518,8 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
             }
         }
 
-        /* TODO: Merge bswap into io_writex -> memory_region_dispatch_write. */
         io_writex(env, &env_tlb(env)->d[mmu_idx].iotlb[index], mmu_idx,
-                  handle_bswap(val, op),
-                  addr, retaddr, op);
+                  val, addr, retaddr, op);
         return;
     }
 
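To see why deleting handle_bswap preserves behaviour: a conditional
swap is applied once per endianness mismatch, and mismatches compose
by XOR, so device-to-target followed by target-to-access always
equals the single device-to-access adjustment now done at dispatch.
A standalone check of that identity (demo-only names, 32-bit accesses
for brevity):

#include <assert.h>
#include <stdint.h>

enum { DEMO_LE, DEMO_BE };

static uint32_t demo_bswap32(uint32_t x)
{
    return ((x & 0x000000ffu) << 24) | ((x & 0x0000ff00u) <<  8) |
           ((x & 0x00ff0000u) >>  8) | ((x & 0xff000000u) >> 24);
}

/* Reorder a value from one endianness to another: swap on mismatch. */
static uint32_t demo_convert(uint32_t x, int from, int to)
{
    return from != to ? demo_bswap32(x) : x;
}

int main(void)
{
    uint32_t v = 0xdeadbeefu;
    int dev, tgt, acc;

    /*
     * The old two-step path (device->target, then target->access)
     * matches the new single device->access adjustment in all cases,
     * because bswap32 is an involution.
     */
    for (dev = DEMO_LE; dev <= DEMO_BE; dev++) {
        for (tgt = DEMO_LE; tgt <= DEMO_BE; tgt++) {
            for (acc = DEMO_LE; acc <= DEMO_BE; acc++) {
                assert(demo_convert(demo_convert(v, dev, tgt), tgt, acc)
                       == demo_convert(v, dev, acc));
            }
        }
    }
    return 0;
}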