author     Richard Henderson <richard.henderson@linaro.org>   2021-10-13 11:43:29 -0700
committer  Richard Henderson <richard.henderson@linaro.org>   2021-10-13 11:43:29 -0700
commit     e5b2333f24ff207f08cf96e73d2e11438c985801
tree       14589d17d9316913fe5a6fdc772bd28a64fa2cb0 /target
parent     984b2b504942d9adb31d7743f02138fb79a73c12
parent     76e366e728549b3324cc2dee6745d6a4f1af18e6
Merge remote-tracking branch 'remotes/rth/tags/pull-tcg-20211013' into staging
Use MO_128 for 16-byte atomic memory operations.
Add cpu_ld/st_mmu memory primitives.
Move helper_ld/st memory helpers out of tcg.h.
Canonicalize alignment flags in MemOp.
# gpg: Signature made Wed 13 Oct 2021 10:48:45 AM PDT
# gpg: using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg: issuer "richard.henderson@linaro.org"
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [ultimate]
* remotes/rth/tags/pull-tcg-20211013:
tcg: Canonicalize alignment flags in MemOp
tcg: Move helper_*_mmu decls to tcg/tcg-ldst.h
target/arm: Use cpu_*_mmu instead of helper_*_mmu
target/sparc: Use cpu_*_mmu instead of helper_*_mmu
target/s390x: Use cpu_*_mmu instead of helper_*_mmu
target/mips: Use 8-byte memory ops for msa load/store
target/mips: Use cpu_*_data_ra for msa load/store
accel/tcg: Move cpu_atomic decls to exec/cpu_ldst.h
accel/tcg: Add cpu_{ld,st}*_mmu interfaces
target/hexagon: Implement cpu_mmu_index
target/s390x: Use MO_128 for 16 byte atomics
target/ppc: Use MO_128 for 16 byte atomics
target/i386: Use MO_128 for 16 byte atomics
target/arm: Use MO_128 for 16 byte atomics
memory: Log access direction for invalid accesses
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
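
The sketch below is an illustration only, not part of this pull: the function name is made up, the CPUArchState/target_ulong types and the env, addr, mem_idx and ra parameters are assumed to come from a target helper, and the Int128 operands are placeholders. It shows the call pattern that replaces the old TCG-internal helper_*_mmu routines in the hunks that follow, and the canonical MO_128 form now used for 16-byte atomics:

    #include "qemu/osdep.h"
    #include "exec/cpu_ldst.h"   /* cpu_*_mmu(), make_memop_idx(), cpu_atomic_* */
    #include "qemu/int128.h"

    /*
     * Old style, softmmu-only, declared in tcg/tcg.h:
     *     o0 = helper_le_ldq_mmu(env, addr, oi, ra);
     * New style, declared in exec/cpu_ldst.h and usable in user-only
     * and system mode alike:
     */
    static uint64_t example_cpu_mmu_access(CPUArchState *env, target_ulong addr,
                                           int mem_idx, uintptr_t ra)
    {
        MemOpIdx oi = make_memop_idx(MO_LEQ, mem_idx);
        uint64_t v = cpu_ldq_le_mmu(env, addr, oi, ra);   /* load 8 bytes LE */
        cpu_stq_le_mmu(env, addr, v, oi, ra);             /* store them back */

        /* 16-byte atomics now carry the canonical MO_128 size plus MO_ALIGN
         * instead of an 8-byte MO_*Q op tagged with MO_ALIGN_16.  Real
         * callers check HAVE_CMPXCHG128 first, as in the hunks below. */
        Int128 cmpv = int128_make128(v, 0);               /* placeholder operands */
        Int128 newv = int128_make128(0, v);
        MemOpIdx oi16 = make_memop_idx(MO_TE | MO_128 | MO_ALIGN, mem_idx);
        Int128 oldv = cpu_atomic_cmpxchgo_le_mmu(env, addr, cmpv, newv, oi16, ra);
        return int128_getlo(oldv);
    }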
Diffstat (limited to 'target')
 target/arm/helper-a64.c       |  61
 target/arm/m_helper.c         |   6
 target/hexagon/cpu.h          |   9
 target/i386/tcg/mem_helper.c  |   2
 target/m68k/op_helper.c       |   1
 target/mips/tcg/msa_helper.c  | 389
 target/ppc/mem_helper.c       |   1
 target/ppc/translate.c        |  12
 target/s390x/tcg/mem_helper.c |  13
 target/sparc/ldst_helper.c    |  14
 10 files changed, 135 insertions(+), 373 deletions(-)
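
For reference, the lane-swap helpers that the target/mips/tcg/msa_helper.c hunks introduce can be exercised on their own. The standalone sketch below is not part of the commit: it substitutes __builtin_bswap64 and a hand-rolled rotate for QEMU's bswap64()/ror64() from qemu/bswap.h and qemu/bitops.h so it compiles outside the tree, but the bswap16x4()/bswap32x2() bodies match the patch:

    #include <stdint.h>
    #include <stdio.h>
    #include <inttypes.h>

    /* Standalone stand-ins for QEMU's bswap64()/ror64(). */
    static inline uint64_t bswap64(uint64_t x) { return __builtin_bswap64(x); }
    static inline uint64_t ror64(uint64_t x, unsigned n)
    {
        return (x >> n) | (x << (64 - n));
    }

    /* Swap the bytes within each 16-bit lane of a 64-bit word. */
    static inline uint64_t bswap16x4(uint64_t x)
    {
        uint64_t m = 0x00ff00ff00ff00ffull;
        return ((x & m) << 8) | ((x >> 8) & m);
    }

    /* Swap the bytes within each 32-bit lane of a 64-bit word. */
    static inline uint64_t bswap32x2(uint64_t x)
    {
        return ror64(bswap64(x), 32);
    }

    int main(void)
    {
        uint64_t v = 0x0011223344556677ull;
        printf("%016" PRIx64 "\n", bswap16x4(v));  /* 1100332255447766 */
        printf("%016" PRIx64 "\n", bswap32x2(v));  /* 3322110077665544 */
        return 0;
    }

This is why the new MSA load/store helpers can always issue two little-endian 8-byte accesses and then, on a big-endian target, repair the halfword or word lanes with a single swap per 64-bit half of the vector.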
diff --git a/target/arm/helper-a64.c b/target/arm/helper-a64.c index c5af779006..5ae2ecb0f3 100644 --- a/target/arm/helper-a64.c +++ b/target/arm/helper-a64.c @@ -32,7 +32,6 @@ #include "exec/cpu_ldst.h" #include "qemu/int128.h" #include "qemu/atomic128.h" -#include "tcg/tcg.h" #include "fpu/softfloat.h" #include <zlib.h> /* For crc32 */ @@ -513,37 +512,19 @@ uint64_t HELPER(paired_cmpxchg64_le)(CPUARMState *env, uint64_t addr, uintptr_t ra = GETPC(); uint64_t o0, o1; bool success; - -#ifdef CONFIG_USER_ONLY - /* ??? Enforce alignment. */ - uint64_t *haddr = g2h(env_cpu(env), addr); - - set_helper_retaddr(ra); - o0 = ldq_le_p(haddr + 0); - o1 = ldq_le_p(haddr + 1); - oldv = int128_make128(o0, o1); - - success = int128_eq(oldv, cmpv); - if (success) { - stq_le_p(haddr + 0, int128_getlo(newv)); - stq_le_p(haddr + 1, int128_gethi(newv)); - } - clear_helper_retaddr(); -#else int mem_idx = cpu_mmu_index(env, false); MemOpIdx oi0 = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx); MemOpIdx oi1 = make_memop_idx(MO_LEQ, mem_idx); - o0 = helper_le_ldq_mmu(env, addr + 0, oi0, ra); - o1 = helper_le_ldq_mmu(env, addr + 8, oi1, ra); + o0 = cpu_ldq_le_mmu(env, addr + 0, oi0, ra); + o1 = cpu_ldq_le_mmu(env, addr + 8, oi1, ra); oldv = int128_make128(o0, o1); success = int128_eq(oldv, cmpv); if (success) { - helper_le_stq_mmu(env, addr + 0, int128_getlo(newv), oi1, ra); - helper_le_stq_mmu(env, addr + 8, int128_gethi(newv), oi1, ra); + cpu_stq_le_mmu(env, addr + 0, int128_getlo(newv), oi1, ra); + cpu_stq_le_mmu(env, addr + 8, int128_gethi(newv), oi1, ra); } -#endif return !success; } @@ -560,7 +541,7 @@ uint64_t HELPER(paired_cmpxchg64_le_parallel)(CPUARMState *env, uint64_t addr, assert(HAVE_CMPXCHG128); mem_idx = cpu_mmu_index(env, false); - oi = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx); + oi = make_memop_idx(MO_LE | MO_128 | MO_ALIGN, mem_idx); cmpv = int128_make128(env->exclusive_val, env->exclusive_high); newv = int128_make128(new_lo, new_hi); @@ -583,37 +564,19 @@ uint64_t HELPER(paired_cmpxchg64_be)(CPUARMState *env, uint64_t addr, uintptr_t ra = GETPC(); uint64_t o0, o1; bool success; - -#ifdef CONFIG_USER_ONLY - /* ??? Enforce alignment. 
*/ - uint64_t *haddr = g2h(env_cpu(env), addr); - - set_helper_retaddr(ra); - o1 = ldq_be_p(haddr + 0); - o0 = ldq_be_p(haddr + 1); - oldv = int128_make128(o0, o1); - - success = int128_eq(oldv, cmpv); - if (success) { - stq_be_p(haddr + 0, int128_gethi(newv)); - stq_be_p(haddr + 1, int128_getlo(newv)); - } - clear_helper_retaddr(); -#else int mem_idx = cpu_mmu_index(env, false); MemOpIdx oi0 = make_memop_idx(MO_BEQ | MO_ALIGN_16, mem_idx); MemOpIdx oi1 = make_memop_idx(MO_BEQ, mem_idx); - o1 = helper_be_ldq_mmu(env, addr + 0, oi0, ra); - o0 = helper_be_ldq_mmu(env, addr + 8, oi1, ra); + o1 = cpu_ldq_be_mmu(env, addr + 0, oi0, ra); + o0 = cpu_ldq_be_mmu(env, addr + 8, oi1, ra); oldv = int128_make128(o0, o1); success = int128_eq(oldv, cmpv); if (success) { - helper_be_stq_mmu(env, addr + 0, int128_gethi(newv), oi1, ra); - helper_be_stq_mmu(env, addr + 8, int128_getlo(newv), oi1, ra); + cpu_stq_be_mmu(env, addr + 0, int128_gethi(newv), oi1, ra); + cpu_stq_be_mmu(env, addr + 8, int128_getlo(newv), oi1, ra); } -#endif return !success; } @@ -630,7 +593,7 @@ uint64_t HELPER(paired_cmpxchg64_be_parallel)(CPUARMState *env, uint64_t addr, assert(HAVE_CMPXCHG128); mem_idx = cpu_mmu_index(env, false); - oi = make_memop_idx(MO_BEQ | MO_ALIGN_16, mem_idx); + oi = make_memop_idx(MO_BE | MO_128 | MO_ALIGN, mem_idx); /* * High and low need to be switched here because this is not actually a @@ -656,7 +619,7 @@ void HELPER(casp_le_parallel)(CPUARMState *env, uint32_t rs, uint64_t addr, assert(HAVE_CMPXCHG128); mem_idx = cpu_mmu_index(env, false); - oi = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx); + oi = make_memop_idx(MO_LE | MO_128 | MO_ALIGN, mem_idx); cmpv = int128_make128(env->xregs[rs], env->xregs[rs + 1]); newv = int128_make128(new_lo, new_hi); @@ -677,7 +640,7 @@ void HELPER(casp_be_parallel)(CPUARMState *env, uint32_t rs, uint64_t addr, assert(HAVE_CMPXCHG128); mem_idx = cpu_mmu_index(env, false); - oi = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx); + oi = make_memop_idx(MO_LE | MO_128 | MO_ALIGN, mem_idx); cmpv = int128_make128(env->xregs[rs + 1], env->xregs[rs]); newv = int128_make128(new_lo, new_hi); diff --git a/target/arm/m_helper.c b/target/arm/m_helper.c index 62aa12c9d8..2c9922dc29 100644 --- a/target/arm/m_helper.c +++ b/target/arm/m_helper.c @@ -1947,9 +1947,9 @@ static bool do_v7m_function_return(ARMCPU *cpu) * do them as secure, so work out what MMU index that is. 
*/ mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true); - oi = make_memop_idx(MO_LE, arm_to_core_mmu_idx(mmu_idx)); - newpc = helper_le_ldul_mmu(env, frameptr, oi, 0); - newpsr = helper_le_ldul_mmu(env, frameptr + 4, oi, 0); + oi = make_memop_idx(MO_LEUL, arm_to_core_mmu_idx(mmu_idx)); + newpc = cpu_ldl_le_mmu(env, frameptr, oi, 0); + newpsr = cpu_ldl_le_mmu(env, frameptr + 4, oi, 0); /* Consistency checks on new IPSR */ newpsr_exc = newpsr & XPSR_EXCP; diff --git a/target/hexagon/cpu.h b/target/hexagon/cpu.h index f7d043865b..f90c187888 100644 --- a/target/hexagon/cpu.h +++ b/target/hexagon/cpu.h @@ -141,6 +141,15 @@ static inline void cpu_get_tb_cpu_state(CPUHexagonState *env, target_ulong *pc, #endif } +static inline int cpu_mmu_index(CPUHexagonState *env, bool ifetch) +{ +#ifdef CONFIG_USER_ONLY + return MMU_USER_IDX; +#else +#error System mode not supported on Hexagon yet +#endif +} + typedef struct CPUHexagonState CPUArchState; typedef HexagonCPU ArchCPU; diff --git a/target/i386/tcg/mem_helper.c b/target/i386/tcg/mem_helper.c index 0fd696f9c1..a207e624cb 100644 --- a/target/i386/tcg/mem_helper.c +++ b/target/i386/tcg/mem_helper.c @@ -136,7 +136,7 @@ void helper_cmpxchg16b(CPUX86State *env, target_ulong a0) Int128 newv = int128_make128(env->regs[R_EBX], env->regs[R_ECX]); int mem_idx = cpu_mmu_index(env, false); - MemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx); + MemOpIdx oi = make_memop_idx(MO_TE | MO_128 | MO_ALIGN, mem_idx); Int128 oldv = cpu_atomic_cmpxchgo_le_mmu(env, a0, cmpv, newv, oi, ra); if (int128_eq(oldv, cmpv)) { diff --git a/target/m68k/op_helper.c b/target/m68k/op_helper.c index c1bf73b6f9..cfbc987ba6 100644 --- a/target/m68k/op_helper.c +++ b/target/m68k/op_helper.c @@ -22,7 +22,6 @@ #include "exec/exec-all.h" #include "exec/cpu_ldst.h" #include "semihosting/semihost.h" -#include "tcg/tcg.h" #if !defined(CONFIG_USER_ONLY) diff --git a/target/mips/tcg/msa_helper.c b/target/mips/tcg/msa_helper.c index 167d9a591c..e40c1b7057 100644 --- a/target/mips/tcg/msa_helper.c +++ b/target/mips/tcg/msa_helper.c @@ -8218,178 +8218,86 @@ void helper_msa_ffint_u_df(CPUMIPSState *env, uint32_t df, uint32_t wd, #define MEMOP_IDX(DF) #endif +#ifdef TARGET_WORDS_BIGENDIAN +static inline uint64_t bswap16x4(uint64_t x) +{ + uint64_t m = 0x00ff00ff00ff00ffull; + return ((x & m) << 8) | ((x >> 8) & m); +} + +static inline uint64_t bswap32x2(uint64_t x) +{ + return ror64(bswap64(x), 32); +} +#endif + void helper_msa_ld_b(CPUMIPSState *env, uint32_t wd, target_ulong addr) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - MEMOP_IDX(DF_BYTE) -#if !defined(CONFIG_USER_ONLY) -#if !defined(HOST_WORDS_BIGENDIAN) - pwd->b[0] = helper_ret_ldub_mmu(env, addr + (0 << DF_BYTE), oi, GETPC()); - pwd->b[1] = helper_ret_ldub_mmu(env, addr + (1 << DF_BYTE), oi, GETPC()); - pwd->b[2] = helper_ret_ldub_mmu(env, addr + (2 << DF_BYTE), oi, GETPC()); - pwd->b[3] = helper_ret_ldub_mmu(env, addr + (3 << DF_BYTE), oi, GETPC()); - pwd->b[4] = helper_ret_ldub_mmu(env, addr + (4 << DF_BYTE), oi, GETPC()); - pwd->b[5] = helper_ret_ldub_mmu(env, addr + (5 << DF_BYTE), oi, GETPC()); - pwd->b[6] = helper_ret_ldub_mmu(env, addr + (6 << DF_BYTE), oi, GETPC()); - pwd->b[7] = helper_ret_ldub_mmu(env, addr + (7 << DF_BYTE), oi, GETPC()); - pwd->b[8] = helper_ret_ldub_mmu(env, addr + (8 << DF_BYTE), oi, GETPC()); - pwd->b[9] = helper_ret_ldub_mmu(env, addr + (9 << DF_BYTE), oi, GETPC()); - pwd->b[10] = helper_ret_ldub_mmu(env, addr + (10 << DF_BYTE), oi, GETPC()); - pwd->b[11] = helper_ret_ldub_mmu(env, addr + (11 << 
DF_BYTE), oi, GETPC()); - pwd->b[12] = helper_ret_ldub_mmu(env, addr + (12 << DF_BYTE), oi, GETPC()); - pwd->b[13] = helper_ret_ldub_mmu(env, addr + (13 << DF_BYTE), oi, GETPC()); - pwd->b[14] = helper_ret_ldub_mmu(env, addr + (14 << DF_BYTE), oi, GETPC()); - pwd->b[15] = helper_ret_ldub_mmu(env, addr + (15 << DF_BYTE), oi, GETPC()); -#else - pwd->b[0] = helper_ret_ldub_mmu(env, addr + (7 << DF_BYTE), oi, GETPC()); - pwd->b[1] = helper_ret_ldub_mmu(env, addr + (6 << DF_BYTE), oi, GETPC()); - pwd->b[2] = helper_ret_ldub_mmu(env, addr + (5 << DF_BYTE), oi, GETPC()); - pwd->b[3] = helper_ret_ldub_mmu(env, addr + (4 << DF_BYTE), oi, GETPC()); - pwd->b[4] = helper_ret_ldub_mmu(env, addr + (3 << DF_BYTE), oi, GETPC()); - pwd->b[5] = helper_ret_ldub_mmu(env, addr + (2 << DF_BYTE), oi, GETPC()); - pwd->b[6] = helper_ret_ldub_mmu(env, addr + (1 << DF_BYTE), oi, GETPC()); - pwd->b[7] = helper_ret_ldub_mmu(env, addr + (0 << DF_BYTE), oi, GETPC()); - pwd->b[8] = helper_ret_ldub_mmu(env, addr + (15 << DF_BYTE), oi, GETPC()); - pwd->b[9] = helper_ret_ldub_mmu(env, addr + (14 << DF_BYTE), oi, GETPC()); - pwd->b[10] = helper_ret_ldub_mmu(env, addr + (13 << DF_BYTE), oi, GETPC()); - pwd->b[11] = helper_ret_ldub_mmu(env, addr + (12 << DF_BYTE), oi, GETPC()); - pwd->b[12] = helper_ret_ldub_mmu(env, addr + (11 << DF_BYTE), oi, GETPC()); - pwd->b[13] = helper_ret_ldub_mmu(env, addr + (10 << DF_BYTE), oi, GETPC()); - pwd->b[14] = helper_ret_ldub_mmu(env, addr + (9 << DF_BYTE), oi, GETPC()); - pwd->b[15] = helper_ret_ldub_mmu(env, addr + (8 << DF_BYTE), oi, GETPC()); -#endif -#else -#if !defined(HOST_WORDS_BIGENDIAN) - pwd->b[0] = cpu_ldub_data(env, addr + (0 << DF_BYTE)); - pwd->b[1] = cpu_ldub_data(env, addr + (1 << DF_BYTE)); - pwd->b[2] = cpu_ldub_data(env, addr + (2 << DF_BYTE)); - pwd->b[3] = cpu_ldub_data(env, addr + (3 << DF_BYTE)); - pwd->b[4] = cpu_ldub_data(env, addr + (4 << DF_BYTE)); - pwd->b[5] = cpu_ldub_data(env, addr + (5 << DF_BYTE)); - pwd->b[6] = cpu_ldub_data(env, addr + (6 << DF_BYTE)); - pwd->b[7] = cpu_ldub_data(env, addr + (7 << DF_BYTE)); - pwd->b[8] = cpu_ldub_data(env, addr + (8 << DF_BYTE)); - pwd->b[9] = cpu_ldub_data(env, addr + (9 << DF_BYTE)); - pwd->b[10] = cpu_ldub_data(env, addr + (10 << DF_BYTE)); - pwd->b[11] = cpu_ldub_data(env, addr + (11 << DF_BYTE)); - pwd->b[12] = cpu_ldub_data(env, addr + (12 << DF_BYTE)); - pwd->b[13] = cpu_ldub_data(env, addr + (13 << DF_BYTE)); - pwd->b[14] = cpu_ldub_data(env, addr + (14 << DF_BYTE)); - pwd->b[15] = cpu_ldub_data(env, addr + (15 << DF_BYTE)); -#else - pwd->b[0] = cpu_ldub_data(env, addr + (7 << DF_BYTE)); - pwd->b[1] = cpu_ldub_data(env, addr + (6 << DF_BYTE)); - pwd->b[2] = cpu_ldub_data(env, addr + (5 << DF_BYTE)); - pwd->b[3] = cpu_ldub_data(env, addr + (4 << DF_BYTE)); - pwd->b[4] = cpu_ldub_data(env, addr + (3 << DF_BYTE)); - pwd->b[5] = cpu_ldub_data(env, addr + (2 << DF_BYTE)); - pwd->b[6] = cpu_ldub_data(env, addr + (1 << DF_BYTE)); - pwd->b[7] = cpu_ldub_data(env, addr + (0 << DF_BYTE)); - pwd->b[8] = cpu_ldub_data(env, addr + (15 << DF_BYTE)); - pwd->b[9] = cpu_ldub_data(env, addr + (14 << DF_BYTE)); - pwd->b[10] = cpu_ldub_data(env, addr + (13 << DF_BYTE)); - pwd->b[11] = cpu_ldub_data(env, addr + (12 << DF_BYTE)); - pwd->b[12] = cpu_ldub_data(env, addr + (11 << DF_BYTE)); - pwd->b[13] = cpu_ldub_data(env, addr + (10 << DF_BYTE)); - pwd->b[14] = cpu_ldub_data(env, addr + (9 << DF_BYTE)); - pwd->b[15] = cpu_ldub_data(env, addr + (8 << DF_BYTE)); -#endif -#endif + uintptr_t ra = GETPC(); + uint64_t d0, d1; + + /* Load 8 
bytes at a time. Vector element ordering makes this LE. */ + d0 = cpu_ldq_le_data_ra(env, addr + 0, ra); + d1 = cpu_ldq_le_data_ra(env, addr + 8, ra); + pwd->d[0] = d0; + pwd->d[1] = d1; } void helper_msa_ld_h(CPUMIPSState *env, uint32_t wd, target_ulong addr) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - MEMOP_IDX(DF_HALF) -#if !defined(CONFIG_USER_ONLY) -#if !defined(HOST_WORDS_BIGENDIAN) - pwd->h[0] = helper_ret_lduw_mmu(env, addr + (0 << DF_HALF), oi, GETPC()); - pwd->h[1] = helper_ret_lduw_mmu(env, addr + (1 << DF_HALF), oi, GETPC()); - pwd->h[2] = helper_ret_lduw_mmu(env, addr + (2 << DF_HALF), oi, GETPC()); - pwd->h[3] = helper_ret_lduw_mmu(env, addr + (3 << DF_HALF), oi, GETPC()); - pwd->h[4] = helper_ret_lduw_mmu(env, addr + (4 << DF_HALF), oi, GETPC()); - pwd->h[5] = helper_ret_lduw_mmu(env, addr + (5 << DF_HALF), oi, GETPC()); - pwd->h[6] = helper_ret_lduw_mmu(env, addr + (6 << DF_HALF), oi, GETPC()); - pwd->h[7] = helper_ret_lduw_mmu(env, addr + (7 << DF_HALF), oi, GETPC()); -#else - pwd->h[0] = helper_ret_lduw_mmu(env, addr + (3 << DF_HALF), oi, GETPC()); - pwd->h[1] = helper_ret_lduw_mmu(env, addr + (2 << DF_HALF), oi, GETPC()); - pwd->h[2] = helper_ret_lduw_mmu(env, addr + (1 << DF_HALF), oi, GETPC()); - pwd->h[3] = helper_ret_lduw_mmu(env, addr + (0 << DF_HALF), oi, GETPC()); - pwd->h[4] = helper_ret_lduw_mmu(env, addr + (7 << DF_HALF), oi, GETPC()); - pwd->h[5] = helper_ret_lduw_mmu(env, addr + (6 << DF_HALF), oi, GETPC()); - pwd->h[6] = helper_ret_lduw_mmu(env, addr + (5 << DF_HALF), oi, GETPC()); - pwd->h[7] = helper_ret_lduw_mmu(env, addr + (4 << DF_HALF), oi, GETPC()); -#endif -#else -#if !defined(HOST_WORDS_BIGENDIAN) - pwd->h[0] = cpu_lduw_data(env, addr + (0 << DF_HALF)); - pwd->h[1] = cpu_lduw_data(env, addr + (1 << DF_HALF)); - pwd->h[2] = cpu_lduw_data(env, addr + (2 << DF_HALF)); - pwd->h[3] = cpu_lduw_data(env, addr + (3 << DF_HALF)); - pwd->h[4] = cpu_lduw_data(env, addr + (4 << DF_HALF)); - pwd->h[5] = cpu_lduw_data(env, addr + (5 << DF_HALF)); - pwd->h[6] = cpu_lduw_data(env, addr + (6 << DF_HALF)); - pwd->h[7] = cpu_lduw_data(env, addr + (7 << DF_HALF)); -#else - pwd->h[0] = cpu_lduw_data(env, addr + (3 << DF_HALF)); - pwd->h[1] = cpu_lduw_data(env, addr + (2 << DF_HALF)); - pwd->h[2] = cpu_lduw_data(env, addr + (1 << DF_HALF)); - pwd->h[3] = cpu_lduw_data(env, addr + (0 << DF_HALF)); - pwd->h[4] = cpu_lduw_data(env, addr + (7 << DF_HALF)); - pwd->h[5] = cpu_lduw_data(env, addr + (6 << DF_HALF)); - pwd->h[6] = cpu_lduw_data(env, addr + (5 << DF_HALF)); - pwd->h[7] = cpu_lduw_data(env, addr + (4 << DF_HALF)); -#endif + uintptr_t ra = GETPC(); + uint64_t d0, d1; + + /* + * Load 8 bytes at a time. Use little-endian load, then for + * big-endian target, we must then swap the four halfwords. 
+ */ + d0 = cpu_ldq_le_data_ra(env, addr + 0, ra); + d1 = cpu_ldq_le_data_ra(env, addr + 8, ra); +#ifdef TARGET_WORDS_BIGENDIAN + d0 = bswap16x4(d0); + d1 = bswap16x4(d1); #endif + pwd->d[0] = d0; + pwd->d[1] = d1; } void helper_msa_ld_w(CPUMIPSState *env, uint32_t wd, target_ulong addr) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - MEMOP_IDX(DF_WORD) -#if !defined(CONFIG_USER_ONLY) -#if !defined(HOST_WORDS_BIGENDIAN) - pwd->w[0] = helper_ret_ldul_mmu(env, addr + (0 << DF_WORD), oi, GETPC()); - pwd->w[1] = helper_ret_ldul_mmu(env, addr + (1 << DF_WORD), oi, GETPC()); - pwd->w[2] = helper_ret_ldul_mmu(env, addr + (2 << DF_WORD), oi, GETPC()); - pwd->w[3] = helper_ret_ldul_mmu(env, addr + (3 << DF_WORD), oi, GETPC()); -#else - pwd->w[0] = helper_ret_ldul_mmu(env, addr + (1 << DF_WORD), oi, GETPC()); - pwd->w[1] = helper_ret_ldul_mmu(env, addr + (0 << DF_WORD), oi, GETPC()); - pwd->w[2] = helper_ret_ldul_mmu(env, addr + (3 << DF_WORD), oi, GETPC()); - pwd->w[3] = helper_ret_ldul_mmu(env, addr + (2 << DF_WORD), oi, GETPC()); -#endif -#else -#if !defined(HOST_WORDS_BIGENDIAN) - pwd->w[0] = cpu_ldl_data(env, addr + (0 << DF_WORD)); - pwd->w[1] = cpu_ldl_data(env, addr + (1 << DF_WORD)); - pwd->w[2] = cpu_ldl_data(env, addr + (2 << DF_WORD)); - pwd->w[3] = cpu_ldl_data(env, addr + (3 << DF_WORD)); -#else - pwd->w[0] = cpu_ldl_data(env, addr + (1 << DF_WORD)); - pwd->w[1] = cpu_ldl_data(env, addr + (0 << DF_WORD)); - pwd->w[2] = cpu_ldl_data(env, addr + (3 << DF_WORD)); - pwd->w[3] = cpu_ldl_data(env, addr + (2 << DF_WORD)); -#endif + uintptr_t ra = GETPC(); + uint64_t d0, d1; + + /* + * Load 8 bytes at a time. Use little-endian load, then for + * big-endian target, we must then bswap the two words. + */ + d0 = cpu_ldq_le_data_ra(env, addr + 0, ra); + d1 = cpu_ldq_le_data_ra(env, addr + 8, ra); +#ifdef TARGET_WORDS_BIGENDIAN + d0 = bswap32x2(d0); + d1 = bswap32x2(d1); #endif + pwd->d[0] = d0; + pwd->d[1] = d1; } void helper_msa_ld_d(CPUMIPSState *env, uint32_t wd, target_ulong addr) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - MEMOP_IDX(DF_DOUBLE) -#if !defined(CONFIG_USER_ONLY) - pwd->d[0] = helper_ret_ldq_mmu(env, addr + (0 << DF_DOUBLE), oi, GETPC()); - pwd->d[1] = helper_ret_ldq_mmu(env, addr + (1 << DF_DOUBLE), oi, GETPC()); -#else - pwd->d[0] = cpu_ldq_data(env, addr + (0 << DF_DOUBLE)); - pwd->d[1] = cpu_ldq_data(env, addr + (1 << DF_DOUBLE)); -#endif + uintptr_t ra = GETPC(); + uint64_t d0, d1; + + d0 = cpu_ldq_data_ra(env, addr + 0, ra); + d1 = cpu_ldq_data_ra(env, addr + 8, ra); + pwd->d[0] = d0; + pwd->d[1] = d1; } #define MSA_PAGESPAN(x) \ @@ -8415,82 +8323,13 @@ void helper_msa_st_b(CPUMIPSState *env, uint32_t wd, { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); int mmu_idx = cpu_mmu_index(env, false); + uintptr_t ra = GETPC(); - MEMOP_IDX(DF_BYTE) - ensure_writable_pages(env, addr, mmu_idx, GETPC()); -#if !defined(CONFIG_USER_ONLY) -#if !defined(HOST_WORDS_BIGENDIAN) - helper_ret_stb_mmu(env, addr + (0 << DF_BYTE), pwd->b[0], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (1 << DF_BYTE), pwd->b[1], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (2 << DF_BYTE), pwd->b[2], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (3 << DF_BYTE), pwd->b[3], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (4 << DF_BYTE), pwd->b[4], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (5 << DF_BYTE), pwd->b[5], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (6 << DF_BYTE), pwd->b[6], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (7 << DF_BYTE), pwd->b[7], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (8 
<< DF_BYTE), pwd->b[8], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (9 << DF_BYTE), pwd->b[9], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (10 << DF_BYTE), pwd->b[10], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (11 << DF_BYTE), pwd->b[11], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (12 << DF_BYTE), pwd->b[12], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (13 << DF_BYTE), pwd->b[13], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (14 << DF_BYTE), pwd->b[14], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (15 << DF_BYTE), pwd->b[15], oi, GETPC()); -#else - helper_ret_stb_mmu(env, addr + (7 << DF_BYTE), pwd->b[0], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (6 << DF_BYTE), pwd->b[1], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (5 << DF_BYTE), pwd->b[2], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (4 << DF_BYTE), pwd->b[3], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (3 << DF_BYTE), pwd->b[4], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (2 << DF_BYTE), pwd->b[5], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (1 << DF_BYTE), pwd->b[6], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (0 << DF_BYTE), pwd->b[7], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (15 << DF_BYTE), pwd->b[8], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (14 << DF_BYTE), pwd->b[9], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (13 << DF_BYTE), pwd->b[10], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (12 << DF_BYTE), pwd->b[11], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (11 << DF_BYTE), pwd->b[12], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (10 << DF_BYTE), pwd->b[13], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (9 << DF_BYTE), pwd->b[14], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (8 << DF_BYTE), pwd->b[15], oi, GETPC()); -#endif -#else -#if !defined(HOST_WORDS_BIGENDIAN) - cpu_stb_data(env, addr + (0 << DF_BYTE), pwd->b[0]); - cpu_stb_data(env, addr + (1 << DF_BYTE), pwd->b[1]); - cpu_stb_data(env, addr + (2 << DF_BYTE), pwd->b[2]); - cpu_stb_data(env, addr + (3 << DF_BYTE), pwd->b[3]); - cpu_stb_data(env, addr + (4 << DF_BYTE), pwd->b[4]); - cpu_stb_data(env, addr + (5 << DF_BYTE), pwd->b[5]); - cpu_stb_data(env, addr + (6 << DF_BYTE), pwd->b[6]); - cpu_stb_data(env, addr + (7 << DF_BYTE), pwd->b[7]); - cpu_stb_data(env, addr + (8 << DF_BYTE), pwd->b[8]); - cpu_stb_data(env, addr + (9 << DF_BYTE), pwd->b[9]); - cpu_stb_data(env, addr + (10 << DF_BYTE), pwd->b[10]); - cpu_stb_data(env, addr + (11 << DF_BYTE), pwd->b[11]); - cpu_stb_data(env, addr + (12 << DF_BYTE), pwd->b[12]); - cpu_stb_data(env, addr + (13 << DF_BYTE), pwd->b[13]); - cpu_stb_data(env, addr + (14 << DF_BYTE), pwd->b[14]); - cpu_stb_data(env, addr + (15 << DF_BYTE), pwd->b[15]); -#else - cpu_stb_data(env, addr + (7 << DF_BYTE), pwd->b[0]); - cpu_stb_data(env, addr + (6 << DF_BYTE), pwd->b[1]); - cpu_stb_data(env, addr + (5 << DF_BYTE), pwd->b[2]); - cpu_stb_data(env, addr + (4 << DF_BYTE), pwd->b[3]); - cpu_stb_data(env, addr + (3 << DF_BYTE), pwd->b[4]); - cpu_stb_data(env, addr + (2 << DF_BYTE), pwd->b[5]); - cpu_stb_data(env, addr + (1 << DF_BYTE), pwd->b[6]); - cpu_stb_data(env, addr + (0 << DF_BYTE), pwd->b[7]); - cpu_stb_data(env, addr + (15 << DF_BYTE), pwd->b[8]); - cpu_stb_data(env, addr + (14 << DF_BYTE), pwd->b[9]); - cpu_stb_data(env, addr + (13 << DF_BYTE), pwd->b[10]); - cpu_stb_data(env, addr + (12 << DF_BYTE), pwd->b[11]); - cpu_stb_data(env, addr + (11 << DF_BYTE), pwd->b[12]); - cpu_stb_data(env, addr + (10 << DF_BYTE), pwd->b[13]); - cpu_stb_data(env, addr + (9 << DF_BYTE), 
pwd->b[14]); - cpu_stb_data(env, addr + (8 << DF_BYTE), pwd->b[15]); -#endif -#endif + ensure_writable_pages(env, addr, mmu_idx, ra); + + /* Store 8 bytes at a time. Vector element ordering makes this LE. */ + cpu_stq_le_data_ra(env, addr + 0, pwd->d[0], ra); + cpu_stq_le_data_ra(env, addr + 0, pwd->d[1], ra); } void helper_msa_st_h(CPUMIPSState *env, uint32_t wd, @@ -8498,50 +8337,20 @@ void helper_msa_st_h(CPUMIPSState *env, uint32_t wd, { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); int mmu_idx = cpu_mmu_index(env, false); + uintptr_t ra = GETPC(); + uint64_t d0, d1; - MEMOP_IDX(DF_HALF) - ensure_writable_pages(env, addr, mmu_idx, GETPC()); -#if !defined(CONFIG_USER_ONLY) -#if !defined(HOST_WORDS_BIGENDIAN) - helper_ret_stw_mmu(env, addr + (0 << DF_HALF), pwd->h[0], oi, GETPC()); - helper_ret_stw_mmu(env, addr + (1 << DF_HALF), pwd->h[1], oi, GETPC()); - helper_ret_stw_mmu(env, addr + (2 << DF_HALF), pwd->h[2], oi, GETPC()); - helper_ret_stw_mmu(env, addr + (3 << DF_HALF), pwd->h[3], oi, GETPC()); - helper_ret_stw_mmu(env, addr + (4 << DF_HALF), pwd->h[4], oi, GETPC()); - helper_ret_stw_mmu(env, addr + (5 << DF_HALF), pwd->h[5], oi, GETPC()); - helper_ret_stw_mmu(env, addr + (6 << DF_HALF), pwd->h[6], oi, GETPC()); - helper_ret_stw_mmu(env, addr + (7 << DF_HALF), pwd->h[7], oi, GETPC()); -#else - helper_ret_stw_mmu(env, addr + (3 << DF_HALF), pwd->h[0], oi, GETPC()); - helper_ret_stw_mmu(env, addr + (2 << DF_HALF), pwd->h[1], oi, GETPC()); - helper_ret_stw_mmu(env, addr + (1 << DF_HALF), pwd->h[2], oi, GETPC()); - helper_ret_stw_mmu(env, addr + (0 << DF_HALF), pwd->h[3], oi, GETPC()); - helper_ret_stw_mmu(env, addr + (7 << DF_HALF), pwd->h[4], oi, GETPC()); - helper_ret_stw_mmu(env, addr + (6 << DF_HALF), pwd->h[5], oi, GETPC()); - helper_ret_stw_mmu(env, addr + (5 << DF_HALF), pwd->h[6], oi, GETPC()); - helper_ret_stw_mmu(env, addr + (4 << DF_HALF), pwd->h[7], oi, GETPC()); -#endif -#else -#if !defined(HOST_WORDS_BIGENDIAN) - cpu_stw_data(env, addr + (0 << DF_HALF), pwd->h[0]); - cpu_stw_data(env, addr + (1 << DF_HALF), pwd->h[1]); - cpu_stw_data(env, addr + (2 << DF_HALF), pwd->h[2]); - cpu_stw_data(env, addr + (3 << DF_HALF), pwd->h[3]); - cpu_stw_data(env, addr + (4 << DF_HALF), pwd->h[4]); - cpu_stw_data(env, addr + (5 << DF_HALF), pwd->h[5]); - cpu_stw_data(env, addr + (6 << DF_HALF), pwd->h[6]); - cpu_stw_data(env, addr + (7 << DF_HALF), pwd->h[7]); -#else - cpu_stw_data(env, addr + (3 << DF_HALF), pwd->h[0]); - cpu_stw_data(env, addr + (2 << DF_HALF), pwd->h[1]); - cpu_stw_data(env, addr + (1 << DF_HALF), pwd->h[2]); - cpu_stw_data(env, addr + (0 << DF_HALF), pwd->h[3]); - cpu_stw_data(env, addr + (7 << DF_HALF), pwd->h[4]); - cpu_stw_data(env, addr + (6 << DF_HALF), pwd->h[5]); - cpu_stw_data(env, addr + (5 << DF_HALF), pwd->h[6]); - cpu_stw_data(env, addr + (4 << DF_HALF), pwd->h[7]); -#endif + ensure_writable_pages(env, addr, mmu_idx, ra); + + /* Store 8 bytes at a time. See helper_msa_ld_h. 
*/ + d0 = pwd->d[0]; + d1 = pwd->d[1]; +#ifdef TARGET_WORDS_BIGENDIAN + d0 = bswap16x4(d0); + d1 = bswap16x4(d1); #endif + cpu_stq_le_data_ra(env, addr + 0, d0, ra); + cpu_stq_le_data_ra(env, addr + 8, d1, ra); } void helper_msa_st_w(CPUMIPSState *env, uint32_t wd, @@ -8549,34 +8358,20 @@ void helper_msa_st_w(CPUMIPSState *env, uint32_t wd, { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); int mmu_idx = cpu_mmu_index(env, false); + uintptr_t ra = GETPC(); + uint64_t d0, d1; - MEMOP_IDX(DF_WORD) - ensure_writable_pages(env, addr, mmu_idx, GETPC()); -#if !defined(CONFIG_USER_ONLY) -#if !defined(HOST_WORDS_BIGENDIAN) - helper_ret_stl_mmu(env, addr + (0 << DF_WORD), pwd->w[0], oi, GETPC()); - helper_ret_stl_mmu(env, addr + (1 << DF_WORD), pwd->w[1], oi, GETPC()); - helper_ret_stl_mmu(env, addr + (2 << DF_WORD), pwd->w[2], oi, GETPC()); - helper_ret_stl_mmu(env, addr + (3 << DF_WORD), pwd->w[3], oi, GETPC()); -#else - helper_ret_stl_mmu(env, addr + (1 << DF_WORD), pwd->w[0], oi, GETPC()); - helper_ret_stl_mmu(env, addr + (0 << DF_WORD), pwd->w[1], oi, GETPC()); - helper_ret_stl_mmu(env, addr + (3 << DF_WORD), pwd->w[2], oi, GETPC()); - helper_ret_stl_mmu(env, addr + (2 << DF_WORD), pwd->w[3], oi, GETPC()); -#endif -#else -#if !defined(HOST_WORDS_BIGENDIAN) - cpu_stl_data(env, addr + (0 << DF_WORD), pwd->w[0]); - cpu_stl_data(env, addr + (1 << DF_WORD), pwd->w[1]); - cpu_stl_data(env, addr + (2 << DF_WORD), pwd->w[2]); - cpu_stl_data(env, addr + (3 << DF_WORD), pwd->w[3]); -#else - cpu_stl_data(env, addr + (1 << DF_WORD), pwd->w[0]); - cpu_stl_data(env, addr + (0 << DF_WORD), pwd->w[1]); - cpu_stl_data(env, addr + (3 << DF_WORD), pwd->w[2]); - cpu_stl_data(env, addr + (2 << DF_WORD), pwd->w[3]); -#endif + ensure_writable_pages(env, addr, mmu_idx, ra); + + /* Store 8 bytes at a time. See helper_msa_ld_w. 
*/ + d0 = pwd->d[0]; + d1 = pwd->d[1]; +#ifdef TARGET_WORDS_BIGENDIAN + d0 = bswap32x2(d0); + d1 = bswap32x2(d1); #endif + cpu_stq_le_data_ra(env, addr + 0, d0, ra); + cpu_stq_le_data_ra(env, addr + 8, d1, ra); } void helper_msa_st_d(CPUMIPSState *env, uint32_t wd, @@ -8584,14 +8379,10 @@ void helper_msa_st_d(CPUMIPSState *env, uint32_t wd, { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); int mmu_idx = cpu_mmu_index(env, false); + uintptr_t ra = GETPC(); - MEMOP_IDX(DF_DOUBLE) ensure_writable_pages(env, addr, mmu_idx, GETPC()); -#if !defined(CONFIG_USER_ONLY) - helper_ret_stq_mmu(env, addr + (0 << DF_DOUBLE), pwd->d[0], oi, GETPC()); - helper_ret_stq_mmu(env, addr + (1 << DF_DOUBLE), pwd->d[1], oi, GETPC()); -#else - cpu_stq_data(env, addr + (0 << DF_DOUBLE), pwd->d[0]); - cpu_stq_data(env, addr + (1 << DF_DOUBLE), pwd->d[1]); -#endif + + cpu_stq_data_ra(env, addr + 0, pwd->d[0], ra); + cpu_stq_data_ra(env, addr + 8, pwd->d[1], ra); } diff --git a/target/ppc/mem_helper.c b/target/ppc/mem_helper.c index e2282baa8d..39945d9ea5 100644 --- a/target/ppc/mem_helper.c +++ b/target/ppc/mem_helper.c @@ -25,7 +25,6 @@ #include "exec/helper-proto.h" #include "helper_regs.h" #include "exec/cpu_ldst.h" -#include "tcg/tcg.h" #include "internal.h" #include "qemu/atomic128.h" diff --git a/target/ppc/translate.c b/target/ppc/translate.c index b985e9e55b..9ca78ee156 100644 --- a/target/ppc/translate.c +++ b/target/ppc/translate.c @@ -3462,10 +3462,12 @@ static void gen_std(DisasContext *ctx) if (HAVE_ATOMIC128) { TCGv_i32 oi = tcg_temp_new_i32(); if (ctx->le_mode) { - tcg_gen_movi_i32(oi, make_memop_idx(MO_LEQ, ctx->mem_idx)); + tcg_gen_movi_i32(oi, make_memop_idx(MO_LE | MO_128, + ctx->mem_idx)); gen_helper_stq_le_parallel(cpu_env, EA, lo, hi, oi); } else { - tcg_gen_movi_i32(oi, make_memop_idx(MO_BEQ, ctx->mem_idx)); + tcg_gen_movi_i32(oi, make_memop_idx(MO_BE | MO_128, + ctx->mem_idx)); gen_helper_stq_be_parallel(cpu_env, EA, lo, hi, oi); } tcg_temp_free_i32(oi); @@ -4067,11 +4069,11 @@ static void gen_lqarx(DisasContext *ctx) if (HAVE_ATOMIC128) { TCGv_i32 oi = tcg_temp_new_i32(); if (ctx->le_mode) { - tcg_gen_movi_i32(oi, make_memop_idx(MO_LEQ | MO_ALIGN_16, + tcg_gen_movi_i32(oi, make_memop_idx(MO_LE | MO_128 | MO_ALIGN, ctx->mem_idx)); gen_helper_lq_le_parallel(lo, cpu_env, EA, oi); } else { - tcg_gen_movi_i32(oi, make_memop_idx(MO_BEQ | MO_ALIGN_16, + tcg_gen_movi_i32(oi, make_memop_idx(MO_BE | MO_128 | MO_ALIGN, ctx->mem_idx)); gen_helper_lq_be_parallel(lo, cpu_env, EA, oi); } @@ -4122,7 +4124,7 @@ static void gen_stqcx_(DisasContext *ctx) if (tb_cflags(ctx->base.tb) & CF_PARALLEL) { if (HAVE_CMPXCHG128) { - TCGv_i32 oi = tcg_const_i32(DEF_MEMOP(MO_Q) | MO_ALIGN_16); + TCGv_i32 oi = tcg_const_i32(DEF_MEMOP(MO_128) | MO_ALIGN); if (ctx->le_mode) { gen_helper_stqcx_le_parallel(cpu_crf[0], cpu_env, EA, lo, hi, oi); diff --git a/target/s390x/tcg/mem_helper.c b/target/s390x/tcg/mem_helper.c index 75f6735545..17e3f83641 100644 --- a/target/s390x/tcg/mem_helper.c +++ b/target/s390x/tcg/mem_helper.c @@ -27,7 +27,6 @@ #include "exec/cpu_ldst.h" #include "qemu/int128.h" #include "qemu/atomic128.h" -#include "tcg/tcg.h" #include "trace.h" #if !defined(CONFIG_USER_ONLY) @@ -250,13 +249,13 @@ static void do_access_memset(CPUS390XState *env, vaddr vaddr, char *haddr, * page. This is especially relevant to speed up TLB_NOTDIRTY. 
*/ g_assert(size > 0); - helper_ret_stb_mmu(env, vaddr, byte, oi, ra); + cpu_stb_mmu(env, vaddr, byte, oi, ra); haddr = tlb_vaddr_to_host(env, vaddr, MMU_DATA_STORE, mmu_idx); if (likely(haddr)) { memset(haddr + 1, byte, size - 1); } else { for (i = 1; i < size; i++) { - helper_ret_stb_mmu(env, vaddr + i, byte, oi, ra); + cpu_stb_mmu(env, vaddr + i, byte, oi, ra); } } } @@ -292,7 +291,7 @@ static uint8_t do_access_get_byte(CPUS390XState *env, vaddr vaddr, char **haddr, * Do a single access and test if we can then get access to the * page. This is especially relevant to speed up TLB_NOTDIRTY. */ - byte = helper_ret_ldub_mmu(env, vaddr + offset, oi, ra); + byte = cpu_ldb_mmu(env, vaddr + offset, oi, ra); *haddr = tlb_vaddr_to_host(env, vaddr, MMU_DATA_LOAD, mmu_idx); return byte; #endif @@ -326,7 +325,7 @@ static void do_access_set_byte(CPUS390XState *env, vaddr vaddr, char **haddr, * Do a single access and test if we can then get access to the * page. This is especially relevant to speed up TLB_NOTDIRTY. */ - helper_ret_stb_mmu(env, vaddr + offset, byte, oi, ra); + cpu_stb_mmu(env, vaddr + offset, byte, oi, ra); *haddr = tlb_vaddr_to_host(env, vaddr, MMU_DATA_STORE, mmu_idx); #endif } @@ -1811,7 +1810,7 @@ void HELPER(cdsg_parallel)(CPUS390XState *env, uint64_t addr, assert(HAVE_CMPXCHG128); mem_idx = cpu_mmu_index(env, false); - oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx); + oi = make_memop_idx(MO_TE | MO_128 | MO_ALIGN, mem_idx); oldv = cpu_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra); fail = !int128_eq(oldv, cmpv); @@ -1940,7 +1939,7 @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1, cpu_stq_data_ra(env, a1 + 0, int128_gethi(nv), ra); cpu_stq_data_ra(env, a1 + 8, int128_getlo(nv), ra); } else if (HAVE_CMPXCHG128) { - MemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx); + MemOpIdx oi = make_memop_idx(MO_TE | MO_128 | MO_ALIGN, mem_idx); ov = cpu_atomic_cmpxchgo_be_mmu(env, a1, cv, nv, oi, ra); cc = !int128_eq(ov, cv); } else { diff --git a/target/sparc/ldst_helper.c b/target/sparc/ldst_helper.c index abe2889d27..bbf3601cb1 100644 --- a/target/sparc/ldst_helper.c +++ b/target/sparc/ldst_helper.c @@ -1333,27 +1333,27 @@ uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr, oi = make_memop_idx(memop, idx); switch (size) { case 1: - ret = helper_ret_ldub_mmu(env, addr, oi, GETPC()); + ret = cpu_ldb_mmu(env, addr, oi, GETPC()); break; case 2: if (asi & 8) { - ret = helper_le_lduw_mmu(env, addr, oi, GETPC()); + ret = cpu_ldw_le_mmu(env, addr, oi, GETPC()); } else { - ret = helper_be_lduw_mmu(env, addr, oi, GETPC()); + ret = cpu_ldw_be_mmu(env, addr, oi, GETPC()); } break; case 4: if (asi & 8) { - ret = helper_le_ldul_mmu(env, addr, oi, GETPC()); + ret = cpu_ldl_le_mmu(env, addr, oi, GETPC()); } else { - ret = helper_be_ldul_mmu(env, addr, oi, GETPC()); + ret = cpu_ldl_be_mmu(env, addr, oi, GETPC()); } break; case 8: if (asi & 8) { - ret = helper_le_ldq_mmu(env, addr, oi, GETPC()); + ret = cpu_ldq_le_mmu(env, addr, oi, GETPC()); } else { - ret = helper_be_ldq_mmu(env, addr, oi, GETPC()); + ret = cpu_ldq_be_mmu(env, addr, oi, GETPC()); } break; default: |