author    | Tony Nguyen <tony.nguyen@bt.com>                 | 2019-08-24 04:10:58 +1000
committer | Richard Henderson <richard.henderson@linaro.org> | 2019-09-03 08:30:38 -0700
commit    | 14776ab5a12972ea439c7fb2203a4c15a09094b4 (patch)
tree      | b53091625b410a722bf5f4e17a9631457994eed4 /tcg
parent    | fec105c2abda8567ec15230429c41429b5ee307c (diff)
tcg: TCGMemOp is now accelerator independent MemOp
Preparation for collapsing the two byte swaps, adjust_endianness and
handle_bswap, along the I/O path.
Target-dependent attributes are conditionalized upon NEED_CPU_H.
Signed-off-by: Tony Nguyen <tony.nguyen@bt.com>
Acked-by: David Gibson <david@gibson.dropbear.id.au>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Acked-by: Cornelia Huck <cohuck@redhat.com>
Message-Id: <81d9cd7d7f5aaadfa772d6c48ecee834e9cf7882.1566466906.git.tony.nguyen@bt.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
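
The accelerator-independent definition that replaces TCGMemOp lives in the new "exec/memop.h", which the tcg/tcg.h hunk below starts to include. A minimal sketch of that header, reconstructed from the TCGMemOp members this patch removes from tcg/tcg.h; the NEED_CPU_H guard shown here is an assumption about how the target-dependent attributes are conditionalized, and the alignment and combined MO_* flags are elided:

/*
 * Sketch only: the accelerator-independent MemOp as it might appear in
 * include/exec/memop.h.  Values are copied from the TCGMemOp enum removed
 * from tcg/tcg.h below; the NEED_CPU_H guard is an assumed placement for
 * the target-dependent members.
 */
typedef enum MemOp {
    MO_8     = 0,
    MO_16    = 1,
    MO_32    = 2,
    MO_64    = 3,
    MO_SIZE  = 3,        /* Mask for the above. */

    MO_SIGN  = 4,        /* Sign-extended, otherwise zero-extended. */

    MO_BSWAP = 8,        /* Host reverse endian. */
#ifdef HOST_WORDS_BIGENDIAN
    MO_LE    = MO_BSWAP,
    MO_BE    = 0,
#else
    MO_LE    = 0,
    MO_BE    = MO_BSWAP,
#endif

#ifdef NEED_CPU_H
    /* Target-dependent: only meaningful once a target's cpu.h is in scope. */
#ifdef TARGET_WORDS_BIGENDIAN
    MO_TE    = MO_BE,
#else
    MO_TE    = MO_LE,
#endif
#endif

    MO_SSIZE = MO_SIZE | MO_SIGN,
} MemOp;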
Diffstat (limited to 'tcg')
-rw-r--r-- | tcg/README                   |   2
-rw-r--r-- | tcg/aarch64/tcg-target.inc.c |  26
-rw-r--r-- | tcg/arm/tcg-target.inc.c     |  26
-rw-r--r-- | tcg/i386/tcg-target.inc.c    |  24
-rw-r--r-- | tcg/mips/tcg-target.inc.c    |  16
-rw-r--r-- | tcg/optimize.c               |   2
-rw-r--r-- | tcg/ppc/tcg-target.inc.c     |  12
-rw-r--r-- | tcg/riscv/tcg-target.inc.c   |  20
-rw-r--r-- | tcg/s390/tcg-target.inc.c    |  14
-rw-r--r-- | tcg/sparc/tcg-target.inc.c   |   6
-rw-r--r-- | tcg/tcg-op.c                 |  38
-rw-r--r-- | tcg/tcg-op.h                 |  86
-rw-r--r-- | tcg/tcg.c                    |   2
-rw-r--r-- | tcg/tcg.h                    | 101
14 files changed, 143 insertions, 232 deletions
diff --git a/tcg/README b/tcg/README index 21fcdf737f..b4382fa8b5 100644 --- a/tcg/README +++ b/tcg/README @@ -512,7 +512,7 @@ Both t0 and t1 may be split into little-endian ordered pairs of registers if dealing with 64-bit quantities on a 32-bit host. The memidx selects the qemu tlb index to use (e.g. user or kernel access). -The flags are the TCGMemOp bits, selecting the sign, width, and endianness +The flags are the MemOp bits, selecting the sign, width, and endianness of the memory access. For a 32-bit host, qemu_ld/st_i64 is guaranteed to only be used with a diff --git a/tcg/aarch64/tcg-target.inc.c b/tcg/aarch64/tcg-target.inc.c index 0713448bf5..3f921015d3 100644 --- a/tcg/aarch64/tcg-target.inc.c +++ b/tcg/aarch64/tcg-target.inc.c @@ -1423,7 +1423,7 @@ static inline void tcg_out_rev16(TCGContext *s, TCGReg rd, TCGReg rn) tcg_out_insn(s, 3507, REV16, TCG_TYPE_I32, rd, rn); } -static inline void tcg_out_sxt(TCGContext *s, TCGType ext, TCGMemOp s_bits, +static inline void tcg_out_sxt(TCGContext *s, TCGType ext, MemOp s_bits, TCGReg rd, TCGReg rn) { /* Using ALIASes SXTB, SXTH, SXTW, of SBFM Xd, Xn, #0, #7|15|31 */ @@ -1431,7 +1431,7 @@ static inline void tcg_out_sxt(TCGContext *s, TCGType ext, TCGMemOp s_bits, tcg_out_sbfm(s, ext, rd, rn, 0, bits); } -static inline void tcg_out_uxt(TCGContext *s, TCGMemOp s_bits, +static inline void tcg_out_uxt(TCGContext *s, MemOp s_bits, TCGReg rd, TCGReg rn) { /* Using ALIASes UXTB, UXTH of UBFM Wd, Wn, #0, #7|15 */ @@ -1580,8 +1580,8 @@ static inline void tcg_out_adr(TCGContext *s, TCGReg rd, void *target) static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) { TCGMemOpIdx oi = lb->oi; - TCGMemOp opc = get_memop(oi); - TCGMemOp size = opc & MO_SIZE; + MemOp opc = get_memop(oi); + MemOp size = opc & MO_SIZE; if (!reloc_pc19(lb->label_ptr[0], s->code_ptr)) { return false; @@ -1605,8 +1605,8 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) { TCGMemOpIdx oi = lb->oi; - TCGMemOp opc = get_memop(oi); - TCGMemOp size = opc & MO_SIZE; + MemOp opc = get_memop(oi); + MemOp size = opc & MO_SIZE; if (!reloc_pc19(lb->label_ptr[0], s->code_ptr)) { return false; @@ -1649,7 +1649,7 @@ QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 8); slow path for the failure case, which will be patched later when finalizing the slow path. Generated code returns the host addend in X1, clobbers X0,X2,X3,TMP. 
*/ -static void tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, TCGMemOp opc, +static void tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, MemOp opc, tcg_insn_unit **label_ptr, int mem_index, bool is_read) { @@ -1709,11 +1709,11 @@ static void tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, TCGMemOp opc, #endif /* CONFIG_SOFTMMU */ -static void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp memop, TCGType ext, +static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp memop, TCGType ext, TCGReg data_r, TCGReg addr_r, TCGType otype, TCGReg off_r) { - const TCGMemOp bswap = memop & MO_BSWAP; + const MemOp bswap = memop & MO_BSWAP; switch (memop & MO_SSIZE) { case MO_UB: @@ -1765,11 +1765,11 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp memop, TCGType ext, } } -static void tcg_out_qemu_st_direct(TCGContext *s, TCGMemOp memop, +static void tcg_out_qemu_st_direct(TCGContext *s, MemOp memop, TCGReg data_r, TCGReg addr_r, TCGType otype, TCGReg off_r) { - const TCGMemOp bswap = memop & MO_BSWAP; + const MemOp bswap = memop & MO_BSWAP; switch (memop & MO_SIZE) { case MO_8: @@ -1804,7 +1804,7 @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGMemOp memop, static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, TCGMemOpIdx oi, TCGType ext) { - TCGMemOp memop = get_memop(oi); + MemOp memop = get_memop(oi); const TCGType otype = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32; #ifdef CONFIG_SOFTMMU unsigned mem_index = get_mmuidx(oi); @@ -1829,7 +1829,7 @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, TCGMemOpIdx oi) { - TCGMemOp memop = get_memop(oi); + MemOp memop = get_memop(oi); const TCGType otype = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32; #ifdef CONFIG_SOFTMMU unsigned mem_index = get_mmuidx(oi); diff --git a/tcg/arm/tcg-target.inc.c b/tcg/arm/tcg-target.inc.c index ece88dc2eb..94d80d79d1 100644 --- a/tcg/arm/tcg-target.inc.c +++ b/tcg/arm/tcg-target.inc.c @@ -1233,7 +1233,7 @@ QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 4); containing the addend of the tlb entry. Clobbers R0, R1, R2, TMP. */ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi, - TCGMemOp opc, int mem_index, bool is_load) + MemOp opc, int mem_index, bool is_load) { int cmp_off = (is_load ? 
offsetof(CPUTLBEntry, addr_read) : offsetof(CPUTLBEntry, addr_write)); @@ -1348,7 +1348,7 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) { TCGReg argreg, datalo, datahi; TCGMemOpIdx oi = lb->oi; - TCGMemOp opc = get_memop(oi); + MemOp opc = get_memop(oi); void *func; if (!reloc_pc24(lb->label_ptr[0], s->code_ptr)) { @@ -1412,7 +1412,7 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) { TCGReg argreg, datalo, datahi; TCGMemOpIdx oi = lb->oi; - TCGMemOp opc = get_memop(oi); + MemOp opc = get_memop(oi); if (!reloc_pc24(lb->label_ptr[0], s->code_ptr)) { return false; @@ -1453,11 +1453,11 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) } #endif /* SOFTMMU */ -static inline void tcg_out_qemu_ld_index(TCGContext *s, TCGMemOp opc, +static inline void tcg_out_qemu_ld_index(TCGContext *s, MemOp opc, TCGReg datalo, TCGReg datahi, TCGReg addrlo, TCGReg addend) { - TCGMemOp bswap = opc & MO_BSWAP; + MemOp bswap = opc & MO_BSWAP; switch (opc & MO_SSIZE) { case MO_UB: @@ -1514,11 +1514,11 @@ static inline void tcg_out_qemu_ld_index(TCGContext *s, TCGMemOp opc, } } -static inline void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp opc, +static inline void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg datalo, TCGReg datahi, TCGReg addrlo) { - TCGMemOp bswap = opc & MO_BSWAP; + MemOp bswap = opc & MO_BSWAP; switch (opc & MO_SSIZE) { case MO_UB: @@ -1577,7 +1577,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64) { TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused)); TCGMemOpIdx oi; - TCGMemOp opc; + MemOp opc; #ifdef CONFIG_SOFTMMU int mem_index; TCGReg addend; @@ -1614,11 +1614,11 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64) #endif } -static inline void tcg_out_qemu_st_index(TCGContext *s, int cond, TCGMemOp opc, +static inline void tcg_out_qemu_st_index(TCGContext *s, int cond, MemOp opc, TCGReg datalo, TCGReg datahi, TCGReg addrlo, TCGReg addend) { - TCGMemOp bswap = opc & MO_BSWAP; + MemOp bswap = opc & MO_BSWAP; switch (opc & MO_SIZE) { case MO_8: @@ -1659,11 +1659,11 @@ static inline void tcg_out_qemu_st_index(TCGContext *s, int cond, TCGMemOp opc, } } -static inline void tcg_out_qemu_st_direct(TCGContext *s, TCGMemOp opc, +static inline void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg datalo, TCGReg datahi, TCGReg addrlo) { - TCGMemOp bswap = opc & MO_BSWAP; + MemOp bswap = opc & MO_BSWAP; switch (opc & MO_SIZE) { case MO_8: @@ -1708,7 +1708,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64) { TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused)); TCGMemOpIdx oi; - TCGMemOp opc; + MemOp opc; #ifdef CONFIG_SOFTMMU int mem_index; TCGReg addend; diff --git a/tcg/i386/tcg-target.inc.c b/tcg/i386/tcg-target.inc.c index 6ddeebf4bc..9d8ed974e0 100644 --- a/tcg/i386/tcg-target.inc.c +++ b/tcg/i386/tcg-target.inc.c @@ -1697,7 +1697,7 @@ static void * const qemu_st_helpers[16] = { First argument register is clobbered. 
*/ static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi, - int mem_index, TCGMemOp opc, + int mem_index, MemOp opc, tcg_insn_unit **label_ptr, int which) { const TCGReg r0 = TCG_REG_L0; @@ -1810,7 +1810,7 @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, bool is_64, static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) { TCGMemOpIdx oi = l->oi; - TCGMemOp opc = get_memop(oi); + MemOp opc = get_memop(oi); TCGReg data_reg; tcg_insn_unit **label_ptr = &l->label_ptr[0]; int rexw = (l->type == TCG_TYPE_I64 ? P_REXW : 0); @@ -1895,8 +1895,8 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) { TCGMemOpIdx oi = l->oi; - TCGMemOp opc = get_memop(oi); - TCGMemOp s_bits = opc & MO_SIZE; + MemOp opc = get_memop(oi); + MemOp s_bits = opc & MO_SIZE; tcg_insn_unit **label_ptr = &l->label_ptr[0]; TCGReg retaddr; @@ -1995,10 +1995,10 @@ static inline int setup_guest_base_seg(void) static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi, TCGReg base, int index, intptr_t ofs, - int seg, bool is64, TCGMemOp memop) + int seg, bool is64, MemOp memop) { - const TCGMemOp real_bswap = memop & MO_BSWAP; - TCGMemOp bswap = real_bswap; + const MemOp real_bswap = memop & MO_BSWAP; + MemOp bswap = real_bswap; int rexw = is64 * P_REXW; int movop = OPC_MOVL_GvEv; @@ -2103,7 +2103,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64) TCGReg datalo, datahi, addrlo; TCGReg addrhi __attribute__((unused)); TCGMemOpIdx oi; - TCGMemOp opc; + MemOp opc; #if defined(CONFIG_SOFTMMU) int mem_index; tcg_insn_unit *label_ptr[2]; @@ -2137,15 +2137,15 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64) static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi, TCGReg base, int index, intptr_t ofs, - int seg, TCGMemOp memop) + int seg, MemOp memop) { /* ??? Ideally we wouldn't need a scratch register. For user-only, we could perform the bswap twice to restore the original value instead of moving to the scratch. But as it is, the L constraint means that TCG_REG_L0 is definitely free here. 
*/ const TCGReg scratch = TCG_REG_L0; - const TCGMemOp real_bswap = memop & MO_BSWAP; - TCGMemOp bswap = real_bswap; + const MemOp real_bswap = memop & MO_BSWAP; + MemOp bswap = real_bswap; int movop = OPC_MOVL_EvGv; if (have_movbe && real_bswap) { @@ -2221,7 +2221,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64) TCGReg datalo, datahi, addrlo; TCGReg addrhi __attribute__((unused)); TCGMemOpIdx oi; - TCGMemOp opc; + MemOp opc; #if defined(CONFIG_SOFTMMU) int mem_index; tcg_insn_unit *label_ptr[2]; diff --git a/tcg/mips/tcg-target.inc.c b/tcg/mips/tcg-target.inc.c index 41bff32fb4..5442167045 100644 --- a/tcg/mips/tcg-target.inc.c +++ b/tcg/mips/tcg-target.inc.c @@ -1215,7 +1215,7 @@ static void tcg_out_tlb_load(TCGContext *s, TCGReg base, TCGReg addrl, TCGReg addrh, TCGMemOpIdx oi, tcg_insn_unit *label_ptr[2], bool is_load) { - TCGMemOp opc = get_memop(oi); + MemOp opc = get_memop(oi); unsigned s_bits = opc & MO_SIZE; unsigned a_bits = get_alignment_bits(opc); int mem_index = get_mmuidx(oi); @@ -1313,7 +1313,7 @@ static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOpIdx oi, static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) { TCGMemOpIdx oi = l->oi; - TCGMemOp opc = get_memop(oi); + MemOp opc = get_memop(oi); TCGReg v0; int i; @@ -1363,8 +1363,8 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) { TCGMemOpIdx oi = l->oi; - TCGMemOp opc = get_memop(oi); - TCGMemOp s_bits = opc & MO_SIZE; + MemOp opc = get_memop(oi); + MemOp s_bits = opc & MO_SIZE; int i; /* resolve label address */ @@ -1413,7 +1413,7 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) #endif static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi, - TCGReg base, TCGMemOp opc, bool is_64) + TCGReg base, MemOp opc, bool is_64) { switch (opc & (MO_SSIZE | MO_BSWAP)) { case MO_UB: @@ -1521,7 +1521,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) TCGReg addr_regl, addr_regh __attribute__((unused)); TCGReg data_regl, data_regh; TCGMemOpIdx oi; - TCGMemOp opc; + MemOp opc; #if defined(CONFIG_SOFTMMU) tcg_insn_unit *label_ptr[2]; #endif @@ -1558,7 +1558,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) } static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg lo, TCGReg hi, - TCGReg base, TCGMemOp opc) + TCGReg base, MemOp opc) { /* Don't clutter the code below with checks to avoid bswapping ZERO. 
*/ if ((lo | hi) == 0) { @@ -1624,7 +1624,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64) TCGReg addr_regl, addr_regh __attribute__((unused)); TCGReg data_regl, data_regh; TCGMemOpIdx oi; - TCGMemOp opc; + MemOp opc; #if defined(CONFIG_SOFTMMU) tcg_insn_unit *label_ptr[2]; #endif diff --git a/tcg/optimize.c b/tcg/optimize.c index cee2a36a60..f7f4e873c9 100644 --- a/tcg/optimize.c +++ b/tcg/optimize.c @@ -1013,7 +1013,7 @@ void tcg_optimize(TCGContext *s) CASE_OP_32_64(qemu_ld): { TCGMemOpIdx oi = op->args[nb_oargs + nb_iargs]; - TCGMemOp mop = get_memop(oi); + MemOp mop = get_memop(oi); if (!(mop & MO_SIGN)) { mask = (2ULL << ((8 << (mop & MO_SIZE)) - 1)) - 1; } diff --git a/tcg/ppc/tcg-target.inc.c b/tcg/ppc/tcg-target.inc.c index 852b8940fb..815edac077 100644 --- a/tcg/ppc/tcg-target.inc.c +++ b/tcg/ppc/tcg-target.inc.c @@ -1506,7 +1506,7 @@ QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -32768); in CR7, loads the addend of the TLB into R3, and returns the register containing the guest address (zero-extended into R4). Clobbers R0 and R2. */ -static TCGReg tcg_out_tlb_read(TCGContext *s, TCGMemOp opc, +static TCGReg tcg_out_tlb_read(TCGContext *s, MemOp opc, TCGReg addrlo, TCGReg addrhi, int mem_index, bool is_read) { @@ -1633,7 +1633,7 @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi, static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) { TCGMemOpIdx oi = lb->oi; - TCGMemOp opc = get_memop(oi); + MemOp opc = get_memop(oi); TCGReg hi, lo, arg = TCG_REG_R3; if (!reloc_pc14(lb->label_ptr[0], s->code_ptr)) { @@ -1680,8 +1680,8 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) { TCGMemOpIdx oi = lb->oi; - TCGMemOp opc = get_memop(oi); - TCGMemOp s_bits = opc & MO_SIZE; + MemOp opc = get_memop(oi); + MemOp s_bits = opc & MO_SIZE; TCGReg hi, lo, arg = TCG_REG_R3; if (!reloc_pc14(lb->label_ptr[0], s->code_ptr)) { @@ -1744,7 +1744,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) TCGReg datalo, datahi, addrlo, rbase; TCGReg addrhi __attribute__((unused)); TCGMemOpIdx oi; - TCGMemOp opc, s_bits; + MemOp opc, s_bits; #ifdef CONFIG_SOFTMMU int mem_index; tcg_insn_unit *label_ptr; @@ -1819,7 +1819,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64) TCGReg datalo, datahi, addrlo, rbase; TCGReg addrhi __attribute__((unused)); TCGMemOpIdx oi; - TCGMemOp opc, s_bits; + MemOp opc, s_bits; #ifdef CONFIG_SOFTMMU int mem_index; tcg_insn_unit *label_ptr; diff --git a/tcg/riscv/tcg-target.inc.c b/tcg/riscv/tcg-target.inc.c index 3e76bf5738..7018509693 100644 --- a/tcg/riscv/tcg-target.inc.c +++ b/tcg/riscv/tcg-target.inc.c @@ -970,7 +970,7 @@ static void tcg_out_tlb_load(TCGContext *s, TCGReg addrl, TCGReg addrh, TCGMemOpIdx oi, tcg_insn_unit **label_ptr, bool is_load) { - TCGMemOp opc = get_memop(oi); + MemOp opc = get_memop(oi); unsigned s_bits = opc & MO_SIZE; unsigned a_bits = get_alignment_bits(opc); tcg_target_long compare_mask; @@ -1044,7 +1044,7 @@ static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOpIdx oi, static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) { TCGMemOpIdx oi = l->oi; - TCGMemOp opc = get_memop(oi); + MemOp opc = get_memop(oi); TCGReg a0 = tcg_target_call_iarg_regs[0]; TCGReg a1 = tcg_target_call_iarg_regs[1]; TCGReg a2 = tcg_target_call_iarg_regs[2]; @@ -1077,8 +1077,8 @@ static bool 
tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) { TCGMemOpIdx oi = l->oi; - TCGMemOp opc = get_memop(oi); - TCGMemOp s_bits = opc & MO_SIZE; + MemOp opc = get_memop(oi); + MemOp s_bits = opc & MO_SIZE; TCGReg a0 = tcg_target_call_iarg_regs[0]; TCGReg a1 = tcg_target_call_iarg_regs[1]; TCGReg a2 = tcg_target_call_iarg_regs[2]; @@ -1121,9 +1121,9 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) #endif /* CONFIG_SOFTMMU */ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi, - TCGReg base, TCGMemOp opc, bool is_64) + TCGReg base, MemOp opc, bool is_64) { - const TCGMemOp bswap = opc & MO_BSWAP; + const MemOp bswap = opc & MO_BSWAP; /* We don't yet handle byteswapping, assert */ g_assert(!bswap); @@ -1172,7 +1172,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) TCGReg addr_regl, addr_regh __attribute__((unused)); TCGReg data_regl, data_regh; TCGMemOpIdx oi; - TCGMemOp opc; + MemOp opc; #if defined(CONFIG_SOFTMMU) tcg_insn_unit *label_ptr[1]; #endif @@ -1208,9 +1208,9 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) } static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg lo, TCGReg hi, - TCGReg base, TCGMemOp opc) + TCGReg base, MemOp opc) { - const TCGMemOp bswap = opc & MO_BSWAP; + const MemOp bswap = opc & MO_BSWAP; /* We don't yet handle byteswapping, assert */ g_assert(!bswap); @@ -1243,7 +1243,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64) TCGReg addr_regl, addr_regh __attribute__((unused)); TCGReg data_regl, data_regh; TCGMemOpIdx oi; - TCGMemOp opc; + MemOp opc; #if defined(CONFIG_SOFTMMU) tcg_insn_unit *label_ptr[1]; #endif diff --git a/tcg/s390/tcg-target.inc.c b/tcg/s390/tcg-target.inc.c index fe42939d98..8aaa4cebe8 100644 --- a/tcg/s390/tcg-target.inc.c +++ b/tcg/s390/tcg-target.inc.c @@ -1430,7 +1430,7 @@ static void tcg_out_call(TCGContext *s, tcg_insn_unit *dest) } } -static void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp opc, TCGReg data, +static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg data, TCGReg base, TCGReg index, int disp) { switch (opc & (MO_SSIZE | MO_BSWAP)) { @@ -1489,7 +1489,7 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp opc, TCGReg data, } } -static void tcg_out_qemu_st_direct(TCGContext *s, TCGMemOp opc, TCGReg data, +static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg data, TCGReg base, TCGReg index, int disp) { switch (opc & (MO_SIZE | MO_BSWAP)) { @@ -1544,7 +1544,7 @@ QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 19)); /* Load and compare a TLB entry, leaving the flags set. Loads the TLB addend into R2. Returns a register with the santitized guest address. 
*/ -static TCGReg tcg_out_tlb_read(TCGContext* s, TCGReg addr_reg, TCGMemOp opc, +static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, MemOp opc, int mem_index, bool is_ld) { unsigned s_bits = opc & MO_SIZE; @@ -1614,7 +1614,7 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) TCGReg addr_reg = lb->addrlo_reg; TCGReg data_reg = lb->datalo_reg; TCGMemOpIdx oi = lb->oi; - TCGMemOp opc = get_memop(oi); + MemOp opc = get_memop(oi); if (!patch_reloc(lb->label_ptr[0], R_390_PC16DBL, (intptr_t)s->code_ptr, 2)) { @@ -1639,7 +1639,7 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) TCGReg addr_reg = lb->addrlo_reg; TCGReg data_reg = lb->datalo_reg; TCGMemOpIdx oi = lb->oi; - TCGMemOp opc = get_memop(oi); + MemOp opc = get_memop(oi); if (!patch_reloc(lb->label_ptr[0], R_390_PC16DBL, (intptr_t)s->code_ptr, 2)) { @@ -1694,7 +1694,7 @@ static void tcg_prepare_user_ldst(TCGContext *s, TCGReg *addr_reg, static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg, TCGMemOpIdx oi) { - TCGMemOp opc = get_memop(oi); + MemOp opc = get_memop(oi); #ifdef CONFIG_SOFTMMU unsigned mem_index = get_mmuidx(oi); tcg_insn_unit *label_ptr; @@ -1721,7 +1721,7 @@ static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg, static void tcg_out_qemu_st(TCGContext* s, TCGReg data_reg, TCGReg addr_reg, TCGMemOpIdx oi) { - TCGMemOp opc = get_memop(oi); + MemOp opc = get_memop(oi); #ifdef CONFIG_SOFTMMU unsigned mem_index = get_mmuidx(oi); tcg_insn_unit *label_ptr; diff --git a/tcg/sparc/tcg-target.inc.c b/tcg/sparc/tcg-target.inc.c index 10b1cea63b..d7986cda5c 100644 --- a/tcg/sparc/tcg-target.inc.c +++ b/tcg/sparc/tcg-target.inc.c @@ -1081,7 +1081,7 @@ QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 12)); is in the returned register, maybe %o0. The TLB addend is in %o1. */ static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addr, int mem_index, - TCGMemOp opc, int which) + MemOp opc, int which) { int fast_off = TLB_MASK_TABLE_OFS(mem_index); int mask_off = fast_off + offsetof(CPUTLBDescFast, mask); @@ -1164,7 +1164,7 @@ static const int qemu_st_opc[16] = { static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr, TCGMemOpIdx oi, bool is_64) { - TCGMemOp memop = get_memop(oi); + MemOp memop = get_memop(oi); #ifdef CONFIG_SOFTMMU unsigned memi = get_mmuidx(oi); TCGReg addrz, param; @@ -1246,7 +1246,7 @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr, static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr, TCGMemOpIdx oi) { - TCGMemOp memop = get_memop(oi); + MemOp memop = get_memop(oi); #ifdef CONFIG_SOFTMMU unsigned memi = get_mmuidx(oi); TCGReg addrz, param; diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c index 587d092238..e87c327fbf 100644 --- a/tcg/tcg-op.c +++ b/tcg/tcg-op.c @@ -2714,7 +2714,7 @@ void tcg_gen_lookup_and_goto_ptr(void) } } -static inline TCGMemOp tcg_canonicalize_memop(TCGMemOp op, bool is64, bool st) +static inline MemOp tcg_canonicalize_memop(MemOp op, bool is64, bool st) { /* Trigger the asserts within as early as possible. 
*/ (void)get_alignment_bits(op); @@ -2743,7 +2743,7 @@ static inline TCGMemOp tcg_canonicalize_memop(TCGMemOp op, bool is64, bool st) } static void gen_ldst_i32(TCGOpcode opc, TCGv_i32 val, TCGv addr, - TCGMemOp memop, TCGArg idx) + MemOp memop, TCGArg idx) { TCGMemOpIdx oi = make_memop_idx(memop, idx); #if TARGET_LONG_BITS == 32 @@ -2758,7 +2758,7 @@ static void gen_ldst_i32(TCGOpcode opc, TCGv_i32 val, TCGv addr, } static void gen_ldst_i64(TCGOpcode opc, TCGv_i64 val, TCGv addr, - TCGMemOp memop, TCGArg idx) + MemOp memop, TCGArg idx) { TCGMemOpIdx oi = make_memop_idx(memop, idx); #if TARGET_LONG_BITS == 32 @@ -2788,9 +2788,9 @@ static void tcg_gen_req_mo(TCGBar type) } } -void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop) +void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop) { - TCGMemOp orig_memop; + MemOp orig_memop; tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); memop = tcg_canonicalize_memop(memop, 0, 0); @@ -2825,7 +2825,7 @@ void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop) } } -void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop) +void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop) { TCGv_i32 swap = NULL; @@ -2858,9 +2858,9 @@ void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop) } } -void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop) +void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop) { - TCGMemOp orig_memop; + MemOp orig_memop; if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) { tcg_gen_qemu_ld_i32(TCGV_LOW(val), addr, idx, memop); @@ -2911,7 +2911,7 @@ void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop) } } -void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop) +void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop) { TCGv_i64 swap = NULL; @@ -2953,7 +2953,7 @@ void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop) } } -static void tcg_gen_ext_i32(TCGv_i32 ret, TCGv_i32 val, TCGMemOp opc) +static void tcg_gen_ext_i32(TCGv_i32 ret, TCGv_i32 val, MemOp opc) { switch (opc & MO_SSIZE) { case MO_SB: @@ -2974,7 +2974,7 @@ static void tcg_gen_ext_i32(TCGv_i32 ret, TCGv_i32 val, TCGMemOp opc) } } -static void tcg_gen_ext_i64(TCGv_i64 ret, TCGv_i64 val, TCGMemOp opc) +static void tcg_gen_ext_i64(TCGv_i64 ret, TCGv_i64 val, MemOp opc) { switch (opc & MO_SSIZE) { case MO_SB: @@ -3034,7 +3034,7 @@ static void * const table_cmpxchg[16] = { }; void tcg_gen_atomic_cmpxchg_i32(TCGv_i32 retv, TCGv addr, TCGv_i32 cmpv, - TCGv_i32 newv, TCGArg idx, TCGMemOp memop) + TCGv_i32 newv, TCGArg idx, MemOp memop) { memop = tcg_canonicalize_memop(memop, 0, 0); @@ -3078,7 +3078,7 @@ void tcg_gen_atomic_cmpxchg_i32(TCGv_i32 retv, TCGv addr, TCGv_i32 cmpv, } void tcg_gen_atomic_cmpxchg_i64(TCGv_i64 retv, TCGv addr, TCGv_i64 cmpv, - TCGv_i64 newv, TCGArg idx, TCGMemOp memop) + TCGv_i64 newv, TCGArg idx, MemOp memop) { memop = tcg_canonicalize_memop(memop, 1, 0); @@ -3142,7 +3142,7 @@ void tcg_gen_atomic_cmpxchg_i64(TCGv_i64 retv, TCGv addr, TCGv_i64 cmpv, } static void do_nonatomic_op_i32(TCGv_i32 ret, TCGv addr, TCGv_i32 val, - TCGArg idx, TCGMemOp memop, bool new_val, + TCGArg idx, MemOp memop, bool new_val, void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32)) { TCGv_i32 t1 = tcg_temp_new_i32(); @@ -3160,7 +3160,7 @@ static void do_nonatomic_op_i32(TCGv_i32 ret, TCGv addr, TCGv_i32 val, } static void 
do_atomic_op_i32(TCGv_i32 ret, TCGv addr, TCGv_i32 val, - TCGArg idx, TCGMemOp memop, void * const table[]) + TCGArg idx, MemOp memop, void * const table[]) { gen_atomic_op_i32 gen; @@ -3185,7 +3185,7 @@ static void do_atomic_op_i32(TCGv_i32 ret, TCGv addr, TCGv_i32 val, } static void do_nonatomic_op_i64(TCGv_i64 ret, TCGv addr, TCGv_i64 val, - TCGArg idx, TCGMemOp memop, bool new_val, + TCGArg idx, MemOp memop, bool new_val, void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64)) { TCGv_i64 t1 = tcg_temp_new_i64(); @@ -3203,7 +3203,7 @@ static void do_nonatomic_op_i64(TCGv_i64 ret, TCGv addr, TCGv_i64 val, } static void do_atomic_op_i64(TCGv_i64 ret, TCGv addr, TCGv_i64 val, - TCGArg idx, TCGMemOp memop, void * const table[]) + TCGArg idx, MemOp memop, void * const table[]) { memop = tcg_canonicalize_memop(memop, 1, 0); @@ -3257,7 +3257,7 @@ static void * const table_##NAME[16] = { \ WITH_ATOMIC64([MO_64 | MO_BE] = gen_helper_atomic_##NAME##q_be) \ }; \ void tcg_gen_atomic_##NAME##_i32 \ - (TCGv_i32 ret, TCGv addr, TCGv_i32 val, TCGArg idx, TCGMemOp memop) \ + (TCGv_i32 ret, TCGv addr, TCGv_i32 val, TCGArg idx, MemOp memop) \ { \ if (tcg_ctx->tb_cflags & CF_PARALLEL) { \ do_atomic_op_i32(ret, addr, val, idx, memop, table_##NAME); \ @@ -3267,7 +3267,7 @@ void tcg_gen_atomic_##NAME##_i32 \ } \ } \ void tcg_gen_atomic_##NAME##_i64 \ - (TCGv_i64 ret, TCGv addr, TCGv_i64 val, TCGArg idx, TCGMemOp memop) \ + (TCGv_i64 ret, TCGv addr, TCGv_i64 val, TCGArg idx, MemOp memop) \ { \ if (tcg_ctx->tb_cflags & CF_PARALLEL) { \ do_atomic_op_i64(ret, addr, val, idx, memop, table_##NAME); \ diff --git a/tcg/tcg-op.h b/tcg/tcg-op.h index 2d4dd5cd7d..e9cf172762 100644 --- a/tcg/tcg-op.h +++ b/tcg/tcg-op.h @@ -851,10 +851,10 @@ void tcg_gen_lookup_and_goto_ptr(void); #define tcg_gen_qemu_st_tl tcg_gen_qemu_st_i64 #endif -void tcg_gen_qemu_ld_i32(TCGv_i32, TCGv, TCGArg, TCGMemOp); -void tcg_gen_qemu_st_i32(TCGv_i32, TCGv, TCGArg, TCGMemOp); -void tcg_gen_qemu_ld_i64(TCGv_i64, TCGv, TCGArg, TCGMemOp); -void tcg_gen_qemu_st_i64(TCGv_i64, TCGv, TCGArg, TCGMemOp); +void tcg_gen_qemu_ld_i32(TCGv_i32, TCGv, TCGArg, MemOp); +void tcg_gen_qemu_st_i32(TCGv_i32, TCGv, TCGArg, MemOp); +void tcg_gen_qemu_ld_i64(TCGv_i64, TCGv, TCGArg, MemOp); +void tcg_gen_qemu_st_i64(TCGv_i64, TCGv, TCGArg, MemOp); static inline void tcg_gen_qemu_ld8u(TCGv ret, TCGv addr, int mem_index) { @@ -912,46 +912,46 @@ static inline void tcg_gen_qemu_st64(TCGv_i64 arg, TCGv addr, int mem_index) } void tcg_gen_atomic_cmpxchg_i32(TCGv_i32, TCGv, TCGv_i32, TCGv_i32, - TCGArg, TCGMemOp); + TCGArg, MemOp); void tcg_gen_atomic_cmpxchg_i64(TCGv_i64, TCGv, TCGv_i64, TCGv_i64, - TCGArg, TCGMemOp); - -void tcg_gen_atomic_xchg_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp); -void tcg_gen_atomic_xchg_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp); - -void tcg_gen_atomic_fetch_add_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp); -void tcg_gen_atomic_fetch_add_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp); -void tcg_gen_atomic_fetch_and_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp); -void tcg_gen_atomic_fetch_and_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp); -void tcg_gen_atomic_fetch_or_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp); -void tcg_gen_atomic_fetch_or_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp); -void tcg_gen_atomic_fetch_xor_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp); -void tcg_gen_atomic_fetch_xor_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp); -void tcg_gen_atomic_fetch_smin_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp); -void 
tcg_gen_atomic_fetch_smin_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp); -void tcg_gen_atomic_fetch_umin_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp); -void tcg_gen_atomic_fetch_umin_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp); -void tcg_gen_atomic_fetch_smax_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp); -void tcg_gen_atomic_fetch_smax_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp); -void tcg_gen_atomic_fetch_umax_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp); -void tcg_gen_atomic_fetch_umax_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp); - -void tcg_gen_atomic_add_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp); -void tcg_gen_atomic_add_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp); -void tcg_gen_atomic_and_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp); -void tcg_gen_atomic_and_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp); -void tcg_gen_atomic_or_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp); -void tcg_gen_atomic_or_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp); -void tcg_gen_atomic_xor_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp); -void tcg_gen_atomic_xor_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp); -void tcg_gen_atomic_smin_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp); -void tcg_gen_atomic_smin_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp); -void tcg_gen_atomic_umin_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp); -void tcg_gen_atomic_umin_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp); -void tcg_gen_atomic_smax_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp); -void tcg_gen_atomic_smax_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp); -void tcg_gen_atomic_umax_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp); -void tcg_gen_atomic_umax_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp); + TCGArg, MemOp); + +void tcg_gen_atomic_xchg_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); +void tcg_gen_atomic_xchg_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); + +void tcg_gen_atomic_fetch_add_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); +void tcg_gen_atomic_fetch_add_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); +void tcg_gen_atomic_fetch_and_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); +void tcg_gen_atomic_fetch_and_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); +void tcg_gen_atomic_fetch_or_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); +void tcg_gen_atomic_fetch_or_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); +void tcg_gen_atomic_fetch_xor_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); +void tcg_gen_atomic_fetch_xor_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); +void tcg_gen_atomic_fetch_smin_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); +void tcg_gen_atomic_fetch_smin_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); +void tcg_gen_atomic_fetch_umin_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); +void tcg_gen_atomic_fetch_umin_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); +void tcg_gen_atomic_fetch_smax_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); +void tcg_gen_atomic_fetch_smax_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); +void tcg_gen_atomic_fetch_umax_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); +void tcg_gen_atomic_fetch_umax_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); + +void tcg_gen_atomic_add_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); +void tcg_gen_atomic_add_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); +void tcg_gen_atomic_and_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); +void tcg_gen_atomic_and_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, 
MemOp); +void tcg_gen_atomic_or_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); +void tcg_gen_atomic_or_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); +void tcg_gen_atomic_xor_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); +void tcg_gen_atomic_xor_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); +void tcg_gen_atomic_smin_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); +void tcg_gen_atomic_smin_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); +void tcg_gen_atomic_umin_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); +void tcg_gen_atomic_umin_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); +void tcg_gen_atomic_smax_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); +void tcg_gen_atomic_smax_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); +void tcg_gen_atomic_umax_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); +void tcg_gen_atomic_umax_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); void tcg_gen_mov_vec(TCGv_vec, TCGv_vec); void tcg_gen_dup_i32_vec(unsigned vece, TCGv_vec, TCGv_i32); @@ -2055,7 +2055,7 @@ static void tcg_dump_ops(TCGContext *s, bool have_prefs) case INDEX_op_qemu_st_i64: { TCGMemOpIdx oi = op->args[k++]; - TCGMemOp op = get_memop(oi); + MemOp op = get_memop(oi); unsigned ix = get_mmuidx(oi); if (op & ~(MO_AMASK | MO_BSWAP | MO_SSIZE)) { @@ -26,6 +26,7 @@ #define TCG_H #include "cpu.h" +#include "exec/memop.h" #include "exec/tb-context.h" #include "qemu/bitops.h" #include "qemu/queue.h" @@ -309,103 +310,13 @@ typedef enum TCGType { #endif } TCGType; -/* Constants for qemu_ld and qemu_st for the Memory Operation field. */ -typedef enum TCGMemOp { - MO_8 = 0, - MO_16 = 1, - MO_32 = 2, - MO_64 = 3, - MO_SIZE = 3, /* Mask for the above. */ - - MO_SIGN = 4, /* Sign-extended, otherwise zero-extended. */ - - MO_BSWAP = 8, /* Host reverse endian. */ -#ifdef HOST_WORDS_BIGENDIAN - MO_LE = MO_BSWAP, - MO_BE = 0, -#else - MO_LE = 0, - MO_BE = MO_BSWAP, -#endif -#ifdef TARGET_WORDS_BIGENDIAN - MO_TE = MO_BE, -#else - MO_TE = MO_LE, -#endif - - /* - * MO_UNALN accesses are never checked for alignment. - * MO_ALIGN accesses will result in a call to the CPU's - * do_unaligned_access hook if the guest address is not aligned. - * The default depends on whether the target CPU defines - * TARGET_ALIGNED_ONLY. - * - * Some architectures (e.g. ARMv8) need the address which is aligned - * to a size more than the size of the memory access. - * Some architectures (e.g. SPARCv9) need an address which is aligned, - * but less strictly than the natural alignment. - * - * MO_ALIGN supposes the alignment size is the size of a memory access. - * - * There are three options: - * - unaligned access permitted (MO_UNALN). - * - an alignment to the size of an access (MO_ALIGN); - * - an alignment to a specified size, which may be more or less than - * the access size (MO_ALIGN_x where 'x' is a size in bytes); - */ - MO_ASHIFT = 4, - MO_AMASK = 7 << MO_ASHIFT, -#ifdef TARGET_ALIGNED_ONLY - MO_ALIGN = 0, - MO_UNALN = MO_AMASK, -#else - MO_ALIGN = MO_AMASK, - MO_UNALN = 0, -#endif - MO_ALIGN_2 = 1 << MO_ASHIFT, - MO_ALIGN_4 = 2 << MO_ASHIFT, - MO_ALIGN_8 = 3 << MO_ASHIFT, - MO_ALIGN_16 = 4 << MO_ASHIFT, - MO_ALIGN_32 = 5 << MO_ASHIFT, - MO_ALIGN_64 = 6 << MO_ASHIFT, - - /* Combinations of the above, for ease of use. 
*/ - MO_UB = MO_8, - MO_UW = MO_16, - MO_UL = MO_32, - MO_SB = MO_SIGN | MO_8, - MO_SW = MO_SIGN | MO_16, - MO_SL = MO_SIGN | MO_32, - MO_Q = MO_64, - - MO_LEUW = MO_LE | MO_UW, - MO_LEUL = MO_LE | MO_UL, - MO_LESW = MO_LE | MO_SW, - MO_LESL = MO_LE | MO_SL, - MO_LEQ = MO_LE | MO_Q, - - MO_BEUW = MO_BE | MO_UW, - MO_BEUL = MO_BE | MO_UL, - MO_BESW = MO_BE | MO_SW, - MO_BESL = MO_BE | MO_SL, - MO_BEQ = MO_BE | MO_Q, - - MO_TEUW = MO_TE | MO_UW, - MO_TEUL = MO_TE | MO_UL, - MO_TESW = MO_TE | MO_SW, - MO_TESL = MO_TE | MO_SL, - MO_TEQ = MO_TE | MO_Q, - - MO_SSIZE = MO_SIZE | MO_SIGN, -} TCGMemOp; - /** * get_alignment_bits - * @memop: TCGMemOp value + * @memop: MemOp value * * Extract the alignment size from the memop. */ -static inline unsigned get_alignment_bits(TCGMemOp memop) +static inline unsigned get_alignment_bits(MemOp memop) { unsigned a = memop & MO_AMASK; @@ -1186,7 +1097,7 @@ static inline size_t tcg_current_code_size(TCGContext *s) return tcg_ptr_byte_diff(s->code_ptr, s->code_buf); } -/* Combine the TCGMemOp and mmu_idx parameters into a single value. */ +/* Combine the MemOp and mmu_idx parameters into a single value. */ typedef uint32_t TCGMemOpIdx; /** @@ -1196,7 +1107,7 @@ typedef uint32_t TCGMemOpIdx; * * Encode these values into a single parameter. */ -static inline TCGMemOpIdx make_memop_idx(TCGMemOp op, unsigned idx) +static inline TCGMemOpIdx make_memop_idx(MemOp op, unsigned idx) { tcg_debug_assert(idx <= 15); return (op << 4) | idx; @@ -1208,7 +1119,7 @@ static inline TCGMemOpIdx make_memop_idx(TCGMemOp op, unsigned idx) * * Extract the memory operation from the combined value. */ -static inline TCGMemOp get_memop(TCGMemOpIdx oi) +static inline MemOp get_memop(TCGMemOpIdx oi) { return oi >> 4; } |
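
The tcg/tcg.h hunks above keep TCGMemOpIdx as the packed (MemOp, mmu_idx) pair and only retype its MemOp half. A small self-contained sketch of the round trip, assuming get_mmuidx() extracts the low four bits as the idx <= 15 assertion implies:

#include <assert.h>
#include <stdint.h>

typedef uint32_t TCGMemOpIdx;   /* combined (MemOp, mmu_idx) value */
typedef unsigned MemOp;         /* stand-in for the real enum */

/* As in tcg/tcg.h after this patch: MemOp in the high bits, mmu_idx in the low 4. */
static inline TCGMemOpIdx make_memop_idx(MemOp op, unsigned idx)
{
    assert(idx <= 15);
    return (op << 4) | idx;
}

static inline MemOp get_memop(TCGMemOpIdx oi)
{
    return oi >> 4;
}

/* Assumed counterpart to the get_mmuidx() calls seen in the backends above. */
static inline unsigned get_mmuidx(TCGMemOpIdx oi)
{
    return oi & 15;
}

int main(void)
{
    MemOp op = 3 | 8;                    /* MO_64 | MO_BSWAP */
    TCGMemOpIdx oi = make_memop_idx(op, 5);

    assert(get_memop(oi) == op);         /* MemOp survives the round trip */
    assert(get_mmuidx(oi) == 5);         /* and so does the mmu index */
    return 0;
}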