aboutsummaryrefslogtreecommitdiff
path: root/tcg
diff options
context:
space:
mode:
Diffstat (limited to 'tcg')
-rw-r--r--tcg/aarch64/tcg-target.h2
-rw-r--r--tcg/arm/tcg-target.h2
-rw-r--r--tcg/ia64/tcg-target.h198
-rw-r--r--tcg/ia64/tcg-target.inc.c2482
-rw-r--r--tcg/mips/tcg-target.h2
-rw-r--r--tcg/ppc/tcg-target.h2
-rw-r--r--tcg/s390/tcg-target.h4
-rw-r--r--tcg/s390/tcg-target.inc.c700
-rw-r--r--tcg/sparc/tcg-target.h2
-rw-r--r--tcg/tcg-op.c16
10 files changed, 408 insertions, 3002 deletions
diff --git a/tcg/aarch64/tcg-target.h b/tcg/aarch64/tcg-target.h
index 55a46ac825..b41a248bee 100644
--- a/tcg/aarch64/tcg-target.h
+++ b/tcg/aarch64/tcg-target.h
@@ -117,4 +117,6 @@ static inline void flush_icache_range(uintptr_t start, uintptr_t stop)
__builtin___clear_cache((char *)start, (char *)stop);
}
+#define TCG_TARGET_DEFAULT_MO (0)
+
#endif /* AARCH64_TCG_TARGET_H */
diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h
index 5ef1086710..a38be15a39 100644
--- a/tcg/arm/tcg-target.h
+++ b/tcg/arm/tcg-target.h
@@ -134,4 +134,6 @@ static inline void flush_icache_range(uintptr_t start, uintptr_t stop)
__builtin___clear_cache((char *) start, (char *) stop);
}
+#define TCG_TARGET_DEFAULT_MO (0)
+
#endif
diff --git a/tcg/ia64/tcg-target.h b/tcg/ia64/tcg-target.h
deleted file mode 100644
index 901bb7575d..0000000000
--- a/tcg/ia64/tcg-target.h
+++ /dev/null
@@ -1,198 +0,0 @@
-/*
- * Tiny Code Generator for QEMU
- *
- * Copyright (c) 2009-2010 Aurelien Jarno <aurelien@aurel32.net>
- * Based on i386/tcg-target.c - Copyright (c) 2008 Fabrice Bellard
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-
-#ifndef IA64_TCG_TARGET_H
-#define IA64_TCG_TARGET_H
-
-#define TCG_TARGET_INSN_UNIT_SIZE 16
-#define TCG_TARGET_TLB_DISPLACEMENT_BITS 21
-
-typedef struct {
- uint64_t lo __attribute__((aligned(16)));
- uint64_t hi;
-} tcg_insn_unit;
-
-/* We only map the first 64 registers */
-#define TCG_TARGET_NB_REGS 64
-typedef enum {
- TCG_REG_R0 = 0,
- TCG_REG_R1,
- TCG_REG_R2,
- TCG_REG_R3,
- TCG_REG_R4,
- TCG_REG_R5,
- TCG_REG_R6,
- TCG_REG_R7,
- TCG_REG_R8,
- TCG_REG_R9,
- TCG_REG_R10,
- TCG_REG_R11,
- TCG_REG_R12,
- TCG_REG_R13,
- TCG_REG_R14,
- TCG_REG_R15,
- TCG_REG_R16,
- TCG_REG_R17,
- TCG_REG_R18,
- TCG_REG_R19,
- TCG_REG_R20,
- TCG_REG_R21,
- TCG_REG_R22,
- TCG_REG_R23,
- TCG_REG_R24,
- TCG_REG_R25,
- TCG_REG_R26,
- TCG_REG_R27,
- TCG_REG_R28,
- TCG_REG_R29,
- TCG_REG_R30,
- TCG_REG_R31,
- TCG_REG_R32,
- TCG_REG_R33,
- TCG_REG_R34,
- TCG_REG_R35,
- TCG_REG_R36,
- TCG_REG_R37,
- TCG_REG_R38,
- TCG_REG_R39,
- TCG_REG_R40,
- TCG_REG_R41,
- TCG_REG_R42,
- TCG_REG_R43,
- TCG_REG_R44,
- TCG_REG_R45,
- TCG_REG_R46,
- TCG_REG_R47,
- TCG_REG_R48,
- TCG_REG_R49,
- TCG_REG_R50,
- TCG_REG_R51,
- TCG_REG_R52,
- TCG_REG_R53,
- TCG_REG_R54,
- TCG_REG_R55,
- TCG_REG_R56,
- TCG_REG_R57,
- TCG_REG_R58,
- TCG_REG_R59,
- TCG_REG_R60,
- TCG_REG_R61,
- TCG_REG_R62,
- TCG_REG_R63,
-
- TCG_AREG0 = TCG_REG_R32,
-} TCGReg;
-
-#define TCG_CT_CONST_ZERO 0x100
-#define TCG_CT_CONST_S22 0x200
-
-/* used for function call generation */
-#define TCG_REG_CALL_STACK TCG_REG_R12
-#define TCG_TARGET_STACK_ALIGN 16
-#define TCG_TARGET_CALL_STACK_OFFSET 16
-
-/* optional instructions */
-#define TCG_TARGET_HAS_div_i32 0
-#define TCG_TARGET_HAS_rem_i32 0
-#define TCG_TARGET_HAS_div_i64 0
-#define TCG_TARGET_HAS_rem_i64 0
-#define TCG_TARGET_HAS_andc_i32 1
-#define TCG_TARGET_HAS_andc_i64 1
-#define TCG_TARGET_HAS_bswap16_i32 1
-#define TCG_TARGET_HAS_bswap16_i64 1
-#define TCG_TARGET_HAS_bswap32_i32 1
-#define TCG_TARGET_HAS_bswap32_i64 1
-#define TCG_TARGET_HAS_bswap64_i64 1
-#define TCG_TARGET_HAS_eqv_i32 1
-#define TCG_TARGET_HAS_eqv_i64 1
-#define TCG_TARGET_HAS_ext8s_i32 1
-#define TCG_TARGET_HAS_ext16s_i32 1
-#define TCG_TARGET_HAS_ext8s_i64 1
-#define TCG_TARGET_HAS_ext16s_i64 1
-#define TCG_TARGET_HAS_ext32s_i64 1
-#define TCG_TARGET_HAS_ext8u_i32 1
-#define TCG_TARGET_HAS_ext16u_i32 1
-#define TCG_TARGET_HAS_ext8u_i64 1
-#define TCG_TARGET_HAS_ext16u_i64 1
-#define TCG_TARGET_HAS_ext32u_i64 1
-#define TCG_TARGET_HAS_nand_i32 1
-#define TCG_TARGET_HAS_nand_i64 1
-#define TCG_TARGET_HAS_nor_i32 1
-#define TCG_TARGET_HAS_clz_i32 0
-#define TCG_TARGET_HAS_clz_i64 0
-#define TCG_TARGET_HAS_ctz_i32 0
-#define TCG_TARGET_HAS_ctz_i64 0
-#define TCG_TARGET_HAS_ctpop_i32 0
-#define TCG_TARGET_HAS_ctpop_i64 0
-#define TCG_TARGET_HAS_nor_i64 1
-#define TCG_TARGET_HAS_orc_i32 1
-#define TCG_TARGET_HAS_orc_i64 1
-#define TCG_TARGET_HAS_rot_i32 1
-#define TCG_TARGET_HAS_rot_i64 1
-#define TCG_TARGET_HAS_movcond_i32 1
-#define TCG_TARGET_HAS_movcond_i64 1
-#define TCG_TARGET_HAS_deposit_i32 1
-#define TCG_TARGET_HAS_deposit_i64 1
-#define TCG_TARGET_HAS_extract_i32 0
-#define TCG_TARGET_HAS_extract_i64 0
-#define TCG_TARGET_HAS_sextract_i32 0
-#define TCG_TARGET_HAS_sextract_i64 0
-#define TCG_TARGET_HAS_add2_i32 0
-#define TCG_TARGET_HAS_add2_i64 0
-#define TCG_TARGET_HAS_sub2_i32 0
-#define TCG_TARGET_HAS_sub2_i64 0
-#define TCG_TARGET_HAS_mulu2_i32 0
-#define TCG_TARGET_HAS_mulu2_i64 0
-#define TCG_TARGET_HAS_muls2_i32 0
-#define TCG_TARGET_HAS_muls2_i64 0
-#define TCG_TARGET_HAS_muluh_i32 0
-#define TCG_TARGET_HAS_muluh_i64 0
-#define TCG_TARGET_HAS_mulsh_i32 0
-#define TCG_TARGET_HAS_mulsh_i64 0
-#define TCG_TARGET_HAS_extrl_i64_i32 0
-#define TCG_TARGET_HAS_extrh_i64_i32 0
-#define TCG_TARGET_HAS_goto_ptr 0
-
-#define TCG_TARGET_deposit_i32_valid(ofs, len) ((len) <= 16)
-#define TCG_TARGET_deposit_i64_valid(ofs, len) ((len) <= 16)
-
-/* optional instructions automatically implemented */
-#define TCG_TARGET_HAS_neg_i32 0 /* sub r1, r0, r3 */
-#define TCG_TARGET_HAS_neg_i64 0 /* sub r1, r0, r3 */
-#define TCG_TARGET_HAS_not_i32 0 /* xor r1, -1, r3 */
-#define TCG_TARGET_HAS_not_i64 0 /* xor r1, -1, r3 */
-
-static inline void flush_icache_range(uintptr_t start, uintptr_t stop)
-{
- start = start & ~(32UL - 1UL);
- stop = (stop + (32UL - 1UL)) & ~(32UL - 1UL);
-
- for (; start < stop; start += 32UL) {
- asm volatile ("fc.i %0" :: "r" (start));
- }
- asm volatile (";;sync.i;;srlz.i;;");
-}
-
-#endif
diff --git a/tcg/ia64/tcg-target.inc.c b/tcg/ia64/tcg-target.inc.c
deleted file mode 100644
index bf9a97d75c..0000000000
--- a/tcg/ia64/tcg-target.inc.c
+++ /dev/null
@@ -1,2482 +0,0 @@
-/*
- * Tiny Code Generator for QEMU
- *
- * Copyright (c) 2009-2010 Aurelien Jarno <aurelien@aurel32.net>
- * Based on i386/tcg-target.c - Copyright (c) 2008 Fabrice Bellard
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-
-/*
- * Register definitions
- */
-
-#ifdef CONFIG_DEBUG_TCG
-static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
- "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
- "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
- "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
- "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
- "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
- "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
- "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
- "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
-};
-#endif
-
-#ifndef CONFIG_SOFTMMU
-#define TCG_GUEST_BASE_REG TCG_REG_R55
-#endif
-
-/* Branch registers */
-enum {
- TCG_REG_B0 = 0,
- TCG_REG_B1,
- TCG_REG_B2,
- TCG_REG_B3,
- TCG_REG_B4,
- TCG_REG_B5,
- TCG_REG_B6,
- TCG_REG_B7,
-};
-
-/* Floating point registers */
-enum {
- TCG_REG_F0 = 0,
- TCG_REG_F1,
- TCG_REG_F2,
- TCG_REG_F3,
- TCG_REG_F4,
- TCG_REG_F5,
- TCG_REG_F6,
- TCG_REG_F7,
- TCG_REG_F8,
- TCG_REG_F9,
- TCG_REG_F10,
- TCG_REG_F11,
- TCG_REG_F12,
- TCG_REG_F13,
- TCG_REG_F14,
- TCG_REG_F15,
-};
-
-/* Predicate registers */
-enum {
- TCG_REG_P0 = 0,
- TCG_REG_P1,
- TCG_REG_P2,
- TCG_REG_P3,
- TCG_REG_P4,
- TCG_REG_P5,
- TCG_REG_P6,
- TCG_REG_P7,
- TCG_REG_P8,
- TCG_REG_P9,
- TCG_REG_P10,
- TCG_REG_P11,
- TCG_REG_P12,
- TCG_REG_P13,
- TCG_REG_P14,
- TCG_REG_P15,
-};
-
-/* Application registers */
-enum {
- TCG_REG_PFS = 64,
-};
-
-static const int tcg_target_reg_alloc_order[] = {
- TCG_REG_R35,
- TCG_REG_R36,
- TCG_REG_R37,
- TCG_REG_R38,
- TCG_REG_R39,
- TCG_REG_R40,
- TCG_REG_R41,
- TCG_REG_R42,
- TCG_REG_R43,
- TCG_REG_R44,
- TCG_REG_R45,
- TCG_REG_R46,
- TCG_REG_R47,
- TCG_REG_R48,
- TCG_REG_R49,
- TCG_REG_R50,
- TCG_REG_R51,
- TCG_REG_R52,
- TCG_REG_R53,
- TCG_REG_R54,
- TCG_REG_R55,
- TCG_REG_R14,
- TCG_REG_R15,
- TCG_REG_R16,
- TCG_REG_R17,
- TCG_REG_R18,
- TCG_REG_R19,
- TCG_REG_R20,
- TCG_REG_R21,
- TCG_REG_R22,
- TCG_REG_R23,
- TCG_REG_R24,
- TCG_REG_R25,
- TCG_REG_R26,
- TCG_REG_R27,
- TCG_REG_R28,
- TCG_REG_R29,
- TCG_REG_R30,
- TCG_REG_R31,
- TCG_REG_R56,
- TCG_REG_R57,
- TCG_REG_R58,
- TCG_REG_R59,
- TCG_REG_R60,
- TCG_REG_R61,
- TCG_REG_R62,
- TCG_REG_R63,
- TCG_REG_R8,
- TCG_REG_R9,
- TCG_REG_R10,
- TCG_REG_R11
-};
-
-static const int tcg_target_call_iarg_regs[8] = {
- TCG_REG_R56,
- TCG_REG_R57,
- TCG_REG_R58,
- TCG_REG_R59,
- TCG_REG_R60,
- TCG_REG_R61,
- TCG_REG_R62,
- TCG_REG_R63,
-};
-
-static const int tcg_target_call_oarg_regs[] = {
- TCG_REG_R8
-};
-
-/*
- * opcode formation
- */
-
-/* bundle templates: stops (double bar in the IA64 manual) are marked with
- an uppercase letter. */
-enum {
- mii = 0x00,
- miI = 0x01,
- mIi = 0x02,
- mII = 0x03,
- mlx = 0x04,
- mLX = 0x05,
- mmi = 0x08,
- mmI = 0x09,
- Mmi = 0x0a,
- MmI = 0x0b,
- mfi = 0x0c,
- mfI = 0x0d,
- mmf = 0x0e,
- mmF = 0x0f,
- mib = 0x10,
- miB = 0x11,
- mbb = 0x12,
- mbB = 0x13,
- bbb = 0x16,
- bbB = 0x17,
- mmb = 0x18,
- mmB = 0x19,
- mfb = 0x1c,
- mfB = 0x1d,
-};
-
-enum {
- OPC_ADD_A1 = 0x10000000000ull,
- OPC_AND_A1 = 0x10060000000ull,
- OPC_AND_A3 = 0x10160000000ull,
- OPC_ANDCM_A1 = 0x10068000000ull,
- OPC_ANDCM_A3 = 0x10168000000ull,
- OPC_ADDS_A4 = 0x10800000000ull,
- OPC_ADDL_A5 = 0x12000000000ull,
- OPC_ALLOC_M34 = 0x02c00000000ull,
- OPC_BR_DPTK_FEW_B1 = 0x08400000000ull,
- OPC_BR_SPTK_MANY_B1 = 0x08000001000ull,
- OPC_BR_CALL_SPNT_FEW_B3 = 0x0a200000000ull,
- OPC_BR_SPTK_MANY_B4 = 0x00100001000ull,
- OPC_BR_CALL_SPTK_MANY_B5 = 0x02100001000ull,
- OPC_BR_RET_SPTK_MANY_B4 = 0x00108001100ull,
- OPC_BRL_SPTK_MANY_X3 = 0x18000001000ull,
- OPC_BRL_CALL_SPNT_MANY_X4 = 0x1a200001000ull,
- OPC_BRL_CALL_SPTK_MANY_X4 = 0x1a000001000ull,
- OPC_CMP_LT_A6 = 0x18000000000ull,
- OPC_CMP_LTU_A6 = 0x1a000000000ull,
- OPC_CMP_EQ_A6 = 0x1c000000000ull,
- OPC_CMP4_LT_A6 = 0x18400000000ull,
- OPC_CMP4_LTU_A6 = 0x1a400000000ull,
- OPC_CMP4_EQ_A6 = 0x1c400000000ull,
- OPC_DEP_I14 = 0x0ae00000000ull,
- OPC_DEP_I15 = 0x08000000000ull,
- OPC_DEP_Z_I12 = 0x0a600000000ull,
- OPC_EXTR_I11 = 0x0a400002000ull,
- OPC_EXTR_U_I11 = 0x0a400000000ull,
- OPC_FCVT_FX_TRUNC_S1_F10 = 0x004d0000000ull,
- OPC_FCVT_FXU_TRUNC_S1_F10 = 0x004d8000000ull,
- OPC_FCVT_XF_F11 = 0x000e0000000ull,
- OPC_FMA_S1_F1 = 0x10400000000ull,
- OPC_FNMA_S1_F1 = 0x18400000000ull,
- OPC_FRCPA_S1_F6 = 0x00600000000ull,
- OPC_GETF_SIG_M19 = 0x08708000000ull,
- OPC_LD1_M1 = 0x08000000000ull,
- OPC_LD1_M3 = 0x0a000000000ull,
- OPC_LD2_M1 = 0x08040000000ull,
- OPC_LD2_M3 = 0x0a040000000ull,
- OPC_LD4_M1 = 0x08080000000ull,
- OPC_LD4_M3 = 0x0a080000000ull,
- OPC_LD8_M1 = 0x080c0000000ull,
- OPC_LD8_M3 = 0x0a0c0000000ull,
- OPC_MF_M24 = 0x00110000000ull,
- OPC_MUX1_I3 = 0x0eca0000000ull,
- OPC_NOP_B9 = 0x04008000000ull,
- OPC_NOP_F16 = 0x00008000000ull,
- OPC_NOP_I18 = 0x00008000000ull,
- OPC_NOP_M48 = 0x00008000000ull,
- OPC_MOV_I21 = 0x00e00100000ull,
- OPC_MOV_RET_I21 = 0x00e00500000ull,
- OPC_MOV_I22 = 0x00188000000ull,
- OPC_MOV_I_I26 = 0x00150000000ull,
- OPC_MOVL_X2 = 0x0c000000000ull,
- OPC_OR_A1 = 0x10070000000ull,
- OPC_OR_A3 = 0x10170000000ull,
- OPC_SETF_EXP_M18 = 0x0c748000000ull,
- OPC_SETF_SIG_M18 = 0x0c708000000ull,
- OPC_SHL_I7 = 0x0f240000000ull,
- OPC_SHR_I5 = 0x0f220000000ull,
- OPC_SHR_U_I5 = 0x0f200000000ull,
- OPC_SHRP_I10 = 0x0ac00000000ull,
- OPC_SXT1_I29 = 0x000a0000000ull,
- OPC_SXT2_I29 = 0x000a8000000ull,
- OPC_SXT4_I29 = 0x000b0000000ull,
- OPC_ST1_M4 = 0x08c00000000ull,
- OPC_ST2_M4 = 0x08c40000000ull,
- OPC_ST4_M4 = 0x08c80000000ull,
- OPC_ST8_M4 = 0x08cc0000000ull,
- OPC_SUB_A1 = 0x10028000000ull,
- OPC_SUB_A3 = 0x10128000000ull,
- OPC_UNPACK4_L_I2 = 0x0f860000000ull,
- OPC_XMA_L_F2 = 0x1d000000000ull,
- OPC_XOR_A1 = 0x10078000000ull,
- OPC_XOR_A3 = 0x10178000000ull,
- OPC_ZXT1_I29 = 0x00080000000ull,
- OPC_ZXT2_I29 = 0x00088000000ull,
- OPC_ZXT4_I29 = 0x00090000000ull,
-
- INSN_NOP_M = OPC_NOP_M48, /* nop.m 0 */
- INSN_NOP_I = OPC_NOP_I18, /* nop.i 0 */
-};
-
-static inline uint64_t tcg_opc_a1(int qp, uint64_t opc, int r1,
- int r2, int r3)
-{
- return opc
- | ((r3 & 0x7f) << 20)
- | ((r2 & 0x7f) << 13)
- | ((r1 & 0x7f) << 6)
- | (qp & 0x3f);
-}
-
-static inline uint64_t tcg_opc_a3(int qp, uint64_t opc, int r1,
- uint64_t imm, int r3)
-{
- return opc
- | ((imm & 0x80) << 29) /* s */
- | ((imm & 0x7f) << 13) /* imm7b */
- | ((r3 & 0x7f) << 20)
- | ((r1 & 0x7f) << 6)
- | (qp & 0x3f);
-}
-
-static inline uint64_t tcg_opc_a4(int qp, uint64_t opc, int r1,
- uint64_t imm, int r3)
-{
- return opc
- | ((imm & 0x2000) << 23) /* s */
- | ((imm & 0x1f80) << 20) /* imm6d */
- | ((imm & 0x007f) << 13) /* imm7b */
- | ((r3 & 0x7f) << 20)
- | ((r1 & 0x7f) << 6)
- | (qp & 0x3f);
-}
-
-static inline uint64_t tcg_opc_a5(int qp, uint64_t opc, int r1,
- uint64_t imm, int r3)
-{
- return opc
- | ((imm & 0x200000) << 15) /* s */
- | ((imm & 0x1f0000) << 6) /* imm5c */
- | ((imm & 0x00ff80) << 20) /* imm9d */
- | ((imm & 0x00007f) << 13) /* imm7b */
- | ((r3 & 0x03) << 20)
- | ((r1 & 0x7f) << 6)
- | (qp & 0x3f);
-}
-
-static inline uint64_t tcg_opc_a6(int qp, uint64_t opc, int p1,
- int p2, int r2, int r3)
-{
- return opc
- | ((p2 & 0x3f) << 27)
- | ((r3 & 0x7f) << 20)
- | ((r2 & 0x7f) << 13)
- | ((p1 & 0x3f) << 6)
- | (qp & 0x3f);
-}
-
-static inline uint64_t tcg_opc_b1(int qp, uint64_t opc, uint64_t imm)
-{
- return opc
- | ((imm & 0x100000) << 16) /* s */
- | ((imm & 0x0fffff) << 13) /* imm20b */
- | (qp & 0x3f);
-}
-
-static inline uint64_t tcg_opc_b3(int qp, uint64_t opc, int b1, uint64_t imm)
-{
- return opc
- | ((imm & 0x100000) << 16) /* s */
- | ((imm & 0x0fffff) << 13) /* imm20b */
- | ((b1 & 0x7) << 6)
- | (qp & 0x3f);
-}
-
-static inline uint64_t tcg_opc_b4(int qp, uint64_t opc, int b2)
-{
- return opc
- | ((b2 & 0x7) << 13)
- | (qp & 0x3f);
-}
-
-static inline uint64_t tcg_opc_b5(int qp, uint64_t opc, int b1, int b2)
-{
- return opc
- | ((b2 & 0x7) << 13)
- | ((b1 & 0x7) << 6)
- | (qp & 0x3f);
-}
-
-
-static inline uint64_t tcg_opc_b9(int qp, uint64_t opc, uint64_t imm)
-{
- return opc
- | ((imm & 0x100000) << 16) /* i */
- | ((imm & 0x0fffff) << 6) /* imm20a */
- | (qp & 0x3f);
-}
-
-static inline uint64_t tcg_opc_f1(int qp, uint64_t opc, int f1,
- int f3, int f4, int f2)
-{
- return opc
- | ((f4 & 0x7f) << 27)
- | ((f3 & 0x7f) << 20)
- | ((f2 & 0x7f) << 13)
- | ((f1 & 0x7f) << 6)
- | (qp & 0x3f);
-}
-
-static inline uint64_t tcg_opc_f2(int qp, uint64_t opc, int f1,
- int f3, int f4, int f2)
-{
- return opc
- | ((f4 & 0x7f) << 27)
- | ((f3 & 0x7f) << 20)
- | ((f2 & 0x7f) << 13)
- | ((f1 & 0x7f) << 6)
- | (qp & 0x3f);
-}
-
-static inline uint64_t tcg_opc_f6(int qp, uint64_t opc, int f1,
- int p2, int f2, int f3)
-{
- return opc
- | ((p2 & 0x3f) << 27)
- | ((f3 & 0x7f) << 20)
- | ((f2 & 0x7f) << 13)
- | ((f1 & 0x7f) << 6)
- | (qp & 0x3f);
-}
-
-static inline uint64_t tcg_opc_f10(int qp, uint64_t opc, int f1, int f2)
-{
- return opc
- | ((f2 & 0x7f) << 13)
- | ((f1 & 0x7f) << 6)
- | (qp & 0x3f);
-}
-
-static inline uint64_t tcg_opc_f11(int qp, uint64_t opc, int f1, int f2)
-{
- return opc
- | ((f2 & 0x7f) << 13)
- | ((f1 & 0x7f) << 6)
- | (qp & 0x3f);
-}
-
-static inline uint64_t tcg_opc_f16(int qp, uint64_t opc, uint64_t imm)
-{
- return opc
- | ((imm & 0x100000) << 16) /* i */
- | ((imm & 0x0fffff) << 6) /* imm20a */
- | (qp & 0x3f);
-}
-
-static inline uint64_t tcg_opc_i2(int qp, uint64_t opc, int r1,
- int r2, int r3)
-{
- return opc
- | ((r3 & 0x7f) << 20)
- | ((r2 & 0x7f) << 13)
- | ((r1 & 0x7f) << 6)
- | (qp & 0x3f);
-}
-
-static inline uint64_t tcg_opc_i3(int qp, uint64_t opc, int r1,
- int r2, int mbtype)
-{
- return opc
- | ((mbtype & 0x0f) << 20)
- | ((r2 & 0x7f) << 13)
- | ((r1 & 0x7f) << 6)
- | (qp & 0x3f);
-}
-
-static inline uint64_t tcg_opc_i5(int qp, uint64_t opc, int r1,
- int r3, int r2)
-{
- return opc
- | ((r3 & 0x7f) << 20)
- | ((r2 & 0x7f) << 13)
- | ((r1 & 0x7f) << 6)
- | (qp & 0x3f);
-}
-
-static inline uint64_t tcg_opc_i7(int qp, uint64_t opc, int r1,
- int r2, int r3)
-{
- return opc
- | ((r3 & 0x7f) << 20)
- | ((r2 & 0x7f) << 13)
- | ((r1 & 0x7f) << 6)
- | (qp & 0x3f);
-}
-
-static inline uint64_t tcg_opc_i10(int qp, uint64_t opc, int r1,
- int r2, int r3, uint64_t count)
-{
- return opc
- | ((count & 0x3f) << 27)
- | ((r3 & 0x7f) << 20)
- | ((r2 & 0x7f) << 13)
- | ((r1 & 0x7f) << 6)
- | (qp & 0x3f);
-}
-
-static inline uint64_t tcg_opc_i11(int qp, uint64_t opc, int r1,
- int r3, uint64_t pos, uint64_t len)
-{
- return opc
- | ((len & 0x3f) << 27)
- | ((r3 & 0x7f) << 20)
- | ((pos & 0x3f) << 14)
- | ((r1 & 0x7f) << 6)
- | (qp & 0x3f);
-}
-
-static inline uint64_t tcg_opc_i12(int qp, uint64_t opc, int r1,
- int r2, uint64_t pos, uint64_t len)
-{
- return opc
- | ((len & 0x3f) << 27)
- | ((pos & 0x3f) << 20)
- | ((r2 & 0x7f) << 13)
- | ((r1 & 0x7f) << 6)
- | (qp & 0x3f);
-}
-
-static inline uint64_t tcg_opc_i14(int qp, uint64_t opc, int r1, uint64_t imm,
- int r3, uint64_t pos, uint64_t len)
-{
- return opc
- | ((imm & 0x01) << 36)
- | ((len & 0x3f) << 27)
- | ((r3 & 0x7f) << 20)
- | ((pos & 0x3f) << 14)
- | ((r1 & 0x7f) << 6)
- | (qp & 0x3f);
-}
-
-static inline uint64_t tcg_opc_i15(int qp, uint64_t opc, int r1, int r2,
- int r3, uint64_t pos, uint64_t len)
-{
- return opc
- | ((pos & 0x3f) << 31)
- | ((len & 0x0f) << 27)
- | ((r3 & 0x7f) << 20)
- | ((r2 & 0x7f) << 13)
- | ((r1 & 0x7f) << 6)
- | (qp & 0x3f);
-}
-
-static inline uint64_t tcg_opc_i18(int qp, uint64_t opc, uint64_t imm)
-{
- return opc
- | ((imm & 0x100000) << 16) /* i */
- | ((imm & 0x0fffff) << 6) /* imm20a */
- | (qp & 0x3f);
-}
-
-static inline uint64_t tcg_opc_i21(int qp, uint64_t opc, int b1,
- int r2, uint64_t imm)
-{
- return opc
- | ((imm & 0x1ff) << 24)
- | ((r2 & 0x7f) << 13)
- | ((b1 & 0x7) << 6)
- | (qp & 0x3f);
-}
-
-static inline uint64_t tcg_opc_i22(int qp, uint64_t opc, int r1, int b2)
-{
- return opc
- | ((b2 & 0x7) << 13)
- | ((r1 & 0x7f) << 6)
- | (qp & 0x3f);
-}
-
-static inline uint64_t tcg_opc_i26(int qp, uint64_t opc, int ar3, int r2)
-{
- return opc
- | ((ar3 & 0x7f) << 20)
- | ((r2 & 0x7f) << 13)
- | (qp & 0x3f);
-}
-
-static inline uint64_t tcg_opc_i29(int qp, uint64_t opc, int r1, int r3)
-{
- return opc
- | ((r3 & 0x7f) << 20)
- | ((r1 & 0x7f) << 6)
- | (qp & 0x3f);
-}
-
-static inline uint64_t tcg_opc_l2(uint64_t imm)
-{
- return (imm & 0x7fffffffffc00000ull) >> 22;
-}
-
-static inline uint64_t tcg_opc_l3(uint64_t imm)
-{
- return (imm & 0x07fffffffff00000ull) >> 18;
-}
-
-#define tcg_opc_l4 tcg_opc_l3
-
-static inline uint64_t tcg_opc_m1(int qp, uint64_t opc, int r1, int r3)
-{
- return opc
- | ((r3 & 0x7f) << 20)
- | ((r1 & 0x7f) << 6)
- | (qp & 0x3f);
-}
-
-static inline uint64_t tcg_opc_m3(int qp, uint64_t opc, int r1,
- int r3, uint64_t imm)
-{
- return opc
- | ((imm & 0x100) << 28) /* s */
- | ((imm & 0x080) << 20) /* i */
- | ((imm & 0x07f) << 13) /* imm7b */
- | ((r3 & 0x7f) << 20)
- | ((r1 & 0x7f) << 6)
- | (qp & 0x3f);
-}
-
-static inline uint64_t tcg_opc_m4(int qp, uint64_t opc, int r2, int r3)
-{
- return opc
- | ((r3 & 0x7f) << 20)
- | ((r2 & 0x7f) << 13)
- | (qp & 0x3f);
-}
-
-static inline uint64_t tcg_opc_m18(int qp, uint64_t opc, int f1, int r2)
-{
- return opc
- | ((r2 & 0x7f) << 13)
- | ((f1 & 0x7f) << 6)
- | (qp & 0x3f);
-}
-
-static inline uint64_t tcg_opc_m19(int qp, uint64_t opc, int r1, int f2)
-{
- return opc
- | ((f2 & 0x7f) << 13)
- | ((r1 & 0x7f) << 6)
- | (qp & 0x3f);
-}
-
-static inline uint64_t tcg_opc_m34(int qp, uint64_t opc, int r1,
- int sof, int sol, int sor)
-{
- return opc
- | ((sor & 0x0f) << 27)
- | ((sol & 0x7f) << 20)
- | ((sof & 0x7f) << 13)
- | ((r1 & 0x7f) << 6)
- | (qp & 0x3f);
-}
-
-static inline uint64_t tcg_opc_m48(int qp, uint64_t opc, uint64_t imm)
-{
- return opc
- | ((imm & 0x100000) << 16) /* i */
- | ((imm & 0x0fffff) << 6) /* imm20a */
- | (qp & 0x3f);
-}
-
-static inline uint64_t tcg_opc_x2(int qp, uint64_t opc,
- int r1, uint64_t imm)
-{
- return opc
- | ((imm & 0x8000000000000000ull) >> 27) /* i */
- | (imm & 0x0000000000200000ull) /* ic */
- | ((imm & 0x00000000001f0000ull) << 6) /* imm5c */
- | ((imm & 0x000000000000ff80ull) << 20) /* imm9d */
- | ((imm & 0x000000000000007full) << 13) /* imm7b */
- | ((r1 & 0x7f) << 6)
- | (qp & 0x3f);
-}
-
-static inline uint64_t tcg_opc_x3(int qp, uint64_t opc, uint64_t imm)
-{
- return opc
- | ((imm & 0x0800000000000000ull) >> 23) /* i */
- | ((imm & 0x00000000000fffffull) << 13) /* imm20b */
- | (qp & 0x3f);
-}
-
-static inline uint64_t tcg_opc_x4(int qp, uint64_t opc, int b1, uint64_t imm)
-{
- return opc
- | ((imm & 0x0800000000000000ull) >> 23) /* i */
- | ((imm & 0x00000000000fffffull) << 13) /* imm20b */
- | ((b1 & 0x7) << 6)
- | (qp & 0x3f);
-}
-
-
-/*
- * Relocations - Note that we never encode branches elsewhere than slot 2.
- */
-
-static void reloc_pcrel21b_slot2(tcg_insn_unit *pc, tcg_insn_unit *target)
-{
- uint64_t imm = target - pc;
-
- pc->hi = (pc->hi & 0xf700000fffffffffull)
- | ((imm & 0x100000) << 39) /* s */
- | ((imm & 0x0fffff) << 36); /* imm20b */
-}
-
-static uint64_t get_reloc_pcrel21b_slot2(tcg_insn_unit *pc)
-{
- int64_t high = pc->hi;
-
- return ((high >> 39) & 0x100000) + /* s */
- ((high >> 36) & 0x0fffff); /* imm20b */
-}
-
-static void patch_reloc(tcg_insn_unit *code_ptr, int type,
- intptr_t value, intptr_t addend)
-{
- tcg_debug_assert(addend == 0);
- tcg_debug_assert(type == R_IA64_PCREL21B);
- reloc_pcrel21b_slot2(code_ptr, (tcg_insn_unit *)value);
-}
-
-/*
- * Constraints
- */
-
-/* parse target specific constraints */
-static const char *target_parse_constraint(TCGArgConstraint *ct,
- const char *ct_str, TCGType type)
-{
- switch(*ct_str++) {
- case 'r':
- ct->ct |= TCG_CT_REG;
- tcg_regset_set(ct->u.regs, 0xffffffffffffffffull);
- break;
- case 'I':
- ct->ct |= TCG_CT_CONST_S22;
- break;
- case 'S':
- ct->ct |= TCG_CT_REG;
- tcg_regset_set(ct->u.regs, 0xffffffffffffffffull);
-#if defined(CONFIG_SOFTMMU)
- tcg_regset_reset_reg(ct->u.regs, TCG_REG_R56);
- tcg_regset_reset_reg(ct->u.regs, TCG_REG_R57);
- tcg_regset_reset_reg(ct->u.regs, TCG_REG_R58);
-#endif
- break;
- case 'Z':
- /* We are cheating a bit here, using the fact that the register
- r0 is also the register number 0. Hence there is no need
- to check for const_args in each instruction. */
- ct->ct |= TCG_CT_CONST_ZERO;
- break;
- default:
- return NULL;
- }
- return ct_str;
-}
-
-/* test if a constant matches the constraint */
-static inline int tcg_target_const_match(tcg_target_long val, TCGType type,
- const TCGArgConstraint *arg_ct)
-{
- int ct;
- ct = arg_ct->ct;
- if (ct & TCG_CT_CONST)
- return 1;
- else if ((ct & TCG_CT_CONST_ZERO) && val == 0)
- return 1;
- else if ((ct & TCG_CT_CONST_S22) && val == ((int32_t)val << 10) >> 10)
- return 1;
- else
- return 0;
-}
-
-/*
- * Code generation
- */
-
-static tcg_insn_unit *tb_ret_addr;
-
-static inline void tcg_out_bundle(TCGContext *s, int template,
- uint64_t slot0, uint64_t slot1,
- uint64_t slot2)
-{
- template &= 0x1f; /* 5 bits */
- slot0 &= 0x1ffffffffffull; /* 41 bits */
- slot1 &= 0x1ffffffffffull; /* 41 bits */
- slot2 &= 0x1ffffffffffull; /* 41 bits */
-
- *s->code_ptr++ = (tcg_insn_unit){
- (slot1 << 46) | (slot0 << 5) | template,
- (slot2 << 23) | (slot1 >> 18)
- };
-}
-
-static inline uint64_t tcg_opc_mov_a(int qp, TCGReg dst, TCGReg src)
-{
- return tcg_opc_a4(qp, OPC_ADDS_A4, dst, 0, src);
-}
-
-static inline void tcg_out_mov(TCGContext *s, TCGType type,
- TCGReg ret, TCGReg arg)
-{
- tcg_out_bundle(s, mmI,
- INSN_NOP_M,
- INSN_NOP_M,
- tcg_opc_mov_a(TCG_REG_P0, ret, arg));
-}
-
-static inline uint64_t tcg_opc_movi_a(int qp, TCGReg dst, int64_t src)
-{
- tcg_debug_assert(src == sextract64(src, 0, 22));
- return tcg_opc_a5(qp, OPC_ADDL_A5, dst, src, TCG_REG_R0);
-}
-
-static inline void tcg_out_movi(TCGContext *s, TCGType type,
- TCGReg reg, tcg_target_long arg)
-{
- tcg_out_bundle(s, mLX,
- INSN_NOP_M,
- tcg_opc_l2 (arg),
- tcg_opc_x2 (TCG_REG_P0, OPC_MOVL_X2, reg, arg));
-}
-
-static void tcg_out_br(TCGContext *s, TCGLabel *l)
-{
- uint64_t imm;
-
- /* We pay attention here to not modify the branch target by reading
- the existing value and using it again. This ensure that caches and
- memory are kept coherent during retranslation. */
- if (l->has_value) {
- imm = l->u.value_ptr - s->code_ptr;
- } else {
- imm = get_reloc_pcrel21b_slot2(s->code_ptr);
- tcg_out_reloc(s, s->code_ptr, R_IA64_PCREL21B, l, 0);
- }
-
- tcg_out_bundle(s, mmB,
- INSN_NOP_M,
- INSN_NOP_M,
- tcg_opc_b1(TCG_REG_P0, OPC_BR_SPTK_MANY_B1, imm));
-}
-
-static inline void tcg_out_call(TCGContext *s, tcg_insn_unit *desc)
-{
- uintptr_t func = desc->lo, gp = desc->hi, disp;
-
- /* Look through the function descriptor. */
- tcg_out_bundle(s, mlx,
- INSN_NOP_M,
- tcg_opc_l2 (gp),
- tcg_opc_x2 (TCG_REG_P0, OPC_MOVL_X2, TCG_REG_R1, gp));
- disp = (tcg_insn_unit *)func - s->code_ptr;
- tcg_out_bundle(s, mLX,
- INSN_NOP_M,
- tcg_opc_l4 (disp),
- tcg_opc_x4 (TCG_REG_P0, OPC_BRL_CALL_SPTK_MANY_X4,
- TCG_REG_B0, disp));
-}
-
-static void tcg_out_exit_tb(TCGContext *s, tcg_target_long arg)
-{
- uint64_t imm, opc1;
-
- /* At least arg == 0 is a common operation. */
- if (arg == sextract64(arg, 0, 22)) {
- opc1 = tcg_opc_movi_a(TCG_REG_P0, TCG_REG_R8, arg);
- } else {
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R8, arg);
- opc1 = INSN_NOP_M;
- }
-
- imm = tb_ret_addr - s->code_ptr;
-
- tcg_out_bundle(s, mLX,
- opc1,
- tcg_opc_l3 (imm),
- tcg_opc_x3 (TCG_REG_P0, OPC_BRL_SPTK_MANY_X3, imm));
-}
-
-static inline void tcg_out_goto_tb(TCGContext *s, TCGArg arg)
-{
- if (s->tb_jmp_insn_offset) {
- /* direct jump method */
- tcg_abort();
- } else {
- /* indirect jump method */
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2,
- (tcg_target_long)(s->tb_jmp_target_addr + arg));
- tcg_out_bundle(s, MmI,
- tcg_opc_m1 (TCG_REG_P0, OPC_LD8_M1,
- TCG_REG_R2, TCG_REG_R2),
- INSN_NOP_M,
- tcg_opc_i21(TCG_REG_P0, OPC_MOV_I21, TCG_REG_B6,
- TCG_REG_R2, 0));
- tcg_out_bundle(s, mmB,
- INSN_NOP_M,
- INSN_NOP_M,
- tcg_opc_b4 (TCG_REG_P0, OPC_BR_SPTK_MANY_B4,
- TCG_REG_B6));
- }
- s->tb_jmp_reset_offset[arg] = tcg_current_code_size(s);
-}
-
-static inline void tcg_out_jmp(TCGContext *s, TCGArg addr)
-{
- tcg_out_bundle(s, mmI,
- INSN_NOP_M,
- INSN_NOP_M,
- tcg_opc_i21(TCG_REG_P0, OPC_MOV_I21, TCG_REG_B6, addr, 0));
- tcg_out_bundle(s, mmB,
- INSN_NOP_M,
- INSN_NOP_M,
- tcg_opc_b4(TCG_REG_P0, OPC_BR_SPTK_MANY_B4, TCG_REG_B6));
-}
-
-static inline void tcg_out_ld_rel(TCGContext *s, uint64_t opc_m4, TCGArg arg,
- TCGArg arg1, tcg_target_long arg2)
-{
- if (arg2 == ((int16_t)arg2 >> 2) << 2) {
- tcg_out_bundle(s, MmI,
- tcg_opc_a4(TCG_REG_P0, OPC_ADDS_A4,
- TCG_REG_R2, arg2, arg1),
- tcg_opc_m1 (TCG_REG_P0, opc_m4, arg, TCG_REG_R2),
- INSN_NOP_I);
- } else {
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, arg2);
- tcg_out_bundle(s, MmI,
- tcg_opc_a1 (TCG_REG_P0, OPC_ADD_A1,
- TCG_REG_R2, TCG_REG_R2, arg1),
- tcg_opc_m1 (TCG_REG_P0, opc_m4, arg, TCG_REG_R2),
- INSN_NOP_I);
- }
-}
-
-static inline void tcg_out_st_rel(TCGContext *s, uint64_t opc_m4, TCGArg arg,
- TCGArg arg1, tcg_target_long arg2)
-{
- if (arg2 == ((int16_t)arg2 >> 2) << 2) {
- tcg_out_bundle(s, MmI,
- tcg_opc_a4(TCG_REG_P0, OPC_ADDS_A4,
- TCG_REG_R2, arg2, arg1),
- tcg_opc_m4 (TCG_REG_P0, opc_m4, arg, TCG_REG_R2),
- INSN_NOP_I);
- } else {
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, arg2);
- tcg_out_bundle(s, MmI,
- tcg_opc_a1 (TCG_REG_P0, OPC_ADD_A1,
- TCG_REG_R2, TCG_REG_R2, arg1),
- tcg_opc_m4 (TCG_REG_P0, opc_m4, arg, TCG_REG_R2),
- INSN_NOP_I);
- }
-}
-
-static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
- TCGReg arg1, intptr_t arg2)
-{
- if (type == TCG_TYPE_I32) {
- tcg_out_ld_rel(s, OPC_LD4_M1, arg, arg1, arg2);
- } else {
- tcg_out_ld_rel(s, OPC_LD8_M1, arg, arg1, arg2);
- }
-}
-
-static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
- TCGReg arg1, intptr_t arg2)
-{
- if (type == TCG_TYPE_I32) {
- tcg_out_st_rel(s, OPC_ST4_M4, arg, arg1, arg2);
- } else {
- tcg_out_st_rel(s, OPC_ST8_M4, arg, arg1, arg2);
- }
-}
-
-static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
- TCGReg base, intptr_t ofs)
-{
- if (val == 0) {
- tcg_out_st(s, type, TCG_REG_R0, base, ofs);
- return true;
- }
- return false;
-}
-
-static inline void tcg_out_alu(TCGContext *s, uint64_t opc_a1, uint64_t opc_a3,
- TCGReg ret, TCGArg arg1, int const_arg1,
- TCGArg arg2, int const_arg2)
-{
- uint64_t opc1 = 0, opc2 = 0, opc3 = 0;
-
- if (const_arg2 && arg2 != 0) {
- opc2 = tcg_opc_movi_a(TCG_REG_P0, TCG_REG_R3, arg2);
- arg2 = TCG_REG_R3;
- }
- if (const_arg1 && arg1 != 0) {
- if (opc_a3 && arg1 == (int8_t)arg1) {
- opc3 = tcg_opc_a3(TCG_REG_P0, opc_a3, ret, arg1, arg2);
- } else {
- opc1 = tcg_opc_movi_a(TCG_REG_P0, TCG_REG_R2, arg1);
- arg1 = TCG_REG_R2;
- }
- }
- if (opc3 == 0) {
- opc3 = tcg_opc_a1(TCG_REG_P0, opc_a1, ret, arg1, arg2);
- }
-
- tcg_out_bundle(s, (opc1 || opc2 ? mII : miI),
- opc1 ? opc1 : INSN_NOP_M,
- opc2 ? opc2 : INSN_NOP_I,
- opc3);
-}
-
-static inline void tcg_out_add(TCGContext *s, TCGReg ret, TCGReg arg1,
- TCGArg arg2, int const_arg2)
-{
- if (const_arg2 && arg2 == sextract64(arg2, 0, 14)) {
- tcg_out_bundle(s, mmI,
- INSN_NOP_M,
- INSN_NOP_M,
- tcg_opc_a4(TCG_REG_P0, OPC_ADDS_A4, ret, arg2, arg1));
- } else {
- tcg_out_alu(s, OPC_ADD_A1, 0, ret, arg1, 0, arg2, const_arg2);
- }
-}
-
-static inline void tcg_out_sub(TCGContext *s, TCGReg ret, TCGArg arg1,
- int const_arg1, TCGArg arg2, int const_arg2)
-{
- if (!const_arg1 && const_arg2 && -arg2 == sextract64(-arg2, 0, 14)) {
- tcg_out_bundle(s, mmI,
- INSN_NOP_M,
- INSN_NOP_M,
- tcg_opc_a4(TCG_REG_P0, OPC_ADDS_A4, ret, -arg2, arg1));
- } else {
- tcg_out_alu(s, OPC_SUB_A1, OPC_SUB_A3, ret,
- arg1, const_arg1, arg2, const_arg2);
- }
-}
-
-static inline void tcg_out_eqv(TCGContext *s, TCGArg ret,
- TCGArg arg1, int const_arg1,
- TCGArg arg2, int const_arg2)
-{
- tcg_out_bundle(s, mII,
- INSN_NOP_M,
- tcg_opc_a1 (TCG_REG_P0, OPC_XOR_A1, ret, arg1, arg2),
- tcg_opc_a3 (TCG_REG_P0, OPC_ANDCM_A3, ret, -1, ret));
-}
-
-static inline void tcg_out_nand(TCGContext *s, TCGArg ret,
- TCGArg arg1, int const_arg1,
- TCGArg arg2, int const_arg2)
-{
- tcg_out_bundle(s, mII,
- INSN_NOP_M,
- tcg_opc_a1 (TCG_REG_P0, OPC_AND_A1, ret, arg1, arg2),
- tcg_opc_a3 (TCG_REG_P0, OPC_ANDCM_A3, ret, -1, ret));
-}
-
-static inline void tcg_out_nor(TCGContext *s, TCGArg ret,
- TCGArg arg1, int const_arg1,
- TCGArg arg2, int const_arg2)
-{
- tcg_out_bundle(s, mII,
- INSN_NOP_M,
- tcg_opc_a1 (TCG_REG_P0, OPC_OR_A1, ret, arg1, arg2),
- tcg_opc_a3 (TCG_REG_P0, OPC_ANDCM_A3, ret, -1, ret));
-}
-
-static inline void tcg_out_orc(TCGContext *s, TCGArg ret,
- TCGArg arg1, int const_arg1,
- TCGArg arg2, int const_arg2)
-{
- tcg_out_bundle(s, mII,
- INSN_NOP_M,
- tcg_opc_a3 (TCG_REG_P0, OPC_ANDCM_A3, TCG_REG_R2, -1, arg2),
- tcg_opc_a1 (TCG_REG_P0, OPC_OR_A1, ret, arg1, TCG_REG_R2));
-}
-
-static inline void tcg_out_mul(TCGContext *s, TCGArg ret,
- TCGArg arg1, TCGArg arg2)
-{
- tcg_out_bundle(s, mmI,
- tcg_opc_m18(TCG_REG_P0, OPC_SETF_SIG_M18, TCG_REG_F6, arg1),
- tcg_opc_m18(TCG_REG_P0, OPC_SETF_SIG_M18, TCG_REG_F7, arg2),
- INSN_NOP_I);
- tcg_out_bundle(s, mmF,
- INSN_NOP_M,
- INSN_NOP_M,
- tcg_opc_f2 (TCG_REG_P0, OPC_XMA_L_F2, TCG_REG_F6, TCG_REG_F6,
- TCG_REG_F7, TCG_REG_F0));
- tcg_out_bundle(s, miI,
- tcg_opc_m19(TCG_REG_P0, OPC_GETF_SIG_M19, ret, TCG_REG_F6),
- INSN_NOP_I,
- INSN_NOP_I);
-}
-
-static inline void tcg_out_sar_i32(TCGContext *s, TCGArg ret, TCGArg arg1,
- TCGArg arg2, int const_arg2)
-{
- if (const_arg2) {
- tcg_out_bundle(s, miI,
- INSN_NOP_M,
- INSN_NOP_I,
- tcg_opc_i11(TCG_REG_P0, OPC_EXTR_I11,
- ret, arg1, arg2, 31 - arg2));
- } else {
- tcg_out_bundle(s, mII,
- tcg_opc_a3 (TCG_REG_P0, OPC_AND_A3,
- TCG_REG_R3, 0x1f, arg2),
- tcg_opc_i29(TCG_REG_P0, OPC_SXT4_I29, TCG_REG_R2, arg1),
- tcg_opc_i5 (TCG_REG_P0, OPC_SHR_I5, ret,
- TCG_REG_R2, TCG_REG_R3));
- }
-}
-
-static inline void tcg_out_sar_i64(TCGContext *s, TCGArg ret, TCGArg arg1,
- TCGArg arg2, int const_arg2)
-{
- if (const_arg2) {
- tcg_out_bundle(s, miI,
- INSN_NOP_M,
- INSN_NOP_I,
- tcg_opc_i11(TCG_REG_P0, OPC_EXTR_I11,
- ret, arg1, arg2, 63 - arg2));
- } else {
- tcg_out_bundle(s, miI,
- INSN_NOP_M,
- INSN_NOP_I,
- tcg_opc_i5 (TCG_REG_P0, OPC_SHR_I5, ret, arg1, arg2));
- }
-}
-
-static inline void tcg_out_shl_i32(TCGContext *s, TCGArg ret, TCGArg arg1,
- TCGArg arg2, int const_arg2)
-{
- if (const_arg2) {
- tcg_out_bundle(s, miI,
- INSN_NOP_M,
- INSN_NOP_I,
- tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12, ret,
- arg1, 63 - arg2, 31 - arg2));
- } else {
- tcg_out_bundle(s, mII,
- INSN_NOP_M,
- tcg_opc_a3 (TCG_REG_P0, OPC_AND_A3, TCG_REG_R2,
- 0x1f, arg2),
- tcg_opc_i7 (TCG_REG_P0, OPC_SHL_I7, ret,
- arg1, TCG_REG_R2));
- }
-}
-
-static inline void tcg_out_shl_i64(TCGContext *s, TCGArg ret, TCGArg arg1,
- TCGArg arg2, int const_arg2)
-{
- if (const_arg2) {
- tcg_out_bundle(s, miI,
- INSN_NOP_M,
- INSN_NOP_I,
- tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12, ret,
- arg1, 63 - arg2, 63 - arg2));
- } else {
- tcg_out_bundle(s, miI,
- INSN_NOP_M,
- INSN_NOP_I,
- tcg_opc_i7 (TCG_REG_P0, OPC_SHL_I7, ret,
- arg1, arg2));
- }
-}
-
-static inline void tcg_out_shr_i32(TCGContext *s, TCGArg ret, TCGArg arg1,
- TCGArg arg2, int const_arg2)
-{
- if (const_arg2) {
- tcg_out_bundle(s, miI,
- INSN_NOP_M,
- INSN_NOP_I,
- tcg_opc_i11(TCG_REG_P0, OPC_EXTR_U_I11, ret,
- arg1, arg2, 31 - arg2));
- } else {
- tcg_out_bundle(s, mII,
- tcg_opc_a3 (TCG_REG_P0, OPC_AND_A3, TCG_REG_R3,
- 0x1f, arg2),
- tcg_opc_i29(TCG_REG_P0, OPC_ZXT4_I29, TCG_REG_R2, arg1),
- tcg_opc_i5 (TCG_REG_P0, OPC_SHR_U_I5, ret,
- TCG_REG_R2, TCG_REG_R3));
- }
-}
-
-static inline void tcg_out_shr_i64(TCGContext *s, TCGArg ret, TCGArg arg1,
- TCGArg arg2, int const_arg2)
-{
- if (const_arg2) {
- tcg_out_bundle(s, miI,
- INSN_NOP_M,
- INSN_NOP_I,
- tcg_opc_i11(TCG_REG_P0, OPC_EXTR_U_I11, ret,
- arg1, arg2, 63 - arg2));
- } else {
- tcg_out_bundle(s, miI,
- INSN_NOP_M,
- INSN_NOP_I,
- tcg_opc_i5 (TCG_REG_P0, OPC_SHR_U_I5, ret,
- arg1, arg2));
- }
-}
-
-static inline void tcg_out_rotl_i32(TCGContext *s, TCGArg ret, TCGArg arg1,
- TCGArg arg2, int const_arg2)
-{
- if (const_arg2) {
- tcg_out_bundle(s, mII,
- INSN_NOP_M,
- tcg_opc_i2 (TCG_REG_P0, OPC_UNPACK4_L_I2,
- TCG_REG_R2, arg1, arg1),
- tcg_opc_i11(TCG_REG_P0, OPC_EXTR_U_I11, ret,
- TCG_REG_R2, 32 - arg2, 31));
- } else {
- tcg_out_bundle(s, miI,
- INSN_NOP_M,
- tcg_opc_i2 (TCG_REG_P0, OPC_UNPACK4_L_I2,
- TCG_REG_R2, arg1, arg1),
- tcg_opc_a3 (TCG_REG_P0, OPC_AND_A3, TCG_REG_R3,
- 0x1f, arg2));
- tcg_out_bundle(s, mII,
- INSN_NOP_M,
- tcg_opc_a3 (TCG_REG_P0, OPC_SUB_A3, TCG_REG_R3,
- 0x20, TCG_REG_R3),
- tcg_opc_i5 (TCG_REG_P0, OPC_SHR_U_I5, ret,
- TCG_REG_R2, TCG_REG_R3));
- }
-}
-
-static inline void tcg_out_rotl_i64(TCGContext *s, TCGArg ret, TCGArg arg1,
- TCGArg arg2, int const_arg2)
-{
- if (const_arg2) {
- tcg_out_bundle(s, miI,
- INSN_NOP_M,
- INSN_NOP_I,
- tcg_opc_i10(TCG_REG_P0, OPC_SHRP_I10, ret, arg1,
- arg1, 0x40 - arg2));
- } else {
- tcg_out_bundle(s, mII,
- tcg_opc_a3 (TCG_REG_P0, OPC_SUB_A3, TCG_REG_R2,
- 0x40, arg2),
- tcg_opc_i7 (TCG_REG_P0, OPC_SHL_I7, TCG_REG_R3,
- arg1, arg2),
- tcg_opc_i5 (TCG_REG_P0, OPC_SHR_U_I5, TCG_REG_R2,
- arg1, TCG_REG_R2));
- tcg_out_bundle(s, miI,
- INSN_NOP_M,
- INSN_NOP_I,
- tcg_opc_a1 (TCG_REG_P0, OPC_OR_A1, ret,
- TCG_REG_R2, TCG_REG_R3));
- }
-}
-
-static inline void tcg_out_rotr_i32(TCGContext *s, TCGArg ret, TCGArg arg1,
- TCGArg arg2, int const_arg2)
-{
- if (const_arg2) {
- tcg_out_bundle(s, mII,
- INSN_NOP_M,
- tcg_opc_i2 (TCG_REG_P0, OPC_UNPACK4_L_I2,
- TCG_REG_R2, arg1, arg1),
- tcg_opc_i11(TCG_REG_P0, OPC_EXTR_U_I11, ret,
- TCG_REG_R2, arg2, 31));
- } else {
- tcg_out_bundle(s, mII,
- tcg_opc_a3 (TCG_REG_P0, OPC_AND_A3, TCG_REG_R3,
- 0x1f, arg2),
- tcg_opc_i2 (TCG_REG_P0, OPC_UNPACK4_L_I2,
- TCG_REG_R2, arg1, arg1),
- tcg_opc_i5 (TCG_REG_P0, OPC_SHR_U_I5, ret,
- TCG_REG_R2, TCG_REG_R3));
- }
-}
-
-static inline void tcg_out_rotr_i64(TCGContext *s, TCGArg ret, TCGArg arg1,
- TCGArg arg2, int const_arg2)
-{
- if (const_arg2) {
- tcg_out_bundle(s, miI,
- INSN_NOP_M,
- INSN_NOP_I,
- tcg_opc_i10(TCG_REG_P0, OPC_SHRP_I10, ret, arg1,
- arg1, arg2));
- } else {
- tcg_out_bundle(s, mII,
- tcg_opc_a3 (TCG_REG_P0, OPC_SUB_A3, TCG_REG_R2,
- 0x40, arg2),
- tcg_opc_i5 (TCG_REG_P0, OPC_SHR_U_I5, TCG_REG_R3,
- arg1, arg2),
- tcg_opc_i7 (TCG_REG_P0, OPC_SHL_I7, TCG_REG_R2,
- arg1, TCG_REG_R2));
- tcg_out_bundle(s, miI,
- INSN_NOP_M,
- INSN_NOP_I,
- tcg_opc_a1 (TCG_REG_P0, OPC_OR_A1, ret,
- TCG_REG_R2, TCG_REG_R3));
- }
-}
-
-static const uint64_t opc_ext_i29[8] = {
- OPC_ZXT1_I29, OPC_ZXT2_I29, OPC_ZXT4_I29, 0,
- OPC_SXT1_I29, OPC_SXT2_I29, OPC_SXT4_I29, 0
-};
-
-static inline uint64_t tcg_opc_ext_i(int qp, TCGMemOp opc, TCGReg d, TCGReg s)
-{
- if ((opc & MO_SIZE) == MO_64) {
- return tcg_opc_mov_a(qp, d, s);
- } else {
- return tcg_opc_i29(qp, opc_ext_i29[opc & MO_SSIZE], d, s);
- }
-}
-
-static inline void tcg_out_ext(TCGContext *s, uint64_t opc_i29,
- TCGArg ret, TCGArg arg)
-{
- tcg_out_bundle(s, miI,
- INSN_NOP_M,
- INSN_NOP_I,
- tcg_opc_i29(TCG_REG_P0, opc_i29, ret, arg));
-}
-
-static inline uint64_t tcg_opc_bswap64_i(int qp, TCGReg d, TCGReg s)
-{
- return tcg_opc_i3(qp, OPC_MUX1_I3, d, s, 0xb);
-}
-
-static inline void tcg_out_bswap16(TCGContext *s, TCGArg ret, TCGArg arg)
-{
- tcg_out_bundle(s, mII,
- INSN_NOP_M,
- tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12, ret, arg, 15, 15),
- tcg_opc_bswap64_i(TCG_REG_P0, ret, ret));
-}
-
-static inline void tcg_out_bswap32(TCGContext *s, TCGArg ret, TCGArg arg)
-{
- tcg_out_bundle(s, mII,
- INSN_NOP_M,
- tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12, ret, arg, 31, 31),
- tcg_opc_bswap64_i(TCG_REG_P0, ret, ret));
-}
-
-static inline void tcg_out_bswap64(TCGContext *s, TCGArg ret, TCGArg arg)
-{
- tcg_out_bundle(s, miI,
- INSN_NOP_M,
- INSN_NOP_I,
- tcg_opc_bswap64_i(TCG_REG_P0, ret, arg));
-}
-
-static inline void tcg_out_deposit(TCGContext *s, TCGArg ret, TCGArg a1,
- TCGArg a2, int const_a2, int pos, int len)
-{
- uint64_t i1 = 0, i2 = 0;
- int cpos = 63 - pos, lm1 = len - 1;
-
- if (const_a2) {
- /* Truncate the value of a constant a2 to the width of the field. */
- int mask = (1u << len) - 1;
- a2 &= mask;
-
- if (a2 == 0 || a2 == mask) {
- /* 1-bit signed constant inserted into register. */
- i2 = tcg_opc_i14(TCG_REG_P0, OPC_DEP_I14, ret, a2, a1, cpos, lm1);
- } else {
- /* Otherwise, load any constant into a temporary. Do this into
- the first I slot to help out with cross-unit delays. */
- i1 = tcg_opc_movi_a(TCG_REG_P0, TCG_REG_R2, a2);
- a2 = TCG_REG_R2;
- }
- }
- if (i2 == 0) {
- i2 = tcg_opc_i15(TCG_REG_P0, OPC_DEP_I15, ret, a2, a1, cpos, lm1);
- }
- tcg_out_bundle(s, (i1 ? mII : miI),
- INSN_NOP_M,
- i1 ? i1 : INSN_NOP_I,
- i2);
-}
-
-static inline uint64_t tcg_opc_cmp_a(int qp, TCGCond cond, TCGArg arg1,
- TCGArg arg2, int cmp4)
-{
- uint64_t opc_eq_a6, opc_lt_a6, opc_ltu_a6;
-
- if (cmp4) {
- opc_eq_a6 = OPC_CMP4_EQ_A6;
- opc_lt_a6 = OPC_CMP4_LT_A6;
- opc_ltu_a6 = OPC_CMP4_LTU_A6;
- } else {
- opc_eq_a6 = OPC_CMP_EQ_A6;
- opc_lt_a6 = OPC_CMP_LT_A6;
- opc_ltu_a6 = OPC_CMP_LTU_A6;
- }
-
- switch (cond) {
- case TCG_COND_EQ:
- return tcg_opc_a6 (qp, opc_eq_a6, TCG_REG_P6, TCG_REG_P7, arg1, arg2);
- case TCG_COND_NE:
- return tcg_opc_a6 (qp, opc_eq_a6, TCG_REG_P7, TCG_REG_P6, arg1, arg2);
- case TCG_COND_LT:
- return tcg_opc_a6 (qp, opc_lt_a6, TCG_REG_P6, TCG_REG_P7, arg1, arg2);
- case TCG_COND_LTU:
- return tcg_opc_a6 (qp, opc_ltu_a6, TCG_REG_P6, TCG_REG_P7, arg1, arg2);
- case TCG_COND_GE:
- return tcg_opc_a6 (qp, opc_lt_a6, TCG_REG_P7, TCG_REG_P6, arg1, arg2);
- case TCG_COND_GEU:
- return tcg_opc_a6 (qp, opc_ltu_a6, TCG_REG_P7, TCG_REG_P6, arg1, arg2);
- case TCG_COND_LE:
- return tcg_opc_a6 (qp, opc_lt_a6, TCG_REG_P7, TCG_REG_P6, arg2, arg1);
- case TCG_COND_LEU:
- return tcg_opc_a6 (qp, opc_ltu_a6, TCG_REG_P7, TCG_REG_P6, arg2, arg1);
- case TCG_COND_GT:
- return tcg_opc_a6 (qp, opc_lt_a6, TCG_REG_P6, TCG_REG_P7, arg2, arg1);
- case TCG_COND_GTU:
- return tcg_opc_a6 (qp, opc_ltu_a6, TCG_REG_P6, TCG_REG_P7, arg2, arg1);
- default:
- tcg_abort();
- break;
- }
-}
-
-static inline void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1,
- TCGReg arg2, TCGLabel *l, int cmp4)
-{
- uint64_t imm;
-
- /* We pay attention here to not modify the branch target by reading
- the existing value and using it again. This ensure that caches and
- memory are kept coherent during retranslation. */
- if (l->has_value) {
- imm = l->u.value_ptr - s->code_ptr;
- } else {
- imm = get_reloc_pcrel21b_slot2(s->code_ptr);
- tcg_out_reloc(s, s->code_ptr, R_IA64_PCREL21B, l, 0);
- }
-
- tcg_out_bundle(s, miB,
- INSN_NOP_M,
- tcg_opc_cmp_a(TCG_REG_P0, cond, arg1, arg2, cmp4),
- tcg_opc_b1(TCG_REG_P6, OPC_BR_DPTK_FEW_B1, imm));
-}
-
-static inline void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGArg ret,
- TCGArg arg1, TCGArg arg2, int cmp4)
-{
- tcg_out_bundle(s, MmI,
- tcg_opc_cmp_a(TCG_REG_P0, cond, arg1, arg2, cmp4),
- tcg_opc_movi_a(TCG_REG_P6, ret, 1),
- tcg_opc_movi_a(TCG_REG_P7, ret, 0));
-}
-
-static inline void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGArg ret,
- TCGArg c1, TCGArg c2,
- TCGArg v1, int const_v1,
- TCGArg v2, int const_v2, int cmp4)
-{
- uint64_t opc1, opc2;
-
- if (const_v1) {
- opc1 = tcg_opc_movi_a(TCG_REG_P6, ret, v1);
- } else if (ret == v1) {
- opc1 = INSN_NOP_M;
- } else {
- opc1 = tcg_opc_mov_a(TCG_REG_P6, ret, v1);
- }
- if (const_v2) {
- opc2 = tcg_opc_movi_a(TCG_REG_P7, ret, v2);
- } else if (ret == v2) {
- opc2 = INSN_NOP_I;
- } else {
- opc2 = tcg_opc_mov_a(TCG_REG_P7, ret, v2);
- }
-
- tcg_out_bundle(s, MmI,
- tcg_opc_cmp_a(TCG_REG_P0, cond, c1, c2, cmp4),
- opc1,
- opc2);
-}
-
-#if defined(CONFIG_SOFTMMU)
-/* We're expecting to use an signed 22-bit immediate add. */
-QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_table[NB_MMU_MODES - 1][1])
- > 0x1fffff)
-
-/* Load and compare a TLB entry, and return the result in (p6, p7).
- R2 is loaded with the addend TLB entry.
- R57 is loaded with the address, zero extented on 32-bit targets.
- R1, R3 are clobbered, leaving R56 free for...
- BSWAP_1, BSWAP_2 and I-slot insns for swapping data for store. */
-static inline void tcg_out_qemu_tlb(TCGContext *s, TCGReg addr_reg,
- TCGMemOp opc, int off_rw, int off_add,
- uint64_t bswap1, uint64_t bswap2)
-{
- unsigned s_bits = opc & MO_SIZE;
- unsigned a_bits = get_alignment_bits(opc);
-
- /* We don't support unaligned accesses, but overalignment is easy. */
- if (a_bits < s_bits) {
- a_bits = s_bits;
- }
-
- /*
- .mii
- mov r2 = off_rw
- extr.u r3 = addr_reg, ... # extract tlb page
- zxt4 r57 = addr_reg # or mov for 64-bit guest
- ;;
- .mii
- addl r2 = r2, areg0
- shl r3 = r3, cteb # via dep.z
- dep r1 = 0, r57, ... # zero page ofs, keep align
- ;;
- .mmi
- add r2 = r2, r3
- ;;
- ld4 r3 = [r2], off_add-off_rw # or ld8 for 64-bit guest
- nop
- ;;
- .mmi
- nop
- cmp.eq p6, p7 = r3, r58
- nop
- ;;
- */
- tcg_out_bundle(s, miI,
- tcg_opc_movi_a(TCG_REG_P0, TCG_REG_R2, off_rw),
- tcg_opc_i11(TCG_REG_P0, OPC_EXTR_U_I11, TCG_REG_R3,
- addr_reg, TARGET_PAGE_BITS, CPU_TLB_BITS - 1),
- tcg_opc_ext_i(TCG_REG_P0,
- TARGET_LONG_BITS == 32 ? MO_UL : MO_Q,
- TCG_REG_R57, addr_reg));
- tcg_out_bundle(s, miI,
- tcg_opc_a1 (TCG_REG_P0, OPC_ADD_A1, TCG_REG_R2,
- TCG_REG_R2, TCG_AREG0),
- tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12, TCG_REG_R3,
- TCG_REG_R3, 63 - CPU_TLB_ENTRY_BITS,
- 63 - CPU_TLB_ENTRY_BITS),
- tcg_opc_i14(TCG_REG_P0, OPC_DEP_I14, TCG_REG_R1, 0,
- TCG_REG_R57, 63 - a_bits,
- TARGET_PAGE_BITS - a_bits - 1));
- tcg_out_bundle(s, MmI,
- tcg_opc_a1 (TCG_REG_P0, OPC_ADD_A1,
- TCG_REG_R2, TCG_REG_R2, TCG_REG_R3),
- tcg_opc_m3 (TCG_REG_P0,
- (TARGET_LONG_BITS == 32
- ? OPC_LD4_M3 : OPC_LD8_M3), TCG_REG_R3,
- TCG_REG_R2, off_add - off_rw),
- bswap1);
- tcg_out_bundle(s, mmI,
- tcg_opc_m1 (TCG_REG_P0, OPC_LD8_M1, TCG_REG_R2, TCG_REG_R2),
- tcg_opc_a6 (TCG_REG_P0, OPC_CMP_EQ_A6, TCG_REG_P6,
- TCG_REG_P7, TCG_REG_R1, TCG_REG_R3),
- bswap2);
-}
-
-typedef struct TCGLabelQemuLdst {
- bool is_ld;
- TCGMemOp size;
- tcg_insn_unit *label_ptr; /* label pointers to be updated */
- struct TCGLabelQemuLdst *next;
-} TCGLabelQemuLdst;
-
-typedef struct TCGBackendData {
- TCGLabelQemuLdst *labels;
-} TCGBackendData;
-
-static inline void tcg_out_tb_init(TCGContext *s)
-{
- s->be->labels = NULL;
-}
-
-static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOp opc,
- tcg_insn_unit *label_ptr)
-{
- TCGBackendData *be = s->be;
- TCGLabelQemuLdst *l = tcg_malloc(sizeof(*l));
-
- l->is_ld = is_ld;
- l->size = opc & MO_SIZE;
- l->label_ptr = label_ptr;
- l->next = be->labels;
- be->labels = l;
-}
-
-static bool tcg_out_tb_finalize(TCGContext *s)
-{
- static const void * const helpers[8] = {
- helper_ret_stb_mmu,
- helper_le_stw_mmu,
- helper_le_stl_mmu,
- helper_le_stq_mmu,
- helper_ret_ldub_mmu,
- helper_le_lduw_mmu,
- helper_le_ldul_mmu,
- helper_le_ldq_mmu,
- };
- tcg_insn_unit *thunks[8] = { };
- TCGLabelQemuLdst *l;
-
- for (l = s->be->labels; l != NULL; l = l->next) {
- long x = l->is_ld * 4 + l->size;
- tcg_insn_unit *dest = thunks[x];
-
- /* The out-of-line thunks are all the same; load the return address
- from B0, load the GP, and branch to the code. Note that we are
- always post-call, so the register window has rolled, so we're
- using incoming parameter register numbers, not outgoing. */
- if (dest == NULL) {
- uintptr_t *desc = (uintptr_t *)helpers[x];
- uintptr_t func = desc[0], gp = desc[1], disp;
-
- thunks[x] = dest = s->code_ptr;
-
- tcg_out_bundle(s, mlx,
- INSN_NOP_M,
- tcg_opc_l2 (gp),
- tcg_opc_x2 (TCG_REG_P0, OPC_MOVL_X2,
- TCG_REG_R1, gp));
- tcg_out_bundle(s, mii,
- INSN_NOP_M,
- INSN_NOP_I,
- tcg_opc_i22(TCG_REG_P0, OPC_MOV_I22,
- l->is_ld ? TCG_REG_R35 : TCG_REG_R36,
- TCG_REG_B0));
- disp = (tcg_insn_unit *)func - s->code_ptr;
- tcg_out_bundle(s, mLX,
- INSN_NOP_M,
- tcg_opc_l3 (disp),
- tcg_opc_x3 (TCG_REG_P0, OPC_BRL_SPTK_MANY_X3, disp));
- }
-
- reloc_pcrel21b_slot2(l->label_ptr, dest);
-
- /* Test for (pending) buffer overflow. The assumption is that any
- one operation beginning below the high water mark cannot overrun
- the buffer completely. Thus we can test for overflow after
- generating code without having to check during generation. */
- if (unlikely((void *)s->code_ptr > s->code_gen_highwater)) {
- return false;
- }
- }
- return true;
-}
-
-static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args)
-{
- static const uint64_t opc_ld_m1[4] = {
- OPC_LD1_M1, OPC_LD2_M1, OPC_LD4_M1, OPC_LD8_M1
- };
- int addr_reg, data_reg, mem_index;
- TCGMemOpIdx oi;
- TCGMemOp opc, s_bits;
- uint64_t fin1, fin2;
- tcg_insn_unit *label_ptr;
-
- data_reg = args[0];
- addr_reg = args[1];
- oi = args[2];
- opc = get_memop(oi);
- mem_index = get_mmuidx(oi);
- s_bits = opc & MO_SIZE;
-
- /* Read the TLB entry */
- tcg_out_qemu_tlb(s, addr_reg, opc,
- offsetof(CPUArchState, tlb_table[mem_index][0].addr_read),
- offsetof(CPUArchState, tlb_table[mem_index][0].addend),
- INSN_NOP_I, INSN_NOP_I);
-
- /* P6 is the fast path, and P7 the slow path */
-
- fin2 = 0;
- if (opc & MO_BSWAP) {
- fin1 = tcg_opc_bswap64_i(TCG_REG_P0, data_reg, TCG_REG_R8);
- if (s_bits < MO_64) {
- int shift = 64 - (8 << s_bits);
- fin2 = (opc & MO_SIGN ? OPC_EXTR_I11 : OPC_EXTR_U_I11);
- fin2 = tcg_opc_i11(TCG_REG_P0, fin2,
- data_reg, data_reg, shift, 63 - shift);
- }
- } else {
- fin1 = tcg_opc_ext_i(TCG_REG_P0, opc, data_reg, TCG_REG_R8);
- }
-
- tcg_out_bundle(s, mmI,
- tcg_opc_mov_a(TCG_REG_P7, TCG_REG_R56, TCG_AREG0),
- tcg_opc_a1 (TCG_REG_P6, OPC_ADD_A1, TCG_REG_R2,
- TCG_REG_R2, TCG_REG_R57),
- tcg_opc_movi_a(TCG_REG_P7, TCG_REG_R58, oi));
- label_ptr = s->code_ptr;
- tcg_out_bundle(s, miB,
- tcg_opc_m1 (TCG_REG_P6, opc_ld_m1[s_bits],
- TCG_REG_R8, TCG_REG_R2),
- INSN_NOP_I,
- tcg_opc_b3 (TCG_REG_P7, OPC_BR_CALL_SPNT_FEW_B3, TCG_REG_B0,
- get_reloc_pcrel21b_slot2(label_ptr)));
-
- add_qemu_ldst_label(s, 1, opc, label_ptr);
-
- /* Note that we always use LE helper functions, so the bswap insns
- here for the fast path also apply to the slow path. */
- tcg_out_bundle(s, (fin2 ? mII : miI),
- INSN_NOP_M,
- fin1,
- fin2 ? fin2 : INSN_NOP_I);
-}
-
-static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args)
-{
- static const uint64_t opc_st_m4[4] = {
- OPC_ST1_M4, OPC_ST2_M4, OPC_ST4_M4, OPC_ST8_M4
- };
- TCGReg addr_reg, data_reg;
- int mem_index;
- uint64_t pre1, pre2;
- TCGMemOpIdx oi;
- TCGMemOp opc, s_bits;
- tcg_insn_unit *label_ptr;
-
- data_reg = args[0];
- addr_reg = args[1];
- oi = args[2];
- opc = get_memop(oi);
- mem_index = get_mmuidx(oi);
- s_bits = opc & MO_SIZE;
-
- /* Note that we always use LE helper functions, so the bswap insns
- that are here for the fast path also apply to the slow path,
- and move the data into the argument register. */
- pre2 = INSN_NOP_I;
- if (opc & MO_BSWAP) {
- pre1 = tcg_opc_bswap64_i(TCG_REG_P0, TCG_REG_R58, data_reg);
- if (s_bits < MO_64) {
- int shift = 64 - (8 << s_bits);
- pre2 = tcg_opc_i11(TCG_REG_P0, OPC_EXTR_U_I11,
- TCG_REG_R58, TCG_REG_R58, shift, 63 - shift);
- }
- } else {
- /* Just move the data into place for the slow path. */
- pre1 = tcg_opc_ext_i(TCG_REG_P0, opc, TCG_REG_R58, data_reg);
- }
-
- tcg_out_qemu_tlb(s, addr_reg, opc,
- offsetof(CPUArchState, tlb_table[mem_index][0].addr_write),
- offsetof(CPUArchState, tlb_table[mem_index][0].addend),
- pre1, pre2);
-
- /* P6 is the fast path, and P7 the slow path */
- tcg_out_bundle(s, mmI,
- tcg_opc_mov_a(TCG_REG_P7, TCG_REG_R56, TCG_AREG0),
- tcg_opc_a1 (TCG_REG_P6, OPC_ADD_A1, TCG_REG_R2,
- TCG_REG_R2, TCG_REG_R57),
- tcg_opc_movi_a(TCG_REG_P7, TCG_REG_R59, oi));
- label_ptr = s->code_ptr;
- tcg_out_bundle(s, miB,
- tcg_opc_m4 (TCG_REG_P6, opc_st_m4[s_bits],
- TCG_REG_R58, TCG_REG_R2),
- INSN_NOP_I,
- tcg_opc_b3 (TCG_REG_P7, OPC_BR_CALL_SPNT_FEW_B3, TCG_REG_B0,
- get_reloc_pcrel21b_slot2(label_ptr)));
-
- add_qemu_ldst_label(s, 0, opc, label_ptr);
-}
-
-#else /* !CONFIG_SOFTMMU */
-# include "tcg-be-null.h"
-
-static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args)
-{
- static uint64_t const opc_ld_m1[4] = {
- OPC_LD1_M1, OPC_LD2_M1, OPC_LD4_M1, OPC_LD8_M1
- };
- int addr_reg, data_reg;
- TCGMemOp opc, s_bits, bswap;
-
- data_reg = args[0];
- addr_reg = args[1];
- opc = args[2];
- s_bits = opc & MO_SIZE;
- bswap = opc & MO_BSWAP;
-
-#if TARGET_LONG_BITS == 32
- if (guest_base != 0) {
- tcg_out_bundle(s, mII,
- INSN_NOP_M,
- tcg_opc_i29(TCG_REG_P0, OPC_ZXT4_I29,
- TCG_REG_R3, addr_reg),
- tcg_opc_a1 (TCG_REG_P0, OPC_ADD_A1, TCG_REG_R2,
- TCG_GUEST_BASE_REG, TCG_REG_R3));
- } else {
- tcg_out_bundle(s, miI,
- INSN_NOP_M,
- tcg_opc_i29(TCG_REG_P0, OPC_ZXT4_I29,
- TCG_REG_R2, addr_reg),
- INSN_NOP_I);
- }
-
- if (!bswap) {
- if (!(opc & MO_SIGN)) {
- tcg_out_bundle(s, miI,
- tcg_opc_m1 (TCG_REG_P0, opc_ld_m1[s_bits],
- data_reg, TCG_REG_R2),
- INSN_NOP_I,
- INSN_NOP_I);
- } else {
- tcg_out_bundle(s, mII,
- tcg_opc_m1 (TCG_REG_P0, opc_ld_m1[s_bits],
- data_reg, TCG_REG_R2),
- INSN_NOP_I,
- tcg_opc_ext_i(TCG_REG_P0, opc, data_reg, data_reg));
- }
- } else if (s_bits == MO_64) {
- tcg_out_bundle(s, mII,
- tcg_opc_m1 (TCG_REG_P0, opc_ld_m1[s_bits],
- data_reg, TCG_REG_R2),
- INSN_NOP_I,
- tcg_opc_bswap64_i(TCG_REG_P0, data_reg, data_reg));
- } else {
- if (s_bits == MO_16) {
- tcg_out_bundle(s, mII,
- tcg_opc_m1 (TCG_REG_P0, opc_ld_m1[s_bits],
- data_reg, TCG_REG_R2),
- INSN_NOP_I,
- tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12,
- data_reg, data_reg, 15, 15));
- } else {
- tcg_out_bundle(s, mII,
- tcg_opc_m1 (TCG_REG_P0, opc_ld_m1[s_bits],
- data_reg, TCG_REG_R2),
- INSN_NOP_I,
- tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12,
- data_reg, data_reg, 31, 31));
- }
- if (!(opc & MO_SIGN)) {
- tcg_out_bundle(s, miI,
- INSN_NOP_M,
- INSN_NOP_I,
- tcg_opc_bswap64_i(TCG_REG_P0, data_reg, data_reg));
- } else {
- tcg_out_bundle(s, mII,
- INSN_NOP_M,
- tcg_opc_bswap64_i(TCG_REG_P0, data_reg, data_reg),
- tcg_opc_ext_i(TCG_REG_P0, opc, data_reg, data_reg));
- }
- }
-#else
- if (guest_base != 0) {
- tcg_out_bundle(s, MmI,
- tcg_opc_a1 (TCG_REG_P0, OPC_ADD_A1, TCG_REG_R2,
- TCG_GUEST_BASE_REG, addr_reg),
- tcg_opc_m1 (TCG_REG_P0, opc_ld_m1[s_bits],
- data_reg, TCG_REG_R2),
- INSN_NOP_I);
- } else {
- tcg_out_bundle(s, mmI,
- INSN_NOP_M,
- tcg_opc_m1 (TCG_REG_P0, opc_ld_m1[s_bits],
- data_reg, addr_reg),
- INSN_NOP_I);
- }
-
- if (bswap && s_bits == MO_16) {
- tcg_out_bundle(s, mII,
- INSN_NOP_M,
- tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12,
- data_reg, data_reg, 15, 15),
- tcg_opc_bswap64_i(TCG_REG_P0, data_reg, data_reg));
- } else if (bswap && s_bits == MO_32) {
- tcg_out_bundle(s, mII,
- INSN_NOP_M,
- tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12,
- data_reg, data_reg, 31, 31),
- tcg_opc_bswap64_i(TCG_REG_P0, data_reg, data_reg));
- } else if (bswap && s_bits == MO_64) {
- tcg_out_bundle(s, miI,
- INSN_NOP_M,
- INSN_NOP_I,
- tcg_opc_bswap64_i(TCG_REG_P0, data_reg, data_reg));
- }
- if (opc & MO_SIGN) {
- tcg_out_bundle(s, miI,
- INSN_NOP_M,
- INSN_NOP_I,
- tcg_opc_ext_i(TCG_REG_P0, opc, data_reg, data_reg));
- }
-#endif
-}
-
-static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args)
-{
- static uint64_t const opc_st_m4[4] = {
- OPC_ST1_M4, OPC_ST2_M4, OPC_ST4_M4, OPC_ST8_M4
- };
- int addr_reg, data_reg;
-#if TARGET_LONG_BITS == 64
- uint64_t add_guest_base;
-#endif
- TCGMemOp opc, s_bits, bswap;
-
- data_reg = args[0];
- addr_reg = args[1];
- opc = args[2];
- s_bits = opc & MO_SIZE;
- bswap = opc & MO_BSWAP;
-
-#if TARGET_LONG_BITS == 32
- if (guest_base != 0) {
- tcg_out_bundle(s, mII,
- INSN_NOP_M,
- tcg_opc_i29(TCG_REG_P0, OPC_ZXT4_I29,
- TCG_REG_R3, addr_reg),
- tcg_opc_a1 (TCG_REG_P0, OPC_ADD_A1, TCG_REG_R2,
- TCG_GUEST_BASE_REG, TCG_REG_R3));
- } else {
- tcg_out_bundle(s, miI,
- INSN_NOP_M,
- tcg_opc_i29(TCG_REG_P0, OPC_ZXT4_I29,
- TCG_REG_R2, addr_reg),
- INSN_NOP_I);
- }
-
- if (bswap) {
- if (s_bits == MO_16) {
- tcg_out_bundle(s, mII,
- INSN_NOP_M,
- tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12,
- TCG_REG_R3, data_reg, 15, 15),
- tcg_opc_bswap64_i(TCG_REG_P0,
- TCG_REG_R3, TCG_REG_R3));
- data_reg = TCG_REG_R3;
- } else if (s_bits == MO_32) {
- tcg_out_bundle(s, mII,
- INSN_NOP_M,
- tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12,
- TCG_REG_R3, data_reg, 31, 31),
- tcg_opc_bswap64_i(TCG_REG_P0,
- TCG_REG_R3, TCG_REG_R3));
- data_reg = TCG_REG_R3;
- } else if (s_bits == MO_64) {
- tcg_out_bundle(s, miI,
- INSN_NOP_M,
- INSN_NOP_I,
- tcg_opc_bswap64_i(TCG_REG_P0, TCG_REG_R3, data_reg));
- data_reg = TCG_REG_R3;
- }
- }
- tcg_out_bundle(s, mmI,
- tcg_opc_m4 (TCG_REG_P0, opc_st_m4[s_bits],
- data_reg, TCG_REG_R2),
- INSN_NOP_M,
- INSN_NOP_I);
-#else
- if (guest_base != 0) {
- add_guest_base = tcg_opc_a1 (TCG_REG_P0, OPC_ADD_A1, TCG_REG_R2,
- TCG_GUEST_BASE_REG, addr_reg);
- addr_reg = TCG_REG_R2;
- } else {
- add_guest_base = INSN_NOP_M;
- }
-
- if (!bswap) {
- tcg_out_bundle(s, (guest_base ? MmI : mmI),
- add_guest_base,
- tcg_opc_m4 (TCG_REG_P0, opc_st_m4[s_bits],
- data_reg, addr_reg),
- INSN_NOP_I);
- } else {
- if (s_bits == MO_16) {
- tcg_out_bundle(s, mII,
- add_guest_base,
- tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12,
- TCG_REG_R3, data_reg, 15, 15),
- tcg_opc_bswap64_i(TCG_REG_P0,
- TCG_REG_R3, TCG_REG_R3));
- data_reg = TCG_REG_R3;
- } else if (s_bits == MO_32) {
- tcg_out_bundle(s, mII,
- add_guest_base,
- tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12,
- TCG_REG_R3, data_reg, 31, 31),
- tcg_opc_bswap64_i(TCG_REG_P0,
- TCG_REG_R3, TCG_REG_R3));
- data_reg = TCG_REG_R3;
- } else if (s_bits == MO_64) {
- tcg_out_bundle(s, miI,
- add_guest_base,
- INSN_NOP_I,
- tcg_opc_bswap64_i(TCG_REG_P0, TCG_REG_R3, data_reg));
- data_reg = TCG_REG_R3;
- }
- tcg_out_bundle(s, miI,
- tcg_opc_m4 (TCG_REG_P0, opc_st_m4[s_bits],
- data_reg, addr_reg),
- INSN_NOP_I,
- INSN_NOP_I);
- }
-#endif
-}
-
-#endif
-
-static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
- const TCGArg *args, const int *const_args)
-{
- switch(opc) {
- case INDEX_op_exit_tb:
- tcg_out_exit_tb(s, args[0]);
- break;
- case INDEX_op_br:
- tcg_out_br(s, arg_label(args[0]));
- break;
- case INDEX_op_goto_tb:
- tcg_out_goto_tb(s, args[0]);
- break;
-
- case INDEX_op_ld8u_i32:
- case INDEX_op_ld8u_i64:
- tcg_out_ld_rel(s, OPC_LD1_M1, args[0], args[1], args[2]);
- break;
- case INDEX_op_ld8s_i32:
- case INDEX_op_ld8s_i64:
- tcg_out_ld_rel(s, OPC_LD1_M1, args[0], args[1], args[2]);
- tcg_out_ext(s, OPC_SXT1_I29, args[0], args[0]);
- break;
- case INDEX_op_ld16u_i32:
- case INDEX_op_ld16u_i64:
- tcg_out_ld_rel(s, OPC_LD2_M1, args[0], args[1], args[2]);
- break;
- case INDEX_op_ld16s_i32:
- case INDEX_op_ld16s_i64:
- tcg_out_ld_rel(s, OPC_LD2_M1, args[0], args[1], args[2]);
- tcg_out_ext(s, OPC_SXT2_I29, args[0], args[0]);
- break;
- case INDEX_op_ld_i32:
- case INDEX_op_ld32u_i64:
- tcg_out_ld_rel(s, OPC_LD4_M1, args[0], args[1], args[2]);
- break;
- case INDEX_op_ld32s_i64:
- tcg_out_ld_rel(s, OPC_LD4_M1, args[0], args[1], args[2]);
- tcg_out_ext(s, OPC_SXT4_I29, args[0], args[0]);
- break;
- case INDEX_op_ld_i64:
- tcg_out_ld_rel(s, OPC_LD8_M1, args[0], args[1], args[2]);
- break;
- case INDEX_op_st8_i32:
- case INDEX_op_st8_i64:
- tcg_out_st_rel(s, OPC_ST1_M4, args[0], args[1], args[2]);
- break;
- case INDEX_op_st16_i32:
- case INDEX_op_st16_i64:
- tcg_out_st_rel(s, OPC_ST2_M4, args[0], args[1], args[2]);
- break;
- case INDEX_op_st_i32:
- case INDEX_op_st32_i64:
- tcg_out_st_rel(s, OPC_ST4_M4, args[0], args[1], args[2]);
- break;
- case INDEX_op_st_i64:
- tcg_out_st_rel(s, OPC_ST8_M4, args[0], args[1], args[2]);
- break;
-
- case INDEX_op_add_i32:
- case INDEX_op_add_i64:
- tcg_out_add(s, args[0], args[1], args[2], const_args[2]);
- break;
- case INDEX_op_sub_i32:
- case INDEX_op_sub_i64:
- tcg_out_sub(s, args[0], args[1], const_args[1], args[2], const_args[2]);
- break;
-
- case INDEX_op_and_i32:
- case INDEX_op_and_i64:
- /* TCG expects arg2 constant; A3 expects arg1 constant. Swap. */
- tcg_out_alu(s, OPC_AND_A1, OPC_AND_A3, args[0],
- args[2], const_args[2], args[1], const_args[1]);
- break;
- case INDEX_op_andc_i32:
- case INDEX_op_andc_i64:
- tcg_out_alu(s, OPC_ANDCM_A1, OPC_ANDCM_A3, args[0],
- args[1], const_args[1], args[2], const_args[2]);
- break;
- case INDEX_op_eqv_i32:
- case INDEX_op_eqv_i64:
- tcg_out_eqv(s, args[0], args[1], const_args[1],
- args[2], const_args[2]);
- break;
- case INDEX_op_nand_i32:
- case INDEX_op_nand_i64:
- tcg_out_nand(s, args[0], args[1], const_args[1],
- args[2], const_args[2]);
- break;
- case INDEX_op_nor_i32:
- case INDEX_op_nor_i64:
- tcg_out_nor(s, args[0], args[1], const_args[1],
- args[2], const_args[2]);
- break;
- case INDEX_op_or_i32:
- case INDEX_op_or_i64:
- /* TCG expects arg2 constant; A3 expects arg1 constant. Swap. */
- tcg_out_alu(s, OPC_OR_A1, OPC_OR_A3, args[0],
- args[2], const_args[2], args[1], const_args[1]);
- break;
- case INDEX_op_orc_i32:
- case INDEX_op_orc_i64:
- tcg_out_orc(s, args[0], args[1], const_args[1],
- args[2], const_args[2]);
- break;
- case INDEX_op_xor_i32:
- case INDEX_op_xor_i64:
- /* TCG expects arg2 constant; A3 expects arg1 constant. Swap. */
- tcg_out_alu(s, OPC_XOR_A1, OPC_XOR_A3, args[0],
- args[2], const_args[2], args[1], const_args[1]);
- break;
-
- case INDEX_op_mul_i32:
- case INDEX_op_mul_i64:
- tcg_out_mul(s, args[0], args[1], args[2]);
- break;
-
- case INDEX_op_sar_i32:
- tcg_out_sar_i32(s, args[0], args[1], args[2], const_args[2]);
- break;
- case INDEX_op_sar_i64:
- tcg_out_sar_i64(s, args[0], args[1], args[2], const_args[2]);
- break;
- case INDEX_op_shl_i32:
- tcg_out_shl_i32(s, args[0], args[1], args[2], const_args[2]);
- break;
- case INDEX_op_shl_i64:
- tcg_out_shl_i64(s, args[0], args[1], args[2], const_args[2]);
- break;
- case INDEX_op_shr_i32:
- tcg_out_shr_i32(s, args[0], args[1], args[2], const_args[2]);
- break;
- case INDEX_op_shr_i64:
- tcg_out_shr_i64(s, args[0], args[1], args[2], const_args[2]);
- break;
- case INDEX_op_rotl_i32:
- tcg_out_rotl_i32(s, args[0], args[1], args[2], const_args[2]);
- break;
- case INDEX_op_rotl_i64:
- tcg_out_rotl_i64(s, args[0], args[1], args[2], const_args[2]);
- break;
- case INDEX_op_rotr_i32:
- tcg_out_rotr_i32(s, args[0], args[1], args[2], const_args[2]);
- break;
- case INDEX_op_rotr_i64:
- tcg_out_rotr_i64(s, args[0], args[1], args[2], const_args[2]);
- break;
-
- case INDEX_op_ext8s_i32:
- case INDEX_op_ext8s_i64:
- tcg_out_ext(s, OPC_SXT1_I29, args[0], args[1]);
- break;
- case INDEX_op_ext8u_i32:
- case INDEX_op_ext8u_i64:
- tcg_out_ext(s, OPC_ZXT1_I29, args[0], args[1]);
- break;
- case INDEX_op_ext16s_i32:
- case INDEX_op_ext16s_i64:
- tcg_out_ext(s, OPC_SXT2_I29, args[0], args[1]);
- break;
- case INDEX_op_ext16u_i32:
- case INDEX_op_ext16u_i64:
- tcg_out_ext(s, OPC_ZXT2_I29, args[0], args[1]);
- break;
- case INDEX_op_ext_i32_i64:
- case INDEX_op_ext32s_i64:
- tcg_out_ext(s, OPC_SXT4_I29, args[0], args[1]);
- break;
- case INDEX_op_extu_i32_i64:
- case INDEX_op_ext32u_i64:
- tcg_out_ext(s, OPC_ZXT4_I29, args[0], args[1]);
- break;
-
- case INDEX_op_bswap16_i32:
- case INDEX_op_bswap16_i64:
- tcg_out_bswap16(s, args[0], args[1]);
- break;
- case INDEX_op_bswap32_i32:
- case INDEX_op_bswap32_i64:
- tcg_out_bswap32(s, args[0], args[1]);
- break;
- case INDEX_op_bswap64_i64:
- tcg_out_bswap64(s, args[0], args[1]);
- break;
-
- case INDEX_op_deposit_i32:
- case INDEX_op_deposit_i64:
- tcg_out_deposit(s, args[0], args[1], args[2], const_args[2],
- args[3], args[4]);
- break;
-
- case INDEX_op_brcond_i32:
- tcg_out_brcond(s, args[2], args[0], args[1], arg_label(args[3]), 1);
- break;
- case INDEX_op_brcond_i64:
- tcg_out_brcond(s, args[2], args[0], args[1], arg_label(args[3]), 0);
- break;
- case INDEX_op_setcond_i32:
- tcg_out_setcond(s, args[3], args[0], args[1], args[2], 1);
- break;
- case INDEX_op_setcond_i64:
- tcg_out_setcond(s, args[3], args[0], args[1], args[2], 0);
- break;
- case INDEX_op_movcond_i32:
- tcg_out_movcond(s, args[5], args[0], args[1], args[2],
- args[3], const_args[3], args[4], const_args[4], 1);
- break;
- case INDEX_op_movcond_i64:
- tcg_out_movcond(s, args[5], args[0], args[1], args[2],
- args[3], const_args[3], args[4], const_args[4], 0);
- break;
-
- case INDEX_op_qemu_ld_i32:
- tcg_out_qemu_ld(s, args);
- break;
- case INDEX_op_qemu_ld_i64:
- tcg_out_qemu_ld(s, args);
- break;
- case INDEX_op_qemu_st_i32:
- tcg_out_qemu_st(s, args);
- break;
- case INDEX_op_qemu_st_i64:
- tcg_out_qemu_st(s, args);
- break;
-
- case INDEX_op_mb:
- tcg_out_bundle(s, mmI, OPC_MF_M24, INSN_NOP_M, INSN_NOP_I);
- break;
- case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
- case INDEX_op_mov_i64:
- case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */
- case INDEX_op_movi_i64:
- case INDEX_op_call: /* Always emitted via tcg_out_call. */
- default:
- tcg_abort();
- }
-}
-
-static const TCGTargetOpDef ia64_op_defs[] = {
- { INDEX_op_br, { } },
- { INDEX_op_exit_tb, { } },
- { INDEX_op_goto_tb, { } },
-
- { INDEX_op_ld8u_i32, { "r", "r" } },
- { INDEX_op_ld8s_i32, { "r", "r" } },
- { INDEX_op_ld16u_i32, { "r", "r" } },
- { INDEX_op_ld16s_i32, { "r", "r" } },
- { INDEX_op_ld_i32, { "r", "r" } },
- { INDEX_op_st8_i32, { "rZ", "r" } },
- { INDEX_op_st16_i32, { "rZ", "r" } },
- { INDEX_op_st_i32, { "rZ", "r" } },
-
- { INDEX_op_add_i32, { "r", "rZ", "rI" } },
- { INDEX_op_sub_i32, { "r", "rI", "rI" } },
-
- { INDEX_op_and_i32, { "r", "rI", "rI" } },
- { INDEX_op_andc_i32, { "r", "rI", "rI" } },
- { INDEX_op_eqv_i32, { "r", "rZ", "rZ" } },
- { INDEX_op_nand_i32, { "r", "rZ", "rZ" } },
- { INDEX_op_nor_i32, { "r", "rZ", "rZ" } },
- { INDEX_op_or_i32, { "r", "rI", "rI" } },
- { INDEX_op_orc_i32, { "r", "rZ", "rZ" } },
- { INDEX_op_xor_i32, { "r", "rI", "rI" } },
-
- { INDEX_op_mul_i32, { "r", "rZ", "rZ" } },
-
- { INDEX_op_sar_i32, { "r", "rZ", "ri" } },
- { INDEX_op_shl_i32, { "r", "rZ", "ri" } },
- { INDEX_op_shr_i32, { "r", "rZ", "ri" } },
- { INDEX_op_rotl_i32, { "r", "rZ", "ri" } },
- { INDEX_op_rotr_i32, { "r", "rZ", "ri" } },
-
- { INDEX_op_ext8s_i32, { "r", "rZ"} },
- { INDEX_op_ext8u_i32, { "r", "rZ"} },
- { INDEX_op_ext16s_i32, { "r", "rZ"} },
- { INDEX_op_ext16u_i32, { "r", "rZ"} },
-
- { INDEX_op_bswap16_i32, { "r", "rZ" } },
- { INDEX_op_bswap32_i32, { "r", "rZ" } },
-
- { INDEX_op_brcond_i32, { "rZ", "rZ" } },
- { INDEX_op_setcond_i32, { "r", "rZ", "rZ" } },
- { INDEX_op_movcond_i32, { "r", "rZ", "rZ", "rI", "rI" } },
-
- { INDEX_op_ld8u_i64, { "r", "r" } },
- { INDEX_op_ld8s_i64, { "r", "r" } },
- { INDEX_op_ld16u_i64, { "r", "r" } },
- { INDEX_op_ld16s_i64, { "r", "r" } },
- { INDEX_op_ld32u_i64, { "r", "r" } },
- { INDEX_op_ld32s_i64, { "r", "r" } },
- { INDEX_op_ld_i64, { "r", "r" } },
- { INDEX_op_st8_i64, { "rZ", "r" } },
- { INDEX_op_st16_i64, { "rZ", "r" } },
- { INDEX_op_st32_i64, { "rZ", "r" } },
- { INDEX_op_st_i64, { "rZ", "r" } },
-
- { INDEX_op_add_i64, { "r", "rZ", "rI" } },
- { INDEX_op_sub_i64, { "r", "rI", "rI" } },
-
- { INDEX_op_and_i64, { "r", "rI", "rI" } },
- { INDEX_op_andc_i64, { "r", "rI", "rI" } },
- { INDEX_op_eqv_i64, { "r", "rZ", "rZ" } },
- { INDEX_op_nand_i64, { "r", "rZ", "rZ" } },
- { INDEX_op_nor_i64, { "r", "rZ", "rZ" } },
- { INDEX_op_or_i64, { "r", "rI", "rI" } },
- { INDEX_op_orc_i64, { "r", "rZ", "rZ" } },
- { INDEX_op_xor_i64, { "r", "rI", "rI" } },
-
- { INDEX_op_mul_i64, { "r", "rZ", "rZ" } },
-
- { INDEX_op_sar_i64, { "r", "rZ", "ri" } },
- { INDEX_op_shl_i64, { "r", "rZ", "ri" } },
- { INDEX_op_shr_i64, { "r", "rZ", "ri" } },
- { INDEX_op_rotl_i64, { "r", "rZ", "ri" } },
- { INDEX_op_rotr_i64, { "r", "rZ", "ri" } },
-
- { INDEX_op_ext8s_i64, { "r", "rZ"} },
- { INDEX_op_ext8u_i64, { "r", "rZ"} },
- { INDEX_op_ext16s_i64, { "r", "rZ"} },
- { INDEX_op_ext16u_i64, { "r", "rZ"} },
- { INDEX_op_ext32s_i64, { "r", "rZ"} },
- { INDEX_op_ext32u_i64, { "r", "rZ"} },
- { INDEX_op_ext_i32_i64, { "r", "rZ" } },
- { INDEX_op_extu_i32_i64, { "r", "rZ" } },
-
- { INDEX_op_bswap16_i64, { "r", "rZ" } },
- { INDEX_op_bswap32_i64, { "r", "rZ" } },
- { INDEX_op_bswap64_i64, { "r", "rZ" } },
-
- { INDEX_op_brcond_i64, { "rZ", "rZ" } },
- { INDEX_op_setcond_i64, { "r", "rZ", "rZ" } },
- { INDEX_op_movcond_i64, { "r", "rZ", "rZ", "rI", "rI" } },
-
- { INDEX_op_deposit_i32, { "r", "rZ", "ri" } },
- { INDEX_op_deposit_i64, { "r", "rZ", "ri" } },
-
- { INDEX_op_qemu_ld_i32, { "r", "r" } },
- { INDEX_op_qemu_ld_i64, { "r", "r" } },
- { INDEX_op_qemu_st_i32, { "SZ", "r" } },
- { INDEX_op_qemu_st_i64, { "SZ", "r" } },
-
- { INDEX_op_mb, { } },
- { -1 },
-};
-
-static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
-{
- int i, n = ARRAY_SIZE(ia64_op_defs);
-
- for (i = 0; i < n; ++i) {
- if (ia64_op_defs[i].op == op) {
- return &ia64_op_defs[i];
- }
- }
- return NULL;
-}
-
-/* Generate global QEMU prologue and epilogue code */
-static void tcg_target_qemu_prologue(TCGContext *s)
-{
- int frame_size;
-
- /* reserve some stack space */
- frame_size = TCG_STATIC_CALL_ARGS_SIZE +
- CPU_TEMP_BUF_NLONGS * sizeof(long);
- frame_size = (frame_size + TCG_TARGET_STACK_ALIGN - 1) &
- ~(TCG_TARGET_STACK_ALIGN - 1);
- tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE,
- CPU_TEMP_BUF_NLONGS * sizeof(long));
-
- /* First emit adhoc function descriptor */
- *s->code_ptr = (tcg_insn_unit){
- (uint64_t)(s->code_ptr + 1), /* entry point */
- 0 /* skip gp */
- };
- s->code_ptr++;
-
- /* prologue */
- tcg_out_bundle(s, miI,
- tcg_opc_m34(TCG_REG_P0, OPC_ALLOC_M34,
- TCG_REG_R34, 32, 24, 0),
- INSN_NOP_I,
- tcg_opc_i21(TCG_REG_P0, OPC_MOV_I21,
- TCG_REG_B6, TCG_REG_R33, 0));
-
- /* ??? If guest_base < 0x200000, we could load the register via
- an ADDL in the M slot of the next bundle. */
- if (guest_base != 0) {
- tcg_out_bundle(s, mlx,
- INSN_NOP_M,
- tcg_opc_l2(guest_base),
- tcg_opc_x2 (TCG_REG_P0, OPC_MOVL_X2,
- TCG_GUEST_BASE_REG, guest_base));
- tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
- }
-
- tcg_out_bundle(s, miB,
- tcg_opc_a4 (TCG_REG_P0, OPC_ADDS_A4,
- TCG_REG_R12, -frame_size, TCG_REG_R12),
- tcg_opc_i22(TCG_REG_P0, OPC_MOV_I22,
- TCG_REG_R33, TCG_REG_B0),
- tcg_opc_b4 (TCG_REG_P0, OPC_BR_SPTK_MANY_B4, TCG_REG_B6));
-
- /* epilogue */
- tb_ret_addr = s->code_ptr;
- tcg_out_bundle(s, miI,
- INSN_NOP_M,
- tcg_opc_i21(TCG_REG_P0, OPC_MOV_I21,
- TCG_REG_B0, TCG_REG_R33, 0),
- tcg_opc_a4 (TCG_REG_P0, OPC_ADDS_A4,
- TCG_REG_R12, frame_size, TCG_REG_R12));
- tcg_out_bundle(s, miB,
- INSN_NOP_M,
- tcg_opc_i26(TCG_REG_P0, OPC_MOV_I_I26,
- TCG_REG_PFS, TCG_REG_R34),
- tcg_opc_b4 (TCG_REG_P0, OPC_BR_RET_SPTK_MANY_B4,
- TCG_REG_B0));
-}
-
-static void tcg_target_init(TCGContext *s)
-{
- tcg_regset_set(tcg_target_available_regs[TCG_TYPE_I32],
- 0xffffffffffffffffull);
- tcg_regset_set(tcg_target_available_regs[TCG_TYPE_I64],
- 0xffffffffffffffffull);
-
- tcg_regset_clear(tcg_target_call_clobber_regs);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R8);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R9);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R10);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R11);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R14);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R15);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R16);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R17);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R18);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R19);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R20);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R21);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R22);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R23);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R24);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R25);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R26);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R27);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R28);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R29);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R30);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R31);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R56);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R57);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R58);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R59);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R60);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R61);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R62);
- tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R63);
-
- tcg_regset_clear(s->reserved_regs);
- tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0); /* zero register */
- tcg_regset_set_reg(s->reserved_regs, TCG_REG_R1); /* global pointer */
- tcg_regset_set_reg(s->reserved_regs, TCG_REG_R2); /* internal use */
- tcg_regset_set_reg(s->reserved_regs, TCG_REG_R3); /* internal use */
- tcg_regset_set_reg(s->reserved_regs, TCG_REG_R12); /* stack pointer */
- tcg_regset_set_reg(s->reserved_regs, TCG_REG_R13); /* thread pointer */
- tcg_regset_set_reg(s->reserved_regs, TCG_REG_R33); /* return address */
- tcg_regset_set_reg(s->reserved_regs, TCG_REG_R34); /* PFS */
-
- /* The following 4 are not in use, are call-saved, but *not* saved
- by the prologue. Therefore we cannot use them without modifying
- the prologue. There doesn't seem to be any good reason to use
- these as opposed to the windowed registers. */
- tcg_regset_set_reg(s->reserved_regs, TCG_REG_R4);
- tcg_regset_set_reg(s->reserved_regs, TCG_REG_R5);
- tcg_regset_set_reg(s->reserved_regs, TCG_REG_R6);
- tcg_regset_set_reg(s->reserved_regs, TCG_REG_R7);
-}
diff --git a/tcg/mips/tcg-target.h b/tcg/mips/tcg-target.h
index d75cb63ed3..e9558d15bc 100644
--- a/tcg/mips/tcg-target.h
+++ b/tcg/mips/tcg-target.h
@@ -206,4 +206,6 @@ static inline void flush_icache_range(uintptr_t start, uintptr_t stop)
cacheflush ((void *)start, stop-start, ICACHE);
}
+#define TCG_TARGET_DEFAULT_MO (0)
+
#endif
diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h
index 5f4a40a5b4..5a092b038a 100644
--- a/tcg/ppc/tcg-target.h
+++ b/tcg/ppc/tcg-target.h
@@ -125,4 +125,6 @@ extern bool have_isa_3_00;
void flush_icache_range(uintptr_t start, uintptr_t stop);
+#define TCG_TARGET_DEFAULT_MO (0)
+
#endif
diff --git a/tcg/s390/tcg-target.h b/tcg/s390/tcg-target.h
index 957f0c0afe..bedda5edf6 100644
--- a/tcg/s390/tcg-target.h
+++ b/tcg/s390/tcg-target.h
@@ -58,6 +58,8 @@ typedef enum TCGReg {
#define FACILITY_GEN_INST_EXT (1ULL << (63 - 34))
#define FACILITY_LOAD_ON_COND (1ULL << (63 - 45))
#define FACILITY_FAST_BCR_SER FACILITY_LOAD_ON_COND
+#define FACILITY_DISTINCT_OPS FACILITY_LOAD_ON_COND
+#define FACILITY_LOAD_ON_COND2 (1ULL << (63 - 53))
extern uint64_t s390_facilities;
@@ -133,6 +135,8 @@ extern uint64_t s390_facilities;
#define TCG_TARGET_EXTEND_ARGS 1
+#define TCG_TARGET_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD)
+
enum {
TCG_AREG0 = TCG_REG_R10,
};
diff --git a/tcg/s390/tcg-target.inc.c b/tcg/s390/tcg-target.inc.c
index 5d7083e90c..38b9e791ee 100644
--- a/tcg/s390/tcg-target.inc.c
+++ b/tcg/s390/tcg-target.inc.c
@@ -38,12 +38,13 @@
a 32-bit displacement here Just In Case. */
#define USE_LONG_BRANCHES 0
-#define TCG_CT_CONST_MULI 0x100
-#define TCG_CT_CONST_ORI 0x200
-#define TCG_CT_CONST_XORI 0x400
-#define TCG_CT_CONST_CMPI 0x800
-#define TCG_CT_CONST_ADLI 0x1000
-#define TCG_CT_CONST_ZERO 0x2000
+#define TCG_CT_CONST_S16 0x100
+#define TCG_CT_CONST_S32 0x200
+#define TCG_CT_CONST_NN16 0x400
+#define TCG_CT_CONST_NN32 0x800
+#define TCG_CT_CONST_U31 0x1000
+#define TCG_CT_CONST_S33 0x2000
+#define TCG_CT_CONST_ZERO 0x4000
/* Several places within the instruction set 0 means "no register"
rather than TCG_REG_R0. */
@@ -121,6 +122,7 @@ typedef enum S390Opcode {
RIE_CLGIJ = 0xec7d,
RIE_CLRJ = 0xec77,
RIE_CRJ = 0xec76,
+ RIE_LOCGHI = 0xec46,
RIE_RISBG = 0xec55,
RRE_AGR = 0xb908,
@@ -158,6 +160,16 @@ typedef enum S390Opcode {
RRF_LOCR = 0xb9f2,
RRF_LOCGR = 0xb9e2,
+ RRF_NRK = 0xb9f4,
+ RRF_NGRK = 0xb9e4,
+ RRF_ORK = 0xb9f6,
+ RRF_OGRK = 0xb9e6,
+ RRF_SRK = 0xb9f9,
+ RRF_SGRK = 0xb9e9,
+ RRF_SLRK = 0xb9fb,
+ RRF_SLGRK = 0xb9eb,
+ RRF_XRK = 0xb9f7,
+ RRF_XGRK = 0xb9e7,
RR_AR = 0x1a,
RR_ALR = 0x1e,
@@ -178,8 +190,11 @@ typedef enum S390Opcode {
RSY_RLL = 0xeb1d,
RSY_RLLG = 0xeb1c,
RSY_SLLG = 0xeb0d,
+ RSY_SLLK = 0xebdf,
RSY_SRAG = 0xeb0a,
+ RSY_SRAK = 0xebdc,
RSY_SRLG = 0xeb0c,
+ RSY_SRLK = 0xebde,
RS_SLL = 0x89,
RS_SRA = 0x8a,
@@ -386,19 +401,33 @@ static const char *target_parse_constraint(TCGArgConstraint *ct,
tcg_regset_set_reg(ct->u.regs, TCG_REG_R3);
break;
case 'A':
- ct->ct |= TCG_CT_CONST_ADLI;
+ ct->ct |= TCG_CT_CONST_S33;
break;
- case 'K':
- ct->ct |= TCG_CT_CONST_MULI;
+ case 'I':
+ ct->ct |= TCG_CT_CONST_S16;
break;
- case 'O':
- ct->ct |= TCG_CT_CONST_ORI;
+ case 'J':
+ ct->ct |= TCG_CT_CONST_S32;
break;
- case 'X':
- ct->ct |= TCG_CT_CONST_XORI;
+ case 'N':
+ ct->ct |= TCG_CT_CONST_NN16;
+ break;
+ case 'M':
+ ct->ct |= TCG_CT_CONST_NN32;
break;
case 'C':
- ct->ct |= TCG_CT_CONST_CMPI;
+ /* ??? We have no insight here into whether the comparison is
+ signed or unsigned. The COMPARE IMMEDIATE insn uses a 32-bit
+ signed immediate, and the COMPARE LOGICAL IMMEDIATE insn uses
+ a 32-bit unsigned immediate. If we were to use the (semi)
+ obvious "val == (int32_t)val" we would be enabling unsigned
+ comparisons vs very large numbers. The only solution is to
+ take the intersection of the ranges. */
+ /* ??? Another possible solution is to simply lie and allow all
+ constants here and force the out-of-range values into a temp
+ register in tgen_cmp when we have knowledge of the actual
+ comparison code in use. */
+ ct->ct |= TCG_CT_CONST_U31;
break;
case 'Z':
ct->ct |= TCG_CT_CONST_ZERO;
@@ -409,103 +438,6 @@ static const char *target_parse_constraint(TCGArgConstraint *ct,
return ct_str;
}
-/* Immediates to be used with logical OR. This is an optimization only,
- since a full 64-bit immediate OR can always be performed with 4 sequential
- OI[LH][LH] instructions. What we're looking for is immediates that we
- can load efficiently, and the immediate load plus the reg-reg OR is
- smaller than the sequential OI's. */
-
-static int tcg_match_ori(TCGType type, tcg_target_long val)
-{
- if (s390_facilities & FACILITY_EXT_IMM) {
- if (type == TCG_TYPE_I32) {
- /* All 32-bit ORs can be performed with 1 48-bit insn. */
- return 1;
- }
- }
-
- /* Look for negative values. These are best to load with LGHI. */
- if (val < 0) {
- if (val == (int16_t)val) {
- return 0;
- }
- if (s390_facilities & FACILITY_EXT_IMM) {
- if (val == (int32_t)val) {
- return 0;
- }
- }
- }
-
- return 1;
-}
-
-/* Immediates to be used with logical XOR. This is almost, but not quite,
- only an optimization. XOR with immediate is only supported with the
- extended-immediate facility. That said, there are a few patterns for
- which it is better to load the value into a register first. */
-
-static int tcg_match_xori(TCGType type, tcg_target_long val)
-{
- if ((s390_facilities & FACILITY_EXT_IMM) == 0) {
- return 0;
- }
-
- if (type == TCG_TYPE_I32) {
- /* All 32-bit XORs can be performed with 1 48-bit insn. */
- return 1;
- }
-
- /* Look for negative values. These are best to load with LGHI. */
- if (val < 0 && val == (int32_t)val) {
- return 0;
- }
-
- return 1;
-}
-
-/* Imediates to be used with comparisons. */
-
-static int tcg_match_cmpi(TCGType type, tcg_target_long val)
-{
- if (s390_facilities & FACILITY_EXT_IMM) {
- /* The COMPARE IMMEDIATE instruction is available. */
- if (type == TCG_TYPE_I32) {
- /* We have a 32-bit immediate and can compare against anything. */
- return 1;
- } else {
- /* ??? We have no insight here into whether the comparison is
- signed or unsigned. The COMPARE IMMEDIATE insn uses a 32-bit
- signed immediate, and the COMPARE LOGICAL IMMEDIATE insn uses
- a 32-bit unsigned immediate. If we were to use the (semi)
- obvious "val == (int32_t)val" we would be enabling unsigned
- comparisons vs very large numbers. The only solution is to
- take the intersection of the ranges. */
- /* ??? Another possible solution is to simply lie and allow all
- constants here and force the out-of-range values into a temp
- register in tgen_cmp when we have knowledge of the actual
- comparison code in use. */
- return val >= 0 && val <= 0x7fffffff;
- }
- } else {
- /* Only the LOAD AND TEST instruction is available. */
- return val == 0;
- }
-}
-
-/* Immediates to be used with add2/sub2. */
-
-static int tcg_match_add2i(TCGType type, tcg_target_long val)
-{
- if (s390_facilities & FACILITY_EXT_IMM) {
- if (type == TCG_TYPE_I32) {
- return 1;
- } else if (val >= -0xffffffffll && val <= 0xffffffffll) {
- return 1;
- }
- }
- return 0;
-}
-
/* Test if a constant matches the constraint. */
static int tcg_target_const_match(tcg_target_long val, TCGType type,
const TCGArgConstraint *arg_ct)
@@ -521,24 +453,18 @@ static int tcg_target_const_match(tcg_target_long val, TCGType type,
}
/* The following are mutually exclusive. */
- if (ct & TCG_CT_CONST_MULI) {
- /* Immediates that may be used with multiply. If we have the
- general-instruction-extensions, then we have MULTIPLY SINGLE
- IMMEDIATE with a signed 32-bit, otherwise we have only
- MULTIPLY HALFWORD IMMEDIATE, with a signed 16-bit. */
- if (s390_facilities & FACILITY_GEN_INST_EXT) {
- return val == (int32_t)val;
- } else {
- return val == (int16_t)val;
- }
- } else if (ct & TCG_CT_CONST_ADLI) {
- return tcg_match_add2i(type, val);
- } else if (ct & TCG_CT_CONST_ORI) {
- return tcg_match_ori(type, val);
- } else if (ct & TCG_CT_CONST_XORI) {
- return tcg_match_xori(type, val);
- } else if (ct & TCG_CT_CONST_CMPI) {
- return tcg_match_cmpi(type, val);
+ if (ct & TCG_CT_CONST_S16) {
+ return val == (int16_t)val;
+ } else if (ct & TCG_CT_CONST_S32) {
+ return val == (int32_t)val;
+ } else if (ct & TCG_CT_CONST_S33) {
+ return val >= -0xffffffffll && val <= 0xffffffffll;
+ } else if (ct & TCG_CT_CONST_NN16) {
+ return !(val < 0 && val == (int16_t)val);
+ } else if (ct & TCG_CT_CONST_NN32) {
+ return !(val < 0 && val == (int32_t)val);
+ } else if (ct & TCG_CT_CONST_U31) {
+ return val >= 0 && val <= 0x7fffffff;
} else if (ct & TCG_CT_CONST_ZERO) {
return val == 0;
}
@@ -570,6 +496,13 @@ static void tcg_out_insn_RI(TCGContext *s, S390Opcode op, TCGReg r1, int i2)
tcg_out32(s, (op << 16) | (r1 << 20) | (i2 & 0xffff));
}
+static void tcg_out_insn_RIE(TCGContext *s, S390Opcode op, TCGReg r1,
+ int i2, int m3)
+{
+ tcg_out16(s, (op & 0xff00) | (r1 << 4) | m3);
+ tcg_out32(s, (i2 << 16) | (op & 0xff));
+}
+
static void tcg_out_insn_RIL(TCGContext *s, S390Opcode op, TCGReg r1, int i2)
{
tcg_out16(s, op | (r1 << 4));
@@ -1138,11 +1071,33 @@ static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
TCGReg dest, TCGReg c1, TCGArg c2, int c2const)
{
int cc;
+ bool have_loc;
+
+ /* With LOC2, we can always emit the minimum 3 insns. */
+ if (s390_facilities & FACILITY_LOAD_ON_COND2) {
+ /* Emit: d = 0, d = (cc ? 1 : d). */
+ cc = tgen_cmp(s, type, cond, c1, c2, c2const, false);
+ tcg_out_movi(s, TCG_TYPE_I64, dest, 0);
+ tcg_out_insn(s, RIE, LOCGHI, dest, 1, cc);
+ return;
+ }
+ have_loc = (s390_facilities & FACILITY_LOAD_ON_COND) != 0;
+
+ /* For HAVE_LOC, only the paths through GTU/GT/LEU/LE are smaller. */
+ restart:
switch (cond) {
+ case TCG_COND_NE:
+ /* X != 0 is X > 0. */
+ if (c2const && c2 == 0) {
+ cond = TCG_COND_GTU;
+ } else {
+ break;
+ }
+ /* fallthru */
+
case TCG_COND_GTU:
case TCG_COND_GT:
- do_greater:
/* The result of a compare has CC=2 for GT and CC=3 unused.
ADD LOGICAL WITH CARRY considers (CC & 2) the carry bit. */
tgen_cmp(s, type, cond, c1, c2, c2const, true);
@@ -1150,34 +1105,34 @@ static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
tcg_out_insn(s, RRE, ALCGR, dest, dest);
return;
- case TCG_COND_GEU:
- do_geu:
- /* We need "real" carry semantics, so use SUBTRACT LOGICAL
- instead of COMPARE LOGICAL. This needs an extra move. */
- tcg_out_mov(s, type, TCG_TMP0, c1);
- if (c2const) {
- tcg_out_movi(s, TCG_TYPE_I64, dest, 0);
- if (type == TCG_TYPE_I32) {
- tcg_out_insn(s, RIL, SLFI, TCG_TMP0, c2);
- } else {
- tcg_out_insn(s, RIL, SLGFI, TCG_TMP0, c2);
- }
+ case TCG_COND_EQ:
+ /* X == 0 is X <= 0. */
+ if (c2const && c2 == 0) {
+ cond = TCG_COND_LEU;
} else {
- if (type == TCG_TYPE_I32) {
- tcg_out_insn(s, RR, SLR, TCG_TMP0, c2);
- } else {
- tcg_out_insn(s, RRE, SLGR, TCG_TMP0, c2);
- }
- tcg_out_movi(s, TCG_TYPE_I64, dest, 0);
+ break;
}
- tcg_out_insn(s, RRE, ALCGR, dest, dest);
- return;
+ /* fallthru */
case TCG_COND_LEU:
+ case TCG_COND_LE:
+ /* As above, but we're looking for borrow, or !carry.
+ The second insn computes d - d - borrow, or -1 for true
+ and 0 for false. So we must mask to 1 bit afterward. */
+ tgen_cmp(s, type, cond, c1, c2, c2const, true);
+ tcg_out_insn(s, RRE, SLBGR, dest, dest);
+ tgen_andi(s, type, dest, 1);
+ return;
+
+ case TCG_COND_GEU:
case TCG_COND_LTU:
case TCG_COND_LT:
- /* Swap operands so that we can use GEU/GTU/GT. */
+ case TCG_COND_GE:
+ /* Swap operands so that we can use LEU/GTU/GT/LE. */
if (c2const) {
+ if (have_loc) {
+ break;
+ }
tcg_out_movi(s, type, TCG_TMP0, c2);
c2 = c1;
c2const = 0;
@@ -1187,37 +1142,15 @@ static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
c1 = c2;
c2 = t;
}
- if (cond == TCG_COND_LEU) {
- goto do_geu;
- }
cond = tcg_swap_cond(cond);
- goto do_greater;
-
- case TCG_COND_NE:
- /* X != 0 is X > 0. */
- if (c2const && c2 == 0) {
- cond = TCG_COND_GTU;
- goto do_greater;
- }
- break;
-
- case TCG_COND_EQ:
- /* X == 0 is X <= 0 is 0 >= X. */
- if (c2const && c2 == 0) {
- tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, 0);
- c2 = c1;
- c2const = 0;
- c1 = TCG_TMP0;
- goto do_geu;
- }
- break;
+ goto restart;
default:
- break;
+ g_assert_not_reached();
}
cc = tgen_cmp(s, type, cond, c1, c2, c2const, false);
- if (s390_facilities & FACILITY_LOAD_ON_COND) {
+ if (have_loc) {
/* Emit: d = 0, t = 1, d = (cc ? t : d). */
tcg_out_movi(s, TCG_TYPE_I64, dest, 0);
tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, 1);
@@ -1231,19 +1164,24 @@ static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
}
static void tgen_movcond(TCGContext *s, TCGType type, TCGCond c, TCGReg dest,
- TCGReg c1, TCGArg c2, int c2const, TCGReg r3)
+ TCGReg c1, TCGArg c2, int c2const,
+ TCGArg v3, int v3const)
{
int cc;
if (s390_facilities & FACILITY_LOAD_ON_COND) {
cc = tgen_cmp(s, type, c, c1, c2, c2const, false);
- tcg_out_insn(s, RRF, LOCGR, dest, r3, cc);
+ if (v3const) {
+ tcg_out_insn(s, RIE, LOCGHI, dest, v3, cc);
+ } else {
+ tcg_out_insn(s, RRF, LOCGR, dest, v3, cc);
+ }
} else {
c = tcg_invert_cond(c);
cc = tgen_cmp(s, type, c, c1, c2, c2const, false);
/* Emit: if (cc) goto over; dest = r3; over: */
tcg_out_insn(s, RI, BRC, cc, (4 + 4) >> 1);
- tcg_out_insn(s, RRE, LGR, dest, r3);
+ tcg_out_insn(s, RRE, LGR, dest, v3);
}
}
@@ -1736,7 +1674,7 @@ static void tcg_out_qemu_st(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
const TCGArg *args, const int *const_args)
{
- S390Opcode op;
+ S390Opcode op, op2;
TCGArg a0, a1, a2;
switch (opc) {
@@ -1841,29 +1779,44 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
if (const_args[2]) {
a2 = -a2;
goto do_addi_32;
+ } else if (a0 == a1) {
+ tcg_out_insn(s, RR, SR, a0, a2);
+ } else {
+ tcg_out_insn(s, RRF, SRK, a0, a1, a2);
}
- tcg_out_insn(s, RR, SR, args[0], args[2]);
break;
case INDEX_op_and_i32:
+ a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
if (const_args[2]) {
- tgen_andi(s, TCG_TYPE_I32, args[0], args[2]);
+ tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
+ tgen_andi(s, TCG_TYPE_I32, a0, a2);
+ } else if (a0 == a1) {
+ tcg_out_insn(s, RR, NR, a0, a2);
} else {
- tcg_out_insn(s, RR, NR, args[0], args[2]);
+ tcg_out_insn(s, RRF, NRK, a0, a1, a2);
}
break;
case INDEX_op_or_i32:
+ a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
if (const_args[2]) {
- tgen64_ori(s, args[0], args[2] & 0xffffffff);
+ tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
+ tgen64_ori(s, a0, a2);
+ } else if (a0 == a1) {
+ tcg_out_insn(s, RR, OR, a0, a2);
} else {
- tcg_out_insn(s, RR, OR, args[0], args[2]);
+ tcg_out_insn(s, RRF, ORK, a0, a1, a2);
}
break;
case INDEX_op_xor_i32:
+ a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
if (const_args[2]) {
- tgen64_xori(s, args[0], args[2] & 0xffffffff);
- } else {
+ tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
+ tgen64_xori(s, a0, a2);
+ } else if (a0 == a1) {
tcg_out_insn(s, RR, XR, args[0], args[2]);
+ } else {
+ tcg_out_insn(s, RRF, XRK, a0, a1, a2);
}
break;
@@ -1892,18 +1845,31 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_shl_i32:
op = RS_SLL;
+ op2 = RSY_SLLK;
do_shift32:
- if (const_args[2]) {
- tcg_out_sh32(s, op, args[0], TCG_REG_NONE, args[2]);
+ a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
+ if (a0 == a1) {
+ if (const_args[2]) {
+ tcg_out_sh32(s, op, a0, TCG_REG_NONE, a2);
+ } else {
+ tcg_out_sh32(s, op, a0, a2, 0);
+ }
} else {
- tcg_out_sh32(s, op, args[0], args[2], 0);
+ /* Using tcg_out_sh64 here for the format; it is a 32-bit shift. */
+ if (const_args[2]) {
+ tcg_out_sh64(s, op2, a0, a1, TCG_REG_NONE, a2);
+ } else {
+ tcg_out_sh64(s, op2, a0, a1, a2, 0);
+ }
}
break;
case INDEX_op_shr_i32:
op = RS_SRL;
+ op2 = RSY_SRLK;
goto do_shift32;
case INDEX_op_sar_i32:
op = RS_SRA;
+ op2 = RSY_SRAK;
goto do_shift32;
case INDEX_op_rotl_i32:
@@ -1978,7 +1944,7 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
break;
case INDEX_op_movcond_i32:
tgen_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1],
- args[2], const_args[2], args[3]);
+ args[2], const_args[2], args[3], const_args[3]);
break;
case INDEX_op_qemu_ld_i32:
@@ -2045,30 +2011,44 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
if (const_args[2]) {
a2 = -a2;
goto do_addi_64;
+ } else if (a0 == a1) {
+ tcg_out_insn(s, RRE, SGR, a0, a2);
} else {
- tcg_out_insn(s, RRE, SGR, args[0], args[2]);
+ tcg_out_insn(s, RRF, SGRK, a0, a1, a2);
}
break;
case INDEX_op_and_i64:
+ a0 = args[0], a1 = args[1], a2 = args[2];
if (const_args[2]) {
+ tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
tgen_andi(s, TCG_TYPE_I64, args[0], args[2]);
- } else {
+ } else if (a0 == a1) {
tcg_out_insn(s, RRE, NGR, args[0], args[2]);
+ } else {
+ tcg_out_insn(s, RRF, NGRK, a0, a1, a2);
}
break;
case INDEX_op_or_i64:
+ a0 = args[0], a1 = args[1], a2 = args[2];
if (const_args[2]) {
- tgen64_ori(s, args[0], args[2]);
+ tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
+ tgen64_ori(s, a0, a2);
+ } else if (a0 == a1) {
+ tcg_out_insn(s, RRE, OGR, a0, a2);
} else {
- tcg_out_insn(s, RRE, OGR, args[0], args[2]);
+ tcg_out_insn(s, RRF, OGRK, a0, a1, a2);
}
break;
case INDEX_op_xor_i64:
+ a0 = args[0], a1 = args[1], a2 = args[2];
if (const_args[2]) {
- tgen64_xori(s, args[0], args[2]);
+ tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
+ tgen64_xori(s, a0, a2);
+ } else if (a0 == a1) {
+ tcg_out_insn(s, RRE, XGR, a0, a2);
} else {
- tcg_out_insn(s, RRE, XGR, args[0], args[2]);
+ tcg_out_insn(s, RRF, XGRK, a0, a1, a2);
}
break;
@@ -2197,7 +2177,7 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
break;
case INDEX_op_movcond_i64:
tgen_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1],
- args[2], const_args[2], args[3]);
+ args[2], const_args[2], args[3], const_args[3]);
break;
OP_32_64(deposit):
@@ -2246,134 +2226,210 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
}
}
-static const TCGTargetOpDef s390_op_defs[] = {
- { INDEX_op_exit_tb, { } },
- { INDEX_op_goto_tb, { } },
- { INDEX_op_br, { } },
- { INDEX_op_goto_ptr, { "r" } },
-
- { INDEX_op_ld8u_i32, { "r", "r" } },
- { INDEX_op_ld8s_i32, { "r", "r" } },
- { INDEX_op_ld16u_i32, { "r", "r" } },
- { INDEX_op_ld16s_i32, { "r", "r" } },
- { INDEX_op_ld_i32, { "r", "r" } },
- { INDEX_op_st8_i32, { "r", "r" } },
- { INDEX_op_st16_i32, { "r", "r" } },
- { INDEX_op_st_i32, { "r", "r" } },
-
- { INDEX_op_add_i32, { "r", "r", "ri" } },
- { INDEX_op_sub_i32, { "r", "0", "ri" } },
- { INDEX_op_mul_i32, { "r", "0", "rK" } },
-
- { INDEX_op_div2_i32, { "b", "a", "0", "1", "r" } },
- { INDEX_op_divu2_i32, { "b", "a", "0", "1", "r" } },
-
- { INDEX_op_and_i32, { "r", "0", "ri" } },
- { INDEX_op_or_i32, { "r", "0", "rO" } },
- { INDEX_op_xor_i32, { "r", "0", "rX" } },
-
- { INDEX_op_neg_i32, { "r", "r" } },
-
- { INDEX_op_shl_i32, { "r", "0", "ri" } },
- { INDEX_op_shr_i32, { "r", "0", "ri" } },
- { INDEX_op_sar_i32, { "r", "0", "ri" } },
-
- { INDEX_op_rotl_i32, { "r", "r", "ri" } },
- { INDEX_op_rotr_i32, { "r", "r", "ri" } },
-
- { INDEX_op_ext8s_i32, { "r", "r" } },
- { INDEX_op_ext8u_i32, { "r", "r" } },
- { INDEX_op_ext16s_i32, { "r", "r" } },
- { INDEX_op_ext16u_i32, { "r", "r" } },
-
- { INDEX_op_bswap16_i32, { "r", "r" } },
- { INDEX_op_bswap32_i32, { "r", "r" } },
-
- { INDEX_op_add2_i32, { "r", "r", "0", "1", "rA", "r" } },
- { INDEX_op_sub2_i32, { "r", "r", "0", "1", "rA", "r" } },
-
- { INDEX_op_brcond_i32, { "r", "rC" } },
- { INDEX_op_setcond_i32, { "r", "r", "rC" } },
- { INDEX_op_movcond_i32, { "r", "r", "rC", "r", "0" } },
- { INDEX_op_deposit_i32, { "r", "rZ", "r" } },
- { INDEX_op_extract_i32, { "r", "r" } },
-
- { INDEX_op_qemu_ld_i32, { "r", "L" } },
- { INDEX_op_qemu_ld_i64, { "r", "L" } },
- { INDEX_op_qemu_st_i32, { "L", "L" } },
- { INDEX_op_qemu_st_i64, { "L", "L" } },
-
- { INDEX_op_ld8u_i64, { "r", "r" } },
- { INDEX_op_ld8s_i64, { "r", "r" } },
- { INDEX_op_ld16u_i64, { "r", "r" } },
- { INDEX_op_ld16s_i64, { "r", "r" } },
- { INDEX_op_ld32u_i64, { "r", "r" } },
- { INDEX_op_ld32s_i64, { "r", "r" } },
- { INDEX_op_ld_i64, { "r", "r" } },
-
- { INDEX_op_st8_i64, { "r", "r" } },
- { INDEX_op_st16_i64, { "r", "r" } },
- { INDEX_op_st32_i64, { "r", "r" } },
- { INDEX_op_st_i64, { "r", "r" } },
-
- { INDEX_op_add_i64, { "r", "r", "ri" } },
- { INDEX_op_sub_i64, { "r", "0", "ri" } },
- { INDEX_op_mul_i64, { "r", "0", "rK" } },
-
- { INDEX_op_div2_i64, { "b", "a", "0", "1", "r" } },
- { INDEX_op_divu2_i64, { "b", "a", "0", "1", "r" } },
- { INDEX_op_mulu2_i64, { "b", "a", "0", "r" } },
-
- { INDEX_op_and_i64, { "r", "0", "ri" } },
- { INDEX_op_or_i64, { "r", "0", "rO" } },
- { INDEX_op_xor_i64, { "r", "0", "rX" } },
-
- { INDEX_op_neg_i64, { "r", "r" } },
-
- { INDEX_op_shl_i64, { "r", "r", "ri" } },
- { INDEX_op_shr_i64, { "r", "r", "ri" } },
- { INDEX_op_sar_i64, { "r", "r", "ri" } },
-
- { INDEX_op_rotl_i64, { "r", "r", "ri" } },
- { INDEX_op_rotr_i64, { "r", "r", "ri" } },
-
- { INDEX_op_ext8s_i64, { "r", "r" } },
- { INDEX_op_ext8u_i64, { "r", "r" } },
- { INDEX_op_ext16s_i64, { "r", "r" } },
- { INDEX_op_ext16u_i64, { "r", "r" } },
- { INDEX_op_ext32s_i64, { "r", "r" } },
- { INDEX_op_ext32u_i64, { "r", "r" } },
-
- { INDEX_op_ext_i32_i64, { "r", "r" } },
- { INDEX_op_extu_i32_i64, { "r", "r" } },
-
- { INDEX_op_bswap16_i64, { "r", "r" } },
- { INDEX_op_bswap32_i64, { "r", "r" } },
- { INDEX_op_bswap64_i64, { "r", "r" } },
-
- { INDEX_op_clz_i64, { "r", "r", "ri" } },
-
- { INDEX_op_add2_i64, { "r", "r", "0", "1", "rA", "r" } },
- { INDEX_op_sub2_i64, { "r", "r", "0", "1", "rA", "r" } },
-
- { INDEX_op_brcond_i64, { "r", "rC" } },
- { INDEX_op_setcond_i64, { "r", "r", "rC" } },
- { INDEX_op_movcond_i64, { "r", "r", "rC", "r", "0" } },
- { INDEX_op_deposit_i64, { "r", "0", "r" } },
- { INDEX_op_extract_i64, { "r", "r" } },
-
- { INDEX_op_mb, { } },
- { -1 },
-};
-
static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
{
- int i, n = ARRAY_SIZE(s390_op_defs);
+ static const TCGTargetOpDef r = { .args_ct_str = { "r" } };
+ static const TCGTargetOpDef r_r = { .args_ct_str = { "r", "r" } };
+ static const TCGTargetOpDef r_L = { .args_ct_str = { "r", "L" } };
+ static const TCGTargetOpDef L_L = { .args_ct_str = { "L", "L" } };
+ static const TCGTargetOpDef r_ri = { .args_ct_str = { "r", "ri" } };
+ static const TCGTargetOpDef r_rC = { .args_ct_str = { "r", "rC" } };
+ static const TCGTargetOpDef r_rZ = { .args_ct_str = { "r", "rZ" } };
+ static const TCGTargetOpDef r_r_ri = { .args_ct_str = { "r", "r", "ri" } };
+ static const TCGTargetOpDef r_r_rM = { .args_ct_str = { "r", "r", "rM" } };
+ static const TCGTargetOpDef r_0_r = { .args_ct_str = { "r", "0", "r" } };
+ static const TCGTargetOpDef r_0_ri = { .args_ct_str = { "r", "0", "ri" } };
+ static const TCGTargetOpDef r_0_rI = { .args_ct_str = { "r", "0", "rI" } };
+ static const TCGTargetOpDef r_0_rJ = { .args_ct_str = { "r", "0", "rJ" } };
+ static const TCGTargetOpDef r_0_rN = { .args_ct_str = { "r", "0", "rN" } };
+ static const TCGTargetOpDef r_0_rM = { .args_ct_str = { "r", "0", "rM" } };
+ static const TCGTargetOpDef a2_r
+ = { .args_ct_str = { "r", "r", "0", "1", "r", "r" } };
+ static const TCGTargetOpDef a2_ri
+ = { .args_ct_str = { "r", "r", "0", "1", "ri", "r" } };
+ static const TCGTargetOpDef a2_rA
+ = { .args_ct_str = { "r", "r", "0", "1", "rA", "r" } };
+
+ switch (op) {
+ case INDEX_op_goto_ptr:
+ return &r;
+
+ case INDEX_op_ld8u_i32:
+ case INDEX_op_ld8u_i64:
+ case INDEX_op_ld8s_i32:
+ case INDEX_op_ld8s_i64:
+ case INDEX_op_ld16u_i32:
+ case INDEX_op_ld16u_i64:
+ case INDEX_op_ld16s_i32:
+ case INDEX_op_ld16s_i64:
+ case INDEX_op_ld_i32:
+ case INDEX_op_ld32u_i64:
+ case INDEX_op_ld32s_i64:
+ case INDEX_op_ld_i64:
+ case INDEX_op_st8_i32:
+ case INDEX_op_st8_i64:
+ case INDEX_op_st16_i32:
+ case INDEX_op_st16_i64:
+ case INDEX_op_st_i32:
+ case INDEX_op_st32_i64:
+ case INDEX_op_st_i64:
+ return &r_r;
+
+ case INDEX_op_add_i32:
+ case INDEX_op_add_i64:
+ return &r_r_ri;
+ case INDEX_op_sub_i32:
+ case INDEX_op_sub_i64:
+ return (s390_facilities & FACILITY_DISTINCT_OPS ? &r_r_ri : &r_0_ri);
+
+ case INDEX_op_mul_i32:
+ /* If we have the general-instruction-extensions, then we have
+ MULTIPLY SINGLE IMMEDIATE with a signed 32-bit, otherwise we
+ have only MULTIPLY HALFWORD IMMEDIATE, with a signed 16-bit. */
+ return (s390_facilities & FACILITY_GEN_INST_EXT ? &r_0_ri : &r_0_rI);
+ case INDEX_op_mul_i64:
+ return (s390_facilities & FACILITY_GEN_INST_EXT ? &r_0_rJ : &r_0_rI);
+
+ case INDEX_op_or_i32:
+        /* The use of [iNM] constraints is an optimization only, since a full
+ 64-bit immediate OR can always be performed with 4 sequential
+ OI[LH][LH] instructions. By rejecting certain negative ranges,
+ the immediate load plus the reg-reg OR is smaller. */
+ return (s390_facilities & FACILITY_EXT_IMM
+ ? (s390_facilities & FACILITY_DISTINCT_OPS ? &r_r_ri : &r_0_ri)
+ : &r_0_rN);
+ case INDEX_op_or_i64:
+ return (s390_facilities & FACILITY_EXT_IMM
+ ? (s390_facilities & FACILITY_DISTINCT_OPS ? &r_r_rM : &r_0_rM)
+ : &r_0_rN);
+
+ case INDEX_op_xor_i32:
+ /* Without EXT_IMM, no immediates are supported. Otherwise,
+ rejecting certain negative ranges leads to smaller code. */
+ return (s390_facilities & FACILITY_EXT_IMM
+ ? (s390_facilities & FACILITY_DISTINCT_OPS ? &r_r_ri : &r_0_ri)
+ : &r_0_r);
+ case INDEX_op_xor_i64:
+ return (s390_facilities & FACILITY_EXT_IMM
+ ? (s390_facilities & FACILITY_DISTINCT_OPS ? &r_r_rM : &r_0_rM)
+ : &r_0_r);
+
+ case INDEX_op_and_i32:
+ case INDEX_op_and_i64:
+ return (s390_facilities & FACILITY_DISTINCT_OPS ? &r_r_ri : &r_0_ri);
+
+ case INDEX_op_shl_i32:
+ case INDEX_op_shr_i32:
+ case INDEX_op_sar_i32:
+ return (s390_facilities & FACILITY_DISTINCT_OPS ? &r_r_ri : &r_0_ri);
+
+ case INDEX_op_shl_i64:
+ case INDEX_op_shr_i64:
+ case INDEX_op_sar_i64:
+ return &r_r_ri;
+
+ case INDEX_op_rotl_i32:
+ case INDEX_op_rotl_i64:
+ case INDEX_op_rotr_i32:
+ case INDEX_op_rotr_i64:
+ return &r_r_ri;
+
+ case INDEX_op_brcond_i32:
+ /* Without EXT_IMM, only the LOAD AND TEST insn is available. */
+ return (s390_facilities & FACILITY_EXT_IMM ? &r_ri : &r_rZ);
+ case INDEX_op_brcond_i64:
+ return (s390_facilities & FACILITY_EXT_IMM ? &r_rC : &r_rZ);
+
+ case INDEX_op_bswap16_i32:
+ case INDEX_op_bswap16_i64:
+ case INDEX_op_bswap32_i32:
+ case INDEX_op_bswap32_i64:
+ case INDEX_op_bswap64_i64:
+ case INDEX_op_neg_i32:
+ case INDEX_op_neg_i64:
+ case INDEX_op_ext8s_i32:
+ case INDEX_op_ext8s_i64:
+ case INDEX_op_ext8u_i32:
+ case INDEX_op_ext8u_i64:
+ case INDEX_op_ext16s_i32:
+ case INDEX_op_ext16s_i64:
+ case INDEX_op_ext16u_i32:
+ case INDEX_op_ext16u_i64:
+ case INDEX_op_ext32s_i64:
+ case INDEX_op_ext32u_i64:
+ case INDEX_op_ext_i32_i64:
+ case INDEX_op_extu_i32_i64:
+ case INDEX_op_extract_i32:
+ case INDEX_op_extract_i64:
+ return &r_r;
+
+ case INDEX_op_clz_i64:
+ return &r_r_ri;
- for (i = 0; i < n; ++i) {
- if (s390_op_defs[i].op == op) {
- return &s390_op_defs[i];
+ case INDEX_op_qemu_ld_i32:
+ case INDEX_op_qemu_ld_i64:
+ return &r_L;
+ case INDEX_op_qemu_st_i64:
+ case INDEX_op_qemu_st_i32:
+ return &L_L;
+
+ case INDEX_op_deposit_i32:
+ case INDEX_op_deposit_i64:
+ {
+ static const TCGTargetOpDef dep
+ = { .args_ct_str = { "r", "rZ", "r" } };
+ return &dep;
+ }
+ case INDEX_op_setcond_i32:
+ case INDEX_op_setcond_i64:
+ {
+ /* Without EXT_IMM, only the LOAD AND TEST insn is available. */
+ static const TCGTargetOpDef setc_z
+ = { .args_ct_str = { "r", "r", "rZ" } };
+ static const TCGTargetOpDef setc_c
+ = { .args_ct_str = { "r", "r", "rC" } };
+ return (s390_facilities & FACILITY_EXT_IMM ? &setc_c : &setc_z);
+ }
+ case INDEX_op_movcond_i32:
+ case INDEX_op_movcond_i64:
+ {
+ /* Without EXT_IMM, only the LOAD AND TEST insn is available. */
+ static const TCGTargetOpDef movc_z
+ = { .args_ct_str = { "r", "r", "rZ", "r", "0" } };
+ static const TCGTargetOpDef movc_c
+ = { .args_ct_str = { "r", "r", "rC", "r", "0" } };
+ static const TCGTargetOpDef movc_l
+ = { .args_ct_str = { "r", "r", "rC", "rI", "0" } };
+ return (s390_facilities & FACILITY_EXT_IMM
+ ? (s390_facilities & FACILITY_LOAD_ON_COND2
+ ? &movc_l : &movc_c)
+ : &movc_z);
+ }
+ case INDEX_op_div2_i32:
+ case INDEX_op_div2_i64:
+ case INDEX_op_divu2_i32:
+ case INDEX_op_divu2_i64:
+ {
+ static const TCGTargetOpDef div2
+ = { .args_ct_str = { "b", "a", "0", "1", "r" } };
+ return &div2;
}
+ case INDEX_op_mulu2_i64:
+ {
+ static const TCGTargetOpDef mul2
+ = { .args_ct_str = { "b", "a", "0", "r" } };
+ return &mul2;
+ }
+
+ case INDEX_op_add2_i32:
+ case INDEX_op_sub2_i32:
+ return (s390_facilities & FACILITY_EXT_IMM ? &a2_ri : &a2_r);
+ case INDEX_op_add2_i64:
+ case INDEX_op_sub2_i64:
+ return (s390_facilities & FACILITY_EXT_IMM ? &a2_rA : &a2_r);
+
+ default:
+ break;
}
return NULL;
}
diff --git a/tcg/sparc/tcg-target.h b/tcg/sparc/tcg-target.h
index 854a0afd70..4515c9ab48 100644
--- a/tcg/sparc/tcg-target.h
+++ b/tcg/sparc/tcg-target.h
@@ -162,6 +162,8 @@ extern bool use_vis3_instructions;
#define TCG_AREG0 TCG_REG_I0
+#define TCG_TARGET_DEFAULT_MO (0)
+
static inline void flush_icache_range(uintptr_t start, uintptr_t stop)
{
uintptr_t p;
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index 87f673ef49..688d91755b 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -28,6 +28,7 @@
#include "exec/exec-all.h"
#include "tcg.h"
#include "tcg-op.h"
+#include "tcg-mo.h"
#include "trace-tcg.h"
#include "trace/mem.h"
@@ -2662,8 +2663,20 @@ static void gen_ldst_i64(TCGOpcode opc, TCGv_i64 val, TCGv addr,
#endif
}
+static void tcg_gen_req_mo(TCGBar type)
+{
+#ifdef TCG_GUEST_DEFAULT_MO
+ type &= TCG_GUEST_DEFAULT_MO;
+#endif
+ type &= ~TCG_TARGET_DEFAULT_MO;
+ if (type) {
+ tcg_gen_mb(type | TCG_BAR_SC);
+ }
+}
+
void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop)
{
+ tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
memop = tcg_canonicalize_memop(memop, 0, 0);
trace_guest_mem_before_tcg(tcg_ctx.cpu, tcg_ctx.tcg_env,
addr, trace_mem_get_info(memop, 0));
@@ -2672,6 +2685,7 @@ void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop)
void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop)
{
+ tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
memop = tcg_canonicalize_memop(memop, 0, 1);
trace_guest_mem_before_tcg(tcg_ctx.cpu, tcg_ctx.tcg_env,
addr, trace_mem_get_info(memop, 1));
@@ -2680,6 +2694,7 @@ void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop)
void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop)
{
+ tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
tcg_gen_qemu_ld_i32(TCGV_LOW(val), addr, idx, memop);
if (memop & MO_SIGN) {
@@ -2698,6 +2713,7 @@ void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop)
void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop)
{
+ tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
tcg_gen_qemu_st_i32(TCGV_LOW(val), addr, idx, memop);
return;