diff options
-rw-r--r-- | Makefile.target | 4 | ||||
-rwxr-xr-x | configure | 34 | ||||
-rw-r--r-- | dis-asm.h | 1 | ||||
-rw-r--r-- | disas.c | 4 | ||||
-rw-r--r-- | dyngen-exec.h | 12 | ||||
-rw-r--r-- | exec-all.h | 22 | ||||
-rw-r--r-- | tcg/ppc/tcg-target.h | 4 | ||||
-rw-r--r-- | tcg/tcg.c | 1 | ||||
-rw-r--r-- | tcg/tcg.h | 12 | ||||
-rw-r--r-- | tcg/tci/README | 130 | ||||
-rw-r--r-- | tcg/tci/tcg-target.c | 906 | ||||
-rw-r--r-- | tcg/tci/tcg-target.h | 160 | ||||
-rw-r--r-- | tci-dis.c | 59 | ||||
-rw-r--r-- | tci.c | 1208 |
14 files changed, 2545 insertions, 12 deletions
diff --git a/Makefile.target b/Makefile.target index 0c86bc5a6d..a111521dbf 100644 --- a/Makefile.target +++ b/Makefile.target @@ -69,6 +69,7 @@ all: $(PROGS) stap # cpu emulator library libobj-y = exec.o translate-all.o cpu-exec.o translate.o libobj-y += tcg/tcg.o tcg/optimize.o +libobj-$(CONFIG_TCG_INTERPRETER) += tci.o libobj-y += fpu/softfloat.o libobj-y += op_helper.o helper.o ifeq ($(TARGET_BASE_ARCH), i386) @@ -85,6 +86,9 @@ libobj-$(TARGET_SPARC) += int32_helper.o libobj-$(TARGET_SPARC64) += int64_helper.o libobj-y += disas.o +libobj-$(CONFIG_TCI_DIS) += tci-dis.o + +tci-dis.o: QEMU_CFLAGS += -I$(SRC_PATH)/tcg -I$(SRC_PATH)/tcg/tci $(libobj-y): $(GENERATED_HEADERS) @@ -138,6 +138,7 @@ debug_tcg="no" debug_mon="no" debug="no" strip_opt="yes" +tcg_interpreter="no" bigendian="no" mingw32="no" EXESUF="" @@ -654,6 +655,10 @@ for opt do ;; --enable-kvm) kvm="yes" ;; + --disable-tcg-interpreter) tcg_interpreter="no" + ;; + --enable-tcg-interpreter) tcg_interpreter="yes" + ;; --disable-spice) spice="no" ;; --enable-spice) spice="yes" @@ -1009,6 +1014,7 @@ echo " --enable-bluez enable bluez stack connectivity" echo " --disable-slirp disable SLIRP userspace network connectivity" echo " --disable-kvm disable KVM acceleration support" echo " --enable-kvm enable KVM acceleration support" +echo " --enable-tcg-interpreter enable TCG with bytecode interpreter (TCI)" echo " --disable-nptl disable usermode NPTL support" echo " --enable-nptl enable usermode NPTL support" echo " --enable-system enable all system emulation targets" @@ -2755,6 +2761,7 @@ echo "Linux AIO support $linux_aio" echo "ATTR/XATTR support $attr" echo "Install blobs $blobs" echo "KVM support $kvm" +echo "TCG interpreter $tcg_interpreter" echo "fdt support $fdt" echo "preadv support $preadv" echo "fdatasync $fdatasync" @@ -2803,6 +2810,15 @@ case "$cpu" in armv4b|armv4l) ARCH=arm ;; + *) + if test "$tcg_interpreter" = "yes" ; then + echo "Unsupported CPU = $cpu, will use TCG with TCI (experimental)" + 
ARCH=tci + else + echo "Unsupported CPU = $cpu, try --enable-tcg-interpreter" + exit 1 + fi + ;; esac echo "ARCH=$ARCH" >> $config_host_mak if test "$debug_tcg" = "yes" ; then @@ -3036,6 +3052,9 @@ fi if test "$signalfd" = "yes" ; then echo "CONFIG_SIGNALFD=y" >> $config_host_mak fi +if test "$tcg_interpreter" = "yes" ; then + echo "CONFIG_TCG_INTERPRETER=y" >> $config_host_mak +fi if test "$need_offsetof" = "yes" ; then echo "CONFIG_NEED_OFFSETOF=y" >> $config_host_mak fi @@ -3509,7 +3528,9 @@ cflags="" includes="" ldflags="" -if test "$ARCH" = "sparc64" ; then +if test "$tcg_interpreter" = "yes"; then + includes="-I\$(SRC_PATH)/tcg/tci $includes" +elif test "$ARCH" = "sparc64" ; then includes="-I\$(SRC_PATH)/tcg/sparc $includes" elif test "$ARCH" = "s390x" ; then includes="-I\$(SRC_PATH)/tcg/s390 $includes" @@ -3586,6 +3607,10 @@ for i in $ARCH $TARGET_BASE_ARCH ; do ;; esac done +if test "$tcg_interpreter" = "yes" ; then + echo "CONFIG_TCI_DIS=y" >> $config_target_mak + echo "CONFIG_TCI_DIS=y" >> $libdis_config_mak +fi case "$ARCH" in alpha) @@ -3632,7 +3657,12 @@ if test "$gprof" = "yes" ; then fi fi -linker_script="-Wl,-T../config-host.ld -Wl,-T,\$(SRC_PATH)/\$(ARCH).ld" +if test "$ARCH" = "tci"; then + linker_script="" +else + linker_script="-Wl,-T../config-host.ld -Wl,-T,\$(SRC_PATH)/\$(ARCH).ld" +fi + if test "$target_linux_user" = "yes" -o "$target_bsd_user" = "yes" ; then case "$ARCH" in sparc) @@ -365,6 +365,7 @@ typedef struct disassemble_info { target address. Return number of bytes processed. 
*/ typedef int (*disassembler_ftype) (bfd_vma, disassemble_info *); +int print_insn_tci(bfd_vma, disassemble_info*); int print_insn_big_mips (bfd_vma, disassemble_info*); int print_insn_little_mips (bfd_vma, disassemble_info*); int print_insn_i386 (bfd_vma, disassemble_info*); @@ -273,7 +273,9 @@ void disas(FILE *out, void *code, unsigned long size) #else disasm_info.endian = BFD_ENDIAN_LITTLE; #endif -#if defined(__i386__) +#if defined(CONFIG_TCG_INTERPRETER) + print_insn = print_insn_tci; +#elif defined(__i386__) disasm_info.mach = bfd_mach_i386_i386; print_insn = print_insn_i386; #elif defined(__x86_64__) diff --git a/dyngen-exec.h b/dyngen-exec.h index fbde29ea01..3544372a65 100644 --- a/dyngen-exec.h +++ b/dyngen-exec.h @@ -19,7 +19,12 @@ #if !defined(__DYNGEN_EXEC_H__) #define __DYNGEN_EXEC_H__ -#if defined(__i386__) +#if defined(CONFIG_TCG_INTERPRETER) +/* The TCG interpreter does not need a special register AREG0, + * but it is possible to use one by defining AREG0. + * On i386, register edi seems to work. */ +/* Run without special register AREG0 or use a value defined elsewhere. */ +#elif defined(__i386__) #define AREG0 "ebp" #elif defined(__x86_64__) #define AREG0 "r14" @@ -55,6 +60,11 @@ #error unsupported CPU #endif +#if defined(AREG0) register CPUState *env asm(AREG0); +#else +/* TODO: Try env = cpu_single_env. 
*/ +extern CPUState *env; +#endif #endif /* !defined(__DYNGEN_EXEC_H__) */ diff --git a/exec-all.h b/exec-all.h index 85a37bf1ed..c211242bab 100644 --- a/exec-all.h +++ b/exec-all.h @@ -122,6 +122,8 @@ void tlb_set_page(CPUState *env, target_ulong vaddr, #if defined(_ARCH_PPC) || defined(__x86_64__) || defined(__arm__) || defined(__i386__) #define USE_DIRECT_JUMP +#elif defined(CONFIG_TCG_INTERPRETER) +#define USE_DIRECT_JUMP #endif struct TranslationBlock { @@ -189,7 +191,14 @@ extern TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE]; #if defined(USE_DIRECT_JUMP) -#if defined(_ARCH_PPC) +#if defined(CONFIG_TCG_INTERPRETER) +static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr) +{ + /* patch the branch destination */ + *(uint32_t *)jmp_addr = addr - (jmp_addr + 4); + /* no need to flush icache explicitly */ +} +#elif defined(_ARCH_PPC) void ppc_tb_set_jmp_target(unsigned long jmp_addr, unsigned long addr); #define tb_set_jmp_target1 ppc_tb_set_jmp_target #elif defined(__i386__) || defined(__x86_64__) @@ -223,6 +232,8 @@ static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr __asm __volatile__ ("swi 0x9f0002" : : "r" (_beg), "r" (_end), "r" (_flg)); #endif } +#else +#error tb_set_jmp_target1 is missing #endif static inline void tb_set_jmp_target(TranslationBlock *tb, @@ -269,7 +280,14 @@ extern int tb_invalidated_flag; /* The return address may point to the start of the next instruction. Subtracting one gets us the call instruction itself. */ -#if defined(__s390__) && !defined(__s390x__) +#if defined(CONFIG_TCG_INTERPRETER) +/* Alpha and SH4 user mode emulations and Softmmu call GETPC(). + For all others, GETPC remains undefined (which makes TCI a little faster).
*/ +# if defined(CONFIG_SOFTMMU) || defined(TARGET_ALPHA) || defined(TARGET_SH4) +extern void *tci_tb_ptr; +# define GETPC() tci_tb_ptr +# endif +#elif defined(__s390__) && !defined(__s390x__) # define GETPC() ((void*)(((unsigned long)__builtin_return_address(0) & 0x7fffffffUL) - 1)) #elif defined(__arm__) /* Thumb return addresses have the low bit set, so we need to subtract two. diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h index 5c2d61294f..25a6ea4edc 100644 --- a/tcg/ppc/tcg-target.h +++ b/tcg/ppc/tcg-target.h @@ -96,3 +96,7 @@ enum { #define TCG_AREG0 TCG_REG_R27 #define TCG_TARGET_HAS_GUEST_BASE + +#define tcg_qemu_tb_exec(env, tb_ptr) \ + ((long REGPARM __attribute__ ((longcall)) \ + (*)(void *, void *))code_gen_prologue)(env, tb_ptr) @@ -89,6 +89,7 @@ TCGOpDef tcg_op_defs[] = { #include "tcg-opc.h" #undef DEF }; +const size_t tcg_op_defs_max = ARRAY_SIZE(tcg_op_defs); static TCGRegSet tcg_target_available_regs[2]; static TCGRegSet tcg_target_call_clobber_regs; @@ -525,7 +525,8 @@ typedef struct TCGOpDef { } TCGOpDef; extern TCGOpDef tcg_op_defs[]; - +extern const size_t tcg_op_defs_max; + typedef struct TCGTargetOpDef { TCGOpcode op; const char *args_ct_str[TCG_MAX_OP_ARGS]; @@ -584,10 +585,9 @@ TCGv_i32 tcg_const_local_i32(int32_t val); TCGv_i64 tcg_const_local_i64(int64_t val); extern uint8_t code_gen_prologue[]; -#if defined(_ARCH_PPC) && !defined(_ARCH_PPC64) -#define tcg_qemu_tb_exec(env, tb_ptr) \ - ((long REGPARM __attribute__ ((longcall)) (*)(void *, void *))code_gen_prologue)(env, tb_ptr) -#else -#define tcg_qemu_tb_exec(env, tb_ptr) \ + +/* TCG targets may use a different definition of tcg_qemu_tb_exec. 
*/ +#if !defined(tcg_qemu_tb_exec) +# define tcg_qemu_tb_exec(env, tb_ptr) \ ((long REGPARM (*)(void *, void *))code_gen_prologue)(env, tb_ptr) #endif diff --git a/tcg/tci/README b/tcg/tci/README new file mode 100644 index 0000000000..6ac1ac99d6 --- /dev/null +++ b/tcg/tci/README @@ -0,0 +1,130 @@ +TCG Interpreter (TCI) - Copyright (c) 2011 Stefan Weil. + +This file is released under the BSD license. + +1) Introduction + +TCG (Tiny Code Generator) is a code generator which translates +code fragments ("basic blocks") from target code (any of the +targets supported by QEMU) to a code representation which +can be run on a host. + +QEMU can create native code for some hosts (arm, hppa, i386, ia64, ppc, ppc64, +s390, sparc, x86_64). For others, unofficial host support was written. + +By adding a code generator for a virtual machine and using an +interpreter for the generated bytecode, it is possible to +support (almost) any host. + +This is what TCI (Tiny Code Interpreter) does. + +2) Implementation + +Like each TCG host frontend, TCI implements the code generator in +tcg-target.c, tcg-target.h. Both files are in directory tcg/tci. + +The additional file tcg/tci.c adds the interpreter. + +The bytecode consists of opcodes (same numeric values as those used by +TCG), command length and arguments of variable size and number. + +3) Usage + +For hosts without native TCG, the interpreter TCI must be enabled by + + configure --enable-tcg-interpreter + +If configure is called without --enable-tcg-interpreter, it will +suggest using this option. Setting it automatically would need +additional code in configure which must be fixed when new native TCG +implementations are added. + +System emulation should work on any 32 or 64 bit host. +User mode emulation might work. Maybe a new linker script (*.ld) +is needed. Byte order might be wrong (on big endian hosts) +and need fixes in configure. 
+ +For hosts with native TCG, the interpreter TCI can be enabled by + + configure --enable-tcg-interpreter + +The only difference from running QEMU with TCI to running without TCI +should be speed. Especially during development of TCI, it was very +useful to compare runs with and without TCI. Create /tmp/qemu.log by + + qemu-system-i386 -d in_asm,op_opt,cpu -singlestep + +once with interpreter and once without interpreter and compare the resulting +qemu.log files. This is also useful to see the effects of additional +registers or additional opcodes (it is easy to modify the virtual machine). +It can also be used to verify native TCGs. + +Hosts with native TCG can also enable TCI by claiming to be unsupported: + + configure --cpu=unknown --enable-tcg-interpreter + +configure then no longer uses the native linker script (*.ld) for +user mode emulation. + + +4) Status + +TCI needs special implementation for 32 and 64 bit host, 32 and 64 bit target, +host and target with same or different endianness. + + | host (le) host (be) + | 32 64 32 64 +------------+------------------------------------------------------------ +target (le) | s0, u0 s1, u1 s?, u? s?, u? +32 bit | + | +target (le) | sc, uc s1, u1 s?, u? s?, u? +64 bit | + | +target (be) | sc, u0 sc, uc s?, u? s?, u? +32 bit | + | +target (be) | sc, uc sc, uc s?, u? s?, u? +64 bit | + | + +System emulation +s? = untested +sc = compiles +s0 = bios works +s1 = grub works +s2 = Linux boots + +Linux user mode emulation +u? = untested +uc = compiles +u0 = static hello works +u1 = linux-user-test works + +5) Todo list + +* TCI is not widely tested. It was written and tested on a x86_64 host + running i386 and x86_64 system emulation and Linux user mode. + A cross compiled QEMU for i386 host also works with the same basic tests. + A cross compiled QEMU for mipsel host works, too. It is terribly slow + because I run it in a mips malta emulation, so it is an interpreted + emulation in an emulation. 
+ A cross compiled QEMU for arm host works (tested with pc bios). + A cross compiled QEMU for ppc host works at least partially: + i386-linux-user/qemu-i386 can run a simple hello-world program + (tested in a ppc emulation). + +* Some TCG opcodes are either missing in the code generator and/or + in the interpreter. These opcodes raise a runtime exception, so it is + possible to see where code must be added. + +* The pseudo code is not optimized and still ugly. For hosts with special + alignment requirements, it needs some fixes (maybe aligned bytecode + would also improve speed for hosts which support byte alignment). + +* A better disassembler for the pseudo code would be nice (a very primitive + disassembler is included in tcg-target.c). + +* It might be useful to have a runtime option which selects the native TCG + or TCI, so QEMU would have to include two TCGs. Today, selecting TCI + is a configure option, so you need two compilations of QEMU. diff --git a/tcg/tci/tcg-target.c b/tcg/tci/tcg-target.c new file mode 100644 index 0000000000..e373e2a704 --- /dev/null +++ b/tcg/tci/tcg-target.c @@ -0,0 +1,906 @@ +/* + * Tiny Code Generator for QEMU + * + * Copyright (c) 2009, 2011 Stefan Weil + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +/* TODO list: + * - See TODO comments in code. + */ + +/* Marker for missing code. */ +#define TODO() \ + do { \ + fprintf(stderr, "TODO %s:%u: %s()\n", \ + __FILE__, __LINE__, __func__); \ + tcg_abort(); \ + } while (0) + +/* Single bit n. */ +#define BIT(n) (1 << (n)) + +/* Bitfield n...m (in 32 bit value). */ +#define BITS(n, m) (((0xffffffffU << (31 - n)) >> (31 - n + m)) << m) + +/* Used for function call generation. */ +#define TCG_REG_CALL_STACK TCG_REG_R4 +#define TCG_TARGET_STACK_ALIGN 16 +#define TCG_TARGET_CALL_STACK_OFFSET 0 + +/* TODO: documentation. */ +static uint8_t *tb_ret_addr; + +/* Macros used in tcg_target_op_defs. */ +#define R "r" +#define RI "ri" +#if TCG_TARGET_REG_BITS == 32 +# define R64 "r", "r" +#else +# define R64 "r" +#endif +#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS +# define L "L", "L" +# define S "S", "S" +#else +# define L "L" +# define S "S" +#endif + +/* TODO: documentation. 
*/ +static const TCGTargetOpDef tcg_target_op_defs[] = { + { INDEX_op_exit_tb, { NULL } }, + { INDEX_op_goto_tb, { NULL } }, + { INDEX_op_call, { RI } }, + { INDEX_op_jmp, { RI } }, + { INDEX_op_br, { NULL } }, + + { INDEX_op_mov_i32, { R, R } }, + { INDEX_op_movi_i32, { R } }, + + { INDEX_op_ld8u_i32, { R, R } }, + { INDEX_op_ld8s_i32, { R, R } }, + { INDEX_op_ld16u_i32, { R, R } }, + { INDEX_op_ld16s_i32, { R, R } }, + { INDEX_op_ld_i32, { R, R } }, + { INDEX_op_st8_i32, { R, R } }, + { INDEX_op_st16_i32, { R, R } }, + { INDEX_op_st_i32, { R, R } }, + + { INDEX_op_add_i32, { R, RI, RI } }, + { INDEX_op_sub_i32, { R, RI, RI } }, + { INDEX_op_mul_i32, { R, RI, RI } }, +#if TCG_TARGET_HAS_div_i32 + { INDEX_op_div_i32, { R, R, R } }, + { INDEX_op_divu_i32, { R, R, R } }, + { INDEX_op_rem_i32, { R, R, R } }, + { INDEX_op_remu_i32, { R, R, R } }, +#elif TCG_TARGET_HAS_div2_i32 + { INDEX_op_div2_i32, { R, R, "0", "1", R } }, + { INDEX_op_divu2_i32, { R, R, "0", "1", R } }, +#endif + /* TODO: Does R, RI, RI result in faster code than R, R, RI? + If both operands are constants, we can optimize. 
*/ + { INDEX_op_and_i32, { R, RI, RI } }, +#if TCG_TARGET_HAS_andc_i32 + { INDEX_op_andc_i32, { R, RI, RI } }, +#endif +#if TCG_TARGET_HAS_eqv_i32 + { INDEX_op_eqv_i32, { R, RI, RI } }, +#endif +#if TCG_TARGET_HAS_nand_i32 + { INDEX_op_nand_i32, { R, RI, RI } }, +#endif +#if TCG_TARGET_HAS_nor_i32 + { INDEX_op_nor_i32, { R, RI, RI } }, +#endif + { INDEX_op_or_i32, { R, RI, RI } }, +#if TCG_TARGET_HAS_orc_i32 + { INDEX_op_orc_i32, { R, RI, RI } }, +#endif + { INDEX_op_xor_i32, { R, RI, RI } }, + { INDEX_op_shl_i32, { R, RI, RI } }, + { INDEX_op_shr_i32, { R, RI, RI } }, + { INDEX_op_sar_i32, { R, RI, RI } }, +#if TCG_TARGET_HAS_rot_i32 + { INDEX_op_rotl_i32, { R, RI, RI } }, + { INDEX_op_rotr_i32, { R, RI, RI } }, +#endif + + { INDEX_op_brcond_i32, { R, RI } }, + + { INDEX_op_setcond_i32, { R, R, RI } }, +#if TCG_TARGET_REG_BITS == 64 + { INDEX_op_setcond_i64, { R, R, RI } }, +#endif /* TCG_TARGET_REG_BITS == 64 */ + +#if TCG_TARGET_REG_BITS == 32 + /* TODO: Support R, R, R, R, RI, RI? Will it be faster? 
*/ + { INDEX_op_add2_i32, { R, R, R, R, R, R } }, + { INDEX_op_sub2_i32, { R, R, R, R, R, R } }, + { INDEX_op_brcond2_i32, { R, R, RI, RI } }, + { INDEX_op_mulu2_i32, { R, R, R, R } }, + { INDEX_op_setcond2_i32, { R, R, R, RI, RI } }, +#endif + +#if TCG_TARGET_HAS_not_i32 + { INDEX_op_not_i32, { R, R } }, +#endif +#if TCG_TARGET_HAS_neg_i32 + { INDEX_op_neg_i32, { R, R } }, +#endif + +#if TCG_TARGET_REG_BITS == 64 + { INDEX_op_mov_i64, { R, R } }, + { INDEX_op_movi_i64, { R } }, + + { INDEX_op_ld8u_i64, { R, R } }, + { INDEX_op_ld8s_i64, { R, R } }, + { INDEX_op_ld16u_i64, { R, R } }, + { INDEX_op_ld16s_i64, { R, R } }, + { INDEX_op_ld32u_i64, { R, R } }, + { INDEX_op_ld32s_i64, { R, R } }, + { INDEX_op_ld_i64, { R, R } }, + + { INDEX_op_st8_i64, { R, R } }, + { INDEX_op_st16_i64, { R, R } }, + { INDEX_op_st32_i64, { R, R } }, + { INDEX_op_st_i64, { R, R } }, + + { INDEX_op_add_i64, { R, RI, RI } }, + { INDEX_op_sub_i64, { R, RI, RI } }, + { INDEX_op_mul_i64, { R, RI, RI } }, +#if TCG_TARGET_HAS_div_i64 + { INDEX_op_div_i64, { R, R, R } }, + { INDEX_op_divu_i64, { R, R, R } }, + { INDEX_op_rem_i64, { R, R, R } }, + { INDEX_op_remu_i64, { R, R, R } }, +#elif TCG_TARGET_HAS_div2_i64 + { INDEX_op_div2_i64, { R, R, "0", "1", R } }, + { INDEX_op_divu2_i64, { R, R, "0", "1", R } }, +#endif + { INDEX_op_and_i64, { R, RI, RI } }, +#if TCG_TARGET_HAS_andc_i64 + { INDEX_op_andc_i64, { R, RI, RI } }, +#endif +#if TCG_TARGET_HAS_eqv_i64 + { INDEX_op_eqv_i64, { R, RI, RI } }, +#endif +#if TCG_TARGET_HAS_nand_i64 + { INDEX_op_nand_i64, { R, RI, RI } }, +#endif +#if TCG_TARGET_HAS_nor_i64 + { INDEX_op_nor_i64, { R, RI, RI } }, +#endif + { INDEX_op_or_i64, { R, RI, RI } }, +#if TCG_TARGET_HAS_orc_i64 + { INDEX_op_orc_i64, { R, RI, RI } }, +#endif + { INDEX_op_xor_i64, { R, RI, RI } }, + { INDEX_op_shl_i64, { R, RI, RI } }, + { INDEX_op_shr_i64, { R, RI, RI } }, + { INDEX_op_sar_i64, { R, RI, RI } }, +#if TCG_TARGET_HAS_rot_i64 + { INDEX_op_rotl_i64, { R, RI, RI } }, + { 
INDEX_op_rotr_i64, { R, RI, RI } }, +#endif + { INDEX_op_brcond_i64, { R, RI } }, + +#if TCG_TARGET_HAS_ext8s_i64 + { INDEX_op_ext8s_i64, { R, R } }, +#endif +#if TCG_TARGET_HAS_ext16s_i64 + { INDEX_op_ext16s_i64, { R, R } }, +#endif +#if TCG_TARGET_HAS_ext32s_i64 + { INDEX_op_ext32s_i64, { R, R } }, +#endif +#if TCG_TARGET_HAS_ext8u_i64 + { INDEX_op_ext8u_i64, { R, R } }, +#endif +#if TCG_TARGET_HAS_ext16u_i64 + { INDEX_op_ext16u_i64, { R, R } }, +#endif +#if TCG_TARGET_HAS_ext32u_i64 + { INDEX_op_ext32u_i64, { R, R } }, +#endif +#if TCG_TARGET_HAS_bswap16_i64 + { INDEX_op_bswap16_i64, { R, R } }, +#endif +#if TCG_TARGET_HAS_bswap32_i64 + { INDEX_op_bswap32_i64, { R, R } }, +#endif +#if TCG_TARGET_HAS_bswap64_i64 + { INDEX_op_bswap64_i64, { R, R } }, +#endif +#if TCG_TARGET_HAS_not_i64 + { INDEX_op_not_i64, { R, R } }, +#endif +#if TCG_TARGET_HAS_neg_i64 + { INDEX_op_neg_i64, { R, R } }, +#endif +#endif /* TCG_TARGET_REG_BITS == 64 */ + + { INDEX_op_qemu_ld8u, { R, L } }, + { INDEX_op_qemu_ld8s, { R, L } }, + { INDEX_op_qemu_ld16u, { R, L } }, + { INDEX_op_qemu_ld16s, { R, L } }, + { INDEX_op_qemu_ld32, { R, L } }, +#if TCG_TARGET_REG_BITS == 64 + { INDEX_op_qemu_ld32u, { R, L } }, + { INDEX_op_qemu_ld32s, { R, L } }, +#endif + { INDEX_op_qemu_ld64, { R64, L } }, + + { INDEX_op_qemu_st8, { R, S } }, + { INDEX_op_qemu_st16, { R, S } }, + { INDEX_op_qemu_st32, { R, S } }, + { INDEX_op_qemu_st64, { R64, S } }, + +#if TCG_TARGET_HAS_ext8s_i32 + { INDEX_op_ext8s_i32, { R, R } }, +#endif +#if TCG_TARGET_HAS_ext16s_i32 + { INDEX_op_ext16s_i32, { R, R } }, +#endif +#if TCG_TARGET_HAS_ext8u_i32 + { INDEX_op_ext8u_i32, { R, R } }, +#endif +#if TCG_TARGET_HAS_ext16u_i32 + { INDEX_op_ext16u_i32, { R, R } }, +#endif + +#if TCG_TARGET_HAS_bswap16_i32 + { INDEX_op_bswap16_i32, { R, R } }, +#endif +#if TCG_TARGET_HAS_bswap32_i32 + { INDEX_op_bswap32_i32, { R, R } }, +#endif + + { -1 }, +}; + +static const int tcg_target_reg_alloc_order[] = { + TCG_REG_R0, + TCG_REG_R1, + 
TCG_REG_R2, + TCG_REG_R3, +#if 0 /* used for TCG_REG_CALL_STACK */ + TCG_REG_R4, +#endif + TCG_REG_R5, + TCG_REG_R6, + TCG_REG_R7, +#if TCG_TARGET_NB_REGS >= 16 + TCG_REG_R8, + TCG_REG_R9, + TCG_REG_R10, + TCG_REG_R11, + TCG_REG_R12, + TCG_REG_R13, + TCG_REG_R14, + TCG_REG_R15, +#endif +}; + +#if MAX_OPC_PARAM_IARGS != 4 +# error Fix needed, number of supported input arguments changed! +#endif + +static const int tcg_target_call_iarg_regs[] = { + TCG_REG_R0, + TCG_REG_R1, + TCG_REG_R2, + TCG_REG_R3, +#if TCG_TARGET_REG_BITS == 32 + /* 32 bit hosts need 2 * MAX_OPC_PARAM_IARGS registers. */ +#if 0 /* used for TCG_REG_CALL_STACK */ + TCG_REG_R4, +#endif + TCG_REG_R5, + TCG_REG_R6, + TCG_REG_R7, +#if TCG_TARGET_NB_REGS >= 16 + TCG_REG_R8, +#else +# error Too few input registers available +#endif +#endif +}; + +static const int tcg_target_call_oarg_regs[] = { + TCG_REG_R0, +#if TCG_TARGET_REG_BITS == 32 + TCG_REG_R1 +#endif +}; + +#ifndef NDEBUG +static const char *const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { + "r00", + "r01", + "r02", + "r03", + "r04", + "r05", + "r06", + "r07", +#if TCG_TARGET_NB_REGS >= 16 + "r08", + "r09", + "r10", + "r11", + "r12", + "r13", + "r14", + "r15", +#if TCG_TARGET_NB_REGS >= 32 + "r16", + "r17", + "r18", + "r19", + "r20", + "r21", + "r22", + "r23", + "r24", + "r25", + "r26", + "r27", + "r28", + "r29", + "r30", + "r31" +#endif +#endif +}; +#endif + +static void flush_icache_range(unsigned long start, unsigned long stop) +{ +} + +static void patch_reloc(uint8_t *code_ptr, int type, + tcg_target_long value, tcg_target_long addend) +{ + /* tcg_out_reloc always uses the same type, addend. */ + assert(type == sizeof(tcg_target_long)); + assert(addend == 0); + assert(value != 0); + *(tcg_target_long *)code_ptr = value; +} + +/* Parse target specific constraints. 
*/ +static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str) +{ + const char *ct_str = *pct_str; + switch (ct_str[0]) { + case 'r': + case 'L': /* qemu_ld constraint */ + case 'S': /* qemu_st constraint */ + ct->ct |= TCG_CT_REG; + tcg_regset_set32(ct->u.regs, 0, BIT(TCG_TARGET_NB_REGS) - 1); + break; + default: + return -1; + } + ct_str++; + *pct_str = ct_str; + return 0; +} + +#if defined(CONFIG_DEBUG_TCG_INTERPRETER) +/* Show current bytecode. Used by tcg interpreter. */ +void tci_disas(uint8_t opc) +{ + const TCGOpDef *def = &tcg_op_defs[opc]; + fprintf(stderr, "TCG %s %u, %u, %u\n", + def->name, def->nb_oargs, def->nb_iargs, def->nb_cargs); +} +#endif + +/* Write value (native size). */ +static void tcg_out_i(TCGContext *s, tcg_target_ulong v) +{ + *(tcg_target_ulong *)s->code_ptr = v; + s->code_ptr += sizeof(tcg_target_ulong); +} + +/* Write 64 bit value. */ +static void tcg_out64(TCGContext *s, uint64_t v) +{ + *(uint64_t *)s->code_ptr = v; + s->code_ptr += sizeof(v); +} + +/* Write opcode. */ +static void tcg_out_op_t(TCGContext *s, TCGOpcode op) +{ + tcg_out8(s, op); + tcg_out8(s, 0); +} + +/* Write register. */ +static void tcg_out_r(TCGContext *s, TCGArg t0) +{ + assert(t0 < TCG_TARGET_NB_REGS); + tcg_out8(s, t0); +} + +/* Write register or constant (native size). */ +static void tcg_out_ri(TCGContext *s, int const_arg, TCGArg arg) +{ + if (const_arg) { + assert(const_arg == 1); + tcg_out8(s, TCG_CONST); + tcg_out_i(s, arg); + } else { + tcg_out_r(s, arg); + } +} + +/* Write register or constant (32 bit). */ +static void tcg_out_ri32(TCGContext *s, int const_arg, TCGArg arg) +{ + if (const_arg) { + assert(const_arg == 1); + tcg_out8(s, TCG_CONST); + tcg_out32(s, arg); + } else { + tcg_out_r(s, arg); + } +} + +#if TCG_TARGET_REG_BITS == 64 +/* Write register or constant (64 bit). 
*/ +static void tcg_out_ri64(TCGContext *s, int const_arg, TCGArg arg) +{ + if (const_arg) { + assert(const_arg == 1); + tcg_out8(s, TCG_CONST); + tcg_out64(s, arg); + } else { + tcg_out_r(s, arg); + } +} +#endif + +/* Write label. */ +static void tci_out_label(TCGContext *s, TCGArg arg) +{ + TCGLabel *label = &s->labels[arg]; + if (label->has_value) { + tcg_out_i(s, label->u.value); + assert(label->u.value); + } else { + tcg_out_reloc(s, s->code_ptr, sizeof(tcg_target_ulong), arg, 0); + tcg_out_i(s, 0); + } +} + +static void tcg_out_ld(TCGContext *s, TCGType type, int ret, int arg1, + tcg_target_long arg2) +{ + uint8_t *old_code_ptr = s->code_ptr; + if (type == TCG_TYPE_I32) { + tcg_out_op_t(s, INDEX_op_ld_i32); + tcg_out_r(s, ret); + tcg_out_r(s, arg1); + tcg_out32(s, arg2); + } else { + assert(type == TCG_TYPE_I64); +#if TCG_TARGET_REG_BITS == 64 + tcg_out_op_t(s, INDEX_op_ld_i64); + tcg_out_r(s, ret); + tcg_out_r(s, arg1); + assert(arg2 == (uint32_t)arg2); + tcg_out32(s, arg2); +#else + TODO(); +#endif + } + old_code_ptr[1] = s->code_ptr - old_code_ptr; +} + +static void tcg_out_mov(TCGContext *s, TCGType type, int ret, int arg) +{ + uint8_t *old_code_ptr = s->code_ptr; + assert(ret != arg); +#if TCG_TARGET_REG_BITS == 32 + tcg_out_op_t(s, INDEX_op_mov_i32); +#else + tcg_out_op_t(s, INDEX_op_mov_i64); +#endif + tcg_out_r(s, ret); + tcg_out_r(s, arg); + old_code_ptr[1] = s->code_ptr - old_code_ptr; +} + +static void tcg_out_movi(TCGContext *s, TCGType type, + int t0, tcg_target_long arg) +{ + uint8_t *old_code_ptr = s->code_ptr; + uint32_t arg32 = arg; + if (type == TCG_TYPE_I32 || arg == arg32) { + tcg_out_op_t(s, INDEX_op_movi_i32); + tcg_out_r(s, t0); + tcg_out32(s, arg32); + } else { + assert(type == TCG_TYPE_I64); +#if TCG_TARGET_REG_BITS == 64 + tcg_out_op_t(s, INDEX_op_movi_i64); + tcg_out_r(s, t0); + tcg_out64(s, arg); +#else + TODO(); +#endif + } + old_code_ptr[1] = s->code_ptr - old_code_ptr; +} + +static void tcg_out_op(TCGContext *s, TCGOpcode opc, 
const TCGArg *args, + const int *const_args) +{ + uint8_t *old_code_ptr = s->code_ptr; + + tcg_out_op_t(s, opc); + + switch (opc) { + case INDEX_op_exit_tb: + tcg_out64(s, args[0]); + break; + case INDEX_op_goto_tb: + if (s->tb_jmp_offset) { + /* Direct jump method. */ + assert(args[0] < ARRAY_SIZE(s->tb_jmp_offset)); + s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf; + tcg_out32(s, 0); + } else { + /* Indirect jump method. */ + TODO(); + } + assert(args[0] < ARRAY_SIZE(s->tb_next_offset)); + s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf; + break; + case INDEX_op_br: + tci_out_label(s, args[0]); + break; + case INDEX_op_call: + tcg_out_ri(s, const_args[0], args[0]); + break; + case INDEX_op_jmp: + TODO(); + break; + case INDEX_op_setcond_i32: + tcg_out_r(s, args[0]); + tcg_out_r(s, args[1]); + tcg_out_ri32(s, const_args[2], args[2]); + tcg_out8(s, args[3]); /* condition */ + break; +#if TCG_TARGET_REG_BITS == 32 + case INDEX_op_setcond2_i32: + /* setcond2_i32 cond, t0, t1_low, t1_high, t2_low, t2_high */ + tcg_out_r(s, args[0]); + tcg_out_r(s, args[1]); + tcg_out_r(s, args[2]); + tcg_out_ri32(s, const_args[3], args[3]); + tcg_out_ri32(s, const_args[4], args[4]); + tcg_out8(s, args[5]); /* condition */ + break; +#elif TCG_TARGET_REG_BITS == 64 + case INDEX_op_setcond_i64: + tcg_out_r(s, args[0]); + tcg_out_r(s, args[1]); + tcg_out_ri64(s, const_args[2], args[2]); + tcg_out8(s, args[3]); /* condition */ + break; +#endif + case INDEX_op_movi_i32: + TODO(); /* Handled by tcg_out_movi? 
*/ + break; + case INDEX_op_ld8u_i32: + case INDEX_op_ld8s_i32: + case INDEX_op_ld16u_i32: + case INDEX_op_ld16s_i32: + case INDEX_op_ld_i32: + case INDEX_op_st8_i32: + case INDEX_op_st16_i32: + case INDEX_op_st_i32: + case INDEX_op_ld8u_i64: + case INDEX_op_ld8s_i64: + case INDEX_op_ld16u_i64: + case INDEX_op_ld16s_i64: + case INDEX_op_ld32u_i64: + case INDEX_op_ld32s_i64: + case INDEX_op_ld_i64: + case INDEX_op_st8_i64: + case INDEX_op_st16_i64: + case INDEX_op_st32_i64: + case INDEX_op_st_i64: + tcg_out_r(s, args[0]); + tcg_out_r(s, args[1]); + assert(args[2] == (uint32_t)args[2]); + tcg_out32(s, args[2]); + break; + case INDEX_op_add_i32: + case INDEX_op_sub_i32: + case INDEX_op_mul_i32: + case INDEX_op_and_i32: + case INDEX_op_andc_i32: /* Optional (TCG_TARGET_HAS_andc_i32). */ + case INDEX_op_eqv_i32: /* Optional (TCG_TARGET_HAS_eqv_i32). */ + case INDEX_op_nand_i32: /* Optional (TCG_TARGET_HAS_nand_i32). */ + case INDEX_op_nor_i32: /* Optional (TCG_TARGET_HAS_nor_i32). */ + case INDEX_op_or_i32: + case INDEX_op_orc_i32: /* Optional (TCG_TARGET_HAS_orc_i32). */ + case INDEX_op_xor_i32: + case INDEX_op_shl_i32: + case INDEX_op_shr_i32: + case INDEX_op_sar_i32: + case INDEX_op_rotl_i32: /* Optional (TCG_TARGET_HAS_rot_i32). */ + case INDEX_op_rotr_i32: /* Optional (TCG_TARGET_HAS_rot_i32). */ + tcg_out_r(s, args[0]); + tcg_out_ri32(s, const_args[1], args[1]); + tcg_out_ri32(s, const_args[2], args[2]); + break; + +#if TCG_TARGET_REG_BITS == 64 + case INDEX_op_mov_i64: + case INDEX_op_movi_i64: + TODO(); + break; + case INDEX_op_add_i64: + case INDEX_op_sub_i64: + case INDEX_op_mul_i64: + case INDEX_op_and_i64: + case INDEX_op_andc_i64: /* Optional (TCG_TARGET_HAS_andc_i64). */ + case INDEX_op_eqv_i64: /* Optional (TCG_TARGET_HAS_eqv_i64). */ + case INDEX_op_nand_i64: /* Optional (TCG_TARGET_HAS_nand_i64). */ + case INDEX_op_nor_i64: /* Optional (TCG_TARGET_HAS_nor_i64). */ + case INDEX_op_or_i64: + case INDEX_op_orc_i64: /* Optional (TCG_TARGET_HAS_orc_i64). 
*/ + case INDEX_op_xor_i64: + case INDEX_op_shl_i64: + case INDEX_op_shr_i64: + case INDEX_op_sar_i64: + /* TODO: Implementation of rotl_i64, rotr_i64 missing in tci.c. */ + case INDEX_op_rotl_i64: /* Optional (TCG_TARGET_HAS_rot_i64). */ + case INDEX_op_rotr_i64: /* Optional (TCG_TARGET_HAS_rot_i64). */ + tcg_out_r(s, args[0]); + tcg_out_ri64(s, const_args[1], args[1]); + tcg_out_ri64(s, const_args[2], args[2]); + break; + case INDEX_op_div_i64: /* Optional (TCG_TARGET_HAS_div_i64). */ + case INDEX_op_divu_i64: /* Optional (TCG_TARGET_HAS_div_i64). */ + case INDEX_op_rem_i64: /* Optional (TCG_TARGET_HAS_div_i64). */ + case INDEX_op_remu_i64: /* Optional (TCG_TARGET_HAS_div_i64). */ + TODO(); + break; + case INDEX_op_div2_i64: /* Optional (TCG_TARGET_HAS_div2_i64). */ + case INDEX_op_divu2_i64: /* Optional (TCG_TARGET_HAS_div2_i64). */ + TODO(); + break; + case INDEX_op_brcond_i64: + tcg_out_r(s, args[0]); + tcg_out_ri64(s, const_args[1], args[1]); + tcg_out8(s, args[2]); /* condition */ + tci_out_label(s, args[3]); + break; + case INDEX_op_bswap16_i64: /* Optional (TCG_TARGET_HAS_bswap16_i64). */ + case INDEX_op_bswap32_i64: /* Optional (TCG_TARGET_HAS_bswap32_i64). */ + case INDEX_op_bswap64_i64: /* Optional (TCG_TARGET_HAS_bswap64_i64). */ + case INDEX_op_not_i64: /* Optional (TCG_TARGET_HAS_not_i64). */ + case INDEX_op_neg_i64: /* Optional (TCG_TARGET_HAS_neg_i64). */ + case INDEX_op_ext8s_i64: /* Optional (TCG_TARGET_HAS_ext8s_i64). */ + case INDEX_op_ext8u_i64: /* Optional (TCG_TARGET_HAS_ext8u_i64). */ + case INDEX_op_ext16s_i64: /* Optional (TCG_TARGET_HAS_ext16s_i64). */ + case INDEX_op_ext16u_i64: /* Optional (TCG_TARGET_HAS_ext16u_i64). */ + case INDEX_op_ext32s_i64: /* Optional (TCG_TARGET_HAS_ext32s_i64). */ + case INDEX_op_ext32u_i64: /* Optional (TCG_TARGET_HAS_ext32u_i64). */ +#endif /* TCG_TARGET_REG_BITS == 64 */ + case INDEX_op_neg_i32: /* Optional (TCG_TARGET_HAS_neg_i32). */ + case INDEX_op_not_i32: /* Optional (TCG_TARGET_HAS_not_i32). 
*/ + case INDEX_op_ext8s_i32: /* Optional (TCG_TARGET_HAS_ext8s_i32). */ + case INDEX_op_ext16s_i32: /* Optional (TCG_TARGET_HAS_ext16s_i32). */ + case INDEX_op_ext8u_i32: /* Optional (TCG_TARGET_HAS_ext8u_i32). */ + case INDEX_op_ext16u_i32: /* Optional (TCG_TARGET_HAS_ext16u_i32). */ + case INDEX_op_bswap16_i32: /* Optional (TCG_TARGET_HAS_bswap16_i32). */ + case INDEX_op_bswap32_i32: /* Optional (TCG_TARGET_HAS_bswap32_i32). */ + tcg_out_r(s, args[0]); + tcg_out_r(s, args[1]); + break; + case INDEX_op_div_i32: /* Optional (TCG_TARGET_HAS_div_i32). */ + case INDEX_op_divu_i32: /* Optional (TCG_TARGET_HAS_div_i32). */ + case INDEX_op_rem_i32: /* Optional (TCG_TARGET_HAS_div_i32). */ + case INDEX_op_remu_i32: /* Optional (TCG_TARGET_HAS_div_i32). */ + tcg_out_r(s, args[0]); + tcg_out_ri32(s, const_args[1], args[1]); + tcg_out_ri32(s, const_args[2], args[2]); + break; + case INDEX_op_div2_i32: /* Optional (TCG_TARGET_HAS_div2_i32). */ + case INDEX_op_divu2_i32: /* Optional (TCG_TARGET_HAS_div2_i32). 
*/ + TODO(); + break; +#if TCG_TARGET_REG_BITS == 32 + case INDEX_op_add2_i32: + case INDEX_op_sub2_i32: + tcg_out_r(s, args[0]); + tcg_out_r(s, args[1]); + tcg_out_r(s, args[2]); + tcg_out_r(s, args[3]); + tcg_out_r(s, args[4]); + tcg_out_r(s, args[5]); + break; + case INDEX_op_brcond2_i32: + tcg_out_r(s, args[0]); + tcg_out_r(s, args[1]); + tcg_out_ri32(s, const_args[2], args[2]); + tcg_out_ri32(s, const_args[3], args[3]); + tcg_out8(s, args[4]); /* condition */ + tci_out_label(s, args[5]); + break; + case INDEX_op_mulu2_i32: + tcg_out_r(s, args[0]); + tcg_out_r(s, args[1]); + tcg_out_r(s, args[2]); + tcg_out_r(s, args[3]); + break; +#endif + case INDEX_op_brcond_i32: + tcg_out_r(s, args[0]); + tcg_out_ri32(s, const_args[1], args[1]); + tcg_out8(s, args[2]); /* condition */ + tci_out_label(s, args[3]); + break; + case INDEX_op_qemu_ld8u: + case INDEX_op_qemu_ld8s: + case INDEX_op_qemu_ld16u: + case INDEX_op_qemu_ld16s: + case INDEX_op_qemu_ld32: +#if TCG_TARGET_REG_BITS == 64 + case INDEX_op_qemu_ld32s: + case INDEX_op_qemu_ld32u: +#endif + tcg_out_r(s, *args++); + tcg_out_r(s, *args++); +#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS + tcg_out_r(s, *args++); +#endif +#ifdef CONFIG_SOFTMMU + tcg_out_i(s, *args); +#endif + break; + case INDEX_op_qemu_ld64: + tcg_out_r(s, *args++); +#if TCG_TARGET_REG_BITS == 32 + tcg_out_r(s, *args++); +#endif + tcg_out_r(s, *args++); +#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS + tcg_out_r(s, *args++); +#endif +#ifdef CONFIG_SOFTMMU + tcg_out_i(s, *args); +#endif + break; + case INDEX_op_qemu_st8: + case INDEX_op_qemu_st16: + case INDEX_op_qemu_st32: + tcg_out_r(s, *args++); + tcg_out_r(s, *args++); +#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS + tcg_out_r(s, *args++); +#endif +#ifdef CONFIG_SOFTMMU + tcg_out_i(s, *args); +#endif + break; + case INDEX_op_qemu_st64: + tcg_out_r(s, *args++); +#if TCG_TARGET_REG_BITS == 32 + tcg_out_r(s, *args++); +#endif + tcg_out_r(s, *args++); +#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS + tcg_out_r(s, 
*args++); +#endif +#ifdef CONFIG_SOFTMMU + tcg_out_i(s, *args); +#endif + break; + case INDEX_op_end: + TODO(); + break; + default: + fprintf(stderr, "Missing: %s\n", tcg_op_defs[opc].name); + tcg_abort(); + } + old_code_ptr[1] = s->code_ptr - old_code_ptr; +} + +static void tcg_out_st(TCGContext *s, TCGType type, int arg, int arg1, + tcg_target_long arg2) +{ + uint8_t *old_code_ptr = s->code_ptr; + if (type == TCG_TYPE_I32) { + tcg_out_op_t(s, INDEX_op_st_i32); + tcg_out_r(s, arg); + tcg_out_r(s, arg1); + tcg_out32(s, arg2); + } else { + assert(type == TCG_TYPE_I64); +#if TCG_TARGET_REG_BITS == 64 + tcg_out_op_t(s, INDEX_op_st_i64); + tcg_out_r(s, arg); + tcg_out_r(s, arg1); + tcg_out32(s, arg2); +#else + TODO(); +#endif + } + old_code_ptr[1] = s->code_ptr - old_code_ptr; +} + +/* Test if a constant matches the constraint. */ +static int tcg_target_const_match(tcg_target_long val, + const TCGArgConstraint *arg_ct) +{ + /* No need to return 0 or 1, 0 or != 0 is good enough. */ + return arg_ct->ct & TCG_CT_CONST; +} + +/* Maximum number of register used for input function arguments. */ +static int tcg_target_get_call_iarg_regs_count(int flags) +{ + return ARRAY_SIZE(tcg_target_call_iarg_regs); +} + +static void tcg_target_init(TCGContext *s) +{ +#if defined(CONFIG_DEBUG_TCG_INTERPRETER) + const char *envval = getenv("DEBUG_TCG"); + if (envval) { + loglevel = strtol(envval, NULL, 0); + } +#endif + + /* The current code uses uint8_t for tcg operations. */ + assert(ARRAY_SIZE(tcg_op_defs) <= UINT8_MAX); + + /* Registers available for 32 bit operations. */ + tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, + BIT(TCG_TARGET_NB_REGS) - 1); + /* Registers available for 64 bit operations. */ + tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, + BIT(TCG_TARGET_NB_REGS) - 1); + /* TODO: Which registers should be set here? 
*/ + tcg_regset_set32(tcg_target_call_clobber_regs, 0, + BIT(TCG_TARGET_NB_REGS) - 1); + tcg_regset_clear(s->reserved_regs); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK); + tcg_add_target_add_op_defs(tcg_target_op_defs); + tcg_set_frame(s, TCG_AREG0, offsetof(CPUState, temp_buf), + CPU_TEMP_BUF_NLONGS * sizeof(long)); +} + +/* Generate global QEMU prologue and epilogue code. */ +static void tcg_target_qemu_prologue(TCGContext *s) +{ + tb_ret_addr = s->code_ptr; +} diff --git a/tcg/tci/tcg-target.h b/tcg/tci/tcg-target.h new file mode 100644 index 0000000000..81ded86959 --- /dev/null +++ b/tcg/tci/tcg-target.h @@ -0,0 +1,160 @@ +/* + * Tiny Code Generator for QEMU + * + * Copyright (c) 2009, 2011 Stefan Weil + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +/* + * This code implements a TCG which does not generate machine code for some + * real target machine but which generates virtual machine code for an + * interpreter. Interpreted pseudo code is slow, but it works on any host. + * + * Some remarks might help in understanding the code: + * + * "target" or "TCG target" is the machine which runs the generated code. + * This is different to the usual meaning in QEMU where "target" is the + * emulated machine. So normally QEMU host is identical to TCG target. + * Here the TCG target is a virtual machine, but this virtual machine must + * use the same word size like the real machine. + * Therefore, we need both 32 and 64 bit virtual machines (interpreter). + */ + +#if !defined(TCG_TARGET_H) +#define TCG_TARGET_H + +#include "config-host.h" + +#define TCG_TARGET_INTERPRETER 1 + +#ifdef CONFIG_DEBUG_TCG +/* Enable debug output. */ +#define CONFIG_DEBUG_TCG_INTERPRETER +#endif + +#if 0 /* TCI tries to emulate a little endian host. */ +#if defined(HOST_WORDS_BIGENDIAN) +# define TCG_TARGET_WORDS_BIGENDIAN +#endif +#endif + +/* Optional instructions. */ + +#define TCG_TARGET_HAS_bswap16_i32 1 +#define TCG_TARGET_HAS_bswap32_i32 1 +/* Not more than one of the next two defines must be 1. 
*/ +#define TCG_TARGET_HAS_div_i32 1 +#define TCG_TARGET_HAS_div2_i32 0 +#define TCG_TARGET_HAS_ext8s_i32 1 +#define TCG_TARGET_HAS_ext16s_i32 1 +#define TCG_TARGET_HAS_ext8u_i32 1 +#define TCG_TARGET_HAS_ext16u_i32 1 +#define TCG_TARGET_HAS_andc_i32 0 +#define TCG_TARGET_HAS_deposit_i32 0 +#define TCG_TARGET_HAS_eqv_i32 0 +#define TCG_TARGET_HAS_nand_i32 0 +#define TCG_TARGET_HAS_nor_i32 0 +#define TCG_TARGET_HAS_neg_i32 1 +#define TCG_TARGET_HAS_not_i32 1 +#define TCG_TARGET_HAS_orc_i32 0 +#define TCG_TARGET_HAS_rot_i32 1 + +#if TCG_TARGET_REG_BITS == 64 +#define TCG_TARGET_HAS_bswap16_i64 1 +#define TCG_TARGET_HAS_bswap32_i64 1 +#define TCG_TARGET_HAS_bswap64_i64 1 +#define TCG_TARGET_HAS_deposit_i64 0 +/* Not more than one of the next two defines must be 1. */ +#define TCG_TARGET_HAS_div_i64 0 +#define TCG_TARGET_HAS_div2_i64 0 +#define TCG_TARGET_HAS_ext8s_i64 1 +#define TCG_TARGET_HAS_ext16s_i64 1 +#define TCG_TARGET_HAS_ext32s_i64 1 +#define TCG_TARGET_HAS_ext8u_i64 1 +#define TCG_TARGET_HAS_ext16u_i64 1 +#define TCG_TARGET_HAS_ext32u_i64 1 +#define TCG_TARGET_HAS_andc_i64 0 +#define TCG_TARGET_HAS_eqv_i64 0 +#define TCG_TARGET_HAS_nand_i64 0 +#define TCG_TARGET_HAS_nor_i64 0 +#define TCG_TARGET_HAS_neg_i64 1 +#define TCG_TARGET_HAS_not_i64 1 +#define TCG_TARGET_HAS_orc_i64 0 +#define TCG_TARGET_HAS_rot_i64 1 +#endif /* TCG_TARGET_REG_BITS == 64 */ + +/* Offset to user memory in user mode. */ +#define TCG_TARGET_HAS_GUEST_BASE + +/* Number of registers available. + For 32 bit hosts, we need more than 8 registers (call arguments). */ +/* #define TCG_TARGET_NB_REGS 8 */ +#define TCG_TARGET_NB_REGS 16 +/* #define TCG_TARGET_NB_REGS 32 */ + +/* List of registers which are used by TCG. 
*/ +typedef enum { + TCG_REG_R0 = 0, + TCG_REG_R1, + TCG_REG_R2, + TCG_REG_R3, + TCG_REG_R4, + TCG_REG_R5, + TCG_REG_R6, + TCG_REG_R7, + TCG_AREG0 = TCG_REG_R7, +#if TCG_TARGET_NB_REGS >= 16 + TCG_REG_R8, + TCG_REG_R9, + TCG_REG_R10, + TCG_REG_R11, + TCG_REG_R12, + TCG_REG_R13, + TCG_REG_R14, + TCG_REG_R15, +#if TCG_TARGET_NB_REGS >= 32 + TCG_REG_R16, + TCG_REG_R17, + TCG_REG_R18, + TCG_REG_R19, + TCG_REG_R20, + TCG_REG_R21, + TCG_REG_R22, + TCG_REG_R23, + TCG_REG_R24, + TCG_REG_R25, + TCG_REG_R26, + TCG_REG_R27, + TCG_REG_R28, + TCG_REG_R29, + TCG_REG_R30, + TCG_REG_R31, +#endif +#endif + /* Special value UINT8_MAX is used by TCI to encode constant values. */ + TCG_CONST = UINT8_MAX +} TCGRegister; + +void tci_disas(uint8_t opc); + +unsigned long tcg_qemu_tb_exec(CPUState *env, uint8_t *tb_ptr); +#define tcg_qemu_tb_exec tcg_qemu_tb_exec + +#endif /* TCG_TARGET_H */ diff --git a/tci-dis.c b/tci-dis.c new file mode 100644 index 0000000000..10c411be8c --- /dev/null +++ b/tci-dis.c @@ -0,0 +1,59 @@ +/* + * Tiny Code Interpreter for QEMU - disassembler + * + * Copyright (c) 2011 Stefan Weil + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include "dis-asm.h" +#include "tcg/tcg.h" + +/* Disassemble TCI bytecode. 
*/ +int print_insn_tci(bfd_vma addr, disassemble_info *info) +{ + int length; + uint8_t byte; + int status; + TCGOpcode op; + + status = info->read_memory_func(addr, &byte, 1, info); + if (status != 0) { + info->memory_error_func(status, addr, info); + return -1; + } + op = byte; + + addr++; + status = info->read_memory_func(addr, &byte, 1, info); + if (status != 0) { + info->memory_error_func(status, addr, info); + return -1; + } + length = byte; + + if (op >= tcg_op_defs_max) { + info->fprintf_func(info->stream, "illegal opcode %d", op); + } else { + const TCGOpDef *def = &tcg_op_defs[op]; + int nb_oargs = def->nb_oargs; + int nb_iargs = def->nb_iargs; + int nb_cargs = def->nb_cargs; + /* TODO: Improve disassembler output. */ + info->fprintf_func(info->stream, "%s\to=%d i=%d c=%d", + def->name, nb_oargs, nb_iargs, nb_cargs); + } + + return length; +} @@ -0,0 +1,1208 @@ +/* + * Tiny Code Interpreter for QEMU + * + * Copyright (c) 2009, 2011 Stefan Weil + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include "config.h" + +/* Defining NDEBUG disables assertions (which makes the code faster). */ +#if !defined(CONFIG_TCG_DEBUG) && !defined(NDEBUG) +# define NDEBUG +#endif + +#include "qemu-common.h" +#include "dyngen-exec.h" /* env */ +#include "exec-all.h" /* MAX_OPC_PARAM_IARGS */ +#include "tcg-op.h" + +/* Marker for missing code. 
*/ +#define TODO() \ + do { \ + fprintf(stderr, "TODO %s:%u: %s()\n", \ + __FILE__, __LINE__, __func__); \ + tcg_abort(); \ + } while (0) + +#if MAX_OPC_PARAM_IARGS != 4 +# error Fix needed, number of supported input arguments changed! +#endif +#if TCG_TARGET_REG_BITS == 32 +typedef uint64_t (*helper_function)(tcg_target_ulong, tcg_target_ulong, + tcg_target_ulong, tcg_target_ulong, + tcg_target_ulong, tcg_target_ulong, + tcg_target_ulong, tcg_target_ulong); +#else +typedef uint64_t (*helper_function)(tcg_target_ulong, tcg_target_ulong, + tcg_target_ulong, tcg_target_ulong); +#endif + +/* TCI can optionally use a global register variable for env. */ +#if !defined(AREG0) +CPUState *env; +#endif + +/* Targets which don't use GETPC also don't need tci_tb_ptr + which makes them a little faster. */ +#if defined(GETPC) +void *tci_tb_ptr; +#endif + +static tcg_target_ulong tci_reg[TCG_TARGET_NB_REGS]; + +static tcg_target_ulong tci_read_reg(TCGRegister index) +{ + assert(index < ARRAY_SIZE(tci_reg)); + return tci_reg[index]; +} + +#if TCG_TARGET_HAS_ext8s_i32 || TCG_TARGET_HAS_ext8s_i64 +static int8_t tci_read_reg8s(TCGRegister index) +{ + return (int8_t)tci_read_reg(index); +} +#endif + +#if TCG_TARGET_HAS_ext16s_i32 || TCG_TARGET_HAS_ext16s_i64 +static int16_t tci_read_reg16s(TCGRegister index) +{ + return (int16_t)tci_read_reg(index); +} +#endif + +#if TCG_TARGET_REG_BITS == 64 +static int32_t tci_read_reg32s(TCGRegister index) +{ + return (int32_t)tci_read_reg(index); +} +#endif + +static uint8_t tci_read_reg8(TCGRegister index) +{ + return (uint8_t)tci_read_reg(index); +} + +static uint16_t tci_read_reg16(TCGRegister index) +{ + return (uint16_t)tci_read_reg(index); +} + +static uint32_t tci_read_reg32(TCGRegister index) +{ + return (uint32_t)tci_read_reg(index); +} + +#if TCG_TARGET_REG_BITS == 64 +static uint64_t tci_read_reg64(TCGRegister index) +{ + return tci_read_reg(index); +} +#endif + +static void tci_write_reg(TCGRegister index, tcg_target_ulong value) +{ + 
assert(index < ARRAY_SIZE(tci_reg)); + assert(index != TCG_AREG0); + tci_reg[index] = value; +} + +static void tci_write_reg8s(TCGRegister index, int8_t value) +{ + tci_write_reg(index, value); +} + +static void tci_write_reg16s(TCGRegister index, int16_t value) +{ + tci_write_reg(index, value); +} + +#if TCG_TARGET_REG_BITS == 64 +static void tci_write_reg32s(TCGRegister index, int32_t value) +{ + tci_write_reg(index, value); +} +#endif + +static void tci_write_reg8(TCGRegister index, uint8_t value) +{ + tci_write_reg(index, value); +} + +static void tci_write_reg16(TCGRegister index, uint16_t value) +{ + tci_write_reg(index, value); +} + +static void tci_write_reg32(TCGRegister index, uint32_t value) +{ + tci_write_reg(index, value); +} + +#if TCG_TARGET_REG_BITS == 32 +static void tci_write_reg64(uint32_t high_index, uint32_t low_index, + uint64_t value) +{ + tci_write_reg(low_index, value); + tci_write_reg(high_index, value >> 32); +} +#elif TCG_TARGET_REG_BITS == 64 +static void tci_write_reg64(TCGRegister index, uint64_t value) +{ + tci_write_reg(index, value); +} +#endif + +#if TCG_TARGET_REG_BITS == 32 +/* Create a 64 bit value from two 32 bit values. */ +static uint64_t tci_uint64(uint32_t high, uint32_t low) +{ + return ((uint64_t)high << 32) + low; +} +#endif + +/* Read constant (native size) from bytecode. */ +static tcg_target_ulong tci_read_i(uint8_t **tb_ptr) +{ + tcg_target_ulong value = *(tcg_target_ulong *)(*tb_ptr); + *tb_ptr += sizeof(value); + return value; +} + +/* Read constant (32 bit) from bytecode. */ +static uint32_t tci_read_i32(uint8_t **tb_ptr) +{ + uint32_t value = *(uint32_t *)(*tb_ptr); + *tb_ptr += sizeof(value); + return value; +} + +#if TCG_TARGET_REG_BITS == 64 +/* Read constant (64 bit) from bytecode. */ +static uint64_t tci_read_i64(uint8_t **tb_ptr) +{ + uint64_t value = *(uint64_t *)(*tb_ptr); + *tb_ptr += sizeof(value); + return value; +} +#endif + +/* Read indexed register (native size) from bytecode. 
*/ +static tcg_target_ulong tci_read_r(uint8_t **tb_ptr) +{ + tcg_target_ulong value = tci_read_reg(**tb_ptr); + *tb_ptr += 1; + return value; +} + +/* Read indexed register (8 bit) from bytecode. */ +static uint8_t tci_read_r8(uint8_t **tb_ptr) +{ + uint8_t value = tci_read_reg8(**tb_ptr); + *tb_ptr += 1; + return value; +} + +#if TCG_TARGET_HAS_ext8s_i32 || TCG_TARGET_HAS_ext8s_i64 +/* Read indexed register (8 bit signed) from bytecode. */ +static int8_t tci_read_r8s(uint8_t **tb_ptr) +{ + int8_t value = tci_read_reg8s(**tb_ptr); + *tb_ptr += 1; + return value; +} +#endif + +/* Read indexed register (16 bit) from bytecode. */ +static uint16_t tci_read_r16(uint8_t **tb_ptr) +{ + uint16_t value = tci_read_reg16(**tb_ptr); + *tb_ptr += 1; + return value; +} + +#if TCG_TARGET_HAS_ext16s_i32 || TCG_TARGET_HAS_ext16s_i64 +/* Read indexed register (16 bit signed) from bytecode. */ +static int16_t tci_read_r16s(uint8_t **tb_ptr) +{ + int16_t value = tci_read_reg16s(**tb_ptr); + *tb_ptr += 1; + return value; +} +#endif + +/* Read indexed register (32 bit) from bytecode. */ +static uint32_t tci_read_r32(uint8_t **tb_ptr) +{ + uint32_t value = tci_read_reg32(**tb_ptr); + *tb_ptr += 1; + return value; +} + +#if TCG_TARGET_REG_BITS == 32 +/* Read two indexed registers (2 * 32 bit) from bytecode. */ +static uint64_t tci_read_r64(uint8_t **tb_ptr) +{ + uint32_t low = tci_read_r32(tb_ptr); + return tci_uint64(tci_read_r32(tb_ptr), low); +} +#elif TCG_TARGET_REG_BITS == 64 +/* Read indexed register (32 bit signed) from bytecode. */ +static int32_t tci_read_r32s(uint8_t **tb_ptr) +{ + int32_t value = tci_read_reg32s(**tb_ptr); + *tb_ptr += 1; + return value; +} + +/* Read indexed register (64 bit) from bytecode. */ +static uint64_t tci_read_r64(uint8_t **tb_ptr) +{ + uint64_t value = tci_read_reg64(**tb_ptr); + *tb_ptr += 1; + return value; +} +#endif + +/* Read indexed register(s) with target address from bytecode. 
*/ +static target_ulong tci_read_ulong(uint8_t **tb_ptr) +{ + target_ulong taddr = tci_read_r(tb_ptr); +#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS + taddr += (uint64_t)tci_read_r(tb_ptr) << 32; +#endif + return taddr; +} + +/* Read indexed register or constant (native size) from bytecode. */ +static tcg_target_ulong tci_read_ri(uint8_t **tb_ptr) +{ + tcg_target_ulong value; + TCGRegister r = **tb_ptr; + *tb_ptr += 1; + if (r == TCG_CONST) { + value = tci_read_i(tb_ptr); + } else { + value = tci_read_reg(r); + } + return value; +} + +/* Read indexed register or constant (32 bit) from bytecode. */ +static uint32_t tci_read_ri32(uint8_t **tb_ptr) +{ + uint32_t value; + TCGRegister r = **tb_ptr; + *tb_ptr += 1; + if (r == TCG_CONST) { + value = tci_read_i32(tb_ptr); + } else { + value = tci_read_reg32(r); + } + return value; +} + +#if TCG_TARGET_REG_BITS == 32 +/* Read two indexed registers or constants (2 * 32 bit) from bytecode. */ +static uint64_t tci_read_ri64(uint8_t **tb_ptr) +{ + uint32_t low = tci_read_ri32(tb_ptr); + return tci_uint64(tci_read_ri32(tb_ptr), low); +} +#elif TCG_TARGET_REG_BITS == 64 +/* Read indexed register or constant (64 bit) from bytecode. 
*/ +static uint64_t tci_read_ri64(uint8_t **tb_ptr) +{ + uint64_t value; + TCGRegister r = **tb_ptr; + *tb_ptr += 1; + if (r == TCG_CONST) { + value = tci_read_i64(tb_ptr); + } else { + value = tci_read_reg64(r); + } + return value; +} +#endif + +static target_ulong tci_read_label(uint8_t **tb_ptr) +{ + target_ulong label = tci_read_i(tb_ptr); + assert(label != 0); + return label; +} + +static bool tci_compare32(uint32_t u0, uint32_t u1, TCGCond condition) +{ + bool result = false; + int32_t i0 = u0; + int32_t i1 = u1; + switch (condition) { + case TCG_COND_EQ: + result = (u0 == u1); + break; + case TCG_COND_NE: + result = (u0 != u1); + break; + case TCG_COND_LT: + result = (i0 < i1); + break; + case TCG_COND_GE: + result = (i0 >= i1); + break; + case TCG_COND_LE: + result = (i0 <= i1); + break; + case TCG_COND_GT: + result = (i0 > i1); + break; + case TCG_COND_LTU: + result = (u0 < u1); + break; + case TCG_COND_GEU: + result = (u0 >= u1); + break; + case TCG_COND_LEU: + result = (u0 <= u1); + break; + case TCG_COND_GTU: + result = (u0 > u1); + break; + default: + TODO(); + } + return result; +} + +static bool tci_compare64(uint64_t u0, uint64_t u1, TCGCond condition) +{ + bool result = false; + int64_t i0 = u0; + int64_t i1 = u1; + switch (condition) { + case TCG_COND_EQ: + result = (u0 == u1); + break; + case TCG_COND_NE: + result = (u0 != u1); + break; + case TCG_COND_LT: + result = (i0 < i1); + break; + case TCG_COND_GE: + result = (i0 >= i1); + break; + case TCG_COND_LE: + result = (i0 <= i1); + break; + case TCG_COND_GT: + result = (i0 > i1); + break; + case TCG_COND_LTU: + result = (u0 < u1); + break; + case TCG_COND_GEU: + result = (u0 >= u1); + break; + case TCG_COND_LEU: + result = (u0 <= u1); + break; + case TCG_COND_GTU: + result = (u0 > u1); + break; + default: + TODO(); + } + return result; +} + +/* Interpret pseudo code in tb. 
*/ +unsigned long tcg_qemu_tb_exec(CPUState *cpustate, uint8_t *tb_ptr) +{ + unsigned long next_tb = 0; + + env = cpustate; + tci_reg[TCG_AREG0] = (tcg_target_ulong)env; + assert(tb_ptr); + + for (;;) { +#if defined(GETPC) + tci_tb_ptr = tb_ptr; +#endif + TCGOpcode opc = tb_ptr[0]; +#if !defined(NDEBUG) + uint8_t op_size = tb_ptr[1]; + uint8_t *old_code_ptr = tb_ptr; +#endif + tcg_target_ulong t0; + tcg_target_ulong t1; + tcg_target_ulong t2; + tcg_target_ulong label; + TCGCond condition; + target_ulong taddr; +#ifndef CONFIG_SOFTMMU + tcg_target_ulong host_addr; +#endif + uint8_t tmp8; + uint16_t tmp16; + uint32_t tmp32; + uint64_t tmp64; +#if TCG_TARGET_REG_BITS == 32 + uint64_t v64; +#endif + + /* Skip opcode and size entry. */ + tb_ptr += 2; + + switch (opc) { + case INDEX_op_end: + case INDEX_op_nop: + break; + case INDEX_op_nop1: + case INDEX_op_nop2: + case INDEX_op_nop3: + case INDEX_op_nopn: + case INDEX_op_discard: + TODO(); + break; + case INDEX_op_set_label: + TODO(); + break; + case INDEX_op_call: + t0 = tci_read_ri(&tb_ptr); +#if TCG_TARGET_REG_BITS == 32 + tmp64 = ((helper_function)t0)(tci_read_reg(TCG_REG_R0), + tci_read_reg(TCG_REG_R1), + tci_read_reg(TCG_REG_R2), + tci_read_reg(TCG_REG_R3), + tci_read_reg(TCG_REG_R5), + tci_read_reg(TCG_REG_R6), + tci_read_reg(TCG_REG_R7), + tci_read_reg(TCG_REG_R8)); + tci_write_reg(TCG_REG_R0, tmp64); + tci_write_reg(TCG_REG_R1, tmp64 >> 32); +#else + tmp64 = ((helper_function)t0)(tci_read_reg(TCG_REG_R0), + tci_read_reg(TCG_REG_R1), + tci_read_reg(TCG_REG_R2), + tci_read_reg(TCG_REG_R3)); + tci_write_reg(TCG_REG_R0, tmp64); +#endif + break; + case INDEX_op_jmp: + case INDEX_op_br: + label = tci_read_label(&tb_ptr); + assert(tb_ptr == old_code_ptr + op_size); + tb_ptr = (uint8_t *)label; + continue; + case INDEX_op_setcond_i32: + t0 = *tb_ptr++; + t1 = tci_read_r32(&tb_ptr); + t2 = tci_read_ri32(&tb_ptr); + condition = *tb_ptr++; + tci_write_reg32(t0, tci_compare32(t1, t2, condition)); + break; +#if 
TCG_TARGET_REG_BITS == 32 + case INDEX_op_setcond2_i32: + t0 = *tb_ptr++; + tmp64 = tci_read_r64(&tb_ptr); + v64 = tci_read_ri64(&tb_ptr); + condition = *tb_ptr++; + tci_write_reg32(t0, tci_compare64(tmp64, v64, condition)); + break; +#elif TCG_TARGET_REG_BITS == 64 + case INDEX_op_setcond_i64: + t0 = *tb_ptr++; + t1 = tci_read_r64(&tb_ptr); + t2 = tci_read_ri64(&tb_ptr); + condition = *tb_ptr++; + tci_write_reg64(t0, tci_compare64(t1, t2, condition)); + break; +#endif + case INDEX_op_mov_i32: + t0 = *tb_ptr++; + t1 = tci_read_r32(&tb_ptr); + tci_write_reg32(t0, t1); + break; + case INDEX_op_movi_i32: + t0 = *tb_ptr++; + t1 = tci_read_i32(&tb_ptr); + tci_write_reg32(t0, t1); + break; + + /* Load/store operations (32 bit). */ + + case INDEX_op_ld8u_i32: + t0 = *tb_ptr++; + t1 = tci_read_r(&tb_ptr); + t2 = tci_read_i32(&tb_ptr); + tci_write_reg8(t0, *(uint8_t *)(t1 + t2)); + break; + case INDEX_op_ld8s_i32: + case INDEX_op_ld16u_i32: + TODO(); + break; + case INDEX_op_ld16s_i32: + TODO(); + break; + case INDEX_op_ld_i32: + t0 = *tb_ptr++; + t1 = tci_read_r(&tb_ptr); + t2 = tci_read_i32(&tb_ptr); + tci_write_reg32(t0, *(uint32_t *)(t1 + t2)); + break; + case INDEX_op_st8_i32: + t0 = tci_read_r8(&tb_ptr); + t1 = tci_read_r(&tb_ptr); + t2 = tci_read_i32(&tb_ptr); + *(uint8_t *)(t1 + t2) = t0; + break; + case INDEX_op_st16_i32: + t0 = tci_read_r16(&tb_ptr); + t1 = tci_read_r(&tb_ptr); + t2 = tci_read_i32(&tb_ptr); + *(uint16_t *)(t1 + t2) = t0; + break; + case INDEX_op_st_i32: + t0 = tci_read_r32(&tb_ptr); + t1 = tci_read_r(&tb_ptr); + t2 = tci_read_i32(&tb_ptr); + *(uint32_t *)(t1 + t2) = t0; + break; + + /* Arithmetic operations (32 bit). 
*/ + + case INDEX_op_add_i32: + t0 = *tb_ptr++; + t1 = tci_read_ri32(&tb_ptr); + t2 = tci_read_ri32(&tb_ptr); + tci_write_reg32(t0, t1 + t2); + break; + case INDEX_op_sub_i32: + t0 = *tb_ptr++; + t1 = tci_read_ri32(&tb_ptr); + t2 = tci_read_ri32(&tb_ptr); + tci_write_reg32(t0, t1 - t2); + break; + case INDEX_op_mul_i32: + t0 = *tb_ptr++; + t1 = tci_read_ri32(&tb_ptr); + t2 = tci_read_ri32(&tb_ptr); + tci_write_reg32(t0, t1 * t2); + break; +#if TCG_TARGET_HAS_div_i32 + case INDEX_op_div_i32: + t0 = *tb_ptr++; + t1 = tci_read_ri32(&tb_ptr); + t2 = tci_read_ri32(&tb_ptr); + tci_write_reg32(t0, (int32_t)t1 / (int32_t)t2); + break; + case INDEX_op_divu_i32: + t0 = *tb_ptr++; + t1 = tci_read_ri32(&tb_ptr); + t2 = tci_read_ri32(&tb_ptr); + tci_write_reg32(t0, t1 / t2); + break; + case INDEX_op_rem_i32: + t0 = *tb_ptr++; + t1 = tci_read_ri32(&tb_ptr); + t2 = tci_read_ri32(&tb_ptr); + tci_write_reg32(t0, (int32_t)t1 % (int32_t)t2); + break; + case INDEX_op_remu_i32: + t0 = *tb_ptr++; + t1 = tci_read_ri32(&tb_ptr); + t2 = tci_read_ri32(&tb_ptr); + tci_write_reg32(t0, t1 % t2); + break; +#elif TCG_TARGET_HAS_div2_i32 + case INDEX_op_div2_i32: + case INDEX_op_divu2_i32: + TODO(); + break; +#endif + case INDEX_op_and_i32: + t0 = *tb_ptr++; + t1 = tci_read_ri32(&tb_ptr); + t2 = tci_read_ri32(&tb_ptr); + tci_write_reg32(t0, t1 & t2); + break; + case INDEX_op_or_i32: + t0 = *tb_ptr++; + t1 = tci_read_ri32(&tb_ptr); + t2 = tci_read_ri32(&tb_ptr); + tci_write_reg32(t0, t1 | t2); + break; + case INDEX_op_xor_i32: + t0 = *tb_ptr++; + t1 = tci_read_ri32(&tb_ptr); + t2 = tci_read_ri32(&tb_ptr); + tci_write_reg32(t0, t1 ^ t2); + break; + + /* Shift/rotate operations (32 bit). 
*/ + + case INDEX_op_shl_i32: + t0 = *tb_ptr++; + t1 = tci_read_ri32(&tb_ptr); + t2 = tci_read_ri32(&tb_ptr); + tci_write_reg32(t0, t1 << t2); + break; + case INDEX_op_shr_i32: + t0 = *tb_ptr++; + t1 = tci_read_ri32(&tb_ptr); + t2 = tci_read_ri32(&tb_ptr); + tci_write_reg32(t0, t1 >> t2); + break; + case INDEX_op_sar_i32: + t0 = *tb_ptr++; + t1 = tci_read_ri32(&tb_ptr); + t2 = tci_read_ri32(&tb_ptr); + tci_write_reg32(t0, ((int32_t)t1 >> t2)); + break; +#if TCG_TARGET_HAS_rot_i32 + case INDEX_op_rotl_i32: + t0 = *tb_ptr++; + t1 = tci_read_ri32(&tb_ptr); + t2 = tci_read_ri32(&tb_ptr); + tci_write_reg32(t0, (t1 << t2) | (t1 >> (32 - t2))); + break; + case INDEX_op_rotr_i32: + t0 = *tb_ptr++; + t1 = tci_read_ri32(&tb_ptr); + t2 = tci_read_ri32(&tb_ptr); + tci_write_reg32(t0, (t1 >> t2) | (t1 << (32 - t2))); + break; +#endif + case INDEX_op_brcond_i32: + t0 = tci_read_r32(&tb_ptr); + t1 = tci_read_ri32(&tb_ptr); + condition = *tb_ptr++; + label = tci_read_label(&tb_ptr); + if (tci_compare32(t0, t1, condition)) { + assert(tb_ptr == old_code_ptr + op_size); + tb_ptr = (uint8_t *)label; + continue; + } + break; +#if TCG_TARGET_REG_BITS == 32 + case INDEX_op_add2_i32: + t0 = *tb_ptr++; + t1 = *tb_ptr++; + tmp64 = tci_read_r64(&tb_ptr); + tmp64 += tci_read_r64(&tb_ptr); + tci_write_reg64(t1, t0, tmp64); + break; + case INDEX_op_sub2_i32: + t0 = *tb_ptr++; + t1 = *tb_ptr++; + tmp64 = tci_read_r64(&tb_ptr); + tmp64 -= tci_read_r64(&tb_ptr); + tci_write_reg64(t1, t0, tmp64); + break; + case INDEX_op_brcond2_i32: + tmp64 = tci_read_r64(&tb_ptr); + v64 = tci_read_ri64(&tb_ptr); + condition = *tb_ptr++; + label = tci_read_label(&tb_ptr); + if (tci_compare64(tmp64, v64, condition)) { + assert(tb_ptr == old_code_ptr + op_size); + tb_ptr = (uint8_t *)label; + continue; + } + break; + case INDEX_op_mulu2_i32: + t0 = *tb_ptr++; + t1 = *tb_ptr++; + t2 = tci_read_r32(&tb_ptr); + tmp64 = tci_read_r32(&tb_ptr); + tci_write_reg64(t1, t0, t2 * tmp64); + break; +#endif /* TCG_TARGET_REG_BITS 
== 32 */ +#if TCG_TARGET_HAS_ext8s_i32 + case INDEX_op_ext8s_i32: + t0 = *tb_ptr++; + t1 = tci_read_r8s(&tb_ptr); + tci_write_reg32(t0, t1); + break; +#endif +#if TCG_TARGET_HAS_ext16s_i32 + case INDEX_op_ext16s_i32: + t0 = *tb_ptr++; + t1 = tci_read_r16s(&tb_ptr); + tci_write_reg32(t0, t1); + break; +#endif +#if TCG_TARGET_HAS_ext8u_i32 + case INDEX_op_ext8u_i32: + t0 = *tb_ptr++; + t1 = tci_read_r8(&tb_ptr); + tci_write_reg32(t0, t1); + break; +#endif +#if TCG_TARGET_HAS_ext16u_i32 + case INDEX_op_ext16u_i32: + t0 = *tb_ptr++; + t1 = tci_read_r16(&tb_ptr); + tci_write_reg32(t0, t1); + break; +#endif +#if TCG_TARGET_HAS_bswap16_i32 + case INDEX_op_bswap16_i32: + t0 = *tb_ptr++; + t1 = tci_read_r16(&tb_ptr); + tci_write_reg32(t0, bswap16(t1)); + break; +#endif +#if TCG_TARGET_HAS_bswap32_i32 + case INDEX_op_bswap32_i32: + t0 = *tb_ptr++; + t1 = tci_read_r32(&tb_ptr); + tci_write_reg32(t0, bswap32(t1)); + break; +#endif +#if TCG_TARGET_HAS_not_i32 + case INDEX_op_not_i32: + t0 = *tb_ptr++; + t1 = tci_read_r32(&tb_ptr); + tci_write_reg32(t0, ~t1); + break; +#endif +#if TCG_TARGET_HAS_neg_i32 + case INDEX_op_neg_i32: + t0 = *tb_ptr++; + t1 = tci_read_r32(&tb_ptr); + tci_write_reg32(t0, -t1); + break; +#endif +#if TCG_TARGET_REG_BITS == 64 + case INDEX_op_mov_i64: + t0 = *tb_ptr++; + t1 = tci_read_r64(&tb_ptr); + tci_write_reg64(t0, t1); + break; + case INDEX_op_movi_i64: + t0 = *tb_ptr++; + t1 = tci_read_i64(&tb_ptr); + tci_write_reg64(t0, t1); + break; + + /* Load/store operations (64 bit). 
*/ + + case INDEX_op_ld8u_i64: + t0 = *tb_ptr++; + t1 = tci_read_r(&tb_ptr); + t2 = tci_read_i32(&tb_ptr); + tci_write_reg8(t0, *(uint8_t *)(t1 + t2)); + break; + case INDEX_op_ld8s_i64: + case INDEX_op_ld16u_i64: + case INDEX_op_ld16s_i64: + TODO(); + break; + case INDEX_op_ld32u_i64: + t0 = *tb_ptr++; + t1 = tci_read_r(&tb_ptr); + t2 = tci_read_i32(&tb_ptr); + tci_write_reg32(t0, *(uint32_t *)(t1 + t2)); + break; + case INDEX_op_ld32s_i64: + t0 = *tb_ptr++; + t1 = tci_read_r(&tb_ptr); + t2 = tci_read_i32(&tb_ptr); + tci_write_reg32s(t0, *(int32_t *)(t1 + t2)); + break; + case INDEX_op_ld_i64: + t0 = *tb_ptr++; + t1 = tci_read_r(&tb_ptr); + t2 = tci_read_i32(&tb_ptr); + tci_write_reg64(t0, *(uint64_t *)(t1 + t2)); + break; + case INDEX_op_st8_i64: + t0 = tci_read_r8(&tb_ptr); + t1 = tci_read_r(&tb_ptr); + t2 = tci_read_i32(&tb_ptr); + *(uint8_t *)(t1 + t2) = t0; + break; + case INDEX_op_st16_i64: + t0 = tci_read_r16(&tb_ptr); + t1 = tci_read_r(&tb_ptr); + t2 = tci_read_i32(&tb_ptr); + *(uint16_t *)(t1 + t2) = t0; + break; + case INDEX_op_st32_i64: + t0 = tci_read_r32(&tb_ptr); + t1 = tci_read_r(&tb_ptr); + t2 = tci_read_i32(&tb_ptr); + *(uint32_t *)(t1 + t2) = t0; + break; + case INDEX_op_st_i64: + t0 = tci_read_r64(&tb_ptr); + t1 = tci_read_r(&tb_ptr); + t2 = tci_read_i32(&tb_ptr); + *(uint64_t *)(t1 + t2) = t0; + break; + + /* Arithmetic operations (64 bit). 
*/ + + case INDEX_op_add_i64: + t0 = *tb_ptr++; + t1 = tci_read_ri64(&tb_ptr); + t2 = tci_read_ri64(&tb_ptr); + tci_write_reg64(t0, t1 + t2); + break; + case INDEX_op_sub_i64: + t0 = *tb_ptr++; + t1 = tci_read_ri64(&tb_ptr); + t2 = tci_read_ri64(&tb_ptr); + tci_write_reg64(t0, t1 - t2); + break; + case INDEX_op_mul_i64: + t0 = *tb_ptr++; + t1 = tci_read_ri64(&tb_ptr); + t2 = tci_read_ri64(&tb_ptr); + tci_write_reg64(t0, t1 * t2); + break; +#if TCG_TARGET_HAS_div_i64 + case INDEX_op_div_i64: + case INDEX_op_divu_i64: + case INDEX_op_rem_i64: + case INDEX_op_remu_i64: + TODO(); + break; +#elif TCG_TARGET_HAS_div2_i64 + case INDEX_op_div2_i64: + case INDEX_op_divu2_i64: + TODO(); + break; +#endif + case INDEX_op_and_i64: + t0 = *tb_ptr++; + t1 = tci_read_ri64(&tb_ptr); + t2 = tci_read_ri64(&tb_ptr); + tci_write_reg64(t0, t1 & t2); + break; + case INDEX_op_or_i64: + t0 = *tb_ptr++; + t1 = tci_read_ri64(&tb_ptr); + t2 = tci_read_ri64(&tb_ptr); + tci_write_reg64(t0, t1 | t2); + break; + case INDEX_op_xor_i64: + t0 = *tb_ptr++; + t1 = tci_read_ri64(&tb_ptr); + t2 = tci_read_ri64(&tb_ptr); + tci_write_reg64(t0, t1 ^ t2); + break; + + /* Shift/rotate operations (64 bit). 
*/ + + case INDEX_op_shl_i64: + t0 = *tb_ptr++; + t1 = tci_read_ri64(&tb_ptr); + t2 = tci_read_ri64(&tb_ptr); + tci_write_reg64(t0, t1 << t2); + break; + case INDEX_op_shr_i64: + t0 = *tb_ptr++; + t1 = tci_read_ri64(&tb_ptr); + t2 = tci_read_ri64(&tb_ptr); + tci_write_reg64(t0, t1 >> t2); + break; + case INDEX_op_sar_i64: + t0 = *tb_ptr++; + t1 = tci_read_ri64(&tb_ptr); + t2 = tci_read_ri64(&tb_ptr); + tci_write_reg64(t0, ((int64_t)t1 >> t2)); + break; +#if TCG_TARGET_HAS_rot_i64 + case INDEX_op_rotl_i64: + case INDEX_op_rotr_i64: + TODO(); + break; +#endif + case INDEX_op_brcond_i64: + t0 = tci_read_r64(&tb_ptr); + t1 = tci_read_ri64(&tb_ptr); + condition = *tb_ptr++; + label = tci_read_label(&tb_ptr); + if (tci_compare64(t0, t1, condition)) { + assert(tb_ptr == old_code_ptr + op_size); + tb_ptr = (uint8_t *)label; + continue; + } + break; +#if TCG_TARGET_HAS_ext8u_i64 + case INDEX_op_ext8u_i64: + t0 = *tb_ptr++; + t1 = tci_read_r8(&tb_ptr); + tci_write_reg64(t0, t1); + break; +#endif +#if TCG_TARGET_HAS_ext8s_i64 + case INDEX_op_ext8s_i64: + t0 = *tb_ptr++; + t1 = tci_read_r8s(&tb_ptr); + tci_write_reg64(t0, t1); + break; +#endif +#if TCG_TARGET_HAS_ext16s_i64 + case INDEX_op_ext16s_i64: + t0 = *tb_ptr++; + t1 = tci_read_r16s(&tb_ptr); + tci_write_reg64(t0, t1); + break; +#endif +#if TCG_TARGET_HAS_ext16u_i64 + case INDEX_op_ext16u_i64: + t0 = *tb_ptr++; + t1 = tci_read_r16(&tb_ptr); + tci_write_reg64(t0, t1); + break; +#endif +#if TCG_TARGET_HAS_ext32s_i64 + case INDEX_op_ext32s_i64: + t0 = *tb_ptr++; + t1 = tci_read_r32s(&tb_ptr); + tci_write_reg64(t0, t1); + break; +#endif +#if TCG_TARGET_HAS_ext32u_i64 + case INDEX_op_ext32u_i64: + t0 = *tb_ptr++; + t1 = tci_read_r32(&tb_ptr); + tci_write_reg64(t0, t1); + break; +#endif +#if TCG_TARGET_HAS_bswap16_i64 + case INDEX_op_bswap16_i64: + TODO(); + t0 = *tb_ptr++; + t1 = tci_read_r16(&tb_ptr); + tci_write_reg64(t0, bswap16(t1)); + break; +#endif +#if TCG_TARGET_HAS_bswap32_i64 + case INDEX_op_bswap32_i64: + t0 = 
*tb_ptr++; + t1 = tci_read_r32(&tb_ptr); + tci_write_reg64(t0, bswap32(t1)); + break; +#endif +#if TCG_TARGET_HAS_bswap64_i64 + case INDEX_op_bswap64_i64: + TODO(); + t0 = *tb_ptr++; + t1 = tci_read_r64(&tb_ptr); + tci_write_reg64(t0, bswap64(t1)); + break; +#endif +#if TCG_TARGET_HAS_not_i64 + case INDEX_op_not_i64: + t0 = *tb_ptr++; + t1 = tci_read_r64(&tb_ptr); + tci_write_reg64(t0, ~t1); + break; +#endif +#if TCG_TARGET_HAS_neg_i64 + case INDEX_op_neg_i64: + t0 = *tb_ptr++; + t1 = tci_read_r64(&tb_ptr); + tci_write_reg64(t0, -t1); + break; +#endif +#endif /* TCG_TARGET_REG_BITS == 64 */ + + /* QEMU specific operations. */ + +#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS + case INDEX_op_debug_insn_start: + TODO(); + break; +#else + case INDEX_op_debug_insn_start: + TODO(); + break; +#endif + case INDEX_op_exit_tb: + next_tb = *(uint64_t *)tb_ptr; + goto exit; + break; + case INDEX_op_goto_tb: + t0 = tci_read_i32(&tb_ptr); + assert(tb_ptr == old_code_ptr + op_size); + tb_ptr += (int32_t)t0; + continue; + case INDEX_op_qemu_ld8u: + t0 = *tb_ptr++; + taddr = tci_read_ulong(&tb_ptr); +#ifdef CONFIG_SOFTMMU + tmp8 = __ldb_mmu(taddr, tci_read_i(&tb_ptr)); +#else + host_addr = (tcg_target_ulong)taddr; + assert(taddr == host_addr); + tmp8 = *(uint8_t *)(host_addr + GUEST_BASE); +#endif + tci_write_reg8(t0, tmp8); + break; + case INDEX_op_qemu_ld8s: + t0 = *tb_ptr++; + taddr = tci_read_ulong(&tb_ptr); +#ifdef CONFIG_SOFTMMU + tmp8 = __ldb_mmu(taddr, tci_read_i(&tb_ptr)); +#else + host_addr = (tcg_target_ulong)taddr; + assert(taddr == host_addr); + tmp8 = *(uint8_t *)(host_addr + GUEST_BASE); +#endif + tci_write_reg8s(t0, tmp8); + break; + case INDEX_op_qemu_ld16u: + t0 = *tb_ptr++; + taddr = tci_read_ulong(&tb_ptr); +#ifdef CONFIG_SOFTMMU + tmp16 = __ldw_mmu(taddr, tci_read_i(&tb_ptr)); +#else + host_addr = (tcg_target_ulong)taddr; + assert(taddr == host_addr); + tmp16 = tswap16(*(uint16_t *)(host_addr + GUEST_BASE)); +#endif + tci_write_reg16(t0, tmp16); + break; + case 
INDEX_op_qemu_ld16s: + t0 = *tb_ptr++; + taddr = tci_read_ulong(&tb_ptr); +#ifdef CONFIG_SOFTMMU + tmp16 = __ldw_mmu(taddr, tci_read_i(&tb_ptr)); +#else + host_addr = (tcg_target_ulong)taddr; + assert(taddr == host_addr); + tmp16 = tswap16(*(uint16_t *)(host_addr + GUEST_BASE)); +#endif + tci_write_reg16s(t0, tmp16); + break; +#if TCG_TARGET_REG_BITS == 64 + case INDEX_op_qemu_ld32u: + t0 = *tb_ptr++; + taddr = tci_read_ulong(&tb_ptr); +#ifdef CONFIG_SOFTMMU + tmp32 = __ldl_mmu(taddr, tci_read_i(&tb_ptr)); +#else + host_addr = (tcg_target_ulong)taddr; + assert(taddr == host_addr); + tmp32 = tswap32(*(uint32_t *)(host_addr + GUEST_BASE)); +#endif + tci_write_reg32(t0, tmp32); + break; + case INDEX_op_qemu_ld32s: + t0 = *tb_ptr++; + taddr = tci_read_ulong(&tb_ptr); +#ifdef CONFIG_SOFTMMU + tmp32 = __ldl_mmu(taddr, tci_read_i(&tb_ptr)); +#else + host_addr = (tcg_target_ulong)taddr; + assert(taddr == host_addr); + tmp32 = tswap32(*(uint32_t *)(host_addr + GUEST_BASE)); +#endif + tci_write_reg32s(t0, tmp32); + break; +#endif /* TCG_TARGET_REG_BITS == 64 */ + case INDEX_op_qemu_ld32: + t0 = *tb_ptr++; + taddr = tci_read_ulong(&tb_ptr); +#ifdef CONFIG_SOFTMMU + tmp32 = __ldl_mmu(taddr, tci_read_i(&tb_ptr)); +#else + host_addr = (tcg_target_ulong)taddr; + assert(taddr == host_addr); + tmp32 = tswap32(*(uint32_t *)(host_addr + GUEST_BASE)); +#endif + tci_write_reg32(t0, tmp32); + break; + case INDEX_op_qemu_ld64: + t0 = *tb_ptr++; +#if TCG_TARGET_REG_BITS == 32 + t1 = *tb_ptr++; +#endif + taddr = tci_read_ulong(&tb_ptr); +#ifdef CONFIG_SOFTMMU + tmp64 = __ldq_mmu(taddr, tci_read_i(&tb_ptr)); +#else + host_addr = (tcg_target_ulong)taddr; + assert(taddr == host_addr); + tmp64 = tswap64(*(uint64_t *)(host_addr + GUEST_BASE)); +#endif + tci_write_reg(t0, tmp64); +#if TCG_TARGET_REG_BITS == 32 + tci_write_reg(t1, tmp64 >> 32); +#endif + break; + case INDEX_op_qemu_st8: + t0 = tci_read_r8(&tb_ptr); + taddr = tci_read_ulong(&tb_ptr); +#ifdef CONFIG_SOFTMMU + t2 = 
tci_read_i(&tb_ptr); + __stb_mmu(taddr, t0, t2); +#else + host_addr = (tcg_target_ulong)taddr; + assert(taddr == host_addr); + *(uint8_t *)(host_addr + GUEST_BASE) = t0; +#endif + break; + case INDEX_op_qemu_st16: + t0 = tci_read_r16(&tb_ptr); + taddr = tci_read_ulong(&tb_ptr); +#ifdef CONFIG_SOFTMMU + t2 = tci_read_i(&tb_ptr); + __stw_mmu(taddr, t0, t2); +#else + host_addr = (tcg_target_ulong)taddr; + assert(taddr == host_addr); + *(uint16_t *)(host_addr + GUEST_BASE) = tswap16(t0); +#endif + break; + case INDEX_op_qemu_st32: + t0 = tci_read_r32(&tb_ptr); + taddr = tci_read_ulong(&tb_ptr); +#ifdef CONFIG_SOFTMMU + t2 = tci_read_i(&tb_ptr); + __stl_mmu(taddr, t0, t2); +#else + host_addr = (tcg_target_ulong)taddr; + assert(taddr == host_addr); + *(uint32_t *)(host_addr + GUEST_BASE) = tswap32(t0); +#endif + break; + case INDEX_op_qemu_st64: + tmp64 = tci_read_r64(&tb_ptr); + taddr = tci_read_ulong(&tb_ptr); +#ifdef CONFIG_SOFTMMU + t2 = tci_read_i(&tb_ptr); + __stq_mmu(taddr, tmp64, t2); +#else + host_addr = (tcg_target_ulong)taddr; + assert(taddr == host_addr); + *(uint64_t *)(host_addr + GUEST_BASE) = tswap64(tmp64); +#endif + break; + default: + TODO(); + break; + } + assert(tb_ptr == old_code_ptr + op_size); + } +exit: + return next_tb; +} |