104 files changed, 4930 insertions, 3003 deletions
diff --git a/.github/lockdown.yml b/.github/lockdown.yml index 07fc2f31ee..d3546bd2bc 100644 --- a/.github/lockdown.yml +++ b/.github/lockdown.yml @@ -14,11 +14,11 @@ issues: at https://gitlab.com/qemu-project/qemu.git. The project does not process issues filed on GitHub. - The project issues are tracked on Launchpad: - https://bugs.launchpad.net/qemu + The project issues are tracked on GitLab: + https://gitlab.com/qemu-project/qemu/-/issues QEMU welcomes bug report contributions. You can file new ones on: - https://bugs.launchpad.net/qemu/+filebug + https://gitlab.com/qemu-project/qemu/-/issues/new pulls: comment: | diff --git a/MAINTAINERS b/MAINTAINERS index e8ba494c3f..3443d2a5b5 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -155,6 +155,7 @@ S: Maintained F: target/arm/ F: tests/tcg/arm/ F: tests/tcg/aarch64/ +F: tests/qtest/arm-cpu-features.c F: hw/arm/ F: hw/cpu/a*mpcore.c F: include/hw/cpu/a*mpcore.h diff --git a/README.rst b/README.rst index a92c7394b7..79b19f1481 100644 --- a/README.rst +++ b/README.rst @@ -131,16 +131,16 @@ will be tagged as my-feature-v2. Bug reporting ============= -The QEMU project uses Launchpad as its primary upstream bug tracker. Bugs +The QEMU project uses GitLab issues to track bugs. Bugs found when running code built from QEMU git or upstream released sources should be reported via: -* `<https://bugs.launchpad.net/qemu/>`_ +* `<https://gitlab.com/qemu-project/qemu/-/issues>`_ If using QEMU via an operating system vendor pre-built binary package, it is preferable to report bugs to the vendor's own bug tracker first. If the bug is also known to affect latest upstream code, it can also be -reported via launchpad. +reported via GitLab. For additional information on bug reporting consult: diff --git a/accel/tcg/atomic_template.h b/accel/tcg/atomic_template.h index 0ff7f913e1..afa8a9daf3 100644 --- a/accel/tcg/atomic_template.h +++ b/accel/tcg/atomic_template.h @@ -74,7 +74,7 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr, ABI_TYPE cmpv, ABI_TYPE newv EXTRA_ARGS) { ATOMIC_MMU_DECLS; - DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; + DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW; DATA_TYPE ret; uint16_t info = trace_mem_build_info(SHIFT, false, 0, false, ATOMIC_MMU_IDX); @@ -95,7 +95,7 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr, ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr EXTRA_ARGS) { ATOMIC_MMU_DECLS; - DATA_TYPE val, *haddr = ATOMIC_MMU_LOOKUP; + DATA_TYPE val, *haddr = ATOMIC_MMU_LOOKUP_R; uint16_t info = trace_mem_build_info(SHIFT, false, 0, false, ATOMIC_MMU_IDX); @@ -110,7 +110,7 @@ void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr, ABI_TYPE val EXTRA_ARGS) { ATOMIC_MMU_DECLS; - DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; + DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_W; uint16_t info = trace_mem_build_info(SHIFT, false, 0, true, ATOMIC_MMU_IDX); @@ -125,7 +125,7 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val EXTRA_ARGS) { ATOMIC_MMU_DECLS; - DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; + DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW; DATA_TYPE ret; uint16_t info = trace_mem_build_info(SHIFT, false, 0, false, ATOMIC_MMU_IDX); @@ -142,7 +142,7 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \ ABI_TYPE val EXTRA_ARGS) \ { \ ATOMIC_MMU_DECLS; \ - DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; \ + DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW; \ DATA_TYPE ret; \ uint16_t info = trace_mem_build_info(SHIFT, false, 0, false, \ ATOMIC_MMU_IDX); \ @@ -176,7 +176,7 @@ ABI_TYPE 
ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \ ABI_TYPE xval EXTRA_ARGS) \ { \ ATOMIC_MMU_DECLS; \ - XDATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; \ + XDATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW; \ XDATA_TYPE cmp, old, new, val = xval; \ uint16_t info = trace_mem_build_info(SHIFT, false, 0, false, \ ATOMIC_MMU_IDX); \ @@ -221,7 +221,7 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr, ABI_TYPE cmpv, ABI_TYPE newv EXTRA_ARGS) { ATOMIC_MMU_DECLS; - DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; + DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW; DATA_TYPE ret; uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP, false, ATOMIC_MMU_IDX); @@ -242,7 +242,7 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr, ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr EXTRA_ARGS) { ATOMIC_MMU_DECLS; - DATA_TYPE val, *haddr = ATOMIC_MMU_LOOKUP; + DATA_TYPE val, *haddr = ATOMIC_MMU_LOOKUP_R; uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP, false, ATOMIC_MMU_IDX); @@ -257,7 +257,7 @@ void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr, ABI_TYPE val EXTRA_ARGS) { ATOMIC_MMU_DECLS; - DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; + DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_W; uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP, true, ATOMIC_MMU_IDX); @@ -274,7 +274,7 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val EXTRA_ARGS) { ATOMIC_MMU_DECLS; - DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; + DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW; ABI_TYPE ret; uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP, false, ATOMIC_MMU_IDX); @@ -291,7 +291,7 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \ ABI_TYPE val EXTRA_ARGS) \ { \ ATOMIC_MMU_DECLS; \ - DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; \ + DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW; \ DATA_TYPE ret; \ uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP, \ false, ATOMIC_MMU_IDX); \ @@ -323,7 +323,7 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \ ABI_TYPE xval EXTRA_ARGS) \ { \ ATOMIC_MMU_DECLS; \ - XDATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; \ + XDATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW; \ XDATA_TYPE ldo, ldn, old, new, val = xval; \ uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP, \ false, ATOMIC_MMU_IDX); \ diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c index f24348e979..b6d5fc6326 100644 --- a/accel/tcg/cputlb.c +++ b/accel/tcg/cputlb.c @@ -1742,18 +1742,22 @@ bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx, #endif -/* Probe for a read-modify-write atomic operation. Do not allow unaligned - * operations, or io operations to proceed. Return the host address. */ +/* + * Probe for an atomic operation. Do not allow unaligned operations, + * or io operations to proceed. Return the host address. + * + * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE. + */ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr) + TCGMemOpIdx oi, int size, int prot, + uintptr_t retaddr) { size_t mmu_idx = get_mmuidx(oi); - uintptr_t index = tlb_index(env, mmu_idx, addr); - CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr); - target_ulong tlb_addr = tlb_addr_write(tlbe); MemOp mop = get_memop(oi); int a_bits = get_alignment_bits(mop); - int s_bits = mop & MO_SIZE; + uintptr_t index; + CPUTLBEntry *tlbe; + target_ulong tlb_addr; void *hostaddr; /* Adjust the given return address. 
*/ @@ -1767,7 +1771,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, } /* Enforce qemu required alignment. */ - if (unlikely(addr & ((1 << s_bits) - 1))) { + if (unlikely(addr & (size - 1))) { /* We get here if guest alignment was not requested, or was not enforced by cpu_unaligned_access above. We might widen the access and emulate, but for now @@ -1775,15 +1779,45 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, goto stop_the_world; } + index = tlb_index(env, mmu_idx, addr); + tlbe = tlb_entry(env, mmu_idx, addr); + /* Check TLB entry and enforce page permissions. */ - if (!tlb_hit(tlb_addr, addr)) { - if (!VICTIM_TLB_HIT(addr_write, addr)) { - tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_STORE, - mmu_idx, retaddr); - index = tlb_index(env, mmu_idx, addr); - tlbe = tlb_entry(env, mmu_idx, addr); + if (prot & PAGE_WRITE) { + tlb_addr = tlb_addr_write(tlbe); + if (!tlb_hit(tlb_addr, addr)) { + if (!VICTIM_TLB_HIT(addr_write, addr)) { + tlb_fill(env_cpu(env), addr, size, + MMU_DATA_STORE, mmu_idx, retaddr); + index = tlb_index(env, mmu_idx, addr); + tlbe = tlb_entry(env, mmu_idx, addr); + } + tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK; + } + + /* Let the guest notice RMW on a write-only page. */ + if ((prot & PAGE_READ) && + unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) { + tlb_fill(env_cpu(env), addr, size, + MMU_DATA_LOAD, mmu_idx, retaddr); + /* + * Since we don't support reads and writes to different addresses, + * and we do have the proper page loaded for write, this shouldn't + * ever return. But just in case, handle via stop-the-world. + */ + goto stop_the_world; + } + } else /* if (prot & PAGE_READ) */ { + tlb_addr = tlbe->addr_read; + if (!tlb_hit(tlb_addr, addr)) { + if (!VICTIM_TLB_HIT(addr_write, addr)) { + tlb_fill(env_cpu(env), addr, size, + MMU_DATA_LOAD, mmu_idx, retaddr); + index = tlb_index(env, mmu_idx, addr); + tlbe = tlb_entry(env, mmu_idx, addr); + } + tlb_addr = tlbe->addr_read & ~TLB_INVALID_MASK; } - tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK; } /* Notice an IO access or a needs-MMU-lookup access */ @@ -1793,20 +1827,10 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, goto stop_the_world; } - /* Let the guest notice RMW on a write-only page. */ - if (unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) { - tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_LOAD, - mmu_idx, retaddr); - /* Since we don't support reads and writes to different addresses, - and we do have the proper page loaded for write, this shouldn't - ever return. But just in case, handle via stop-the-world. 
*/ - goto stop_the_world; - } - hostaddr = (void *)((uintptr_t)addr + tlbe->addend); if (unlikely(tlb_addr & TLB_NOTDIRTY)) { - notdirty_write(env_cpu(env), addr, 1 << s_bits, + notdirty_write(env_cpu(env), addr, size, &env_tlb(env)->d[mmu_idx].iotlb[index], retaddr); } @@ -2669,7 +2693,12 @@ void cpu_stq_le_data(CPUArchState *env, target_ulong ptr, uint64_t val) #define ATOMIC_NAME(X) \ HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu)) #define ATOMIC_MMU_DECLS -#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr) +#define ATOMIC_MMU_LOOKUP_RW \ + atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_READ | PAGE_WRITE, retaddr) +#define ATOMIC_MMU_LOOKUP_R \ + atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_READ, retaddr) +#define ATOMIC_MMU_LOOKUP_W \ + atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_WRITE, retaddr) #define ATOMIC_MMU_CLEANUP #define ATOMIC_MMU_IDX get_mmuidx(oi) @@ -2698,10 +2727,18 @@ void cpu_stq_le_data(CPUArchState *env, target_ulong ptr, uint64_t val) #undef EXTRA_ARGS #undef ATOMIC_NAME -#undef ATOMIC_MMU_LOOKUP +#undef ATOMIC_MMU_LOOKUP_RW +#undef ATOMIC_MMU_LOOKUP_R +#undef ATOMIC_MMU_LOOKUP_W + #define EXTRA_ARGS , TCGMemOpIdx oi #define ATOMIC_NAME(X) HELPER(glue(glue(atomic_ ## X, SUFFIX), END)) -#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, GETPC()) +#define ATOMIC_MMU_LOOKUP_RW \ + atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_READ | PAGE_WRITE, GETPC()) +#define ATOMIC_MMU_LOOKUP_R \ + atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_READ, GETPC()) +#define ATOMIC_MMU_LOOKUP_W \ + atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_WRITE, GETPC()) #define DATA_SIZE 1 #include "atomic_template.h" diff --git a/accel/tcg/plugin-gen.c b/accel/tcg/plugin-gen.c index 7627225aef..88e25c6df9 100644 --- a/accel/tcg/plugin-gen.c +++ b/accel/tcg/plugin-gen.c @@ -160,9 +160,8 @@ static void gen_empty_mem_helper(void) tcg_temp_free_ptr(ptr); } -static inline -void gen_plugin_cb_start(enum plugin_gen_from from, - enum plugin_gen_cb type, unsigned wr) +static void gen_plugin_cb_start(enum plugin_gen_from from, + enum plugin_gen_cb type, unsigned wr) { TCGOp *op; @@ -179,7 +178,7 @@ static void gen_wrapped(enum plugin_gen_from from, tcg_gen_plugin_cb_end(); } -static inline void plugin_gen_empty_callback(enum plugin_gen_from from) +static void plugin_gen_empty_callback(enum plugin_gen_from from) { switch (from) { case PLUGIN_GEN_AFTER_INSN: @@ -385,7 +384,7 @@ static TCGOp *copy_st_ptr(TCGOp **begin_op, TCGOp *op) } static TCGOp *copy_call(TCGOp **begin_op, TCGOp *op, void *empty_func, - void *func, unsigned tcg_flags, int *cb_idx) + void *func, int *cb_idx) { /* copy all ops until the call */ do { @@ -412,7 +411,7 @@ static TCGOp *copy_call(TCGOp **begin_op, TCGOp *op, void *empty_func, tcg_debug_assert(i < MAX_OPC_PARAM_ARGS); } op->args[*cb_idx] = (uintptr_t)func; - op->args[*cb_idx + 1] = tcg_flags; + op->args[*cb_idx + 1] = (*begin_op)->args[*cb_idx + 1]; return op; } @@ -439,7 +438,7 @@ static TCGOp *append_udata_cb(const struct qemu_plugin_dyn_cb *cb, /* call */ op = copy_call(&begin_op, op, HELPER(plugin_vcpu_udata_cb), - cb->f.vcpu_udata, cb->tcg_flags, cb_idx); + cb->f.vcpu_udata, cb_idx); return op; } @@ -490,7 +489,7 @@ static TCGOp *append_mem_cb(const struct qemu_plugin_dyn_cb *cb, if (type == PLUGIN_GEN_CB_MEM) { /* call */ op = copy_call(&begin_op, op, HELPER(plugin_vcpu_mem_cb), - cb->f.vcpu_udata, cb->tcg_flags, cb_idx); + cb->f.vcpu_udata, cb_idx); } return op; @@ -513,9 +512,8 @@ static bool op_rw(const TCGOp *op, const 
struct qemu_plugin_dyn_cb *cb) return !!(cb->rw & (w + 1)); } -static inline -void inject_cb_type(const GArray *cbs, TCGOp *begin_op, inject_fn inject, - op_ok_fn ok) +static void inject_cb_type(const GArray *cbs, TCGOp *begin_op, + inject_fn inject, op_ok_fn ok) { TCGOp *end_op; TCGOp *op; diff --git a/accel/tcg/plugin-helpers.h b/accel/tcg/plugin-helpers.h index 1916ee7920..9829abe4a9 100644 --- a/accel/tcg/plugin-helpers.h +++ b/accel/tcg/plugin-helpers.h @@ -1,5 +1,4 @@ #ifdef CONFIG_PLUGIN -/* Note: no TCG flags because those are overwritten later */ -DEF_HELPER_2(plugin_vcpu_udata_cb, void, i32, ptr) -DEF_HELPER_4(plugin_vcpu_mem_cb, void, i32, i32, i64, ptr) +DEF_HELPER_FLAGS_2(plugin_vcpu_udata_cb, TCG_CALL_NO_RWG, void, i32, ptr) +DEF_HELPER_FLAGS_4(plugin_vcpu_mem_cb, TCG_CALL_NO_RWG, void, i32, i32, i64, ptr) #endif diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c index fb2d43e6a9..e67b1617b5 100644 --- a/accel/tcg/user-exec.c +++ b/accel/tcg/user-exec.c @@ -1220,7 +1220,9 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, /* Macro to call the above, with local variables from the use context. */ #define ATOMIC_MMU_DECLS do {} while (0) -#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, DATA_SIZE, GETPC()) +#define ATOMIC_MMU_LOOKUP_RW atomic_mmu_lookup(env, addr, DATA_SIZE, GETPC()) +#define ATOMIC_MMU_LOOKUP_R ATOMIC_MMU_LOOKUP_RW +#define ATOMIC_MMU_LOOKUP_W ATOMIC_MMU_LOOKUP_RW #define ATOMIC_MMU_CLEANUP do { clear_helper_retaddr(); } while (0) #define ATOMIC_MMU_IDX MMU_USER_IDX @@ -1250,12 +1252,12 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, #undef EXTRA_ARGS #undef ATOMIC_NAME -#undef ATOMIC_MMU_LOOKUP +#undef ATOMIC_MMU_LOOKUP_RW #define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr #define ATOMIC_NAME(X) \ HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu)) -#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, DATA_SIZE, retaddr) +#define ATOMIC_MMU_LOOKUP_RW atomic_mmu_lookup(env, addr, DATA_SIZE, retaddr) #define DATA_SIZE 16 #include "atomic_template.h" @@ -4441,6 +4441,19 @@ if compile_prog "" "" ; then fi ########################################## +# check if we have sigev_notify_thread_id + +sigev_notify_thread_id=no +cat > $TMPC << EOF +#include <stddef.h> +#include <signal.h> +int main(void) { return offsetof(struct sigevent, sigev_notify_thread_id); } +EOF +if compile_prog "" "" ; then + sigev_notify_thread_id=yes +fi + +########################################## # check if trace backend exists $python "$source_path/scripts/tracetool.py" "--backends=$trace_backends" --check-backends > /dev/null 2> /dev/null @@ -5424,7 +5437,7 @@ if test "$cpu" = "s390x" ; then write_c_skeleton compile_prog "-march=z900" "" has_z900=$? - if [ $has_z900 = 0 ] || compile_prog "-march=z10" ""; then + if [ $has_z900 = 0 ] || compile_object "-march=z10 -msoft-float -Werror"; then if [ $has_z900 != 0 ]; then echo "WARNING: Your compiler does not support the z900!" echo " The s390-ccw bios will only work with guest CPUs >= z10." 
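Side note on the sigev_notify_thread_id probe added to configure above: it is a plain compile test, so the same check can be reproduced by hand outside the build system. The sketch below is illustrative only; it compiles only where the libc (recent glibc or musl) exposes the member under that name, which is exactly the condition configure turns into HAVE_SIGEV_NOTIFY_THREAD_ID=y. Where the member is absent, a later hunk in linux-user/syscall.c falls back to the internal _sigev_un._tid spelling.

    /* Standalone version of the configure probe: compiles only if
     * struct sigevent has a sigev_notify_thread_id member. */
    #include <stddef.h>
    #include <signal.h>

    int main(void)
    {
        return offsetof(struct sigevent, sigev_notify_thread_id);
    }
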
@@ -5678,6 +5691,9 @@ fi if test "$st_atim" = "yes" ; then echo "HAVE_STRUCT_STAT_ST_ATIM=y" >> $config_host_mak fi +if test "$sigev_notify_thread_id" = "yes" ; then + echo "HAVE_SIGEV_NOTIFY_THREAD_ID=y" >> $config_host_mak +fi if test "$byteswap_h" = "yes" ; then echo "CONFIG_BYTESWAP_H=y" >> $config_host_mak fi @@ -5803,6 +5819,9 @@ fi if test "$optreset" = "yes" ; then echo "HAVE_OPTRESET=y" >> $config_host_mak fi +if test "$tcg" = "enabled" -a "$tcg_interpreter" = "true" ; then + echo "CONFIG_TCG_INTERPRETER=y" >> $config_host_mak +fi if test "$fdatasync" = "yes" ; then echo "CONFIG_FDATASYNC=y" >> $config_host_mak fi diff --git a/docs/interop/live-block-operations.rst b/docs/interop/live-block-operations.rst index 1073b930dc..477d085f54 100644 --- a/docs/interop/live-block-operations.rst +++ b/docs/interop/live-block-operations.rst @@ -127,13 +127,15 @@ Interacting with a QEMU instance To show some example invocations of command-line, we will use the following invocation of QEMU, with a QMP server running over UNIX -socket:: +socket: - $ ./qemu-system-x86_64 -display none -no-user-config \ - -M q35 -nodefaults -m 512 \ - -blockdev node-name=node-A,driver=qcow2,file.driver=file,file.node-name=file,file.filename=./a.qcow2 \ - -device virtio-blk,drive=node-A,id=virtio0 \ - -monitor stdio -qmp unix:/tmp/qmp-sock,server=on,wait=off +.. parsed-literal:: + + $ |qemu_system| -display none -no-user-config -nodefaults \\ + -m 512 -blockdev \\ + node-name=node-A,driver=qcow2,file.driver=file,file.node-name=file,file.filename=./a.qcow2 \\ + -device virtio-blk,drive=node-A,id=virtio0 \\ + -monitor stdio -qmp unix:/tmp/qmp-sock,server=on,wait=off The ``-blockdev`` command-line option, used above, is available from QEMU 2.9 onwards. In the above invocation, notice the ``node-name`` @@ -692,14 +694,16 @@ And start the destination QEMU (we already have the source QEMU running -- discussed in the section: `Interacting with a QEMU instance`_) instance, with the following invocation. (As noted earlier, for simplicity's sake, the destination QEMU is started on the same host, but -it could be located elsewhere):: - - $ ./qemu-system-x86_64 -display none -no-user-config \ - -M q35 -nodefaults -m 512 \ - -blockdev node-name=node-TargetDisk,driver=qcow2,file.driver=file,file.node-name=file,file.filename=./target-disk.qcow2 \ - -device virtio-blk,drive=node-TargetDisk,id=virtio0 \ - -S -monitor stdio -qmp unix:./qmp-sock2,server=on,wait=off \ - -incoming tcp:localhost:6666 +it could be located elsewhere): + +.. parsed-literal:: + + $ |qemu_system| -display none -no-user-config -nodefaults \\ + -m 512 -blockdev \\ + node-name=node-TargetDisk,driver=qcow2,file.driver=file,file.node-name=file,file.filename=./target-disk.qcow2 \\ + -device virtio-blk,drive=node-TargetDisk,id=virtio0 \\ + -S -monitor stdio -qmp unix:./qmp-sock2,server=on,wait=off \\ + -incoming tcp:localhost:6666 Given the disk image chain on source QEMU:: diff --git a/docs/tools/virtiofsd.rst b/docs/tools/virtiofsd.rst index 00554c75bd..4911e797cb 100644 --- a/docs/tools/virtiofsd.rst +++ b/docs/tools/virtiofsd.rst @@ -239,7 +239,7 @@ xattr-mapping Examples :: --o xattrmap=":prefix:all::user.virtiofs.::bad:all:::" + -o xattrmap=":prefix:all::user.virtiofs.::bad:all:::" This uses two rules, using : as the field separator; @@ -250,7 +250,8 @@ the host set. This is equivalent to the 'map' rule: :: --o xattrmap=":map::user.virtiofs.:" + + -o xattrmap=":map::user.virtiofs.:" 2) Prefix 'trusted.' 
attributes, allow others through @@ -277,7 +278,8 @@ through. This is equivalent to the 'map' rule: :: --o xattrmap="/map/trusted./user.virtiofs./" + + -o xattrmap="/map/trusted./user.virtiofs./" 3) Hide 'security.' attributes, and allow everything else @@ -298,13 +300,13 @@ Examples Export ``/var/lib/fs/vm001/`` on vhost-user UNIX domain socket ``/var/run/vm001-vhost-fs.sock``: -:: +.. parsed-literal:: host# virtiofsd --socket-path=/var/run/vm001-vhost-fs.sock -o source=/var/lib/fs/vm001 - host# qemu-system-x86_64 \ - -chardev socket,id=char0,path=/var/run/vm001-vhost-fs.sock \ - -device vhost-user-fs-pci,chardev=char0,tag=myfs \ - -object memory-backend-memfd,id=mem,size=4G,share=on \ - -numa node,memdev=mem \ - ... + host# |qemu_system| \\ + -chardev socket,id=char0,path=/var/run/vm001-vhost-fs.sock \\ + -device vhost-user-fs-pci,chardev=char0,tag=myfs \\ + -object memory-backend-memfd,id=mem,size=4G,share=on \\ + -numa node,memdev=mem \\ + ... guest# mount -t virtiofs myfs /mnt diff --git a/hw/s390x/3270-ccw.c b/hw/s390x/3270-ccw.c index 13e93d8d8f..69e6783ade 100644 --- a/hw/s390x/3270-ccw.c +++ b/hw/s390x/3270-ccw.c @@ -129,6 +129,7 @@ static void emulated_ccw_3270_realize(DeviceState *ds, Error **errp) EMULATED_CCW_3270_CHPID_TYPE); sch->do_subchannel_work = do_subchannel_work_virtual; sch->ccw_cb = emulated_ccw_3270_cb; + sch->irb_cb = build_irb_virtual; ck->init(dev, &err); if (err) { diff --git a/hw/s390x/css.c b/hw/s390x/css.c index bed46f5ec3..133ddea575 100644 --- a/hw/s390x/css.c +++ b/hw/s390x/css.c @@ -1335,6 +1335,14 @@ static void copy_schib_to_guest(SCHIB *dest, const SCHIB *src) } } +void copy_esw_to_guest(ESW *dest, const ESW *src) +{ + dest->word0 = cpu_to_be32(src->word0); + dest->erw = cpu_to_be32(src->erw); + dest->word2 = cpu_to_be64(src->word2); + dest->word4 = cpu_to_be32(src->word4); +} + IOInstEnding css_do_stsch(SubchDev *sch, SCHIB *schib) { int ret; @@ -1604,9 +1612,8 @@ static void copy_irb_to_guest(IRB *dest, const IRB *src, const PMCW *pmcw, copy_scsw_to_guest(&dest->scsw, &src->scsw); - for (i = 0; i < ARRAY_SIZE(dest->esw); i++) { - dest->esw[i] = cpu_to_be32(src->esw[i]); - } + copy_esw_to_guest(&dest->esw, &src->esw); + for (i = 0; i < ARRAY_SIZE(dest->ecw); i++) { dest->ecw[i] = cpu_to_be32(src->ecw[i]); } @@ -1632,6 +1639,55 @@ static void copy_irb_to_guest(IRB *dest, const IRB *src, const PMCW *pmcw, *irb_len = sizeof(*dest); } +static void build_irb_sense_data(SubchDev *sch, IRB *irb) +{ + int i; + + /* Attention: sense_data is already BE! */ + memcpy(irb->ecw, sch->sense_data, sizeof(sch->sense_data)); + for (i = 0; i < ARRAY_SIZE(irb->ecw); i++) { + irb->ecw[i] = be32_to_cpu(irb->ecw[i]); + } +} + +void build_irb_passthrough(SubchDev *sch, IRB *irb) +{ + /* Copy ESW from hardware */ + irb->esw = sch->esw; + + /* + * If (irb->esw.erw & ESW_ERW_SENSE) is true, then the contents + * of the ECW is sense data. If false, then it is model-dependent + * information. Either way, copy it into the IRB for the guest to + * read/decide what to do with. + */ + build_irb_sense_data(sch, irb); +} + +void build_irb_virtual(SubchDev *sch, IRB *irb) +{ + SCHIB *schib = &sch->curr_status; + uint16_t stctl = schib->scsw.ctrl & SCSW_CTRL_MASK_STCTL; + + if (stctl & SCSW_STCTL_STATUS_PEND) { + if (schib->scsw.cstat & (SCSW_CSTAT_DATA_CHECK | + SCSW_CSTAT_CHN_CTRL_CHK | + SCSW_CSTAT_INTF_CTRL_CHK)) { + irb->scsw.flags |= SCSW_FLAGS_MASK_ESWF; + irb->esw.word0 = 0x04804000; + } else { + irb->esw.word0 = 0x00800000; + } + /* If a unit check is pending, copy sense data. 
*/ + if ((schib->scsw.dstat & SCSW_DSTAT_UNIT_CHECK) && + (schib->pmcw.chars & PMCW_CHARS_MASK_CSENSE)) { + irb->scsw.flags |= SCSW_FLAGS_MASK_ESWF | SCSW_FLAGS_MASK_ECTL; + build_irb_sense_data(sch, irb); + irb->esw.erw = ESW_ERW_SENSE | (sizeof(sch->sense_data) << 8); + } + } +} + int css_do_tsch_get_irb(SubchDev *sch, IRB *target_irb, int *irb_len) { SCHIB *schib = &sch->curr_status; @@ -1650,29 +1706,12 @@ int css_do_tsch_get_irb(SubchDev *sch, IRB *target_irb, int *irb_len) /* Copy scsw from current status. */ irb.scsw = schib->scsw; - if (stctl & SCSW_STCTL_STATUS_PEND) { - if (schib->scsw.cstat & (SCSW_CSTAT_DATA_CHECK | - SCSW_CSTAT_CHN_CTRL_CHK | - SCSW_CSTAT_INTF_CTRL_CHK)) { - irb.scsw.flags |= SCSW_FLAGS_MASK_ESWF; - irb.esw[0] = 0x04804000; - } else { - irb.esw[0] = 0x00800000; - } - /* If a unit check is pending, copy sense data. */ - if ((schib->scsw.dstat & SCSW_DSTAT_UNIT_CHECK) && - (schib->pmcw.chars & PMCW_CHARS_MASK_CSENSE)) { - int i; - irb.scsw.flags |= SCSW_FLAGS_MASK_ESWF | SCSW_FLAGS_MASK_ECTL; - /* Attention: sense_data is already BE! */ - memcpy(irb.ecw, sch->sense_data, sizeof(sch->sense_data)); - for (i = 0; i < ARRAY_SIZE(irb.ecw); i++) { - irb.ecw[i] = be32_to_cpu(irb.ecw[i]); - } - irb.esw[1] = 0x01000000 | (sizeof(sch->sense_data) << 8); - } + /* Build other IRB data, if necessary */ + if (sch->irb_cb) { + sch->irb_cb(sch, &irb); } + /* Store the irb to the guest. */ p = schib->pmcw; copy_irb_to_guest(target_irb, &irb, &p, irb_len); diff --git a/hw/s390x/s390-ccw.c b/hw/s390x/s390-ccw.c index c227c77984..2fc8bb9c23 100644 --- a/hw/s390x/s390-ccw.c +++ b/hw/s390x/s390-ccw.c @@ -124,6 +124,7 @@ static void s390_ccw_realize(S390CCWDevice *cdev, char *sysfsdev, Error **errp) } sch->driver_data = cdev; sch->do_subchannel_work = do_subchannel_work_passthrough; + sch->irb_cb = build_irb_passthrough; ccw_dev->sch = sch; ret = css_sch_build_schib(sch, &cdev->hostid); diff --git a/hw/s390x/s390-virtio-ccw.c b/hw/s390x/s390-virtio-ccw.c index 7af27ca305..e4b18aef49 100644 --- a/hw/s390x/s390-virtio-ccw.c +++ b/hw/s390x/s390-virtio-ccw.c @@ -802,7 +802,10 @@ DEFINE_CCW_MACHINE(6_1, "6.1", true); static void ccw_machine_6_0_instance_options(MachineState *machine) { + static const S390FeatInit qemu_cpu_feat = { S390_FEAT_LIST_QEMU_V6_0 }; + ccw_machine_6_1_instance_options(machine); + s390_set_qemu_cpu_model(0x2964, 13, 2, qemu_cpu_feat); } static void ccw_machine_6_0_class_options(MachineClass *mc) diff --git a/hw/s390x/virtio-ccw.c b/hw/s390x/virtio-ccw.c index 220b9efcf9..d68888fccd 100644 --- a/hw/s390x/virtio-ccw.c +++ b/hw/s390x/virtio-ccw.c @@ -753,6 +753,7 @@ static void virtio_ccw_device_realize(VirtioCcwDevice *dev, Error **errp) sch->id.reserved = 0xff; sch->id.cu_type = VIRTIO_CCW_CU_TYPE; sch->do_subchannel_work = do_subchannel_work_virtual; + sch->irb_cb = build_irb_virtual; ccw_dev->sch = sch; dev->indicators = NULL; dev->revision = -1; diff --git a/hw/vfio/ccw.c b/hw/vfio/ccw.c index 139a3d9d1b..000992fb9f 100644 --- a/hw/vfio/ccw.c +++ b/hw/vfio/ccw.c @@ -321,6 +321,7 @@ static void vfio_ccw_io_notifier_handler(void *opaque) SCHIB *schib = &sch->curr_status; SCSW s; IRB irb; + ESW esw; int size; if (!event_notifier_test_and_clear(&vcdev->io_notifier)) { @@ -371,6 +372,9 @@ static void vfio_ccw_io_notifier_handler(void *opaque) copy_scsw_to_guest(&s, &irb.scsw); schib->scsw = s; + copy_esw_to_guest(&esw, &irb.esw); + sch->esw = esw; + /* If a uint check is pending, copy sense data. 
*/ if ((schib->scsw.dstat & SCSW_DSTAT_UNIT_CHECK) && (schib->pmcw.chars & PMCW_CHARS_MASK_CSENSE)) { diff --git a/include/elf.h b/include/elf.h index 033bcc9576..811bf4a1cb 100644 --- a/include/elf.h +++ b/include/elf.h @@ -605,6 +605,13 @@ typedef struct { #define HWCAP_S390_HIGH_GPRS 512 #define HWCAP_S390_TE 1024 #define HWCAP_S390_VXRS 2048 +#define HWCAP_S390_VXRS_BCD 4096 +#define HWCAP_S390_VXRS_EXT 8192 +#define HWCAP_S390_GS 16384 +#define HWCAP_S390_VXRS_EXT2 32768 +#define HWCAP_S390_VXRS_PDE 65536 +#define HWCAP_S390_SORT 131072 +#define HWCAP_S390_DFLT 262144 /* M68K specific definitions. */ /* We use the top 24 bits to encode information about the diff --git a/include/exec/helper-head.h b/include/exec/helper-head.h index 3094c7946d..b974eb394a 100644 --- a/include/exec/helper-head.h +++ b/include/exec/helper-head.h @@ -85,32 +85,14 @@ #define dh_retvar_ptr tcgv_ptr_temp(retval) #define dh_retvar(t) glue(dh_retvar_, dh_alias(t)) -#define dh_is_64bit_void 0 -#define dh_is_64bit_noreturn 0 -#define dh_is_64bit_i32 0 -#define dh_is_64bit_i64 1 -#define dh_is_64bit_ptr (sizeof(void *) == 8) -#define dh_is_64bit_cptr dh_is_64bit_ptr -#define dh_is_64bit(t) glue(dh_is_64bit_, dh_alias(t)) - -#define dh_is_signed_void 0 -#define dh_is_signed_noreturn 0 -#define dh_is_signed_i32 0 -#define dh_is_signed_s32 1 -#define dh_is_signed_i64 0 -#define dh_is_signed_s64 1 -#define dh_is_signed_f16 0 -#define dh_is_signed_f32 0 -#define dh_is_signed_f64 0 -#define dh_is_signed_tl 0 -#define dh_is_signed_int 1 -/* ??? This is highly specific to the host cpu. There are even special - extension instructions that may be required, e.g. ia64's addp4. But - for now we don't support any 64-bit targets with 32-bit pointers. */ -#define dh_is_signed_ptr 0 -#define dh_is_signed_cptr dh_is_signed_ptr -#define dh_is_signed_env dh_is_signed_ptr -#define dh_is_signed(t) dh_is_signed_##t +#define dh_typecode_void 0 +#define dh_typecode_noreturn 0 +#define dh_typecode_i32 2 +#define dh_typecode_s32 3 +#define dh_typecode_i64 4 +#define dh_typecode_s64 5 +#define dh_typecode_ptr 6 +#define dh_typecode(t) glue(dh_typecode_, dh_alias(t)) #define dh_callflag_i32 0 #define dh_callflag_s32 0 @@ -126,8 +108,7 @@ #define dh_callflag_noreturn TCG_CALL_NO_RETURN #define dh_callflag(t) glue(dh_callflag_, dh_alias(t)) -#define dh_sizemask(t, n) \ - ((dh_is_64bit(t) << (n*2)) | (dh_is_signed(t) << (n*2+1))) +#define dh_typemask(t, n) (dh_typecode(t) << (n * 3)) #define dh_arg(t, n) \ glue(glue(tcgv_, dh_alias(t)), _temp)(glue(arg, n)) diff --git a/include/exec/helper-tcg.h b/include/exec/helper-tcg.h index 6888514635..16cd318b83 100644 --- a/include/exec/helper-tcg.h +++ b/include/exec/helper-tcg.h @@ -13,50 +13,50 @@ #define DEF_HELPER_FLAGS_0(NAME, FLAGS, ret) \ { .func = HELPER(NAME), .name = str(NAME), \ .flags = FLAGS | dh_callflag(ret), \ - .sizemask = dh_sizemask(ret, 0) }, + .typemask = dh_typemask(ret, 0) }, #define DEF_HELPER_FLAGS_1(NAME, FLAGS, ret, t1) \ { .func = HELPER(NAME), .name = str(NAME), \ .flags = FLAGS | dh_callflag(ret), \ - .sizemask = dh_sizemask(ret, 0) | dh_sizemask(t1, 1) }, + .typemask = dh_typemask(ret, 0) | dh_typemask(t1, 1) }, #define DEF_HELPER_FLAGS_2(NAME, FLAGS, ret, t1, t2) \ { .func = HELPER(NAME), .name = str(NAME), \ .flags = FLAGS | dh_callflag(ret), \ - .sizemask = dh_sizemask(ret, 0) | dh_sizemask(t1, 1) \ - | dh_sizemask(t2, 2) }, + .typemask = dh_typemask(ret, 0) | dh_typemask(t1, 1) \ + | dh_typemask(t2, 2) }, #define DEF_HELPER_FLAGS_3(NAME, FLAGS, ret, t1, t2, t3) \ { 
.func = HELPER(NAME), .name = str(NAME), \ .flags = FLAGS | dh_callflag(ret), \ - .sizemask = dh_sizemask(ret, 0) | dh_sizemask(t1, 1) \ - | dh_sizemask(t2, 2) | dh_sizemask(t3, 3) }, + .typemask = dh_typemask(ret, 0) | dh_typemask(t1, 1) \ + | dh_typemask(t2, 2) | dh_typemask(t3, 3) }, #define DEF_HELPER_FLAGS_4(NAME, FLAGS, ret, t1, t2, t3, t4) \ { .func = HELPER(NAME), .name = str(NAME), \ .flags = FLAGS | dh_callflag(ret), \ - .sizemask = dh_sizemask(ret, 0) | dh_sizemask(t1, 1) \ - | dh_sizemask(t2, 2) | dh_sizemask(t3, 3) | dh_sizemask(t4, 4) }, + .typemask = dh_typemask(ret, 0) | dh_typemask(t1, 1) \ + | dh_typemask(t2, 2) | dh_typemask(t3, 3) | dh_typemask(t4, 4) }, #define DEF_HELPER_FLAGS_5(NAME, FLAGS, ret, t1, t2, t3, t4, t5) \ { .func = HELPER(NAME), .name = str(NAME), \ .flags = FLAGS | dh_callflag(ret), \ - .sizemask = dh_sizemask(ret, 0) | dh_sizemask(t1, 1) \ - | dh_sizemask(t2, 2) | dh_sizemask(t3, 3) | dh_sizemask(t4, 4) \ - | dh_sizemask(t5, 5) }, + .typemask = dh_typemask(ret, 0) | dh_typemask(t1, 1) \ + | dh_typemask(t2, 2) | dh_typemask(t3, 3) | dh_typemask(t4, 4) \ + | dh_typemask(t5, 5) }, #define DEF_HELPER_FLAGS_6(NAME, FLAGS, ret, t1, t2, t3, t4, t5, t6) \ { .func = HELPER(NAME), .name = str(NAME), \ .flags = FLAGS | dh_callflag(ret), \ - .sizemask = dh_sizemask(ret, 0) | dh_sizemask(t1, 1) \ - | dh_sizemask(t2, 2) | dh_sizemask(t3, 3) | dh_sizemask(t4, 4) \ - | dh_sizemask(t5, 5) | dh_sizemask(t6, 6) }, + .typemask = dh_typemask(ret, 0) | dh_typemask(t1, 1) \ + | dh_typemask(t2, 2) | dh_typemask(t3, 3) | dh_typemask(t4, 4) \ + | dh_typemask(t5, 5) | dh_typemask(t6, 6) }, #define DEF_HELPER_FLAGS_7(NAME, FLAGS, ret, t1, t2, t3, t4, t5, t6, t7) \ { .func = HELPER(NAME), .name = str(NAME), .flags = FLAGS, \ - .sizemask = dh_sizemask(ret, 0) | dh_sizemask(t1, 1) \ - | dh_sizemask(t2, 2) | dh_sizemask(t3, 3) | dh_sizemask(t4, 4) \ - | dh_sizemask(t5, 5) | dh_sizemask(t6, 6) | dh_sizemask(t7, 7) }, + .typemask = dh_typemask(ret, 0) | dh_typemask(t1, 1) \ + | dh_typemask(t2, 2) | dh_typemask(t3, 3) | dh_typemask(t4, 4) \ + | dh_typemask(t5, 5) | dh_typemask(t6, 6) | dh_typemask(t7, 7) }, #include "helper.h" #include "trace/generated-helpers.h" diff --git a/include/fpu/softfloat-helpers.h b/include/fpu/softfloat-helpers.h index 34f4cf92ae..a98d759cd3 100644 --- a/include/fpu/softfloat-helpers.h +++ b/include/fpu/softfloat-helpers.h @@ -48,8 +48,8 @@ this code that are retained. 
=============================================================================== */ -#ifndef _SOFTFLOAT_HELPERS_H_ -#define _SOFTFLOAT_HELPERS_H_ +#ifndef SOFTFLOAT_HELPERS_H +#define SOFTFLOAT_HELPERS_H #include "fpu/softfloat-types.h" diff --git a/include/hw/s390x/css.h b/include/hw/s390x/css.h index bba7593d2e..10ed1df1bb 100644 --- a/include/hw/s390x/css.h +++ b/include/hw/s390x/css.h @@ -138,8 +138,10 @@ struct SubchDev { int (*ccw_cb) (SubchDev *, CCW1); void (*disable_cb)(SubchDev *); IOInstEnding (*do_subchannel_work) (SubchDev *); + void (*irb_cb)(SubchDev *, IRB *); SenseId id; void *driver_data; + ESW esw; }; static inline void sch_gen_unit_exception(SubchDev *sch) @@ -201,6 +203,7 @@ int css_sch_build_schib(SubchDev *sch, CssDevId *dev_id); unsigned int css_find_free_chpid(uint8_t cssid); uint16_t css_build_subchannel_id(SubchDev *sch); void copy_scsw_to_guest(SCSW *dest, const SCSW *src); +void copy_esw_to_guest(ESW *dest, const ESW *src); void css_inject_io_interrupt(SubchDev *sch); void css_reset(void); void css_reset_sch(SubchDev *sch); @@ -215,6 +218,8 @@ void css_clear_sei_pending(void); IOInstEnding s390_ccw_cmd_request(SubchDev *sch); IOInstEnding do_subchannel_work_virtual(SubchDev *sub); IOInstEnding do_subchannel_work_passthrough(SubchDev *sub); +void build_irb_passthrough(SubchDev *sch, IRB *irb); +void build_irb_virtual(SubchDev *sch, IRB *irb); int s390_ccw_halt(SubchDev *sch); int s390_ccw_clear(SubchDev *sch); diff --git a/include/hw/s390x/ioinst.h b/include/hw/s390x/ioinst.h index c6737a30d4..3771fff9d4 100644 --- a/include/hw/s390x/ioinst.h +++ b/include/hw/s390x/ioinst.h @@ -123,10 +123,20 @@ typedef struct SCHIB { uint8_t mda[4]; } QEMU_PACKED SCHIB; +/* format-0 extended-status word */ +typedef struct ESW { + uint32_t word0; /* subchannel logout for format 0 */ + uint32_t erw; + uint64_t word2; /* failing-storage address for format 0 */ + uint32_t word4; /* secondary-CCW address for format 0 */ +} QEMU_PACKED ESW; + +#define ESW_ERW_SENSE 0x01000000 + /* interruption response block */ typedef struct IRB { SCSW scsw; - uint32_t esw[5]; + ESW esw; uint32_t ecw[8]; uint32_t emw[8]; } IRB; diff --git a/include/hw/usb/dwc2-regs.h b/include/hw/usb/dwc2-regs.h index 40af23a0ba..a7eb531485 100644 --- a/include/hw/usb/dwc2-regs.h +++ b/include/hw/usb/dwc2-regs.h @@ -39,8 +39,8 @@ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#ifndef __DWC2_HW_H__ -#define __DWC2_HW_H__ +#ifndef DWC2_HW_H +#define DWC2_HW_H #define HSOTG_REG(x) (x) diff --git a/include/hw/usb/xlnx-usb-subsystem.h b/include/hw/usb/xlnx-usb-subsystem.h index 739bef7f45..999e423951 100644 --- a/include/hw/usb/xlnx-usb-subsystem.h +++ b/include/hw/usb/xlnx-usb-subsystem.h @@ -22,8 +22,8 @@ * THE SOFTWARE. */ -#ifndef _XLNX_VERSAL_USB_SUBSYSTEM_H_ -#define _XLNX_VERSAL_USB_SUBSYSTEM_H_ +#ifndef XLNX_VERSAL_USB_SUBSYSTEM_H +#define XLNX_VERSAL_USB_SUBSYSTEM_H #include "hw/usb/xlnx-versal-usb2-ctrl-regs.h" #include "hw/usb/hcd-dwc3.h" diff --git a/include/hw/usb/xlnx-versal-usb2-ctrl-regs.h b/include/hw/usb/xlnx-versal-usb2-ctrl-regs.h index 975a717627..b76dce0419 100644 --- a/include/hw/usb/xlnx-versal-usb2-ctrl-regs.h +++ b/include/hw/usb/xlnx-versal-usb2-ctrl-regs.h @@ -23,8 +23,8 @@ * THE SOFTWARE. 
*/ -#ifndef _XLNX_USB2_REGS_H_ -#define _XLNX_USB2_REGS_H_ +#ifndef XLNX_USB2_REGS_H +#define XLNX_USB2_REGS_H #define TYPE_XILINX_VERSAL_USB2_CTRL_REGS "xlnx.versal-usb2-ctrl-regs" diff --git a/include/qemu/plugin-memory.h b/include/qemu/plugin-memory.h index fbbe99474b..b36def27d7 100644 --- a/include/qemu/plugin-memory.h +++ b/include/qemu/plugin-memory.h @@ -6,8 +6,8 @@ * SPDX-License-Identifier: GPL-2.0-or-later */ -#ifndef _PLUGIN_MEMORY_H_ -#define _PLUGIN_MEMORY_H_ +#ifndef PLUGIN_MEMORY_H +#define PLUGIN_MEMORY_H struct qemu_plugin_hwaddr { bool is_io; diff --git a/include/qemu/plugin.h b/include/qemu/plugin.h index c5a79a89f0..0fefbc6084 100644 --- a/include/qemu/plugin.h +++ b/include/qemu/plugin.h @@ -79,7 +79,6 @@ enum plugin_dyn_cb_subtype { struct qemu_plugin_dyn_cb { union qemu_plugin_cb_sig f; void *userp; - unsigned tcg_flags; enum plugin_dyn_cb_subtype type; /* @rw applies to mem callbacks only (both regular and inline) */ enum qemu_plugin_mem_rw rw; diff --git a/include/qemu/selfmap.h b/include/qemu/selfmap.h index 8382c4c779..80cf920fba 100644 --- a/include/qemu/selfmap.h +++ b/include/qemu/selfmap.h @@ -6,8 +6,8 @@ * SPDX-License-Identifier: GPL-2.0-or-later */ -#ifndef _SELFMAP_H_ -#define _SELFMAP_H_ +#ifndef SELFMAP_H +#define SELFMAP_H typedef struct { unsigned long start; diff --git a/include/tcg/tcg-cond.h b/include/tcg/tcg-cond.h new file mode 100644 index 0000000000..2a38a386d4 --- /dev/null +++ b/include/tcg/tcg-cond.h @@ -0,0 +1,101 @@ +/* + * Tiny Code Generator for QEMU + * + * Copyright (c) 2008 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#ifndef TCG_COND_H +#define TCG_COND_H + +/* + * Conditions. Note that these are laid out for easy manipulation by + * the functions below: + * bit 0 is used for inverting; + * bit 1 is signed, + * bit 2 is unsigned, + * bit 3 is used with bit 0 for swapping signed/unsigned. + */ +typedef enum { + /* non-signed */ + TCG_COND_NEVER = 0 | 0 | 0 | 0, + TCG_COND_ALWAYS = 0 | 0 | 0 | 1, + TCG_COND_EQ = 8 | 0 | 0 | 0, + TCG_COND_NE = 8 | 0 | 0 | 1, + /* signed */ + TCG_COND_LT = 0 | 0 | 2 | 0, + TCG_COND_GE = 0 | 0 | 2 | 1, + TCG_COND_LE = 8 | 0 | 2 | 0, + TCG_COND_GT = 8 | 0 | 2 | 1, + /* unsigned */ + TCG_COND_LTU = 0 | 4 | 0 | 0, + TCG_COND_GEU = 0 | 4 | 0 | 1, + TCG_COND_LEU = 8 | 4 | 0 | 0, + TCG_COND_GTU = 8 | 4 | 0 | 1, +} TCGCond; + +/* Invert the sense of the comparison. 
*/ +static inline TCGCond tcg_invert_cond(TCGCond c) +{ + return (TCGCond)(c ^ 1); +} + +/* Swap the operands in a comparison. */ +static inline TCGCond tcg_swap_cond(TCGCond c) +{ + return c & 6 ? (TCGCond)(c ^ 9) : c; +} + +/* Create an "unsigned" version of a "signed" comparison. */ +static inline TCGCond tcg_unsigned_cond(TCGCond c) +{ + return c & 2 ? (TCGCond)(c ^ 6) : c; +} + +/* Create a "signed" version of an "unsigned" comparison. */ +static inline TCGCond tcg_signed_cond(TCGCond c) +{ + return c & 4 ? (TCGCond)(c ^ 6) : c; +} + +/* Must a comparison be considered unsigned? */ +static inline bool is_unsigned_cond(TCGCond c) +{ + return (c & 4) != 0; +} + +/* + * Create a "high" version of a double-word comparison. + * This removes equality from a LTE or GTE comparison. + */ +static inline TCGCond tcg_high_cond(TCGCond c) +{ + switch (c) { + case TCG_COND_GE: + case TCG_COND_LE: + case TCG_COND_GEU: + case TCG_COND_LEU: + return (TCGCond)(c ^ 8); + default: + return c; + } +} + +#endif /* TCG_COND_H */ diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h index bbb0884af8..5bbec858aa 100644 --- a/include/tcg/tcg-opc.h +++ b/include/tcg/tcg-opc.h @@ -277,8 +277,8 @@ DEF(last_generic, 0, 0, 0, TCG_OPF_NOT_PRESENT) #ifdef TCG_TARGET_INTERPRETER /* These opcodes are only for use between the tci generator and interpreter. */ -DEF(tci_movi_i32, 1, 0, 1, TCG_OPF_NOT_PRESENT) -DEF(tci_movi_i64, 1, 0, 1, TCG_OPF_64BIT | TCG_OPF_NOT_PRESENT) +DEF(tci_movi, 1, 0, 1, TCG_OPF_NOT_PRESENT) +DEF(tci_movl, 1, 0, 1, TCG_OPF_NOT_PRESENT) #endif #undef TLADDR_ARGS diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h index 483e1e1f24..2dad364240 100644 --- a/include/tcg/tcg.h +++ b/include/tcg/tcg.h @@ -33,6 +33,7 @@ #include "tcg/tcg-mo.h" #include "tcg-target.h" #include "qemu/int128.h" +#include "tcg/tcg-cond.h" /* XXX: make safe guess about sizes */ #define MAX_OP_PER_INSTR 266 @@ -52,6 +53,7 @@ #define MAX_OPC_PARAM (4 + (MAX_OPC_PARAM_PER_ARG * MAX_OPC_PARAM_ARGS)) #define CPU_TEMP_BUF_NLONGS 128 +#define TCG_STATIC_FRAME_SIZE (CPU_TEMP_BUF_NLONGS * sizeof(long)) /* Default target word size to pointer size. */ #ifndef TCG_TARGET_REG_BITS @@ -406,75 +408,6 @@ typedef TCGv_ptr TCGv_env; /* Used to align parameters. See the comment before tcgv_i32_temp. */ #define TCG_CALL_DUMMY_ARG ((TCGArg)0) -/* Conditions. Note that these are laid out for easy manipulation by - the functions below: - bit 0 is used for inverting; - bit 1 is signed, - bit 2 is unsigned, - bit 3 is used with bit 0 for swapping signed/unsigned. */ -typedef enum { - /* non-signed */ - TCG_COND_NEVER = 0 | 0 | 0 | 0, - TCG_COND_ALWAYS = 0 | 0 | 0 | 1, - TCG_COND_EQ = 8 | 0 | 0 | 0, - TCG_COND_NE = 8 | 0 | 0 | 1, - /* signed */ - TCG_COND_LT = 0 | 0 | 2 | 0, - TCG_COND_GE = 0 | 0 | 2 | 1, - TCG_COND_LE = 8 | 0 | 2 | 0, - TCG_COND_GT = 8 | 0 | 2 | 1, - /* unsigned */ - TCG_COND_LTU = 0 | 4 | 0 | 0, - TCG_COND_GEU = 0 | 4 | 0 | 1, - TCG_COND_LEU = 8 | 4 | 0 | 0, - TCG_COND_GTU = 8 | 4 | 0 | 1, -} TCGCond; - -/* Invert the sense of the comparison. */ -static inline TCGCond tcg_invert_cond(TCGCond c) -{ - return (TCGCond)(c ^ 1); -} - -/* Swap the operands in a comparison. */ -static inline TCGCond tcg_swap_cond(TCGCond c) -{ - return c & 6 ? (TCGCond)(c ^ 9) : c; -} - -/* Create an "unsigned" version of a "signed" comparison. */ -static inline TCGCond tcg_unsigned_cond(TCGCond c) -{ - return c & 2 ? (TCGCond)(c ^ 6) : c; -} - -/* Create a "signed" version of an "unsigned" comparison. 
*/ -static inline TCGCond tcg_signed_cond(TCGCond c) -{ - return c & 4 ? (TCGCond)(c ^ 6) : c; -} - -/* Must a comparison be considered unsigned? */ -static inline bool is_unsigned_cond(TCGCond c) -{ - return (c & 4) != 0; -} - -/* Create a "high" version of a double-word comparison. - This removes equality from a LTE or GTE comparison. */ -static inline TCGCond tcg_high_cond(TCGCond c) -{ - switch (c) { - case TCG_COND_GE: - case TCG_COND_LE: - case TCG_COND_GEU: - case TCG_COND_LEU: - return (TCGCond)(c ^ 8); - default: - return c; - } -} - typedef enum TCGTempVal { TEMP_VAL_DEAD, TEMP_VAL_REG, diff --git a/include/user/syscall-trace.h b/include/user/syscall-trace.h index 42e3b48b03..614cfacfa5 100644 --- a/include/user/syscall-trace.h +++ b/include/user/syscall-trace.h @@ -7,8 +7,8 @@ * SPDX-License-Identifier: GPL-2.0-or-later */ -#ifndef _SYSCALL_TRACE_H_ -#define _SYSCALL_TRACE_H_ +#ifndef SYSCALL_TRACE_H +#define SYSCALL_TRACE_H #include "trace/trace-root.h" diff --git a/linux-user/elfload.c b/linux-user/elfload.c index 17ab06f612..598ab8aa13 100644 --- a/linux-user/elfload.c +++ b/linux-user/elfload.c @@ -830,7 +830,7 @@ static uint32_t get_elf_hwcap2(void) PPC2_ISA207S), QEMU_PPC_FEATURE2_ARCH_2_07 | QEMU_PPC_FEATURE2_VEC_CRYPTO); GET_FEATURE2(PPC2_ISA300, QEMU_PPC_FEATURE2_ARCH_3_00 | - QEMU_PPC_FEATURE2_DARN); + QEMU_PPC_FEATURE2_DARN | QEMU_PPC_FEATURE2_HAS_IEEE128); #undef GET_FEATURE #undef GET_FEATURE2 @@ -1376,6 +1376,7 @@ static uint32_t get_elf_hwcap(void) hwcap |= HWCAP_S390_ETF3EH; } GET_FEATURE(S390_FEAT_VECTOR, HWCAP_S390_VXRS); + GET_FEATURE(S390_FEAT_VECTOR_ENH, HWCAP_S390_VXRS_EXT); return hwcap; } diff --git a/linux-user/mmap.c b/linux-user/mmap.c index 7e3b245036..0e103859fe 100644 --- a/linux-user/mmap.c +++ b/linux-user/mmap.c @@ -451,6 +451,20 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot, goto fail; } + /* + * If we're mapping shared memory, ensure we generate code for parallel + * execution and flush old translations. This will work up to the level + * supported by the host -- anything that requires EXCP_ATOMIC will not + * be atomic with respect to an external process. + */ + if (flags & MAP_SHARED) { + CPUState *cpu = thread_cpu; + if (!(cpu->tcg_cflags & CF_PARALLEL)) { + cpu->tcg_cflags |= CF_PARALLEL; + tb_flush(cpu); + } + } + real_start = start & qemu_host_page_mask; host_offset = offset & qemu_host_page_mask; diff --git a/linux-user/s390x/signal.c b/linux-user/s390x/signal.c index ef136dae33..bf8a8fbfe9 100644 --- a/linux-user/s390x/signal.c +++ b/linux-user/s390x/signal.c @@ -112,15 +112,23 @@ get_sigframe(struct target_sigaction *ka, CPUS390XState *env, size_t frame_size) return (sp - frame_size) & -8ul; } +#define PSW_USER_BITS (PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | \ + PSW_MASK_MCHECK | PSW_MASK_PSTATE | PSW_ASC_PRIMARY) +#define PSW_MASK_USER (PSW_MASK_ASC | PSW_MASK_CC | PSW_MASK_PM | \ + PSW_MASK_64 | PSW_MASK_32) + static void save_sigregs(CPUS390XState *env, target_sigregs *sregs) { + uint64_t psw_mask = s390_cpu_get_psw_mask(env); int i; /* * Copy a 'clean' PSW mask to the user to avoid leaking * information about whether PER is currently on. + * TODO: qemu does not support PSW_MASK_RI; it will never be set. 
*/ - __put_user(env->psw.mask, &sregs->regs.psw.mask); + psw_mask = PSW_USER_BITS | (psw_mask & PSW_MASK_USER); + __put_user(psw_mask, &sregs->regs.psw.mask); __put_user(env->psw.addr, &sregs->regs.psw.addr); for (i = 0; i < 16; i++) { @@ -289,7 +297,7 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, static void restore_sigregs(CPUS390XState *env, target_sigregs *sc) { - target_ulong prev_addr; + uint64_t prev_addr, prev_mask, mask, addr; int i; for (i = 0; i < 16; i++) { @@ -297,9 +305,28 @@ static void restore_sigregs(CPUS390XState *env, target_sigregs *sc) } prev_addr = env->psw.addr; - __get_user(env->psw.mask, &sc->regs.psw.mask); - __get_user(env->psw.addr, &sc->regs.psw.addr); - trace_user_s390x_restore_sigregs(env, env->psw.addr, prev_addr); + __get_user(mask, &sc->regs.psw.mask); + __get_user(addr, &sc->regs.psw.addr); + trace_user_s390x_restore_sigregs(env, addr, prev_addr); + + /* + * Use current psw.mask to preserve PER bit. + * TODO: + * if (!is_ri_task(current) && (user_sregs.regs.psw.mask & PSW_MASK_RI)) + * return -EINVAL; + * Simply do not allow it to be set in mask. + */ + prev_mask = s390_cpu_get_psw_mask(env); + mask = (prev_mask & ~PSW_MASK_USER) | (mask & PSW_MASK_USER); + /* Check for invalid user address space control. */ + if ((mask & PSW_MASK_ASC) == PSW_ASC_HOME) { + mask = (mask & ~PSW_MASK_ASC) | PSW_ASC_PRIMARY; + } + /* Check for invalid amode. */ + if (mask & PSW_MASK_64) { + mask |= PSW_MASK_32; + } + s390_cpu_set_psw(env, mask, addr); for (i = 0; i < 16; i++) { __get_user(env->aregs[i], &sc->regs.acrs[i]); diff --git a/linux-user/signal.c b/linux-user/signal.c index 9016896dcd..a8faea6f09 100644 --- a/linux-user/signal.c +++ b/linux-user/signal.c @@ -38,7 +38,9 @@ static void host_signal_handler(int host_signum, siginfo_t *info, * Signal number 0 is reserved for use as kill(pid, 0), to test whether * a process exists without sending it a signal. */ +#ifdef __SIGRTMAX QEMU_BUILD_BUG_ON(__SIGRTMAX + 1 != _NSIG); +#endif static uint8_t host_to_target_signal_table[_NSIG] = { [SIGHUP] = TARGET_SIGHUP, [SIGINT] = TARGET_SIGINT, @@ -851,7 +853,11 @@ int do_sigaction(int sig, const struct target_sigaction *act, trace_signal_do_sigaction_guest(sig, TARGET_NSIG); - if (sig < 1 || sig > TARGET_NSIG || sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP) { + if (sig < 1 || sig > TARGET_NSIG) { + return -TARGET_EINVAL; + } + + if (act && (sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP)) { return -TARGET_EINVAL; } diff --git a/linux-user/syscall.c b/linux-user/syscall.c index 974dd46c9a..64bbf331b2 100644 --- a/linux-user/syscall.c +++ b/linux-user/syscall.c @@ -4603,6 +4603,7 @@ static inline abi_ulong target_shmlba(CPUArchState *cpu_env) static inline abi_ulong do_shmat(CPUArchState *cpu_env, int shmid, abi_ulong shmaddr, int shmflg) { + CPUState *cpu = env_cpu(cpu_env); abi_long raddr; void *host_raddr; struct shmid_ds shm_info; @@ -4633,6 +4634,17 @@ static inline abi_ulong do_shmat(CPUArchState *cpu_env, mmap_lock(); + /* + * We're mapping shared memory, so ensure we generate code for parallel + * execution and flush old translations. This will work up to the level + * supported by the host -- anything that requires EXCP_ATOMIC will not + * be atomic with respect to an external process. 
+ */ + if (!(cpu->tcg_cflags & CF_PARALLEL)) { + cpu->tcg_cflags |= CF_PARALLEL; + tb_flush(cpu); + } + if (shmaddr) host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg); else { @@ -7393,6 +7405,10 @@ static inline abi_long host_to_target_timex64(abi_long target_addr, } #endif +#ifndef HAVE_SIGEV_NOTIFY_THREAD_ID +#define sigev_notify_thread_id _sigev_un._tid +#endif + static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp, abi_ulong target_addr) { @@ -7413,7 +7429,7 @@ static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp, host_sevp->sigev_signo = target_to_host_signal(tswap32(target_sevp->sigev_signo)); host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify); - host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid); + host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid); unlock_user_struct(target_sevp, target_addr, 1); return 0; @@ -7470,7 +7486,7 @@ static inline abi_long host_to_target_stat64(void *cpu_env, __put_user(host_st->st_atime, &target_st->target_st_atime); __put_user(host_st->st_mtime, &target_st->target_st_mtime); __put_user(host_st->st_ctime, &target_st->target_st_ctime); -#if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700 +#ifdef HAVE_STRUCT_STAT_ST_ATIM __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec); __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec); __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec); @@ -7505,7 +7521,7 @@ static inline abi_long host_to_target_stat64(void *cpu_env, __put_user(host_st->st_atime, &target_st->target_st_atime); __put_user(host_st->st_mtime, &target_st->target_st_mtime); __put_user(host_st->st_ctime, &target_st->target_st_ctime); -#if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700 +#ifdef HAVE_STRUCT_STAT_ST_ATIM __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec); __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec); __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec); @@ -8245,6 +8261,10 @@ static int host_to_target_cpu_mask(const unsigned long *host_mask, return 0; } +#if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root) +_syscall2(int, pivot_root, const char *, new_root, const char *, put_old) +#endif + /* This is an internal helper for do_syscall so that it is easier * to have a single return point, so that actions, such as logging * of syscall results, can be performed. 
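For context on the _syscall2(int, pivot_root, ...) wrapper introduced above: glibc ships no pivot_root() function, so QEMU generates its own thin wrapper. Roughly, and only as a sketch rather than the actual macro expansion, it amounts to a direct syscall(2) invocation, which is why the hunk is guarded on both the guest's TARGET_NR_pivot_root and the host's __NR_pivot_root.

    #include <unistd.h>
    #include <sys/syscall.h>

    /* Roughly what _syscall2(int, pivot_root, const char *, new_root,
     * const char *, put_old) provides: a raw syscall wrapper. */
    static int pivot_root(const char *new_root, const char *put_old)
    {
        return syscall(__NR_pivot_root, new_root, put_old);
    }
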
@@ -10056,8 +10076,7 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1, __put_user(st.st_atime, &target_st->target_st_atime); __put_user(st.st_mtime, &target_st->target_st_mtime); __put_user(st.st_ctime, &target_st->target_st_ctime); -#if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \ - defined(TARGET_STAT_HAVE_NSEC) +#if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC) __put_user(st.st_atim.tv_nsec, &target_st->target_st_atime_nsec); __put_user(st.st_mtim.tv_nsec, @@ -13208,6 +13227,23 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1, return ret; #endif +#if defined(TARGET_NR_pivot_root) + case TARGET_NR_pivot_root: + { + void *p2; + p = lock_user_string(arg1); /* new_root */ + p2 = lock_user_string(arg2); /* put_old */ + if (!p || !p2) { + ret = -TARGET_EFAULT; + } else { + ret = get_errno(pivot_root(p, p2)); + } + unlock_user(p2, arg2, 0); + unlock_user(p, arg1, 0); + } + return ret; +#endif + default: qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num); return -TARGET_ENOSYS; diff --git a/linux-user/trace-events b/linux-user/trace-events index 1ec0d11ee3..e7d2f54e94 100644 --- a/linux-user/trace-events +++ b/linux-user/trace-events @@ -11,7 +11,7 @@ user_do_rt_sigreturn(void *env, uint64_t frame_addr) "env=%p frame_addr=0x%"PRIx user_do_sigreturn(void *env, uint64_t frame_addr) "env=%p frame_addr=0x%"PRIx64 user_force_sig(void *env, int target_sig, int host_sig) "env=%p signal %d (host %d)" user_handle_signal(void *env, int target_sig) "env=%p signal %d" -user_host_signal(void *env, int host_sig, int target_sig) "env=%p signal %d (target %d(" +user_host_signal(void *env, int host_sig, int target_sig) "env=%p signal %d (target %d)" user_queue_signal(void *env, int target_sig) "env=%p signal %d" user_s390x_restore_sigregs(void *env, uint64_t sc_psw_addr, uint64_t env_psw_addr) "env=%p frame psw.addr 0x%"PRIx64 " current psw.addr 0x%"PRIx64 diff --git a/plugins/core.c b/plugins/core.c index 55d188af51..e1bcdb570d 100644 --- a/plugins/core.c +++ b/plugins/core.c @@ -295,33 +295,15 @@ void plugin_register_inline_op(GArray **arr, dyn_cb->inline_insn.imm = imm; } -static inline uint32_t cb_to_tcg_flags(enum qemu_plugin_cb_flags flags) -{ - uint32_t ret; - - switch (flags) { - case QEMU_PLUGIN_CB_RW_REGS: - ret = 0; - break; - case QEMU_PLUGIN_CB_R_REGS: - ret = TCG_CALL_NO_WG; - break; - case QEMU_PLUGIN_CB_NO_REGS: - default: - ret = TCG_CALL_NO_RWG; - } - return ret; -} - -inline void -plugin_register_dyn_cb__udata(GArray **arr, - qemu_plugin_vcpu_udata_cb_t cb, - enum qemu_plugin_cb_flags flags, void *udata) +void plugin_register_dyn_cb__udata(GArray **arr, + qemu_plugin_vcpu_udata_cb_t cb, + enum qemu_plugin_cb_flags flags, + void *udata) { struct qemu_plugin_dyn_cb *dyn_cb = plugin_get_dyn_cb(arr); dyn_cb->userp = udata; - dyn_cb->tcg_flags = cb_to_tcg_flags(flags); + /* Note flags are discarded as unused. */ dyn_cb->f.vcpu_udata = cb; dyn_cb->type = PLUGIN_CB_REGULAR; } @@ -336,7 +318,7 @@ void plugin_register_vcpu_mem_cb(GArray **arr, dyn_cb = plugin_get_dyn_cb(arr); dyn_cb->userp = udata; - dyn_cb->tcg_flags = cb_to_tcg_flags(flags); + /* Note flags are discarded as unused. 
*/ dyn_cb->type = PLUGIN_CB_REGULAR; dyn_cb->rw = rw; dyn_cb->f.generic = cb; diff --git a/plugins/plugin.h b/plugins/plugin.h index 55017e3581..b13677d0dc 100644 --- a/plugins/plugin.h +++ b/plugins/plugin.h @@ -9,8 +9,8 @@ * SPDX-License-Identifier: GPL-2.0-or-later */ -#ifndef _PLUGIN_INTERNAL_H_ -#define _PLUGIN_INTERNAL_H_ +#ifndef PLUGIN_INTERNAL_H +#define PLUGIN_INTERNAL_H #include <gmodule.h> #include "qemu/qht.h" diff --git a/python/Pipfile.lock b/python/Pipfile.lock index 6e344f5fad..5bb3f1b635 100644 --- a/python/Pipfile.lock +++ b/python/Pipfile.lock @@ -22,6 +22,13 @@ } }, "develop": { + "appdirs": { + "hashes": [ + "sha256:7d5d0167b2b1ba821647616af46a749d1c653740dd0d2415100fe26e27afdf41", + "sha256:a841dacd6b99318a741b166adb07e19ee71a274450e68237b4650ca1055ab128" + ], + "version": "==1.4.4" + }, "astroid": { "hashes": [ "sha256:4db03ab5fc3340cf619dbc25e42c2cc3755154ce6009469766d7143d1fc2ee4e", @@ -38,6 +45,20 @@ "markers": "python_version >= '3.6'", "version": "==88.1" }, + "distlib": { + "hashes": [ + "sha256:106fef6dc37dd8c0e2c0a60d3fca3e77460a48907f335fa28420463a6f799736", + "sha256:23e223426b28491b1ced97dc3bbe183027419dfc7982b4fa2f05d5f3ff10711c" + ], + "version": "==0.3.2" + }, + "filelock": { + "hashes": [ + "sha256:18d82244ee114f543149c66a6e0c14e9c4f8a1044b5cdaadd0f82159d6a6ff59", + "sha256:929b7d63ec5b7d6b71b0fa5ac14e030b3f70b75747cef1b10da9b879fef15836" + ], + "version": "==3.0.12" + }, "flake8": { "hashes": [ "sha256:07528381786f2a6237b061f6e96610a4167b226cb926e2aa2b6b1d78057c576b", @@ -46,6 +67,12 @@ "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", "version": "==3.9.2" }, + "fusepy": { + "hashes": [ + "sha256:72ff783ec2f43de3ab394e3f7457605bf04c8cf288a2f4068b4cde141d4ee6bd" + ], + "version": "==3.0.1" + }, "importlib-metadata": { "hashes": [ "sha256:8c501196e49fb9df5df43833bdb1e4328f64847763ec8a50703148b73784d581", @@ -54,6 +81,14 @@ "markers": "python_version < '3.8'", "version": "==4.0.1" }, + "importlib-resources": { + "hashes": [ + "sha256:54161657e8ffc76596c4ede7080ca68cb02962a2e074a2586b695a93a925d36e", + "sha256:e962bff7440364183203d179d7ae9ad90cb1f2b74dcb84300e88ecc42dca3351" + ], + "markers": "python_version < '3.7'", + "version": "==5.1.4" + }, "isort": { "hashes": [ "sha256:0a943902919f65c5684ac4e0154b1ad4fac6dcaa5d9f3426b732f1c8b5419be6", @@ -132,6 +167,30 @@ ], "version": "==0.4.3" }, + "packaging": { + "hashes": [ + "sha256:5b327ac1320dc863dca72f4514ecc086f31186744b84a230374cc1fd776feae5", + "sha256:67714da7f7bc052e064859c05c595155bd1ee9f69f76557e21f051443c20947a" + ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", + "version": "==20.9" + }, + "pluggy": { + "hashes": [ + "sha256:15b2acde666561e1298d71b523007ed7364de07029219b604cf808bfa1c765b0", + "sha256:966c145cd83c96502c3c3868f50408687b38434af77734af1e9ca461a4081d2d" + ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", + "version": "==0.13.1" + }, + "py": { + "hashes": [ + "sha256:21b81bda15b66ef5e1a777a21c4dcd9c20ad3efd0b3f817e7a809035269e1bd3", + "sha256:3b80836aa6d1feeaa108e046da6423ab8f6ceda6468545ae8d02d9d58d18818a" + ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", + "version": "==1.10.0" + }, "pycodestyle": { "hashes": [ "sha256:514f76d918fcc0b55c6680472f0a37970994e07bbb80725808c17089be302068", @@ -156,18 +215,42 @@ "markers": "python_version ~= '3.6'", "version": "==2.8.2" }, + "pyparsing": { + "hashes": [ + 
"sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1", + "sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b" + ], + "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'", + "version": "==2.4.7" + }, "qemu": { "editable": true, "path": "." }, + "six": { + "hashes": [ + "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926", + "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254" + ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", + "version": "==1.16.0" + }, "toml": { "hashes": [ "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b", "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f" ], - "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2'", + "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==0.10.2" }, + "tox": { + "hashes": [ + "sha256:307a81ddb82bd463971a273f33e9533a24ed22185f27db8ce3386bff27d324e3", + "sha256:b0b5818049a1c1997599d42012a637a33f24c62ab8187223fdd318fa8522637b" + ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", + "version": "==3.23.1" + }, "typed-ast": { "hashes": [ "sha256:01ae5f73431d21eead5015997ab41afa53aa1fbe252f9da060be5dad2c730ace", @@ -201,7 +284,7 @@ "sha256:f8afcf15cc511ada719a88e013cec87c11aff7b91f019295eb4530f96fe5ef2f", "sha256:fb1bbeac803adea29cedd70781399c99138358c26d05fcbd23c13016b7f5ec65" ], - "markers": "implementation_name == 'cpython' and python_version < '3.8'", + "markers": "python_version < '3.8' and implementation_name == 'cpython'", "version": "==1.4.3" }, "typing-extensions": { @@ -213,6 +296,14 @@ "markers": "python_version < '3.8'", "version": "==3.10.0.0" }, + "virtualenv": { + "hashes": [ + "sha256:14fdf849f80dbb29a4eb6caa9875d476ee2a5cf76a5f5415fa2f1606010ab467", + "sha256:2b0126166ea7c9c3661f5b8e06773d28f83322de7a3ff7d06f0aed18c9de6a76" + ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", + "version": "==20.4.7" + }, "wrapt": { "hashes": [ "sha256:b62ffa81fb85f4332a4f609cab4ac40709470da05643a082ec1eb88e6d9b97d7" @@ -224,7 +315,7 @@ "sha256:3607921face881ba3e026887d8150cca609d517579abe052ac81fc5aeffdbd76", "sha256:51cb66cc54621609dd593d1787f286ee42a5c0adbb4b29abea5a63edc3e03098" ], - "markers": "python_version >= '3.6'", + "markers": "python_version < '3.10'", "version": "==3.4.1" } } diff --git a/python/qemu/qmp/__init__.py b/python/qemu/qmp/__init__.py index 9606248a3d..376954cb6d 100644 --- a/python/qemu/qmp/__init__.py +++ b/python/qemu/qmp/__init__.py @@ -30,21 +30,30 @@ from typing import ( TextIO, Tuple, Type, + TypeVar, Union, cast, ) -# QMPMessage is a QMP Message of any kind. -# e.g. {'yee': 'haw'} +#: QMPMessage is an entire QMP message of any kind. +QMPMessage = Dict[str, Any] + +#: QMPReturnValue is the 'return' value of a command. +QMPReturnValue = object + +#: QMPObject is any object in a QMP message. +QMPObject = Dict[str, object] + +# QMPMessage can be outgoing commands or incoming events/returns. +# QMPReturnValue is usually a dict/json object, but due to QAPI's +# 'returns-whitelist', it can actually be anything. # -# QMPReturnValue is the inner value of return values only. -# {'return': {}} is the QMPMessage, +# {'return': {}} is a QMPMessage, # {} is the QMPReturnValue. 
-QMPMessage = Dict[str, Any] -QMPReturnValue = Dict[str, Any] -InternetAddrT = Tuple[str, str] + +InternetAddrT = Tuple[str, int] UnixAddrT = str SocketAddrT = Union[InternetAddrT, UnixAddrT] @@ -92,6 +101,12 @@ class QMPResponseError(QMPError): self.reply = reply +class QMPBadPortError(QMPError): + """ + Unable to parse socket address: Port was non-numerical. + """ + + class QEMUMonitorProtocol: """ Provide an API to connect to QEMU via QEMU Monitor Protocol (QMP) and then @@ -206,7 +221,9 @@ class QEMUMonitorProtocol: if ret is None: raise QMPConnectError("Error while reading from socket") - def __enter__(self) -> 'QEMUMonitorProtocol': + T = TypeVar('T') + + def __enter__(self: T) -> T: # Implement context manager enter function. return self @@ -219,6 +236,26 @@ class QEMUMonitorProtocol: # Implement context manager exit function. self.close() + @classmethod + def parse_address(cls, address: str) -> SocketAddrT: + """ + Parse a string into a QMP address. + + Figure out if the argument is in the port:host form. + If it's not, it's probably a file path. + """ + components = address.split(':') + if len(components) == 2: + try: + port = int(components[1]) + except ValueError: + msg = f"Bad port: '{components[1]}' in '{address}'." + raise QMPBadPortError(msg) from None + return (components[0], port) + + # Treat as filepath. + return address + def connect(self, negotiate: bool = True) -> Optional[QMPMessage]: """ Connect to the QMP Monitor and perform capabilities negotiation. @@ -271,8 +308,8 @@ class QEMUMonitorProtocol: return resp def cmd(self, name: str, - args: Optional[Dict[str, Any]] = None, - cmd_id: Optional[Any] = None) -> QMPMessage: + args: Optional[Dict[str, object]] = None, + cmd_id: Optional[object] = None) -> QMPMessage: """ Build a QMP command and send it to the QMP Monitor. @@ -287,7 +324,7 @@ class QEMUMonitorProtocol: qmp_cmd['id'] = cmd_id return self.cmd_obj(qmp_cmd) - def command(self, cmd: str, **kwds: Any) -> QMPReturnValue: + def command(self, cmd: str, **kwds: object) -> QMPReturnValue: """ Build and send a QMP command to the monitor, report errors if any """ diff --git a/python/qemu/qmp/qemu_ga_client.py b/python/qemu/qmp/qemu_ga_client.py new file mode 100644 index 0000000000..67ac0b4211 --- /dev/null +++ b/python/qemu/qmp/qemu_ga_client.py @@ -0,0 +1,323 @@ +""" +QEMU Guest Agent Client + +Usage: + +Start QEMU with: + +# qemu [...] -chardev socket,path=/tmp/qga.sock,server,wait=off,id=qga0 \ + -device virtio-serial \ + -device virtserialport,chardev=qga0,name=org.qemu.guest_agent.0 + +Run the script: + +$ qemu-ga-client --address=/tmp/qga.sock <command> [args...] + +or + +$ export QGA_CLIENT_ADDRESS=/tmp/qga.sock +$ qemu-ga-client <command> [args...] + +For example: + +$ qemu-ga-client cat /etc/resolv.conf +# Generated by NetworkManager +nameserver 10.0.2.3 +$ qemu-ga-client fsfreeze status +thawed +$ qemu-ga-client fsfreeze freeze +2 filesystems frozen + +See also: https://wiki.qemu.org/Features/QAPI/GuestAgent +""" + +# Copyright (C) 2012 Ryota Ozaki <ozaki.ryota@gmail.com> +# +# This work is licensed under the terms of the GNU GPL, version 2. See +# the COPYING file in the top-level directory. + +import argparse +import base64 +import errno +import os +import random +import sys +from typing import ( + Any, + Callable, + Dict, + Optional, + Sequence, +) + +from qemu import qmp +from qemu.qmp import SocketAddrT + + +# This script has not seen many patches or careful attention in quite +# some time. 
If you would like to improve it, please review the design +# carefully and add docstrings at that point in time. Until then: + +# pylint: disable=missing-docstring + + +class QemuGuestAgent(qmp.QEMUMonitorProtocol): + def __getattr__(self, name: str) -> Callable[..., Any]: + def wrapper(**kwds: object) -> object: + return self.command('guest-' + name.replace('_', '-'), **kwds) + return wrapper + + +class QemuGuestAgentClient: + def __init__(self, address: SocketAddrT): + self.qga = QemuGuestAgent(address) + self.qga.connect(negotiate=False) + + def sync(self, timeout: Optional[float] = 3) -> None: + # Avoid being blocked forever + if not self.ping(timeout): + raise EnvironmentError('Agent seems not alive') + uid = random.randint(0, (1 << 32) - 1) + while True: + ret = self.qga.sync(id=uid) + if isinstance(ret, int) and int(ret) == uid: + break + + def __file_read_all(self, handle: int) -> bytes: + eof = False + data = b'' + while not eof: + ret = self.qga.file_read(handle=handle, count=1024) + _data = base64.b64decode(ret['buf-b64']) + data += _data + eof = ret['eof'] + return data + + def read(self, path: str) -> bytes: + handle = self.qga.file_open(path=path) + try: + data = self.__file_read_all(handle) + finally: + self.qga.file_close(handle=handle) + return data + + def info(self) -> str: + info = self.qga.info() + + msgs = [] + msgs.append('version: ' + info['version']) + msgs.append('supported_commands:') + enabled = [c['name'] for c in info['supported_commands'] + if c['enabled']] + msgs.append('\tenabled: ' + ', '.join(enabled)) + disabled = [c['name'] for c in info['supported_commands'] + if not c['enabled']] + msgs.append('\tdisabled: ' + ', '.join(disabled)) + + return '\n'.join(msgs) + + @classmethod + def __gen_ipv4_netmask(cls, prefixlen: int) -> str: + mask = int('1' * prefixlen + '0' * (32 - prefixlen), 2) + return '.'.join([str(mask >> 24), + str((mask >> 16) & 0xff), + str((mask >> 8) & 0xff), + str(mask & 0xff)]) + + def ifconfig(self) -> str: + nifs = self.qga.network_get_interfaces() + + msgs = [] + for nif in nifs: + msgs.append(nif['name'] + ':') + if 'ip-addresses' in nif: + for ipaddr in nif['ip-addresses']: + if ipaddr['ip-address-type'] == 'ipv4': + addr = ipaddr['ip-address'] + mask = self.__gen_ipv4_netmask(int(ipaddr['prefix'])) + msgs.append(f"\tinet {addr} netmask {mask}") + elif ipaddr['ip-address-type'] == 'ipv6': + addr = ipaddr['ip-address'] + prefix = ipaddr['prefix'] + msgs.append(f"\tinet6 {addr} prefixlen {prefix}") + if nif['hardware-address'] != '00:00:00:00:00:00': + msgs.append("\tether " + nif['hardware-address']) + + return '\n'.join(msgs) + + def ping(self, timeout: Optional[float]) -> bool: + self.qga.settimeout(timeout) + try: + self.qga.ping() + except TimeoutError: + return False + return True + + def fsfreeze(self, cmd: str) -> object: + if cmd not in ['status', 'freeze', 'thaw']: + raise Exception('Invalid command: ' + cmd) + # Can be int (freeze, thaw) or GuestFsfreezeStatus (status) + return getattr(self.qga, 'fsfreeze' + '_' + cmd)() + + def fstrim(self, minimum: int) -> Dict[str, object]: + # returns GuestFilesystemTrimResponse + ret = getattr(self.qga, 'fstrim')(minimum=minimum) + assert isinstance(ret, dict) + return ret + + def suspend(self, mode: str) -> None: + if mode not in ['disk', 'ram', 'hybrid']: + raise Exception('Invalid mode: ' + mode) + + try: + getattr(self.qga, 'suspend' + '_' + mode)() + # On error exception will raise + except TimeoutError: + # On success command will timed out + return + + def shutdown(self, mode: 
str = 'powerdown') -> None: + if mode not in ['powerdown', 'halt', 'reboot']: + raise Exception('Invalid mode: ' + mode) + + try: + self.qga.shutdown(mode=mode) + except TimeoutError: + pass + + +def _cmd_cat(client: QemuGuestAgentClient, args: Sequence[str]) -> None: + if len(args) != 1: + print('Invalid argument') + print('Usage: cat <file>') + sys.exit(1) + print(client.read(args[0])) + + +def _cmd_fsfreeze(client: QemuGuestAgentClient, args: Sequence[str]) -> None: + usage = 'Usage: fsfreeze status|freeze|thaw' + if len(args) != 1: + print('Invalid argument') + print(usage) + sys.exit(1) + if args[0] not in ['status', 'freeze', 'thaw']: + print('Invalid command: ' + args[0]) + print(usage) + sys.exit(1) + cmd = args[0] + ret = client.fsfreeze(cmd) + if cmd == 'status': + print(ret) + return + + assert isinstance(ret, int) + verb = 'frozen' if cmd == 'freeze' else 'thawed' + print(f"{ret:d} filesystems {verb}") + + +def _cmd_fstrim(client: QemuGuestAgentClient, args: Sequence[str]) -> None: + if len(args) == 0: + minimum = 0 + else: + minimum = int(args[0]) + print(client.fstrim(minimum)) + + +def _cmd_ifconfig(client: QemuGuestAgentClient, args: Sequence[str]) -> None: + assert not args + print(client.ifconfig()) + + +def _cmd_info(client: QemuGuestAgentClient, args: Sequence[str]) -> None: + assert not args + print(client.info()) + + +def _cmd_ping(client: QemuGuestAgentClient, args: Sequence[str]) -> None: + timeout = 3.0 if len(args) == 0 else float(args[0]) + alive = client.ping(timeout) + if not alive: + print("Not responded in %s sec" % args[0]) + sys.exit(1) + + +def _cmd_suspend(client: QemuGuestAgentClient, args: Sequence[str]) -> None: + usage = 'Usage: suspend disk|ram|hybrid' + if len(args) != 1: + print('Less argument') + print(usage) + sys.exit(1) + if args[0] not in ['disk', 'ram', 'hybrid']: + print('Invalid command: ' + args[0]) + print(usage) + sys.exit(1) + client.suspend(args[0]) + + +def _cmd_shutdown(client: QemuGuestAgentClient, args: Sequence[str]) -> None: + assert not args + client.shutdown() + + +_cmd_powerdown = _cmd_shutdown + + +def _cmd_halt(client: QemuGuestAgentClient, args: Sequence[str]) -> None: + assert not args + client.shutdown('halt') + + +def _cmd_reboot(client: QemuGuestAgentClient, args: Sequence[str]) -> None: + assert not args + client.shutdown('reboot') + + +commands = [m.replace('_cmd_', '') for m in dir() if '_cmd_' in m] + + +def send_command(address: str, cmd: str, args: Sequence[str]) -> None: + if not os.path.exists(address): + print('%s not found' % address) + sys.exit(1) + + if cmd not in commands: + print('Invalid command: ' + cmd) + print('Available commands: ' + ', '.join(commands)) + sys.exit(1) + + try: + client = QemuGuestAgentClient(address) + except OSError as err: + print(err) + if err.errno == errno.ECONNREFUSED: + print('Hint: qemu is not running?') + sys.exit(1) + + if cmd == 'fsfreeze' and args[0] == 'freeze': + client.sync(60) + elif cmd != 'ping': + client.sync() + + globals()['_cmd_' + cmd](client, args) + + +def main() -> None: + address = os.environ.get('QGA_CLIENT_ADDRESS') + + parser = argparse.ArgumentParser() + parser.add_argument('--address', action='store', + default=address, + help='Specify a ip:port pair or a unix socket path') + parser.add_argument('command', choices=commands) + parser.add_argument('args', nargs='*') + + args = parser.parse_args() + if args.address is None: + parser.error('address is not specified') + sys.exit(1) + + send_command(args.address, args.command, args.args) + + +if __name__ == 
'__main__': + main() diff --git a/python/qemu/qmp/qmp_shell.py b/python/qemu/qmp/qmp_shell.py new file mode 100644 index 0000000000..337acfce2d --- /dev/null +++ b/python/qemu/qmp/qmp_shell.py @@ -0,0 +1,535 @@ +# +# Copyright (C) 2009, 2010 Red Hat Inc. +# +# Authors: +# Luiz Capitulino <lcapitulino@redhat.com> +# +# This work is licensed under the terms of the GNU GPL, version 2. See +# the COPYING file in the top-level directory. +# + +""" +Low-level QEMU shell on top of QMP. + +usage: qmp-shell [-h] [-H] [-N] [-v] [-p] qmp_server + +positional arguments: + qmp_server < UNIX socket path | TCP address:port > + +optional arguments: + -h, --help show this help message and exit + -H, --hmp Use HMP interface + -N, --skip-negotiation + Skip negotiate (for qemu-ga) + -v, --verbose Verbose (echo commands sent and received) + -p, --pretty Pretty-print JSON + + +Start QEMU with: + +# qemu [...] -qmp unix:./qmp-sock,server + +Run the shell: + +$ qmp-shell ./qmp-sock + +Commands have the following format: + + < command-name > [ arg-name1=arg1 ] ... [ arg-nameN=argN ] + +For example: + +(QEMU) device_add driver=e1000 id=net1 +{'return': {}} +(QEMU) + +key=value pairs also support Python or JSON object literal subset notations, +without spaces. Dictionaries/objects {} are supported as are arrays []. + + example-command arg-name1={'key':'value','obj'={'prop':"value"}} + +Both JSON and Python formatting should work, including both styles of +string literal quotes. Both paradigms of literal values should work, +including null/true/false for JSON and None/True/False for Python. + + +Transactions have the following multi-line format: + + transaction( + action-name1 [ arg-name1=arg1 ] ... [arg-nameN=argN ] + ... + action-nameN [ arg-name1=arg1 ] ... [arg-nameN=argN ] + ) + +One line transactions are also supported: + + transaction( action-name1 ... ) + +For example: + + (QEMU) transaction( + TRANS> block-dirty-bitmap-add node=drive0 name=bitmap1 + TRANS> block-dirty-bitmap-clear node=drive0 name=bitmap0 + TRANS> ) + {"return": {}} + (QEMU) + +Use the -v and -p options to activate the verbose and pretty-print options, +which will echo back the properly formatted JSON-compliant QMP that is being +sent to QEMU, which is useful for debugging and documentation generation. +""" + +import argparse +import ast +import json +import logging +import os +import re +import readline +import sys +from typing import ( + Iterator, + List, + NoReturn, + Optional, + Sequence, +) + +from qemu import qmp +from qemu.qmp import QMPMessage + + +LOG = logging.getLogger(__name__) + + +class QMPCompleter: + """ + QMPCompleter provides a readline library tab-complete behavior. + """ + # NB: Python 3.9+ will probably allow us to subclass list[str] directly, + # but pylint as of today does not know that List[str] is simply 'list'. + def __init__(self) -> None: + self._matches: List[str] = [] + + def append(self, value: str) -> None: + """Append a new valid completion to the list of possibilities.""" + return self._matches.append(value) + + def complete(self, text: str, state: int) -> Optional[str]: + """readline.set_completer() callback implementation.""" + for cmd in self._matches: + if cmd.startswith(text): + if state == 0: + return cmd + state -= 1 + return None + + +class QMPShellError(qmp.QMPError): + """ + QMP Shell Base error class. 
+ """ + + +class FuzzyJSON(ast.NodeTransformer): + """ + This extension of ast.NodeTransformer filters literal "true/false/null" + values in a Python AST and replaces them by proper "True/False/None" values + that Python can properly evaluate. + """ + + @classmethod + def visit_Name(cls, # pylint: disable=invalid-name + node: ast.Name) -> ast.AST: + """ + Transform Name nodes with certain values into Constant (keyword) nodes. + """ + if node.id == 'true': + return ast.Constant(value=True) + if node.id == 'false': + return ast.Constant(value=False) + if node.id == 'null': + return ast.Constant(value=None) + return node + + +class QMPShell(qmp.QEMUMonitorProtocol): + """ + QMPShell provides a basic readline-based QMP shell. + + :param address: Address of the QMP server. + :param pretty: Pretty-print QMP messages. + :param verbose: Echo outgoing QMP messages to console. + """ + def __init__(self, address: qmp.SocketAddrT, + pretty: bool = False, verbose: bool = False): + super().__init__(address) + self._greeting: Optional[QMPMessage] = None + self._completer = QMPCompleter() + self._transmode = False + self._actions: List[QMPMessage] = [] + self._histfile = os.path.join(os.path.expanduser('~'), + '.qmp-shell_history') + self.pretty = pretty + self.verbose = verbose + + def close(self) -> None: + # Hook into context manager of parent to save shell history. + self._save_history() + super().close() + + def _fill_completion(self) -> None: + cmds = self.cmd('query-commands') + if 'error' in cmds: + return + for cmd in cmds['return']: + self._completer.append(cmd['name']) + + def _completer_setup(self) -> None: + self._completer = QMPCompleter() + self._fill_completion() + readline.set_history_length(1024) + readline.set_completer(self._completer.complete) + readline.parse_and_bind("tab: complete") + # NB: default delimiters conflict with some command names + # (eg. 
query-), clearing everything as it doesn't seem to matter + readline.set_completer_delims('') + try: + readline.read_history_file(self._histfile) + except FileNotFoundError: + pass + except IOError as err: + msg = f"Failed to read history '{self._histfile}': {err!s}" + LOG.warning(msg) + + def _save_history(self) -> None: + try: + readline.write_history_file(self._histfile) + except IOError as err: + msg = f"Failed to save history file '{self._histfile}': {err!s}" + LOG.warning(msg) + + @classmethod + def _parse_value(cls, val: str) -> object: + try: + return int(val) + except ValueError: + pass + + if val.lower() == 'true': + return True + if val.lower() == 'false': + return False + if val.startswith(('{', '[')): + # Try first as pure JSON: + try: + return json.loads(val) + except ValueError: + pass + # Try once again as FuzzyJSON: + try: + tree = ast.parse(val, mode='eval') + transformed = FuzzyJSON().visit(tree) + return ast.literal_eval(transformed) + except (SyntaxError, ValueError): + pass + return val + + def _cli_expr(self, + tokens: Sequence[str], + parent: qmp.QMPObject) -> None: + for arg in tokens: + (key, sep, val) = arg.partition('=') + if sep != '=': + raise QMPShellError( + f"Expected a key=value pair, got '{arg!s}'" + ) + + value = self._parse_value(val) + optpath = key.split('.') + curpath = [] + for path in optpath[:-1]: + curpath.append(path) + obj = parent.get(path, {}) + if not isinstance(obj, dict): + msg = 'Cannot use "{:s}" as both leaf and non-leaf key' + raise QMPShellError(msg.format('.'.join(curpath))) + parent[path] = obj + parent = obj + if optpath[-1] in parent: + if isinstance(parent[optpath[-1]], dict): + msg = 'Cannot use "{:s}" as both leaf and non-leaf key' + raise QMPShellError(msg.format('.'.join(curpath))) + raise QMPShellError(f'Cannot set "{key}" multiple times') + parent[optpath[-1]] = value + + def _build_cmd(self, cmdline: str) -> Optional[QMPMessage]: + """ + Build a QMP input object from a user provided command-line in the + following format: + + < command-name > [ arg-name1=arg1 ] ... [ arg-nameN=argN ] + """ + argument_regex = r'''(?:[^\s"']|"(?:\\.|[^"])*"|'(?:\\.|[^'])*')+''' + cmdargs = re.findall(argument_regex, cmdline) + qmpcmd: QMPMessage + + # Transactional CLI entry: + if cmdargs and cmdargs[0] == 'transaction(': + self._transmode = True + self._actions = [] + cmdargs.pop(0) + + # Transactional CLI exit: + if cmdargs and cmdargs[0] == ')' and self._transmode: + self._transmode = False + if len(cmdargs) > 1: + msg = 'Unexpected input after close of Transaction sub-shell' + raise QMPShellError(msg) + qmpcmd = { + 'execute': 'transaction', + 'arguments': {'actions': self._actions} + } + return qmpcmd + + # No args, or no args remaining + if not cmdargs: + return None + + if self._transmode: + # Parse and cache this Transactional Action + finalize = False + action = {'type': cmdargs[0], 'data': {}} + if cmdargs[-1] == ')': + cmdargs.pop(-1) + finalize = True + self._cli_expr(cmdargs[1:], action['data']) + self._actions.append(action) + return self._build_cmd(')') if finalize else None + + # Standard command: parse and return it to be executed. 
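        # Worked example (illustrative, reusing the module docstring's sample):
        # an input line such as
        #     device_add driver=e1000 id=net1
        # becomes, once _cli_expr() fills in the arguments below:
        #     {'execute': 'device_add',
        #      'arguments': {'driver': 'e1000', 'id': 'net1'}}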
+ qmpcmd = {'execute': cmdargs[0], 'arguments': {}} + self._cli_expr(cmdargs[1:], qmpcmd['arguments']) + return qmpcmd + + def _print(self, qmp_message: object) -> None: + jsobj = json.dumps(qmp_message, + indent=4 if self.pretty else None, + sort_keys=self.pretty) + print(str(jsobj)) + + def _execute_cmd(self, cmdline: str) -> bool: + try: + qmpcmd = self._build_cmd(cmdline) + except QMPShellError as err: + print( + f"Error while parsing command line: {err!s}\n" + "command format: <command-name> " + "[arg-name1=arg1] ... [arg-nameN=argN", + file=sys.stderr + ) + return True + # For transaction mode, we may have just cached the action: + if qmpcmd is None: + return True + if self.verbose: + self._print(qmpcmd) + resp = self.cmd_obj(qmpcmd) + if resp is None: + print('Disconnected') + return False + self._print(resp) + return True + + def connect(self, negotiate: bool = True) -> None: + self._greeting = super().connect(negotiate) + self._completer_setup() + + def show_banner(self, + msg: str = 'Welcome to the QMP low-level shell!') -> None: + """ + Print to stdio a greeting, and the QEMU version if available. + """ + print(msg) + if not self._greeting: + print('Connected') + return + version = self._greeting['QMP']['version']['qemu'] + print("Connected to QEMU {major}.{minor}.{micro}\n".format(**version)) + + @property + def prompt(self) -> str: + """ + Return the current shell prompt, including a trailing space. + """ + if self._transmode: + return 'TRANS> ' + return '(QEMU) ' + + def read_exec_command(self) -> bool: + """ + Read and execute a command. + + @return True if execution was ok, return False if disconnected. + """ + try: + cmdline = input(self.prompt) + except EOFError: + print() + return False + + if cmdline == '': + for event in self.get_events(): + print(event) + self.clear_events() + return True + + return self._execute_cmd(cmdline) + + def repl(self) -> Iterator[None]: + """ + Return an iterator that implements the REPL. + """ + self.show_banner() + while self.read_exec_command(): + yield + self.close() + + +class HMPShell(QMPShell): + """ + HMPShell provides a basic readline-based HMP shell, tunnelled via QMP. + + :param address: Address of the QMP server. + :param pretty: Pretty-print QMP messages. + :param verbose: Echo outgoing QMP messages to console. 
+ """ + def __init__(self, address: qmp.SocketAddrT, + pretty: bool = False, verbose: bool = False): + super().__init__(address, pretty, verbose) + self._cpu_index = 0 + + def _cmd_completion(self) -> None: + for cmd in self._cmd_passthrough('help')['return'].split('\r\n'): + if cmd and cmd[0] != '[' and cmd[0] != '\t': + name = cmd.split()[0] # drop help text + if name == 'info': + continue + if name.find('|') != -1: + # Command in the form 'foobar|f' or 'f|foobar', take the + # full name + opt = name.split('|') + if len(opt[0]) == 1: + name = opt[1] + else: + name = opt[0] + self._completer.append(name) + self._completer.append('help ' + name) # help completion + + def _info_completion(self) -> None: + for cmd in self._cmd_passthrough('info')['return'].split('\r\n'): + if cmd: + self._completer.append('info ' + cmd.split()[1]) + + def _other_completion(self) -> None: + # special cases + self._completer.append('help info') + + def _fill_completion(self) -> None: + self._cmd_completion() + self._info_completion() + self._other_completion() + + def _cmd_passthrough(self, cmdline: str, + cpu_index: int = 0) -> QMPMessage: + return self.cmd_obj({ + 'execute': 'human-monitor-command', + 'arguments': { + 'command-line': cmdline, + 'cpu-index': cpu_index + } + }) + + def _execute_cmd(self, cmdline: str) -> bool: + if cmdline.split()[0] == "cpu": + # trap the cpu command, it requires special setting + try: + idx = int(cmdline.split()[1]) + if 'return' not in self._cmd_passthrough('info version', idx): + print('bad CPU index') + return True + self._cpu_index = idx + except ValueError: + print('cpu command takes an integer argument') + return True + resp = self._cmd_passthrough(cmdline, self._cpu_index) + if resp is None: + print('Disconnected') + return False + assert 'return' in resp or 'error' in resp + if 'return' in resp: + # Success + if len(resp['return']) > 0: + print(resp['return'], end=' ') + else: + # Error + print('%s: %s' % (resp['error']['class'], resp['error']['desc'])) + return True + + def show_banner(self, msg: str = 'Welcome to the HMP shell!') -> None: + QMPShell.show_banner(self, msg) + + +def die(msg: str) -> NoReturn: + """Write an error to stderr, then exit with a return code of 1.""" + sys.stderr.write('ERROR: %s\n' % msg) + sys.exit(1) + + +def main() -> None: + """ + qmp-shell entry point: parse command line arguments and start the REPL. 
+ """ + parser = argparse.ArgumentParser() + parser.add_argument('-H', '--hmp', action='store_true', + help='Use HMP interface') + parser.add_argument('-N', '--skip-negotiation', action='store_true', + help='Skip negotiate (for qemu-ga)') + parser.add_argument('-v', '--verbose', action='store_true', + help='Verbose (echo commands sent and received)') + parser.add_argument('-p', '--pretty', action='store_true', + help='Pretty-print JSON') + + default_server = os.environ.get('QMP_SOCKET') + parser.add_argument('qmp_server', action='store', + default=default_server, + help='< UNIX socket path | TCP address:port >') + + args = parser.parse_args() + if args.qmp_server is None: + parser.error("QMP socket or TCP address must be specified") + + shell_class = HMPShell if args.hmp else QMPShell + + try: + address = shell_class.parse_address(args.qmp_server) + except qmp.QMPBadPortError: + parser.error(f"Bad port number: {args.qmp_server}") + return # pycharm doesn't know error() is noreturn + + with shell_class(address, args.pretty, args.verbose) as qemu: + try: + qemu.connect(negotiate=not args.skip_negotiation) + except qmp.QMPConnectError: + die("Didn't get QMP greeting message") + except qmp.QMPCapabilitiesError: + die("Couldn't negotiate capabilities") + except OSError as err: + die(f"Couldn't connect to {args.qmp_server}: {err!s}") + + for _ in qemu.repl(): + pass + + +if __name__ == '__main__': + main() diff --git a/python/qemu/qmp/qom.py b/python/qemu/qmp/qom.py new file mode 100644 index 0000000000..7ec7843d57 --- /dev/null +++ b/python/qemu/qmp/qom.py @@ -0,0 +1,272 @@ +""" +QEMU Object Model testing tools. + +usage: qom [-h] {set,get,list,tree,fuse} ... + +Query and manipulate QOM data + +optional arguments: + -h, --help show this help message and exit + +QOM commands: + {set,get,list,tree,fuse} + set Set a QOM property value + get Get a QOM property value + list List QOM properties at a given path + tree Show QOM tree from a given path + fuse Mount a QOM tree as a FUSE filesystem +""" +## +# Copyright John Snow 2020, for Red Hat, Inc. +# Copyright IBM, Corp. 2011 +# +# Authors: +# John Snow <jsnow@redhat.com> +# Anthony Liguori <aliguori@amazon.com> +# +# This work is licensed under the terms of the GNU GPL, version 2 or later. +# See the COPYING file in the top-level directory. +# +# Based on ./scripts/qmp/qom-[set|get|tree|list] +## + +import argparse + +from . import QMPResponseError +from .qom_common import QOMCommand + + +try: + from .qom_fuse import QOMFuse +except ModuleNotFoundError as err: + if err.name != 'fuse': + raise +else: + assert issubclass(QOMFuse, QOMCommand) + + +class QOMSet(QOMCommand): + """ + QOM Command - Set a property to a given value. + + usage: qom-set [-h] [--socket SOCKET] <path>.<property> <value> + + Set a QOM property value + + positional arguments: + <path>.<property> QOM path and property, separated by a period '.' + <value> new QOM property value + + optional arguments: + -h, --help show this help message and exit + --socket SOCKET, -s SOCKET + QMP socket path or address (addr:port). May also be + set via QMP_SOCKET environment variable. 
+ """ + name = 'set' + help = 'Set a QOM property value' + + @classmethod + def configure_parser(cls, parser: argparse.ArgumentParser) -> None: + super().configure_parser(parser) + cls.add_path_prop_arg(parser) + parser.add_argument( + 'value', + metavar='<value>', + action='store', + help='new QOM property value' + ) + + def __init__(self, args: argparse.Namespace): + super().__init__(args) + self.path, self.prop = args.path_prop.rsplit('.', 1) + self.value = args.value + + def run(self) -> int: + rsp = self.qmp.command( + 'qom-set', + path=self.path, + property=self.prop, + value=self.value + ) + print(rsp) + return 0 + + +class QOMGet(QOMCommand): + """ + QOM Command - Get a property's current value. + + usage: qom-get [-h] [--socket SOCKET] <path>.<property> + + Get a QOM property value + + positional arguments: + <path>.<property> QOM path and property, separated by a period '.' + + optional arguments: + -h, --help show this help message and exit + --socket SOCKET, -s SOCKET + QMP socket path or address (addr:port). May also be + set via QMP_SOCKET environment variable. + """ + name = 'get' + help = 'Get a QOM property value' + + @classmethod + def configure_parser(cls, parser: argparse.ArgumentParser) -> None: + super().configure_parser(parser) + cls.add_path_prop_arg(parser) + + def __init__(self, args: argparse.Namespace): + super().__init__(args) + try: + tmp = args.path_prop.rsplit('.', 1) + except ValueError as err: + raise ValueError('Invalid format for <path>.<property>') from err + self.path = tmp[0] + self.prop = tmp[1] + + def run(self) -> int: + rsp = self.qmp.command( + 'qom-get', + path=self.path, + property=self.prop + ) + if isinstance(rsp, dict): + for key, value in rsp.items(): + print(f"{key}: {value}") + else: + print(rsp) + return 0 + + +class QOMList(QOMCommand): + """ + QOM Command - List the properties at a given path. + + usage: qom-list [-h] [--socket SOCKET] <path> + + List QOM properties at a given path + + positional arguments: + <path> QOM path + + optional arguments: + -h, --help show this help message and exit + --socket SOCKET, -s SOCKET + QMP socket path or address (addr:port). May also be + set via QMP_SOCKET environment variable. + """ + name = 'list' + help = 'List QOM properties at a given path' + + @classmethod + def configure_parser(cls, parser: argparse.ArgumentParser) -> None: + super().configure_parser(parser) + parser.add_argument( + 'path', + metavar='<path>', + action='store', + help='QOM path', + ) + + def __init__(self, args: argparse.Namespace): + super().__init__(args) + self.path = args.path + + def run(self) -> int: + rsp = self.qom_list(self.path) + for item in rsp: + if item.child: + print(f"{item.name}/") + elif item.link: + print(f"@{item.name}/") + else: + print(item.name) + return 0 + + +class QOMTree(QOMCommand): + """ + QOM Command - Show the full tree below a given path. + + usage: qom-tree [-h] [--socket SOCKET] [<path>] + + Show QOM tree from a given path + + positional arguments: + <path> QOM path + + optional arguments: + -h, --help show this help message and exit + --socket SOCKET, -s SOCKET + QMP socket path or address (addr:port). May also be + set via QMP_SOCKET environment variable. 
+ """ + name = 'tree' + help = 'Show QOM tree from a given path' + + @classmethod + def configure_parser(cls, parser: argparse.ArgumentParser) -> None: + super().configure_parser(parser) + parser.add_argument( + 'path', + metavar='<path>', + action='store', + help='QOM path', + nargs='?', + default='/' + ) + + def __init__(self, args: argparse.Namespace): + super().__init__(args) + self.path = args.path + + def _list_node(self, path: str) -> None: + print(path) + items = self.qom_list(path) + for item in items: + if item.child: + continue + try: + rsp = self.qmp.command('qom-get', path=path, + property=item.name) + print(f" {item.name}: {rsp} ({item.type})") + except QMPResponseError as err: + print(f" {item.name}: <EXCEPTION: {err!s}> ({item.type})") + print('') + for item in items: + if not item.child: + continue + if path == '/': + path = '' + self._list_node(f"{path}/{item.name}") + + def run(self) -> int: + self._list_node(self.path) + return 0 + + +def main() -> int: + """QOM script main entry point.""" + parser = argparse.ArgumentParser( + description='Query and manipulate QOM data' + ) + subparsers = parser.add_subparsers( + title='QOM commands', + dest='command' + ) + + for command in QOMCommand.__subclasses__(): + command.register(subparsers) + + args = parser.parse_args() + + if args.command is None: + parser.error('Command not specified.') + return 1 + + cmd_class = args.cmd_class + assert isinstance(cmd_class, type(QOMCommand)) + return cmd_class.command_runner(args) diff --git a/python/qemu/qmp/qom_common.py b/python/qemu/qmp/qom_common.py new file mode 100644 index 0000000000..f82b16772d --- /dev/null +++ b/python/qemu/qmp/qom_common.py @@ -0,0 +1,178 @@ +""" +QOM Command abstractions. +""" +## +# Copyright John Snow 2020, for Red Hat, Inc. +# Copyright IBM, Corp. 2011 +# +# Authors: +# John Snow <jsnow@redhat.com> +# Anthony Liguori <aliguori@amazon.com> +# +# This work is licensed under the terms of the GNU GPL, version 2 or later. +# See the COPYING file in the top-level directory. +# +# Based on ./scripts/qmp/qom-[set|get|tree|list] +## + +import argparse +import os +import sys +from typing import ( + Any, + Dict, + List, + Optional, + Type, + TypeVar, +) + +from . import QEMUMonitorProtocol, QMPError + + +# The following is needed only for a type alias. +Subparsers = argparse._SubParsersAction # pylint: disable=protected-access + + +class ObjectPropertyInfo: + """ + Represents the return type from e.g. qom-list. + """ + def __init__(self, name: str, type_: str, + description: Optional[str] = None, + default_value: Optional[object] = None): + self.name = name + self.type = type_ + self.description = description + self.default_value = default_value + + @classmethod + def make(cls, value: Dict[str, Any]) -> 'ObjectPropertyInfo': + """ + Build an ObjectPropertyInfo from a Dict with an unknown shape. + """ + assert value.keys() >= {'name', 'type'} + assert value.keys() <= {'name', 'type', 'description', 'default-value'} + return cls(value['name'], value['type'], + value.get('description'), + value.get('default-value')) + + @property + def child(self) -> bool: + """Is this property a child property?""" + return self.type.startswith('child<') + + @property + def link(self) -> bool: + """Is this property a link property?""" + return self.type.startswith('link<') + + +CommandT = TypeVar('CommandT', bound='QOMCommand') + + +class QOMCommand: + """ + Represents a QOM sub-command. + + :param args: Parsed arguments, as returned from parser.parse_args. 
+ """ + name: str + help: str + + def __init__(self, args: argparse.Namespace): + if args.socket is None: + raise QMPError("No QMP socket path or address given") + self.qmp = QEMUMonitorProtocol( + QEMUMonitorProtocol.parse_address(args.socket) + ) + self.qmp.connect() + + @classmethod + def register(cls, subparsers: Subparsers) -> None: + """ + Register this command with the argument parser. + + :param subparsers: argparse subparsers object, from "add_subparsers". + """ + subparser = subparsers.add_parser(cls.name, help=cls.help, + description=cls.help) + cls.configure_parser(subparser) + + @classmethod + def configure_parser(cls, parser: argparse.ArgumentParser) -> None: + """ + Configure a parser with this command's arguments. + + :param parser: argparse parser or subparser object. + """ + default_path = os.environ.get('QMP_SOCKET') + parser.add_argument( + '--socket', '-s', + dest='socket', + action='store', + help='QMP socket path or address (addr:port).' + ' May also be set via QMP_SOCKET environment variable.', + default=default_path + ) + parser.set_defaults(cmd_class=cls) + + @classmethod + def add_path_prop_arg(cls, parser: argparse.ArgumentParser) -> None: + """ + Add the <path>.<proptery> positional argument to this command. + + :param parser: The parser to add the argument to. + """ + parser.add_argument( + 'path_prop', + metavar='<path>.<property>', + action='store', + help="QOM path and property, separated by a period '.'" + ) + + def run(self) -> int: + """ + Run this command. + + :return: 0 on success, 1 otherwise. + """ + raise NotImplementedError + + def qom_list(self, path: str) -> List[ObjectPropertyInfo]: + """ + :return: a strongly typed list from the 'qom-list' command. + """ + rsp = self.qmp.command('qom-list', path=path) + # qom-list returns List[ObjectPropertyInfo] + assert isinstance(rsp, list) + return [ObjectPropertyInfo.make(x) for x in rsp] + + @classmethod + def command_runner( + cls: Type[CommandT], + args: argparse.Namespace + ) -> int: + """ + Run a fully-parsed subcommand, with error-handling for the CLI. + + :return: The return code from `.run()`. + """ + try: + cmd = cls(args) + return cmd.run() + except QMPError as err: + print(f"{type(err).__name__}: {err!s}", file=sys.stderr) + return -1 + + @classmethod + def entry_point(cls) -> int: + """ + Build this command's parser, parse arguments, and run the command. + + :return: `run`'s return code. + """ + parser = argparse.ArgumentParser(description=cls.help) + cls.configure_parser(parser) + args = parser.parse_args() + return cls.command_runner(args) diff --git a/python/qemu/qmp/qom_fuse.py b/python/qemu/qmp/qom_fuse.py new file mode 100644 index 0000000000..43f4671fdb --- /dev/null +++ b/python/qemu/qmp/qom_fuse.py @@ -0,0 +1,206 @@ +""" +QEMU Object Model FUSE filesystem tool + +This script offers a simple FUSE filesystem within which the QOM tree +may be browsed, queried and edited using traditional shell tooling. + +This script requires the 'fusepy' python package. + + +usage: qom-fuse [-h] [--socket SOCKET] <mount> + +Mount a QOM tree as a FUSE filesystem + +positional arguments: + <mount> Mount point + +optional arguments: + -h, --help show this help message and exit + --socket SOCKET, -s SOCKET + QMP socket path or address (addr:port). May also be + set via QMP_SOCKET environment variable. +""" +## +# Copyright IBM, Corp. 2012 +# Copyright (C) 2020 Red Hat, Inc. 
+# +# Authors: +# Anthony Liguori <aliguori@us.ibm.com> +# Markus Armbruster <armbru@redhat.com> +# +# This work is licensed under the terms of the GNU GPL, version 2 or later. +# See the COPYING file in the top-level directory. +## + +import argparse +from errno import ENOENT, EPERM +import stat +import sys +from typing import ( + IO, + Dict, + Iterator, + Mapping, + Optional, + Union, +) + +import fuse +from fuse import FUSE, FuseOSError, Operations + +from . import QMPResponseError +from .qom_common import QOMCommand + + +fuse.fuse_python_api = (0, 2) + + +class QOMFuse(QOMCommand, Operations): + """ + QOMFuse implements both fuse.Operations and QOMCommand. + + Operations implements the FS, and QOMCommand implements the CLI command. + """ + name = 'fuse' + help = 'Mount a QOM tree as a FUSE filesystem' + fuse: FUSE + + @classmethod + def configure_parser(cls, parser: argparse.ArgumentParser) -> None: + super().configure_parser(parser) + parser.add_argument( + 'mount', + metavar='<mount>', + action='store', + help="Mount point", + ) + + def __init__(self, args: argparse.Namespace): + super().__init__(args) + self.mount = args.mount + self.ino_map: Dict[str, int] = {} + self.ino_count = 1 + + def run(self) -> int: + print(f"Mounting QOMFS to '{self.mount}'", file=sys.stderr) + self.fuse = FUSE(self, self.mount, foreground=True) + return 0 + + def get_ino(self, path: str) -> int: + """Get an inode number for a given QOM path.""" + if path in self.ino_map: + return self.ino_map[path] + self.ino_map[path] = self.ino_count + self.ino_count += 1 + return self.ino_map[path] + + def is_object(self, path: str) -> bool: + """Is the given QOM path an object?""" + try: + self.qom_list(path) + return True + except QMPResponseError: + return False + + def is_property(self, path: str) -> bool: + """Is the given QOM path a property?""" + path, prop = path.rsplit('/', 1) + if path == '': + path = '/' + try: + for item in self.qom_list(path): + if item.name == prop: + return True + return False + except QMPResponseError: + return False + + def is_link(self, path: str) -> bool: + """Is the given QOM path a link?""" + path, prop = path.rsplit('/', 1) + if path == '': + path = '/' + try: + for item in self.qom_list(path): + if item.name == prop and item.link: + return True + return False + except QMPResponseError: + return False + + def read(self, path: str, size: int, offset: int, fh: IO[bytes]) -> bytes: + if not self.is_property(path): + raise FuseOSError(ENOENT) + + path, prop = path.rsplit('/', 1) + if path == '': + path = '/' + try: + data = str(self.qmp.command('qom-get', path=path, property=prop)) + data += '\n' # make values shell friendly + except QMPResponseError as err: + raise FuseOSError(EPERM) from err + + if offset > len(data): + return b'' + + return bytes(data[offset:][:size], encoding='utf-8') + + def readlink(self, path: str) -> Union[bool, str]: + if not self.is_link(path): + return False + path, prop = path.rsplit('/', 1) + prefix = '/'.join(['..'] * (len(path.split('/')) - 1)) + return prefix + str(self.qmp.command('qom-get', path=path, + property=prop)) + + def getattr(self, path: str, + fh: Optional[IO[bytes]] = None) -> Mapping[str, object]: + if self.is_link(path): + value = { + 'st_mode': 0o755 | stat.S_IFLNK, + 'st_ino': self.get_ino(path), + 'st_dev': 0, + 'st_nlink': 2, + 'st_uid': 1000, + 'st_gid': 1000, + 'st_size': 4096, + 'st_atime': 0, + 'st_mtime': 0, + 'st_ctime': 0 + } + elif self.is_object(path): + value = { + 'st_mode': 0o755 | stat.S_IFDIR, + 'st_ino': 
self.get_ino(path), + 'st_dev': 0, + 'st_nlink': 2, + 'st_uid': 1000, + 'st_gid': 1000, + 'st_size': 4096, + 'st_atime': 0, + 'st_mtime': 0, + 'st_ctime': 0 + } + elif self.is_property(path): + value = { + 'st_mode': 0o644 | stat.S_IFREG, + 'st_ino': self.get_ino(path), + 'st_dev': 0, + 'st_nlink': 1, + 'st_uid': 1000, + 'st_gid': 1000, + 'st_size': 4096, + 'st_atime': 0, + 'st_mtime': 0, + 'st_ctime': 0 + } + else: + raise FuseOSError(ENOENT) + return value + + def readdir(self, path: str, fh: IO[bytes]) -> Iterator[str]: + yield '.' + yield '..' + for item in self.qom_list(path): + yield item.name diff --git a/python/setup.cfg b/python/setup.cfg index 0fcdec6f32..85cecbb41b 100644 --- a/python/setup.cfg +++ b/python/setup.cfg @@ -32,11 +32,27 @@ packages = devel = avocado-framework >= 87.0 flake8 >= 3.6.0 + fusepy >= 2.0.4 isort >= 5.1.2 mypy >= 0.770 pylint >= 2.8.0 tox >= 3.18.0 +# Provides qom-fuse functionality +fuse = + fusepy >= 2.0.4 + +[options.entry_points] +console_scripts = + qom = qemu.qmp.qom:main + qom-set = qemu.qmp.qom:QOMSet.entry_point + qom-get = qemu.qmp.qom:QOMGet.entry_point + qom-list = qemu.qmp.qom:QOMList.entry_point + qom-tree = qemu.qmp.qom:QOMTree.entry_point + qom-fuse = qemu.qmp.qom_fuse:QOMFuse.entry_point [fuse] + qemu-ga-client = qemu.qmp.qemu_ga_client:main + qmp-shell = qemu.qmp.qmp_shell:main + [flake8] extend-ignore = E722 # Prefer pylint's bare-except checks to flake8's exclude = __pycache__, @@ -49,6 +65,14 @@ python_version = 3.6 warn_unused_configs = True namespace_packages = True +[mypy-qemu.qmp.qom_fuse] +# fusepy has no type stubs: +allow_subclassing_any = True + +[mypy-fuse] +# fusepy has no type stubs: +ignore_missing_imports = True + [pylint.messages control] # Disable the message, report, category or checker with the given id(s). You # can either give multiple identifiers separated by comma (,) or put this @@ -70,9 +94,10 @@ good-names=i, k, ex, Run, - _, - fd, - c, + _, # By convention: Unused variable + fh, # fh = open(...) + fd, # fd = os.open(...) + c, # for c in string: ... [pylint.similarities] # Ignore imports when computing similarities. @@ -97,6 +122,8 @@ envlist = py36, py37, py38, py39, py310 [testenv] allowlist_externals = make -deps = .[devel] +deps = + .[devel] + .[fuse] # Workaround to trigger tox venv rebuild commands = make check diff --git a/scripts/qmp/qemu-ga-client b/scripts/qmp/qemu-ga-client index 348d85864c..102fd2cad9 100755 --- a/scripts/qmp/qemu-ga-client +++ b/scripts/qmp/qemu-ga-client @@ -1,304 +1,11 @@ #!/usr/bin/env python3 -# QEMU Guest Agent Client -# -# Copyright (C) 2012 Ryota Ozaki <ozaki.ryota@gmail.com> -# -# This work is licensed under the terms of the GNU GPL, version 2. See -# the COPYING file in the top-level directory. -# -# Usage: -# -# Start QEMU with: -# -# # qemu [...] -chardev socket,path=/tmp/qga.sock,server=on,wait=off,id=qga0 \ -# -device virtio-serial -device virtserialport,chardev=qga0,name=org.qemu.guest_agent.0 -# -# Run the script: -# -# $ qemu-ga-client --address=/tmp/qga.sock <command> [args...] -# -# or -# -# $ export QGA_CLIENT_ADDRESS=/tmp/qga.sock -# $ qemu-ga-client <command> [args...] 
-# -# For example: -# -# $ qemu-ga-client cat /etc/resolv.conf -# # Generated by NetworkManager -# nameserver 10.0.2.3 -# $ qemu-ga-client fsfreeze status -# thawed -# $ qemu-ga-client fsfreeze freeze -# 2 filesystems frozen -# -# See also: https://wiki.qemu.org/Features/QAPI/GuestAgent -# - import os import sys -import base64 -import random sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'python')) -from qemu import qmp - - -class QemuGuestAgent(qmp.QEMUMonitorProtocol): - def __getattr__(self, name): - def wrapper(**kwds): - return self.command('guest-' + name.replace('_', '-'), **kwds) - return wrapper - - -class QemuGuestAgentClient: - error = QemuGuestAgent.error - - def __init__(self, address): - self.qga = QemuGuestAgent(address) - self.qga.connect(negotiate=False) - - def sync(self, timeout=3): - # Avoid being blocked forever - if not self.ping(timeout): - raise EnvironmentError('Agent seems not alive') - uid = random.randint(0, (1 << 32) - 1) - while True: - ret = self.qga.sync(id=uid) - if isinstance(ret, int) and int(ret) == uid: - break - - def __file_read_all(self, handle): - eof = False - data = '' - while not eof: - ret = self.qga.file_read(handle=handle, count=1024) - _data = base64.b64decode(ret['buf-b64']) - data += _data - eof = ret['eof'] - return data - - def read(self, path): - handle = self.qga.file_open(path=path) - try: - data = self.__file_read_all(handle) - finally: - self.qga.file_close(handle=handle) - return data - - def info(self): - info = self.qga.info() - - msgs = [] - msgs.append('version: ' + info['version']) - msgs.append('supported_commands:') - enabled = [c['name'] for c in info['supported_commands'] if c['enabled']] - msgs.append('\tenabled: ' + ', '.join(enabled)) - disabled = [c['name'] for c in info['supported_commands'] if not c['enabled']] - msgs.append('\tdisabled: ' + ', '.join(disabled)) - - return '\n'.join(msgs) - - def __gen_ipv4_netmask(self, prefixlen): - mask = int('1' * prefixlen + '0' * (32 - prefixlen), 2) - return '.'.join([str(mask >> 24), - str((mask >> 16) & 0xff), - str((mask >> 8) & 0xff), - str(mask & 0xff)]) - - def ifconfig(self): - nifs = self.qga.network_get_interfaces() - - msgs = [] - for nif in nifs: - msgs.append(nif['name'] + ':') - if 'ip-addresses' in nif: - for ipaddr in nif['ip-addresses']: - if ipaddr['ip-address-type'] == 'ipv4': - addr = ipaddr['ip-address'] - mask = self.__gen_ipv4_netmask(int(ipaddr['prefix'])) - msgs.append("\tinet %s netmask %s" % (addr, mask)) - elif ipaddr['ip-address-type'] == 'ipv6': - addr = ipaddr['ip-address'] - prefix = ipaddr['prefix'] - msgs.append("\tinet6 %s prefixlen %s" % (addr, prefix)) - if nif['hardware-address'] != '00:00:00:00:00:00': - msgs.append("\tether " + nif['hardware-address']) - - return '\n'.join(msgs) - - def ping(self, timeout): - self.qga.settimeout(timeout) - try: - self.qga.ping() - except self.qga.timeout: - return False - return True - - def fsfreeze(self, cmd): - if cmd not in ['status', 'freeze', 'thaw']: - raise Exception('Invalid command: ' + cmd) - - return getattr(self.qga, 'fsfreeze' + '_' + cmd)() - - def fstrim(self, minimum=0): - return getattr(self.qga, 'fstrim')(minimum=minimum) - - def suspend(self, mode): - if mode not in ['disk', 'ram', 'hybrid']: - raise Exception('Invalid mode: ' + mode) - - try: - getattr(self.qga, 'suspend' + '_' + mode)() - # On error exception will raise - except self.qga.timeout: - # On success command will timed out - return - - def shutdown(self, mode='powerdown'): - if mode not in ['powerdown', 
'halt', 'reboot']: - raise Exception('Invalid mode: ' + mode) - - try: - self.qga.shutdown(mode=mode) - except self.qga.timeout: - return - - -def _cmd_cat(client, args): - if len(args) != 1: - print('Invalid argument') - print('Usage: cat <file>') - sys.exit(1) - print(client.read(args[0])) - - -def _cmd_fsfreeze(client, args): - usage = 'Usage: fsfreeze status|freeze|thaw' - if len(args) != 1: - print('Invalid argument') - print(usage) - sys.exit(1) - if args[0] not in ['status', 'freeze', 'thaw']: - print('Invalid command: ' + args[0]) - print(usage) - sys.exit(1) - cmd = args[0] - ret = client.fsfreeze(cmd) - if cmd == 'status': - print(ret) - elif cmd == 'freeze': - print("%d filesystems frozen" % ret) - else: - print("%d filesystems thawed" % ret) - - -def _cmd_fstrim(client, args): - if len(args) == 0: - minimum = 0 - else: - minimum = int(args[0]) - print(client.fstrim(minimum)) - - -def _cmd_ifconfig(client, args): - print(client.ifconfig()) - - -def _cmd_info(client, args): - print(client.info()) - - -def _cmd_ping(client, args): - if len(args) == 0: - timeout = 3 - else: - timeout = float(args[0]) - alive = client.ping(timeout) - if not alive: - print("Not responded in %s sec" % args[0]) - sys.exit(1) - - -def _cmd_suspend(client, args): - usage = 'Usage: suspend disk|ram|hybrid' - if len(args) != 1: - print('Less argument') - print(usage) - sys.exit(1) - if args[0] not in ['disk', 'ram', 'hybrid']: - print('Invalid command: ' + args[0]) - print(usage) - sys.exit(1) - client.suspend(args[0]) - - -def _cmd_shutdown(client, args): - client.shutdown() -_cmd_powerdown = _cmd_shutdown - - -def _cmd_halt(client, args): - client.shutdown('halt') - - -def _cmd_reboot(client, args): - client.shutdown('reboot') - - -commands = [m.replace('_cmd_', '') for m in dir() if '_cmd_' in m] - - -def main(address, cmd, args): - if not os.path.exists(address): - print('%s not found' % address) - sys.exit(1) - - if cmd not in commands: - print('Invalid command: ' + cmd) - print('Available commands: ' + ', '.join(commands)) - sys.exit(1) - - try: - client = QemuGuestAgentClient(address) - except QemuGuestAgent.error as e: - import errno - - print(e) - if e.errno == errno.ECONNREFUSED: - print('Hint: qemu is not running?') - sys.exit(1) - - if cmd == 'fsfreeze' and args[0] == 'freeze': - client.sync(60) - elif cmd != 'ping': - client.sync() - - globals()['_cmd_' + cmd](client, args) +from qemu.qmp import qemu_ga_client if __name__ == '__main__': - import sys - import os - import optparse - - address = os.environ['QGA_CLIENT_ADDRESS'] if 'QGA_CLIENT_ADDRESS' in os.environ else None - - usage = "%prog [--address=<unix_path>|<ipv4_address>] <command> [args...]\n" - usage += '<command>: ' + ', '.join(commands) - parser = optparse.OptionParser(usage=usage) - parser.add_option('--address', action='store', type='string', - default=address, help='Specify a ip:port pair or a unix socket path') - options, args = parser.parse_args() - - address = options.address - if address is None: - parser.error('address is not specified') - sys.exit(1) - - if len(args) == 0: - parser.error('Less argument') - sys.exit(1) - - main(address, args[0], args[1:]) + sys.exit(qemu_ga_client.main()) diff --git a/scripts/qmp/qmp-shell b/scripts/qmp/qmp-shell index b4d06096ab..4a20f97db7 100755 --- a/scripts/qmp/qmp-shell +++ b/scripts/qmp/qmp-shell @@ -1,459 +1,11 @@ #!/usr/bin/env python3 -# -# Low-level QEMU shell on top of QMP. -# -# Copyright (C) 2009, 2010 Red Hat Inc. 
-# -# Authors: -# Luiz Capitulino <lcapitulino@redhat.com> -# -# This work is licensed under the terms of the GNU GPL, version 2. See -# the COPYING file in the top-level directory. -# -# Usage: -# -# Start QEMU with: -# -# # qemu [...] -qmp unix:./qmp-sock,server -# -# Run the shell: -# -# $ qmp-shell ./qmp-sock -# -# Commands have the following format: -# -# < command-name > [ arg-name1=arg1 ] ... [ arg-nameN=argN ] -# -# For example: -# -# (QEMU) device_add driver=e1000 id=net1 -# {u'return': {}} -# (QEMU) -# -# key=value pairs also support Python or JSON object literal subset notations, -# without spaces. Dictionaries/objects {} are supported as are arrays []. -# -# example-command arg-name1={'key':'value','obj'={'prop':"value"}} -# -# Both JSON and Python formatting should work, including both styles of -# string literal quotes. Both paradigms of literal values should work, -# including null/true/false for JSON and None/True/False for Python. -# -# -# Transactions have the following multi-line format: -# -# transaction( -# action-name1 [ arg-name1=arg1 ] ... [arg-nameN=argN ] -# ... -# action-nameN [ arg-name1=arg1 ] ... [arg-nameN=argN ] -# ) -# -# One line transactions are also supported: -# -# transaction( action-name1 ... ) -# -# For example: -# -# (QEMU) transaction( -# TRANS> block-dirty-bitmap-add node=drive0 name=bitmap1 -# TRANS> block-dirty-bitmap-clear node=drive0 name=bitmap0 -# TRANS> ) -# {"return": {}} -# (QEMU) -# -# Use the -v and -p options to activate the verbose and pretty-print options, -# which will echo back the properly formatted JSON-compliant QMP that is being -# sent to QEMU, which is useful for debugging and documentation generation. -import json -import ast -import readline -import sys import os -import errno -import atexit -import re +import sys sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'python')) -from qemu import qmp - -class QMPCompleter(list): - def complete(self, text, state): - for cmd in self: - if cmd.startswith(text): - if not state: - return cmd - else: - state -= 1 - -class QMPShellError(Exception): - pass - -class QMPShellBadPort(QMPShellError): - pass - -class FuzzyJSON(ast.NodeTransformer): - '''This extension of ast.NodeTransformer filters literal "true/false/null" - values in an AST and replaces them by proper "True/False/None" values that - Python can properly evaluate.''' - def visit_Name(self, node): - if node.id == 'true': - node.id = 'True' - if node.id == 'false': - node.id = 'False' - if node.id == 'null': - node.id = 'None' - return node - -# TODO: QMPShell's interface is a bit ugly (eg. _fill_completion() and -# _execute_cmd()). Let's design a better one. -class QMPShell(qmp.QEMUMonitorProtocol): - def __init__(self, address, pretty=False): - super(QMPShell, self).__init__(self.__get_address(address)) - self._greeting = None - self._completer = None - self._pretty = pretty - self._transmode = False - self._actions = list() - self._histfile = os.path.join(os.path.expanduser('~'), - '.qmp-shell_history') - - def __get_address(self, arg): - """ - Figure out if the argument is in the port:host form, if it's not it's - probably a file path. 
- """ - addr = arg.split(':') - if len(addr) == 2: - try: - port = int(addr[1]) - except ValueError: - raise QMPShellBadPort - return ( addr[0], port ) - # socket path - return arg - - def _fill_completion(self): - cmds = self.cmd('query-commands') - if 'error' in cmds: - return - for cmd in cmds['return']: - self._completer.append(cmd['name']) - - def __completer_setup(self): - self._completer = QMPCompleter() - self._fill_completion() - readline.set_history_length(1024) - readline.set_completer(self._completer.complete) - readline.parse_and_bind("tab: complete") - # XXX: default delimiters conflict with some command names (eg. query-), - # clearing everything as it doesn't seem to matter - readline.set_completer_delims('') - try: - readline.read_history_file(self._histfile) - except Exception as e: - if isinstance(e, IOError) and e.errno == errno.ENOENT: - # File not found. No problem. - pass - else: - print("Failed to read history '%s'; %s" % (self._histfile, e)) - atexit.register(self.__save_history) - - def __save_history(self): - try: - readline.write_history_file(self._histfile) - except Exception as e: - print("Failed to save history file '%s'; %s" % (self._histfile, e)) - - def __parse_value(self, val): - try: - return int(val) - except ValueError: - pass - - if val.lower() == 'true': - return True - if val.lower() == 'false': - return False - if val.startswith(('{', '[')): - # Try first as pure JSON: - try: - return json.loads(val) - except ValueError: - pass - # Try once again as FuzzyJSON: - try: - st = ast.parse(val, mode='eval') - return ast.literal_eval(FuzzyJSON().visit(st)) - except SyntaxError: - pass - except ValueError: - pass - return val - - def __cli_expr(self, tokens, parent): - for arg in tokens: - (key, sep, val) = arg.partition('=') - if sep != '=': - raise QMPShellError("Expected a key=value pair, got '%s'" % arg) - - value = self.__parse_value(val) - optpath = key.split('.') - curpath = [] - for p in optpath[:-1]: - curpath.append(p) - d = parent.get(p, {}) - if type(d) is not dict: - raise QMPShellError('Cannot use "%s" as both leaf and non-leaf key' % '.'.join(curpath)) - parent[p] = d - parent = d - if optpath[-1] in parent: - if type(parent[optpath[-1]]) is dict: - raise QMPShellError('Cannot use "%s" as both leaf and non-leaf key' % '.'.join(curpath)) - else: - raise QMPShellError('Cannot set "%s" multiple times' % key) - parent[optpath[-1]] = value - - def __build_cmd(self, cmdline): - """ - Build a QMP input object from a user provided command-line in the - following format: - - < command-name > [ arg-name1=arg1 ] ... [ arg-nameN=argN ] - """ - cmdargs = re.findall(r'''(?:[^\s"']|"(?:\\.|[^"])*"|'(?:\\.|[^'])*')+''', cmdline) - - # Transactional CLI entry/exit: - if cmdargs[0] == 'transaction(': - self._transmode = True - cmdargs.pop(0) - elif cmdargs[0] == ')' and self._transmode: - self._transmode = False - if len(cmdargs) > 1: - raise QMPShellError("Unexpected input after close of Transaction sub-shell") - qmpcmd = { 'execute': 'transaction', - 'arguments': { 'actions': self._actions } } - self._actions = list() - return qmpcmd - - # Nothing to process? 
- if not cmdargs: - return None - - # Parse and then cache this Transactional Action - if self._transmode: - finalize = False - action = { 'type': cmdargs[0], 'data': {} } - if cmdargs[-1] == ')': - cmdargs.pop(-1) - finalize = True - self.__cli_expr(cmdargs[1:], action['data']) - self._actions.append(action) - return self.__build_cmd(')') if finalize else None - - # Standard command: parse and return it to be executed. - qmpcmd = { 'execute': cmdargs[0], 'arguments': {} } - self.__cli_expr(cmdargs[1:], qmpcmd['arguments']) - return qmpcmd - - def _print(self, qmp): - indent = None - if self._pretty: - indent = 4 - jsobj = json.dumps(qmp, indent=indent, sort_keys=self._pretty) - print(str(jsobj)) - - def _execute_cmd(self, cmdline): - try: - qmpcmd = self.__build_cmd(cmdline) - except Exception as e: - print('Error while parsing command line: %s' % e) - print('command format: <command-name> ', end=' ') - print('[arg-name1=arg1] ... [arg-nameN=argN]') - return True - # For transaction mode, we may have just cached the action: - if qmpcmd is None: - return True - if self._verbose: - self._print(qmpcmd) - resp = self.cmd_obj(qmpcmd) - if resp is None: - print('Disconnected') - return False - self._print(resp) - return True - - def connect(self, negotiate): - self._greeting = super(QMPShell, self).connect(negotiate) - self.__completer_setup() - - def show_banner(self, msg='Welcome to the QMP low-level shell!'): - print(msg) - if not self._greeting: - print('Connected') - return - version = self._greeting['QMP']['version']['qemu'] - print('Connected to QEMU %d.%d.%d\n' % (version['major'],version['minor'],version['micro'])) - - def get_prompt(self): - if self._transmode: - return "TRANS> " - return "(QEMU) " - - def read_exec_command(self, prompt): - """ - Read and execute a command. - - @return True if execution was ok, return False if disconnected. 
- """ - try: - cmdline = input(prompt) - except EOFError: - print() - return False - if cmdline == '': - for ev in self.get_events(): - print(ev) - self.clear_events() - return True - else: - return self._execute_cmd(cmdline) - - def set_verbosity(self, verbose): - self._verbose = verbose - -class HMPShell(QMPShell): - def __init__(self, address): - QMPShell.__init__(self, address) - self.__cpu_index = 0 - - def __cmd_completion(self): - for cmd in self.__cmd_passthrough('help')['return'].split('\r\n'): - if cmd and cmd[0] != '[' and cmd[0] != '\t': - name = cmd.split()[0] # drop help text - if name == 'info': - continue - if name.find('|') != -1: - # Command in the form 'foobar|f' or 'f|foobar', take the - # full name - opt = name.split('|') - if len(opt[0]) == 1: - name = opt[1] - else: - name = opt[0] - self._completer.append(name) - self._completer.append('help ' + name) # help completion - - def __info_completion(self): - for cmd in self.__cmd_passthrough('info')['return'].split('\r\n'): - if cmd: - self._completer.append('info ' + cmd.split()[1]) - - def __other_completion(self): - # special cases - self._completer.append('help info') - - def _fill_completion(self): - self.__cmd_completion() - self.__info_completion() - self.__other_completion() - - def __cmd_passthrough(self, cmdline, cpu_index = 0): - return self.cmd_obj({ 'execute': 'human-monitor-command', 'arguments': - { 'command-line': cmdline, - 'cpu-index': cpu_index } }) - - def _execute_cmd(self, cmdline): - if cmdline.split()[0] == "cpu": - # trap the cpu command, it requires special setting - try: - idx = int(cmdline.split()[1]) - if not 'return' in self.__cmd_passthrough('info version', idx): - print('bad CPU index') - return True - self.__cpu_index = idx - except ValueError: - print('cpu command takes an integer argument') - return True - resp = self.__cmd_passthrough(cmdline, self.__cpu_index) - if resp is None: - print('Disconnected') - return False - assert 'return' in resp or 'error' in resp - if 'return' in resp: - # Success - if len(resp['return']) > 0: - print(resp['return'], end=' ') - else: - # Error - print('%s: %s' % (resp['error']['class'], resp['error']['desc'])) - return True - - def show_banner(self): - QMPShell.show_banner(self, msg='Welcome to the HMP shell!') - -def die(msg): - sys.stderr.write('ERROR: %s\n' % msg) - sys.exit(1) - -def fail_cmdline(option=None): - if option: - sys.stderr.write('ERROR: bad command-line option \'%s\'\n' % option) - sys.stderr.write('qmp-shell [ -v ] [ -p ] [ -H ] [ -N ] < UNIX socket path> | < TCP address:port >\n') - sys.stderr.write(' -v Verbose (echo command sent and received)\n') - sys.stderr.write(' -p Pretty-print JSON\n') - sys.stderr.write(' -H Use HMP interface\n') - sys.stderr.write(' -N Skip negotiate (for qemu-ga)\n') - sys.exit(1) - -def main(): - addr = '' - qemu = None - hmp = False - pretty = False - verbose = False - negotiate = True - - try: - for arg in sys.argv[1:]: - if arg == "-H": - if qemu is not None: - fail_cmdline(arg) - hmp = True - elif arg == "-p": - pretty = True - elif arg == "-N": - negotiate = False - elif arg == "-v": - verbose = True - else: - if qemu is not None: - fail_cmdline(arg) - if hmp: - qemu = HMPShell(arg) - else: - qemu = QMPShell(arg, pretty) - addr = arg - - if qemu is None: - fail_cmdline() - except QMPShellBadPort: - die('bad port number in command-line') - - try: - qemu.connect(negotiate) - except qmp.QMPConnectError: - die('Didn\'t get QMP greeting message') - except qmp.QMPCapabilitiesError: - die('Could not 
negotiate capabilities') - except qemu.error: - die('Could not connect to %s' % addr) +from qemu.qmp import qmp_shell - qemu.show_banner() - qemu.set_verbosity(verbose) - while qemu.read_exec_command(qemu.get_prompt()): - pass - qemu.close() if __name__ == '__main__': - main() + qmp_shell.main() diff --git a/scripts/qmp/qom-fuse b/scripts/qmp/qom-fuse index 7c7cff8edf..a58c8ef979 100755 --- a/scripts/qmp/qom-fuse +++ b/scripts/qmp/qom-fuse @@ -1,147 +1,11 @@ #!/usr/bin/env python3 -## -# QEMU Object Model test tools -# -# Copyright IBM, Corp. 2012 -# Copyright (C) 2020 Red Hat, Inc. -# -# Authors: -# Anthony Liguori <aliguori@us.ibm.com> -# Markus Armbruster <armbru@redhat.com> -# -# This work is licensed under the terms of the GNU GPL, version 2 or later. See -# the COPYING file in the top-level directory. -## -import fuse, stat -from fuse import FUSE, FuseOSError, Operations -import os, posix, sys -from errno import * +import os +import sys sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'python')) -from qemu.qmp import QEMUMonitorProtocol +from qemu.qmp.qom_fuse import QOMFuse -fuse.fuse_python_api = (0, 2) - -class QOMFS(Operations): - def __init__(self, qmp): - self.qmp = qmp - self.qmp.connect() - self.ino_map = {} - self.ino_count = 1 - - def get_ino(self, path): - if path in self.ino_map: - return self.ino_map[path] - self.ino_map[path] = self.ino_count - self.ino_count += 1 - return self.ino_map[path] - - def is_object(self, path): - try: - items = self.qmp.command('qom-list', path=path) - return True - except: - return False - - def is_property(self, path): - path, prop = path.rsplit('/', 1) - if path == '': - path = '/' - try: - for item in self.qmp.command('qom-list', path=path): - if item['name'] == prop: - return True - return False - except: - return False - - def is_link(self, path): - path, prop = path.rsplit('/', 1) - if path == '': - path = '/' - try: - for item in self.qmp.command('qom-list', path=path): - if item['name'] == prop: - if item['type'].startswith('link<'): - return True - return False - return False - except: - return False - - def read(self, path, length, offset, fh): - if not self.is_property(path): - return -ENOENT - - path, prop = path.rsplit('/', 1) - if path == '': - path = '/' - try: - data = self.qmp.command('qom-get', path=path, property=prop) - data += '\n' # make values shell friendly - except: - raise FuseOSError(EPERM) - - if offset > len(data): - return '' - - return bytes(data[offset:][:length], encoding='utf-8') - - def readlink(self, path): - if not self.is_link(path): - return False - path, prop = path.rsplit('/', 1) - prefix = '/'.join(['..'] * (len(path.split('/')) - 1)) - return prefix + str(self.qmp.command('qom-get', path=path, - property=prop)) - - def getattr(self, path, fh=None): - if self.is_link(path): - value = { 'st_mode': 0o755 | stat.S_IFLNK, - 'st_ino': self.get_ino(path), - 'st_dev': 0, - 'st_nlink': 2, - 'st_uid': 1000, - 'st_gid': 1000, - 'st_size': 4096, - 'st_atime': 0, - 'st_mtime': 0, - 'st_ctime': 0 } - elif self.is_object(path): - value = { 'st_mode': 0o755 | stat.S_IFDIR, - 'st_ino': self.get_ino(path), - 'st_dev': 0, - 'st_nlink': 2, - 'st_uid': 1000, - 'st_gid': 1000, - 'st_size': 4096, - 'st_atime': 0, - 'st_mtime': 0, - 'st_ctime': 0 } - elif self.is_property(path): - value = { 'st_mode': 0o644 | stat.S_IFREG, - 'st_ino': self.get_ino(path), - 'st_dev': 0, - 'st_nlink': 1, - 'st_uid': 1000, - 'st_gid': 1000, - 'st_size': 4096, - 'st_atime': 0, - 'st_mtime': 0, - 'st_ctime': 0 } - else: - 
raise FuseOSError(ENOENT) - return value - - def readdir(self, path, fh): - yield '.' - yield '..' - for item in self.qmp.command('qom-list', path=path): - yield str(item['name']) if __name__ == '__main__': - import os - - fuse = FUSE(QOMFS(QEMUMonitorProtocol(os.environ['QMP_SOCKET'])), - sys.argv[1], foreground=True) + sys.exit(QOMFuse.entry_point()) diff --git a/scripts/qmp/qom-get b/scripts/qmp/qom-get index 666df71832..e4f3e0c013 100755 --- a/scripts/qmp/qom-get +++ b/scripts/qmp/qom-get @@ -1,69 +1,11 @@ #!/usr/bin/env python3 -## -# QEMU Object Model test tools -# -# Copyright IBM, Corp. 2011 -# -# Authors: -# Anthony Liguori <aliguori@us.ibm.com> -# -# This work is licensed under the terms of the GNU GPL, version 2 or later. See -# the COPYING file in the top-level directory. -## -import sys import os +import sys sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'python')) -from qemu.qmp import QEMUMonitorProtocol - -cmd, args = sys.argv[0], sys.argv[1:] -socket_path = None -path = None -prop = None - -def usage(): - return '''environment variables: - QMP_SOCKET=<path | addr:port> -usage: - %s [-h] [-s <QMP socket path | addr:port>] <path>.<property> -''' % cmd - -def usage_error(error_msg = "unspecified error"): - sys.stderr.write('%s\nERROR: %s\n' % (usage(), error_msg)) - exit(1) - -if len(args) > 0: - if args[0] == "-h": - print(usage()) - exit(0); - elif args[0] == "-s": - try: - socket_path = args[1] - except: - usage_error("missing argument: QMP socket path or address"); - args = args[2:] - -if not socket_path: - if 'QMP_SOCKET' in os.environ: - socket_path = os.environ['QMP_SOCKET'] - else: - usage_error("no QMP socket path or address given"); - -if len(args) > 0: - try: - path, prop = args[0].rsplit('.', 1) - except: - usage_error("invalid format for path/property/value") -else: - usage_error("not enough arguments") +from qemu.qmp.qom import QOMGet -srv = QEMUMonitorProtocol(socket_path) -srv.connect() -rsp = srv.command('qom-get', path=path, property=prop) -if type(rsp) == dict: - for i in rsp.keys(): - print('%s: %s' % (i, rsp[i])) -else: - print(rsp) +if __name__ == '__main__': + sys.exit(QOMGet.entry_point()) diff --git a/scripts/qmp/qom-list b/scripts/qmp/qom-list index 5074fd939f..7a071a54e1 100755 --- a/scripts/qmp/qom-list +++ b/scripts/qmp/qom-list @@ -1,66 +1,11 @@ #!/usr/bin/env python3 -## -# QEMU Object Model test tools -# -# Copyright IBM, Corp. 2011 -# -# Authors: -# Anthony Liguori <aliguori@us.ibm.com> -# -# This work is licensed under the terms of the GNU GPL, version 2 or later. See -# the COPYING file in the top-level directory. 
-## -import sys import os +import sys sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'python')) -from qemu.qmp import QEMUMonitorProtocol - -cmd, args = sys.argv[0], sys.argv[1:] -socket_path = None -path = None -prop = None - -def usage(): - return '''environment variables: - QMP_SOCKET=<path | addr:port> -usage: - %s [-h] [-s <QMP socket path | addr:port>] [<path>] -''' % cmd - -def usage_error(error_msg = "unspecified error"): - sys.stderr.write('%s\nERROR: %s\n' % (usage(), error_msg)) - exit(1) - -if len(args) > 0: - if args[0] == "-h": - print(usage()) - exit(0); - elif args[0] == "-s": - try: - socket_path = args[1] - except: - usage_error("missing argument: QMP socket path or address"); - args = args[2:] - -if not socket_path: - if 'QMP_SOCKET' in os.environ: - socket_path = os.environ['QMP_SOCKET'] - else: - usage_error("no QMP socket path or address given"); - -srv = QEMUMonitorProtocol(socket_path) -srv.connect() +from qemu.qmp.qom import QOMList -if len(args) == 0: - print('/') - sys.exit(0) -for item in srv.command('qom-list', path=args[0]): - if item['type'].startswith('child<'): - print('%s/' % item['name']) - elif item['type'].startswith('link<'): - print('@%s/' % item['name']) - else: - print('%s' % item['name']) +if __name__ == '__main__': + sys.exit(QOMList.entry_point()) diff --git a/scripts/qmp/qom-set b/scripts/qmp/qom-set index 240a78187f..9ca9e2ba10 100755 --- a/scripts/qmp/qom-set +++ b/scripts/qmp/qom-set @@ -1,66 +1,11 @@ #!/usr/bin/env python3 -## -# QEMU Object Model test tools -# -# Copyright IBM, Corp. 2011 -# -# Authors: -# Anthony Liguori <aliguori@us.ibm.com> -# -# This work is licensed under the terms of the GNU GPL, version 2 or later. See -# the COPYING file in the top-level directory. -## -import sys import os +import sys sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'python')) -from qemu.qmp import QEMUMonitorProtocol - -cmd, args = sys.argv[0], sys.argv[1:] -socket_path = None -path = None -prop = None -value = None - -def usage(): - return '''environment variables: - QMP_SOCKET=<path | addr:port> -usage: - %s [-h] [-s <QMP socket path | addr:port>] <path>.<property> <value> -''' % cmd - -def usage_error(error_msg = "unspecified error"): - sys.stderr.write('%s\nERROR: %s\n' % (usage(), error_msg)) - exit(1) - -if len(args) > 0: - if args[0] == "-h": - print(usage()) - exit(0); - elif args[0] == "-s": - try: - socket_path = args[1] - except: - usage_error("missing argument: QMP socket path or address"); - args = args[2:] - -if not socket_path: - if 'QMP_SOCKET' in os.environ: - socket_path = os.environ['QMP_SOCKET'] - else: - usage_error("no QMP socket path or address given"); - -if len(args) > 1: - try: - path, prop = args[0].rsplit('.', 1) - except: - usage_error("invalid format for path/property/value") - value = args[1] -else: - usage_error("not enough arguments") +from qemu.qmp.qom import QOMSet -srv = QEMUMonitorProtocol(socket_path) -srv.connect() -print(srv.command('qom-set', path=path, property=prop, value=value)) +if __name__ == '__main__': + sys.exit(QOMSet.entry_point()) diff --git a/scripts/qmp/qom-tree b/scripts/qmp/qom-tree index 25b0781323..7d0ccca3a4 100755 --- a/scripts/qmp/qom-tree +++ b/scripts/qmp/qom-tree @@ -1,77 +1,11 @@ #!/usr/bin/env python3 -## -# QEMU Object Model test tools -# -# Copyright IBM, Corp. 
2011 -# Copyright (c) 2013 SUSE LINUX Products GmbH -# -# Authors: -# Anthony Liguori <aliguori@amazon.com> -# Andreas Faerber <afaerber@suse.de> -# -# This work is licensed under the terms of the GNU GPL, version 2 or later. See -# the COPYING file in the top-level directory. -## -import sys import os +import sys sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'python')) -from qemu.qmp import QEMUMonitorProtocol - -cmd, args = sys.argv[0], sys.argv[1:] -socket_path = None -path = None -prop = None - -def usage(): - return '''environment variables: - QMP_SOCKET=<path | addr:port> -usage: - %s [-h] [-s <QMP socket path | addr:port>] [<path>] -''' % cmd - -def usage_error(error_msg = "unspecified error"): - sys.stderr.write('%s\nERROR: %s\n' % (usage(), error_msg)) - exit(1) - -if len(args) > 0: - if args[0] == "-h": - print(usage()) - exit(0); - elif args[0] == "-s": - try: - socket_path = args[1] - except: - usage_error("missing argument: QMP socket path or address"); - args = args[2:] - -if not socket_path: - if 'QMP_SOCKET' in os.environ: - socket_path = os.environ['QMP_SOCKET'] - else: - usage_error("no QMP socket path or address given"); - -srv = QEMUMonitorProtocol(socket_path) -srv.connect() - -def list_node(path): - print('%s' % path) - items = srv.command('qom-list', path=path) - for item in items: - if not item['type'].startswith('child<'): - try: - print(' %s: %s (%s)' % (item['name'], srv.command('qom-get', path=path, property=item['name']), item['type'])) - except: - print(' %s: <EXCEPTION> (%s)' % (item['name'], item['type'])) - print('') - for item in items: - if item['type'].startswith('child<'): - list_node((path if (path != '/') else '') + '/' + item['name']) +from qemu.qmp.qom import QOMTree -if len(args) == 0: - path = '/' -else: - path = args[0] -list_node(path) +if __name__ == '__main__': + sys.exit(QOMTree.entry_point()) diff --git a/target/hppa/helper.h b/target/hppa/helper.h index 2d483aab58..0a629ffa7c 100644 --- a/target/hppa/helper.h +++ b/target/hppa/helper.h @@ -1,12 +1,9 @@ #if TARGET_REGISTER_BITS == 64 # define dh_alias_tr i64 -# define dh_is_64bit_tr 1 #else # define dh_alias_tr i32 -# define dh_is_64bit_tr 0 #endif #define dh_ctype_tr target_ureg -#define dh_is_signed_tr 0 DEF_HELPER_2(excp, noreturn, env, int) DEF_HELPER_FLAGS_2(tsv, TCG_CALL_NO_WG, void, env, tr) diff --git a/target/i386/ops_sse_header.h b/target/i386/ops_sse_header.h index 6c0c849347..e68af5c403 100644 --- a/target/i386/ops_sse_header.h +++ b/target/i386/ops_sse_header.h @@ -30,9 +30,6 @@ #define dh_ctype_Reg Reg * #define dh_ctype_ZMMReg ZMMReg * #define dh_ctype_MMXReg MMXReg * -#define dh_is_signed_Reg dh_is_signed_ptr -#define dh_is_signed_ZMMReg dh_is_signed_ptr -#define dh_is_signed_MMXReg dh_is_signed_ptr DEF_HELPER_3(glue(psrlw, SUFFIX), void, env, Reg, Reg) DEF_HELPER_3(glue(psraw, SUFFIX), void, env, Reg, Reg) diff --git a/target/m68k/helper.h b/target/m68k/helper.h index 77808497a9..9842eeaa95 100644 --- a/target/m68k/helper.h +++ b/target/m68k/helper.h @@ -17,7 +17,6 @@ DEF_HELPER_4(cas2l_parallel, void, env, i32, i32, i32) #define dh_alias_fp ptr #define dh_ctype_fp FPReg * -#define dh_is_signed_fp dh_is_signed_ptr DEF_HELPER_3(exts32, void, env, fp, s32) DEF_HELPER_3(extf32, void, env, fp, f32) diff --git a/target/ppc/helper.h b/target/ppc/helper.h index c517b9f025..4076aa281e 100644 --- a/target/ppc/helper.h +++ b/target/ppc/helper.h @@ -109,11 +109,9 @@ DEF_HELPER_FLAGS_1(ftsqrt, TCG_CALL_NO_RWG_SE, i32, i64) #define dh_alias_avr ptr #define 
dh_ctype_avr ppc_avr_t * -#define dh_is_signed_avr dh_is_signed_ptr #define dh_alias_vsr ptr #define dh_ctype_vsr ppc_vsr_t * -#define dh_is_signed_vsr dh_is_signed_ptr DEF_HELPER_3(vavgub, void, avr, avr, avr) DEF_HELPER_3(vavguh, void, avr, avr, avr) @@ -697,7 +695,6 @@ DEF_HELPER_3(store_601_batu, void, env, i32, tl) #define dh_alias_fprp ptr #define dh_ctype_fprp ppc_fprp_t * -#define dh_is_signed_fprp dh_is_signed_ptr DEF_HELPER_4(dadd, void, env, fprp, fprp, fprp) DEF_HELPER_4(daddq, void, env, fprp, fprp, fprp) diff --git a/target/s390x/cc_helper.c b/target/s390x/cc_helper.c index e7039d0d18..e7a74d66dd 100644 --- a/target/s390x/cc_helper.c +++ b/target/s390x/cc_helper.c @@ -509,7 +509,7 @@ uint32_t HELPER(calc_cc)(CPUS390XState *env, uint32_t cc_op, uint64_t src, #ifndef CONFIG_USER_ONLY void HELPER(load_psw)(CPUS390XState *env, uint64_t mask, uint64_t addr) { - load_psw(env, mask, addr); + s390_cpu_set_psw(env, mask, addr); cpu_loop_exit(env_cpu(env)); } diff --git a/target/s390x/cpu.h b/target/s390x/cpu.h index 2464d4076c..b26ae8fff2 100644 --- a/target/s390x/cpu.h +++ b/target/s390x/cpu.h @@ -845,6 +845,9 @@ int s390_cpu_pv_mem_rw(S390CPU *cpu, unsigned int offset, void *hostbuf, int s390_cpu_restart(S390CPU *cpu); void s390_init_sigp(void); +/* helper.c */ +void s390_cpu_set_psw(CPUS390XState *env, uint64_t mask, uint64_t addr); +uint64_t s390_cpu_get_psw_mask(CPUS390XState *env); /* outside of target/s390x/ */ S390CPU *s390_cpu_addr2state(uint16_t cpu_addr); diff --git a/target/s390x/cpu_models.c b/target/s390x/cpu_models.c index 050dcf2d42..94090a6e22 100644 --- a/target/s390x/cpu_models.c +++ b/target/s390x/cpu_models.c @@ -90,8 +90,8 @@ static S390CPUDef s390_cpu_defs[] = { CPUDEF_INIT(0x8562, 15, 1, 47, 0x08000000U, "gen15b", "IBM z15 T02 GA1"), }; -#define QEMU_MAX_CPU_TYPE 0x2964 -#define QEMU_MAX_CPU_GEN 13 +#define QEMU_MAX_CPU_TYPE 0x3906 +#define QEMU_MAX_CPU_GEN 14 #define QEMU_MAX_CPU_EC_GA 2 static const S390FeatInit qemu_max_cpu_feat_init = { S390_FEAT_LIST_QEMU_MAX }; static S390FeatBitmap qemu_max_cpu_feat; diff --git a/target/s390x/excp_helper.c b/target/s390x/excp_helper.c index 20625c2c8f..9c361428c8 100644 --- a/target/s390x/excp_helper.c +++ b/target/s390x/excp_helper.c @@ -252,7 +252,7 @@ static void do_program_interrupt(CPUS390XState *env) lowcore->pgm_ilen = cpu_to_be16(ilen); lowcore->pgm_code = cpu_to_be16(env->int_pgm_code); - lowcore->program_old_psw.mask = cpu_to_be64(get_psw_mask(env)); + lowcore->program_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env)); lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr); mask = be64_to_cpu(lowcore->program_new_psw.mask); addr = be64_to_cpu(lowcore->program_new_psw.addr); @@ -260,7 +260,7 @@ static void do_program_interrupt(CPUS390XState *env) cpu_unmap_lowcore(lowcore); - load_psw(env, mask, addr); + s390_cpu_set_psw(env, mask, addr); } static void do_svc_interrupt(CPUS390XState *env) @@ -272,14 +272,14 @@ static void do_svc_interrupt(CPUS390XState *env) lowcore->svc_code = cpu_to_be16(env->int_svc_code); lowcore->svc_ilen = cpu_to_be16(env->int_svc_ilen); - lowcore->svc_old_psw.mask = cpu_to_be64(get_psw_mask(env)); + lowcore->svc_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env)); lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + env->int_svc_ilen); mask = be64_to_cpu(lowcore->svc_new_psw.mask); addr = be64_to_cpu(lowcore->svc_new_psw.addr); cpu_unmap_lowcore(lowcore); - load_psw(env, mask, addr); + s390_cpu_set_psw(env, mask, addr); /* When a PER event is pending, the PER exception has 
to happen immediately after the SERVICE CALL one. */ @@ -348,12 +348,12 @@ static void do_ext_interrupt(CPUS390XState *env) mask = be64_to_cpu(lowcore->external_new_psw.mask); addr = be64_to_cpu(lowcore->external_new_psw.addr); - lowcore->external_old_psw.mask = cpu_to_be64(get_psw_mask(env)); + lowcore->external_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env)); lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr); cpu_unmap_lowcore(lowcore); - load_psw(env, mask, addr); + s390_cpu_set_psw(env, mask, addr); } static void do_io_interrupt(CPUS390XState *env) @@ -373,7 +373,7 @@ static void do_io_interrupt(CPUS390XState *env) lowcore->subchannel_nr = cpu_to_be16(io->nr); lowcore->io_int_parm = cpu_to_be32(io->parm); lowcore->io_int_word = cpu_to_be32(io->word); - lowcore->io_old_psw.mask = cpu_to_be64(get_psw_mask(env)); + lowcore->io_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env)); lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr); mask = be64_to_cpu(lowcore->io_new_psw.mask); addr = be64_to_cpu(lowcore->io_new_psw.addr); @@ -381,7 +381,7 @@ static void do_io_interrupt(CPUS390XState *env) cpu_unmap_lowcore(lowcore); g_free(io); - load_psw(env, mask, addr); + s390_cpu_set_psw(env, mask, addr); } typedef struct MchkExtSaveArea { @@ -457,14 +457,14 @@ static void do_mchk_interrupt(CPUS390XState *env) lowcore->clock_comp_save_area = cpu_to_be64(env->ckc >> 8); lowcore->mcic = cpu_to_be64(mcic); - lowcore->mcck_old_psw.mask = cpu_to_be64(get_psw_mask(env)); + lowcore->mcck_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env)); lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr); mask = be64_to_cpu(lowcore->mcck_new_psw.mask); addr = be64_to_cpu(lowcore->mcck_new_psw.addr); cpu_unmap_lowcore(lowcore); - load_psw(env, mask, addr); + s390_cpu_set_psw(env, mask, addr); } void s390_cpu_do_interrupt(CPUState *cs) @@ -592,9 +592,11 @@ void s390x_cpu_debug_excp_handler(CPUState *cs) and MVCS instrutions are not used. */ env->per_perc_atmid |= env->psw.mask & (PSW_MASK_ASC) >> 46; - /* Remove all watchpoints to re-execute the code. A PER exception - will be triggered, it will call load_psw which will recompute - the watchpoints. */ + /* + * Remove all watchpoints to re-execute the code. A PER exception + * will be triggered, it will call s390_cpu_set_psw which will + * recompute the watchpoints. 
+ */ cpu_watchpoint_remove_all(cs, BP_CPU); cpu_loop_exit_noexc(cs); } diff --git a/target/s390x/fpu_helper.c b/target/s390x/fpu_helper.c index f155bc048c..13af158748 100644 --- a/target/s390x/fpu_helper.c +++ b/target/s390x/fpu_helper.c @@ -509,6 +509,9 @@ uint64_t HELPER(cgeb)(CPUS390XState *env, uint64_t v2, uint32_t m34) s390_restore_bfp_rounding_mode(env, old_mode); handle_exceptions(env, xxc_from_m34(m34), GETPC()); + if (float32_is_any_nan(v2)) { + return INT64_MIN; + } return ret; } @@ -520,6 +523,9 @@ uint64_t HELPER(cgdb)(CPUS390XState *env, uint64_t v2, uint32_t m34) s390_restore_bfp_rounding_mode(env, old_mode); handle_exceptions(env, xxc_from_m34(m34), GETPC()); + if (float64_is_any_nan(v2)) { + return INT64_MIN; + } return ret; } @@ -532,6 +538,9 @@ uint64_t HELPER(cgxb)(CPUS390XState *env, uint64_t h, uint64_t l, uint32_t m34) s390_restore_bfp_rounding_mode(env, old_mode); handle_exceptions(env, xxc_from_m34(m34), GETPC()); + if (float128_is_any_nan(v2)) { + return INT64_MIN; + } return ret; } @@ -543,6 +552,9 @@ uint64_t HELPER(cfeb)(CPUS390XState *env, uint64_t v2, uint32_t m34) s390_restore_bfp_rounding_mode(env, old_mode); handle_exceptions(env, xxc_from_m34(m34), GETPC()); + if (float32_is_any_nan(v2)) { + return INT32_MIN; + } return ret; } @@ -554,6 +566,9 @@ uint64_t HELPER(cfdb)(CPUS390XState *env, uint64_t v2, uint32_t m34) s390_restore_bfp_rounding_mode(env, old_mode); handle_exceptions(env, xxc_from_m34(m34), GETPC()); + if (float64_is_any_nan(v2)) { + return INT32_MIN; + } return ret; } @@ -566,6 +581,9 @@ uint64_t HELPER(cfxb)(CPUS390XState *env, uint64_t h, uint64_t l, uint32_t m34) s390_restore_bfp_rounding_mode(env, old_mode); handle_exceptions(env, xxc_from_m34(m34), GETPC()); + if (float128_is_any_nan(v2)) { + return INT32_MIN; + } return ret; } @@ -573,12 +591,12 @@ uint64_t HELPER(cfxb)(CPUS390XState *env, uint64_t h, uint64_t l, uint32_t m34) uint64_t HELPER(clgeb)(CPUS390XState *env, uint64_t v2, uint32_t m34) { int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); - uint64_t ret; - - v2 = float32_to_float64(v2, &env->fpu_status); - ret = float64_to_uint64(v2, &env->fpu_status); + uint64_t ret = float32_to_uint64(v2, &env->fpu_status); s390_restore_bfp_rounding_mode(env, old_mode); handle_exceptions(env, xxc_from_m34(m34), GETPC()); + if (float32_is_any_nan(v2)) { + return 0; + } return ret; } @@ -590,6 +608,9 @@ uint64_t HELPER(clgdb)(CPUS390XState *env, uint64_t v2, uint32_t m34) s390_restore_bfp_rounding_mode(env, old_mode); handle_exceptions(env, xxc_from_m34(m34), GETPC()); + if (float64_is_any_nan(v2)) { + return 0; + } return ret; } @@ -601,6 +622,9 @@ uint64_t HELPER(clgxb)(CPUS390XState *env, uint64_t h, uint64_t l, uint32_t m34) s390_restore_bfp_rounding_mode(env, old_mode); handle_exceptions(env, xxc_from_m34(m34), GETPC()); + if (float128_is_any_nan(make_float128(h, l))) { + return 0; + } return ret; } @@ -612,6 +636,9 @@ uint64_t HELPER(clfeb)(CPUS390XState *env, uint64_t v2, uint32_t m34) s390_restore_bfp_rounding_mode(env, old_mode); handle_exceptions(env, xxc_from_m34(m34), GETPC()); + if (float32_is_any_nan(v2)) { + return 0; + } return ret; } @@ -623,6 +650,9 @@ uint64_t HELPER(clfdb)(CPUS390XState *env, uint64_t v2, uint32_t m34) s390_restore_bfp_rounding_mode(env, old_mode); handle_exceptions(env, xxc_from_m34(m34), GETPC()); + if (float64_is_any_nan(v2)) { + return 0; + } return ret; } @@ -634,6 +664,9 @@ uint64_t HELPER(clfxb)(CPUS390XState *env, uint64_t h, uint64_t l, uint32_t m34) 
s390_restore_bfp_rounding_mode(env, old_mode); handle_exceptions(env, xxc_from_m34(m34), GETPC()); + if (float128_is_any_nan(make_float128(h, l))) { + return 0; + } return ret; } diff --git a/target/s390x/gdbstub.c b/target/s390x/gdbstub.c index d6fce5ff1e..5b4e38a13b 100644 --- a/target/s390x/gdbstub.c +++ b/target/s390x/gdbstub.c @@ -31,18 +31,10 @@ int s390_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) { S390CPU *cpu = S390_CPU(cs); CPUS390XState *env = &cpu->env; - uint64_t val; - int cc_op; switch (n) { case S390_PSWM_REGNUM: - if (tcg_enabled()) { - cc_op = calc_cc(env, env->cc_op, env->cc_src, env->cc_dst, - env->cc_vr); - val = deposit64(env->psw.mask, 44, 2, cc_op); - return gdb_get_regl(mem_buf, val); - } - return gdb_get_regl(mem_buf, env->psw.mask); + return gdb_get_regl(mem_buf, s390_cpu_get_psw_mask(env)); case S390_PSWA_REGNUM: return gdb_get_regl(mem_buf, env->psw.addr); case S390_R0_REGNUM ... S390_R15_REGNUM: @@ -59,10 +51,7 @@ int s390_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) switch (n) { case S390_PSWM_REGNUM: - env->psw.mask = tmpl; - if (tcg_enabled()) { - env->cc_op = extract64(tmpl, 44, 2); - } + s390_cpu_set_psw(env, tmpl, env->psw.addr); break; case S390_PSWA_REGNUM: env->psw.addr = tmpl; diff --git a/target/s390x/gen-features.c b/target/s390x/gen-features.c index a6ec918e90..242c95ede4 100644 --- a/target/s390x/gen-features.c +++ b/target/s390x/gen-features.c @@ -706,20 +706,23 @@ static uint16_t qemu_V4_1[] = { S390_FEAT_VECTOR, }; -static uint16_t qemu_LATEST[] = { +static uint16_t qemu_V6_0[] = { S390_FEAT_ACCESS_EXCEPTION_FS_INDICATION, S390_FEAT_SIDE_EFFECT_ACCESS_ESOP2, S390_FEAT_ESOP, }; +static uint16_t qemu_LATEST[] = { + S390_FEAT_INSTRUCTION_EXEC_PROT, + S390_FEAT_MISC_INSTRUCTION_EXT2, + S390_FEAT_MSA_EXT_8, + S390_FEAT_VECTOR_ENH, +}; + /* add all new definitions before this point */ static uint16_t qemu_MAX[] = { /* generates a dependency warning, leave it out for now */ S390_FEAT_MSA_EXT_5, - /* features introduced after the z13 */ - S390_FEAT_INSTRUCTION_EXEC_PROT, - S390_FEAT_MISC_INSTRUCTION_EXT2, - S390_FEAT_MSA_EXT_8, }; /****** END FEATURE DEFS ******/ @@ -838,6 +841,7 @@ static FeatGroupDefSpec QemuFeatDef[] = { QEMU_FEAT_INITIALIZER(V3_1), QEMU_FEAT_INITIALIZER(V4_0), QEMU_FEAT_INITIALIZER(V4_1), + QEMU_FEAT_INITIALIZER(V6_0), QEMU_FEAT_INITIALIZER(LATEST), QEMU_FEAT_INITIALIZER(MAX), }; diff --git a/target/s390x/helper.c b/target/s390x/helper.c index 7678994feb..1445b74451 100644 --- a/target/s390x/helper.c +++ b/target/s390x/helper.c @@ -104,44 +104,6 @@ void s390_handle_wait(S390CPU *cpu) } } -void load_psw(CPUS390XState *env, uint64_t mask, uint64_t addr) -{ - uint64_t old_mask = env->psw.mask; - - env->psw.addr = addr; - env->psw.mask = mask; - - /* KVM will handle all WAITs and trigger a WAIT exit on disabled_wait */ - if (!tcg_enabled()) { - return; - } - env->cc_op = (mask >> 44) & 3; - - if ((old_mask ^ mask) & PSW_MASK_PER) { - s390_cpu_recompute_watchpoints(env_cpu(env)); - } - - if (mask & PSW_MASK_WAIT) { - s390_handle_wait(env_archcpu(env)); - } -} - -uint64_t get_psw_mask(CPUS390XState *env) -{ - uint64_t r = env->psw.mask; - - if (tcg_enabled()) { - env->cc_op = calc_cc(env, env->cc_op, env->cc_src, env->cc_dst, - env->cc_vr); - - r &= ~PSW_MASK_CC; - assert(!(env->cc_op & ~3)); - r |= (uint64_t)env->cc_op << 44; - } - - return r; -} - LowCore *cpu_map_lowcore(CPUS390XState *env) { LowCore *lowcore; @@ -168,7 +130,7 @@ void do_restart_interrupt(CPUS390XState *env) lowcore = 
cpu_map_lowcore(env); - lowcore->restart_old_psw.mask = cpu_to_be64(get_psw_mask(env)); + lowcore->restart_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env)); lowcore->restart_old_psw.addr = cpu_to_be64(env->psw.addr); mask = be64_to_cpu(lowcore->restart_new_psw.mask); addr = be64_to_cpu(lowcore->restart_new_psw.addr); @@ -176,7 +138,7 @@ void do_restart_interrupt(CPUS390XState *env) cpu_unmap_lowcore(lowcore); env->pending_int &= ~INTERRUPT_RESTART; - load_psw(env, mask, addr); + s390_cpu_set_psw(env, mask, addr); } void s390_cpu_recompute_watchpoints(CPUState *cs) @@ -266,7 +228,7 @@ int s390_store_status(S390CPU *cpu, hwaddr addr, bool store_arch) sa->grs[i] = cpu_to_be64(cpu->env.regs[i]); } sa->psw.addr = cpu_to_be64(cpu->env.psw.addr); - sa->psw.mask = cpu_to_be64(get_psw_mask(&cpu->env)); + sa->psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(&cpu->env)); sa->prefix = cpu_to_be32(cpu->env.psa); sa->fpc = cpu_to_be32(cpu->env.fpc); sa->todpr = cpu_to_be32(cpu->env.todpr); @@ -323,20 +285,67 @@ int s390_store_adtl_status(S390CPU *cpu, hwaddr addr, hwaddr len) cpu_physical_memory_unmap(sa, len, 1, len); return 0; } +#else +/* For user-only, tcg is always enabled. */ +#define tcg_enabled() true #endif /* CONFIG_USER_ONLY */ +void s390_cpu_set_psw(CPUS390XState *env, uint64_t mask, uint64_t addr) +{ +#ifndef CONFIG_USER_ONLY + uint64_t old_mask = env->psw.mask; +#endif + + env->psw.addr = addr; + env->psw.mask = mask; + + /* KVM will handle all WAITs and trigger a WAIT exit on disabled_wait */ + if (!tcg_enabled()) { + return; + } + env->cc_op = (mask >> 44) & 3; + +#ifndef CONFIG_USER_ONLY + if ((old_mask ^ mask) & PSW_MASK_PER) { + s390_cpu_recompute_watchpoints(env_cpu(env)); + } + + if (mask & PSW_MASK_WAIT) { + s390_handle_wait(env_archcpu(env)); + } +#endif +} + +uint64_t s390_cpu_get_psw_mask(CPUS390XState *env) +{ + uint64_t r = env->psw.mask; + + if (tcg_enabled()) { + uint64_t cc = calc_cc(env, env->cc_op, env->cc_src, + env->cc_dst, env->cc_vr); + + assert(cc <= 3); + r &= ~PSW_MASK_CC; + r |= cc << 44; + } + + return r; +} + void s390_cpu_dump_state(CPUState *cs, FILE *f, int flags) { S390CPU *cpu = S390_CPU(cs); CPUS390XState *env = &cpu->env; int i; - if (env->cc_op > 3) { - qemu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n", - env->psw.mask, env->psw.addr, cc_name(env->cc_op)); + qemu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64, + s390_cpu_get_psw_mask(env), env->psw.addr); + if (!tcg_enabled()) { + qemu_fprintf(f, "\n"); + } else if (env->cc_op > 3) { + qemu_fprintf(f, " cc %15s\n", cc_name(env->cc_op)); } else { - qemu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n", - env->psw.mask, env->psw.addr, env->cc_op); + qemu_fprintf(f, " cc %02x\n", env->cc_op); } for (i = 0; i < 16; i++) { diff --git a/target/s390x/helper.h b/target/s390x/helper.h index d4e4f3388f..ba045f559d 100644 --- a/target/s390x/helper.h +++ b/target/s390x/helper.h @@ -126,6 +126,7 @@ DEF_HELPER_FLAGS_1(stck, TCG_CALL_NO_RWG_SE, i64, env) DEF_HELPER_FLAGS_3(probe_write_access, TCG_CALL_NO_WG, void, env, i64, i64) /* === Vector Support Instructions === */ +DEF_HELPER_FLAGS_4(gvec_vbperm, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) DEF_HELPER_FLAGS_4(vll, TCG_CALL_NO_WG, void, env, ptr, i64, i64) DEF_HELPER_FLAGS_4(gvec_vpk16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) DEF_HELPER_FLAGS_4(gvec_vpk32, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) @@ -246,50 +247,77 @@ DEF_HELPER_6(gvec_vstrc_cc_rt16, void, ptr, cptr, cptr, cptr, env, i32) 
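The helper.c hunk above folds the old load_psw()/get_psw_mask() pair into s390_cpu_set_psw() and s390_cpu_get_psw_mask(), which are now compiled for user-only emulation as well and keep the lazily computed condition code in bits 44-45 of the PSW mask. As a minimal standalone sketch of that bit packing (not QEMU code; psw_mask_with_cc() and cc_from_psw_mask() are illustrative names):

```c
#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PSW_MASK_CC (3ULL << 44)

/* Fold a 2-bit condition code into bits 44-45 of the PSW mask, as
 * s390_cpu_get_psw_mask() does after calling calc_cc(). */
static uint64_t psw_mask_with_cc(uint64_t mask, uint64_t cc)
{
    assert(cc <= 3);
    return (mask & ~PSW_MASK_CC) | (cc << 44);
}

/* Recover the condition code, as s390_cpu_set_psw() does with
 * env->cc_op = (mask >> 44) & 3. */
static unsigned cc_from_psw_mask(uint64_t mask)
{
    return (mask >> 44) & 3;
}

int main(void)
{
    uint64_t mask = psw_mask_with_cc(0x0705000180000000ULL, 2);

    printf("mask=%016" PRIx64 " cc=%u\n", mask, cc_from_psw_mask(mask));
    return 0;
}
```

Having a single definition lets the gdbstub and the user-only build share the same PSW/cc handling instead of open-coding the deposit/extract in each caller.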
DEF_HELPER_6(gvec_vstrc_cc_rt32, void, ptr, cptr, cptr, cptr, env, i32) /* === Vector Floating-Point Instructions */ +DEF_HELPER_FLAGS_5(gvec_vfa32, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32) DEF_HELPER_FLAGS_5(gvec_vfa64, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32) -DEF_HELPER_FLAGS_5(gvec_vfa64s, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32) +DEF_HELPER_FLAGS_5(gvec_vfa128, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32) +DEF_HELPER_4(gvec_wfc32, void, cptr, cptr, env, i32) +DEF_HELPER_4(gvec_wfk32, void, cptr, cptr, env, i32) DEF_HELPER_4(gvec_wfc64, void, cptr, cptr, env, i32) DEF_HELPER_4(gvec_wfk64, void, cptr, cptr, env, i32) +DEF_HELPER_4(gvec_wfc128, void, cptr, cptr, env, i32) +DEF_HELPER_4(gvec_wfk128, void, cptr, cptr, env, i32) +DEF_HELPER_FLAGS_5(gvec_vfce32, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32) +DEF_HELPER_5(gvec_vfce32_cc, void, ptr, cptr, cptr, env, i32) DEF_HELPER_FLAGS_5(gvec_vfce64, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32) -DEF_HELPER_FLAGS_5(gvec_vfce64s, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32) DEF_HELPER_5(gvec_vfce64_cc, void, ptr, cptr, cptr, env, i32) -DEF_HELPER_5(gvec_vfce64s_cc, void, ptr, cptr, cptr, env, i32) +DEF_HELPER_FLAGS_5(gvec_vfce128, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32) +DEF_HELPER_5(gvec_vfce128_cc, void, ptr, cptr, cptr, env, i32) +DEF_HELPER_FLAGS_5(gvec_vfch32, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32) +DEF_HELPER_5(gvec_vfch32_cc, void, ptr, cptr, cptr, env, i32) DEF_HELPER_FLAGS_5(gvec_vfch64, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32) -DEF_HELPER_FLAGS_5(gvec_vfch64s, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32) DEF_HELPER_5(gvec_vfch64_cc, void, ptr, cptr, cptr, env, i32) -DEF_HELPER_5(gvec_vfch64s_cc, void, ptr, cptr, cptr, env, i32) +DEF_HELPER_FLAGS_5(gvec_vfch128, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32) +DEF_HELPER_5(gvec_vfch128_cc, void, ptr, cptr, cptr, env, i32) +DEF_HELPER_FLAGS_5(gvec_vfche32, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32) +DEF_HELPER_5(gvec_vfche32_cc, void, ptr, cptr, cptr, env, i32) DEF_HELPER_FLAGS_5(gvec_vfche64, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32) -DEF_HELPER_FLAGS_5(gvec_vfche64s, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32) DEF_HELPER_5(gvec_vfche64_cc, void, ptr, cptr, cptr, env, i32) -DEF_HELPER_5(gvec_vfche64s_cc, void, ptr, cptr, cptr, env, i32) +DEF_HELPER_FLAGS_5(gvec_vfche128, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32) +DEF_HELPER_5(gvec_vfche128_cc, void, ptr, cptr, cptr, env, i32) DEF_HELPER_FLAGS_4(gvec_vcdg64, TCG_CALL_NO_WG, void, ptr, cptr, env, i32) -DEF_HELPER_FLAGS_4(gvec_vcdg64s, TCG_CALL_NO_WG, void, ptr, cptr, env, i32) DEF_HELPER_FLAGS_4(gvec_vcdlg64, TCG_CALL_NO_WG, void, ptr, cptr, env, i32) -DEF_HELPER_FLAGS_4(gvec_vcdlg64s, TCG_CALL_NO_WG, void, ptr, cptr, env, i32) DEF_HELPER_FLAGS_4(gvec_vcgd64, TCG_CALL_NO_WG, void, ptr, cptr, env, i32) -DEF_HELPER_FLAGS_4(gvec_vcgd64s, TCG_CALL_NO_WG, void, ptr, cptr, env, i32) DEF_HELPER_FLAGS_4(gvec_vclgd64, TCG_CALL_NO_WG, void, ptr, cptr, env, i32) -DEF_HELPER_FLAGS_4(gvec_vclgd64s, TCG_CALL_NO_WG, void, ptr, cptr, env, i32) +DEF_HELPER_FLAGS_5(gvec_vfd32, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32) DEF_HELPER_FLAGS_5(gvec_vfd64, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32) -DEF_HELPER_FLAGS_5(gvec_vfd64s, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32) +DEF_HELPER_FLAGS_5(gvec_vfd128, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32) +DEF_HELPER_FLAGS_4(gvec_vfi32, TCG_CALL_NO_WG, void, ptr, cptr, env, i32) 
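These helper.h changes drop the dedicated `*64s` single-element variants and instead declare one helper per element width (32/64/128), with the new 32- and 128-bit entries generally gated on the vector-enhancements facility in the translator. The single-element and other modifier bits now ride along in the simd_data immediate, which the translations build with deposit32(). A hedged sketch of that packing, using local stand-ins for QEMU's deposit32()/extract32():

```c
#include <stdint.h>
#include <stdio.h>

/* Local re-implementations of QEMU's deposit32()/extract32(), for
 * illustration only. */
static uint32_t deposit32(uint32_t value, int start, int length, uint32_t field)
{
    uint32_t mask = ((1u << length) - 1) << start;

    return (value & ~mask) | ((field << start) & mask);
}

static uint32_t extract32(uint32_t value, int start, int length)
{
    return (value >> start) & ((1u << length) - 1);
}

int main(void)
{
    uint32_t m5 = 0x8;                             /* e.g. single-element bit */
    uint32_t m6 = 0xc;                             /* e.g. extra modifier field */
    uint32_t simd_data = deposit32(m5, 4, 4, m6);  /* as op_vfmax() does below */

    printf("simd_data=0x%02x -> m5=0x%x m6=0x%x\n", simd_data,
           extract32(simd_data, 0, 4), extract32(simd_data, 4, 4));
    return 0;
}
```

When no helper matches the requested width (fn stays NULL), the reworked translations below raise a specification exception instead of asserting.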
DEF_HELPER_FLAGS_4(gvec_vfi64, TCG_CALL_NO_WG, void, ptr, cptr, env, i32) -DEF_HELPER_FLAGS_4(gvec_vfi64s, TCG_CALL_NO_WG, void, ptr, cptr, env, i32) +DEF_HELPER_FLAGS_4(gvec_vfi128, TCG_CALL_NO_WG, void, ptr, cptr, env, i32) DEF_HELPER_FLAGS_4(gvec_vfll32, TCG_CALL_NO_WG, void, ptr, cptr, env, i32) -DEF_HELPER_FLAGS_4(gvec_vfll32s, TCG_CALL_NO_WG, void, ptr, cptr, env, i32) +DEF_HELPER_FLAGS_4(gvec_vfll64, TCG_CALL_NO_WG, void, ptr, cptr, env, i32) DEF_HELPER_FLAGS_4(gvec_vflr64, TCG_CALL_NO_WG, void, ptr, cptr, env, i32) -DEF_HELPER_FLAGS_4(gvec_vflr64s, TCG_CALL_NO_WG, void, ptr, cptr, env, i32) +DEF_HELPER_FLAGS_4(gvec_vflr128, TCG_CALL_NO_WG, void, ptr, cptr, env, i32) +DEF_HELPER_FLAGS_5(gvec_vfm32, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32) DEF_HELPER_FLAGS_5(gvec_vfm64, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32) -DEF_HELPER_FLAGS_5(gvec_vfm64s, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32) +DEF_HELPER_FLAGS_5(gvec_vfm128, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32) +DEF_HELPER_FLAGS_5(gvec_vfmax32, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32) +DEF_HELPER_FLAGS_5(gvec_vfmax64, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32) +DEF_HELPER_FLAGS_5(gvec_vfmax128, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32) +DEF_HELPER_FLAGS_5(gvec_vfmin32, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32) +DEF_HELPER_FLAGS_5(gvec_vfmin64, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32) +DEF_HELPER_FLAGS_5(gvec_vfmin128, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32) +DEF_HELPER_FLAGS_6(gvec_vfma32, TCG_CALL_NO_WG, void, ptr, cptr, cptr, cptr, env, i32) DEF_HELPER_FLAGS_6(gvec_vfma64, TCG_CALL_NO_WG, void, ptr, cptr, cptr, cptr, env, i32) -DEF_HELPER_FLAGS_6(gvec_vfma64s, TCG_CALL_NO_WG, void, ptr, cptr, cptr, cptr, env, i32) +DEF_HELPER_FLAGS_6(gvec_vfma128, TCG_CALL_NO_WG, void, ptr, cptr, cptr, cptr, env, i32) +DEF_HELPER_FLAGS_6(gvec_vfms32, TCG_CALL_NO_WG, void, ptr, cptr, cptr, cptr, env, i32) DEF_HELPER_FLAGS_6(gvec_vfms64, TCG_CALL_NO_WG, void, ptr, cptr, cptr, cptr, env, i32) -DEF_HELPER_FLAGS_6(gvec_vfms64s, TCG_CALL_NO_WG, void, ptr, cptr, cptr, cptr, env, i32) +DEF_HELPER_FLAGS_6(gvec_vfms128, TCG_CALL_NO_WG, void, ptr, cptr, cptr, cptr, env, i32) +DEF_HELPER_FLAGS_6(gvec_vfnma32, TCG_CALL_NO_WG, void, ptr, cptr, cptr, cptr, env, i32) +DEF_HELPER_FLAGS_6(gvec_vfnma64, TCG_CALL_NO_WG, void, ptr, cptr, cptr, cptr, env, i32) +DEF_HELPER_FLAGS_6(gvec_vfnma128, TCG_CALL_NO_WG, void, ptr, cptr, cptr, cptr, env, i32) +DEF_HELPER_FLAGS_6(gvec_vfnms32, TCG_CALL_NO_WG, void, ptr, cptr, cptr, cptr, env, i32) +DEF_HELPER_FLAGS_6(gvec_vfnms64, TCG_CALL_NO_WG, void, ptr, cptr, cptr, cptr, env, i32) +DEF_HELPER_FLAGS_6(gvec_vfnms128, TCG_CALL_NO_WG, void, ptr, cptr, cptr, cptr, env, i32) +DEF_HELPER_FLAGS_4(gvec_vfsq32, TCG_CALL_NO_WG, void, ptr, cptr, env, i32) DEF_HELPER_FLAGS_4(gvec_vfsq64, TCG_CALL_NO_WG, void, ptr, cptr, env, i32) -DEF_HELPER_FLAGS_4(gvec_vfsq64s, TCG_CALL_NO_WG, void, ptr, cptr, env, i32) +DEF_HELPER_FLAGS_4(gvec_vfsq128, TCG_CALL_NO_WG, void, ptr, cptr, env, i32) +DEF_HELPER_FLAGS_5(gvec_vfs32, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32) DEF_HELPER_FLAGS_5(gvec_vfs64, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32) -DEF_HELPER_FLAGS_5(gvec_vfs64s, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32) +DEF_HELPER_FLAGS_5(gvec_vfs128, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32) +DEF_HELPER_4(gvec_vftci32, void, ptr, cptr, env, i32) DEF_HELPER_4(gvec_vftci64, void, ptr, cptr, env, i32) -DEF_HELPER_4(gvec_vftci64s, void, ptr, cptr, env, i32) 
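Further down, insn-data.def gains VECTOR MULTIPLY SUM LOGICAL (VMSL) and the new op_vmsl() translation synthesizes its 128-bit result from 64-bit TCG ops (tcg_gen_mulu2_i64/tcg_gen_add2_i64). As a rough model of the intended doubleword semantics, assuming a compiler with unsigned __int128 (names are illustrative; this is not the QEMU implementation):

```c
#include <stdint.h>
#include <stdio.h>

/* Element 0 is the even (most significant) doubleword of each vector. */
static unsigned __int128 vmsl64(const uint64_t v2[2], const uint64_t v3[2],
                                const uint64_t v4[2], unsigned m6)
{
    unsigned __int128 p0 = (unsigned __int128)v2[0] * v3[0];
    unsigned __int128 p1 = (unsigned __int128)v2[1] * v3[1];
    unsigned __int128 acc = ((unsigned __int128)v4[0] << 64) | v4[1];

    if (m6 & 8) {          /* m6 bit 3: double the even product */
        p0 += p0;
    }
    if (m6 & 4) {          /* m6 bit 2: double the odd product */
        p1 += p1;
    }
    return p0 + p1 + acc;  /* 128-bit wrap-around sum, as in the TCG version */
}

int main(void)
{
    uint64_t v2[2] = { 3, 5 }, v3[2] = { 7, 11 }, v4[2] = { 0, 100 };
    unsigned __int128 r = vmsl64(v2, v3, v4, 0);

    printf("high=%016llx low=%016llx\n",
           (unsigned long long)(r >> 64), (unsigned long long)r);
    return 0;
}
```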
+DEF_HELPER_4(gvec_vftci128, void, ptr, cptr, env, i32) #ifndef CONFIG_USER_ONLY DEF_HELPER_3(servc, i32, env, i64, i64) diff --git a/target/s390x/insn-data.def b/target/s390x/insn-data.def index 0bb1886a2e..3e5594210c 100644 --- a/target/s390x/insn-data.def +++ b/target/s390x/insn-data.def @@ -989,6 +989,8 @@ /* === Vector Support Instructions === */ +/* VECTOR BIT PERMUTE */ + E(0xe785, VBPERM, VRR_c, VE, 0, 0, 0, 0, vbperm, 0, 0, IF_VEC) /* VECTOR GATHER ELEMENT */ E(0xe713, VGEF, VRV, V, la2, 0, 0, 0, vge, 0, ES_32, IF_VEC) E(0xe712, VGEG, VRV, V, la2, 0, 0, 0, vge, 0, ES_64, IF_VEC) @@ -1149,6 +1151,8 @@ F(0xe7a7, VMO, VRR_c, V, 0, 0, 0, 0, vm, 0, IF_VEC) /* VECTOR MULTIPLY LOGICAL ODD */ F(0xe7a5, VMLO, VRR_c, V, 0, 0, 0, 0, vm, 0, IF_VEC) +/* VECTOR MULTIPLY SUM LOGICAL */ + F(0xe7b8, VMSL, VRR_d, VE, 0, 0, 0, 0, vmsl, 0, IF_VEC) /* VECTOR NAND */ F(0xe76e, VNN, VRR_c, VE, 0, 0, 0, 0, vnn, 0, IF_VEC) /* VECTOR NOR */ @@ -1245,16 +1249,24 @@ F(0xe7e5, VFD, VRR_c, V, 0, 0, 0, 0, vfa, 0, IF_VEC) /* VECTOR LOAD FP INTEGER */ F(0xe7c7, VFI, VRR_a, V, 0, 0, 0, 0, vcdg, 0, IF_VEC) -/* VECTOR LOAD LENGTHENED */ +/* VECTOR FP LOAD LENGTHENED */ F(0xe7c4, VFLL, VRR_a, V, 0, 0, 0, 0, vfll, 0, IF_VEC) -/* VECTOR LOAD ROUNDED */ +/* VECTOR FP LOAD ROUNDED */ F(0xe7c5, VFLR, VRR_a, V, 0, 0, 0, 0, vcdg, 0, IF_VEC) +/* VECTOR FP MAXIMUM */ + F(0xe7ef, VFMAX, VRR_c, VE, 0, 0, 0, 0, vfmax, 0, IF_VEC) +/* VECTOR FP MINIMUM */ + F(0xe7ee, VFMIN, VRR_c, VE, 0, 0, 0, 0, vfmax, 0, IF_VEC) /* VECTOR FP MULTIPLY */ F(0xe7e7, VFM, VRR_c, V, 0, 0, 0, 0, vfa, 0, IF_VEC) /* VECTOR FP MULTIPLY AND ADD */ F(0xe78f, VFMA, VRR_e, V, 0, 0, 0, 0, vfma, 0, IF_VEC) /* VECTOR FP MULTIPLY AND SUBTRACT */ F(0xe78e, VFMS, VRR_e, V, 0, 0, 0, 0, vfma, 0, IF_VEC) +/* VECTOR FP NEGATIVE MULTIPLY AND ADD */ + F(0xe79f, VFNMA, VRR_e, VE, 0, 0, 0, 0, vfma, 0, IF_VEC) +/* VECTOR FP NEGATIVE MULTIPLY AND SUBTRACT */ + F(0xe79e, VFNMS, VRR_e, VE, 0, 0, 0, 0, vfma, 0, IF_VEC) /* VECTOR FP PERFORM SIGN OPERATION */ F(0xe7cc, VFPSO, VRR_a, V, 0, 0, 0, 0, vfpso, 0, IF_VEC) /* VECTOR FP SQUARE ROOT */ diff --git a/target/s390x/internal.h b/target/s390x/internal.h index 11515bb617..9256275376 100644 --- a/target/s390x/internal.h +++ b/target/s390x/internal.h @@ -235,10 +235,6 @@ int s390_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs, const char *cc_name(enum cc_op cc_op); uint32_t calc_cc(CPUS390XState *env, uint32_t cc_op, uint64_t src, uint64_t dst, uint64_t vr); -#ifndef CONFIG_USER_ONLY -void load_psw(CPUS390XState *env, uint64_t mask, uint64_t addr); -#endif /* CONFIG_USER_ONLY */ - /* cpu.c */ #ifndef CONFIG_USER_ONLY @@ -288,6 +284,15 @@ uint8_t s390_softfloat_exc_to_ieee(unsigned int exc); int s390_swap_bfp_rounding_mode(CPUS390XState *env, int m3); void s390_restore_bfp_rounding_mode(CPUS390XState *env, int old_mode); int float_comp_to_cc(CPUS390XState *env, int float_compare); + +#define DCMASK_ZERO 0x0c00 +#define DCMASK_NORMAL 0x0300 +#define DCMASK_SUBNORMAL 0x00c0 +#define DCMASK_INFINITY 0x0030 +#define DCMASK_QUIET_NAN 0x000c +#define DCMASK_SIGNALING_NAN 0x0003 +#define DCMASK_NAN 0x000f +#define DCMASK_NEGATIVE 0x0555 uint16_t float32_dcmask(CPUS390XState *env, float32 f1); uint16_t float64_dcmask(CPUS390XState *env, float64 f1); uint16_t float128_dcmask(CPUS390XState *env, float128 f1); @@ -303,7 +308,6 @@ void s390_cpu_gdb_init(CPUState *cs); void s390_cpu_dump_state(CPUState *cpu, FILE *f, int flags); void do_restart_interrupt(CPUS390XState *env); #ifndef CONFIG_USER_ONLY -uint64_t 
get_psw_mask(CPUS390XState *env); void s390_cpu_recompute_watchpoints(CPUState *cs); void s390x_tod_timer(void *opaque); void s390x_cpu_timer(void *opaque); diff --git a/target/s390x/kvm-stub.c b/target/s390x/kvm-stub.c index 9970b5a8c7..8a308cfebb 100644 --- a/target/s390x/kvm-stub.c +++ b/target/s390x/kvm-stub.c @@ -49,11 +49,6 @@ int kvm_s390_get_ri(void) return 0; } -int kvm_s390_get_gs(void) -{ - return 0; -} - int kvm_s390_get_clock(uint8_t *tod_high, uint64_t *tod_low) { return -ENOSYS; diff --git a/target/s390x/kvm.c b/target/s390x/kvm.c index 4fb3bbfef5..2388924587 100644 --- a/target/s390x/kvm.c +++ b/target/s390x/kvm.c @@ -154,7 +154,6 @@ static int cap_async_pf; static int cap_mem_op; static int cap_s390_irq; static int cap_ri; -static int cap_gs; static int cap_hpage_1m; static int cap_vcpu_resets; static int cap_protected; @@ -369,9 +368,7 @@ int kvm_arch_init(MachineState *ms, KVMState *s) } } if (cpu_model_allowed()) { - if (kvm_vm_enable_cap(s, KVM_CAP_S390_GS, 0) == 0) { - cap_gs = 1; - } + kvm_vm_enable_cap(s, KVM_CAP_S390_GS, 0); } /* @@ -2039,11 +2036,6 @@ int kvm_s390_get_ri(void) return cap_ri; } -int kvm_s390_get_gs(void) -{ - return cap_gs; -} - int kvm_s390_set_cpu_state(S390CPU *cpu, uint8_t cpu_state) { struct kvm_mp_state mp_state = {}; diff --git a/target/s390x/kvm_s390x.h b/target/s390x/kvm_s390x.h index 25bbe98b25..05a5e1e6f4 100644 --- a/target/s390x/kvm_s390x.h +++ b/target/s390x/kvm_s390x.h @@ -27,7 +27,6 @@ void kvm_s390_vcpu_interrupt_pre_save(S390CPU *cpu); int kvm_s390_vcpu_interrupt_post_load(S390CPU *cpu); int kvm_s390_get_hpage_1m(void); int kvm_s390_get_ri(void); -int kvm_s390_get_gs(void); int kvm_s390_get_clock(uint8_t *tod_high, uint64_t *tod_clock); int kvm_s390_get_clock_ext(uint8_t *tod_high, uint64_t *tod_clock); int kvm_s390_set_clock(uint8_t tod_high, uint64_t tod_clock); diff --git a/target/s390x/sigp.c b/target/s390x/sigp.c index c604f17710..c2d5cdf061 100644 --- a/target/s390x/sigp.c +++ b/target/s390x/sigp.c @@ -235,7 +235,8 @@ static void sigp_restart(CPUState *cs, run_on_cpu_data arg) cpu_synchronize_state(cs); /* * Set OPERATING (and unhalting) before loading the restart PSW. - * load_psw() will then properly halt the CPU again if necessary (TCG). + * s390_cpu_set_psw() will then properly halt the CPU again if + * necessary (TCG). 
*/ s390_cpu_set_state(S390_CPU_STATE_OPERATING, cpu); do_restart_interrupt(&cpu->env); diff --git a/target/s390x/translate_vx.c.inc b/target/s390x/translate_vx.c.inc index eb767f5288..a9d51b1f4c 100644 --- a/target/s390x/translate_vx.c.inc +++ b/target/s390x/translate_vx.c.inc @@ -327,6 +327,14 @@ static void gen_addi2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah, tcg_temp_free_i64(bh); } +static DisasJumpType op_vbperm(DisasContext *s, DisasOps *o) +{ + gen_gvec_3_ool(get_field(s, v1), get_field(s, v2), get_field(s, v3), 0, + gen_helper_gvec_vbperm); + + return DISAS_NEXT; +} + static DisasJumpType op_vge(DisasContext *s, DisasOps *o) { const uint8_t es = s->insn->data; @@ -1771,6 +1779,56 @@ static DisasJumpType op_vm(DisasContext *s, DisasOps *o) return DISAS_NEXT; } +static DisasJumpType op_vmsl(DisasContext *s, DisasOps *o) +{ + TCGv_i64 l1, h1, l2, h2; + + if (get_field(s, m4) != ES_64) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + + l1 = tcg_temp_new_i64(); + h1 = tcg_temp_new_i64(); + l2 = tcg_temp_new_i64(); + h2 = tcg_temp_new_i64(); + + /* Multipy both even elements from v2 and v3 */ + read_vec_element_i64(l1, get_field(s, v2), 0, ES_64); + read_vec_element_i64(h1, get_field(s, v3), 0, ES_64); + tcg_gen_mulu2_i64(l1, h1, l1, h1); + /* Shift result left by one (x2) if requested */ + if (extract32(get_field(s, m6), 3, 1)) { + tcg_gen_add2_i64(l1, h1, l1, h1, l1, h1); + } + + /* Multipy both odd elements from v2 and v3 */ + read_vec_element_i64(l2, get_field(s, v2), 1, ES_64); + read_vec_element_i64(h2, get_field(s, v3), 1, ES_64); + tcg_gen_mulu2_i64(l2, h2, l2, h2); + /* Shift result left by one (x2) if requested */ + if (extract32(get_field(s, m6), 2, 1)) { + tcg_gen_add2_i64(l2, h2, l2, h2, l2, h2); + } + + /* Add both intermediate results */ + tcg_gen_add2_i64(l1, h1, l1, h1, l2, h2); + /* Add whole v4 */ + read_vec_element_i64(h2, get_field(s, v4), 0, ES_64); + read_vec_element_i64(l2, get_field(s, v4), 1, ES_64); + tcg_gen_add2_i64(l1, h1, l1, h1, l2, h2); + + /* Store final result into v1. */ + write_vec_element_i64(h1, get_field(s, v1), 0, ES_64); + write_vec_element_i64(l1, get_field(s, v1), 1, ES_64); + + tcg_temp_free_i64(l1); + tcg_temp_free_i64(h1); + tcg_temp_free_i64(l2); + tcg_temp_free_i64(h2); + return DISAS_NEXT; +} + static DisasJumpType op_vnn(DisasContext *s, DisasOps *o) { gen_gvec_fn_3(nand, ES_8, get_field(s, v1), @@ -2443,32 +2501,96 @@ static DisasJumpType op_vfa(DisasContext *s, DisasOps *o) { const uint8_t fpf = get_field(s, m4); const uint8_t m5 = get_field(s, m5); - const bool se = extract32(m5, 3, 1); - gen_helper_gvec_3_ptr *fn; - - if (fpf != FPF_LONG || extract32(m5, 0, 3)) { - gen_program_exception(s, PGM_SPECIFICATION); - return DISAS_NORETURN; - } + gen_helper_gvec_3_ptr *fn = NULL; switch (s->fields.op2) { case 0xe3: - fn = se ? gen_helper_gvec_vfa64s : gen_helper_gvec_vfa64; + switch (fpf) { + case FPF_SHORT: + if (s390_has_feat(S390_FEAT_VECTOR_ENH)) { + fn = gen_helper_gvec_vfa32; + } + break; + case FPF_LONG: + fn = gen_helper_gvec_vfa64; + break; + case FPF_EXT: + if (s390_has_feat(S390_FEAT_VECTOR_ENH)) { + fn = gen_helper_gvec_vfa128; + } + break; + default: + break; + } break; case 0xe5: - fn = se ? 
gen_helper_gvec_vfd64s : gen_helper_gvec_vfd64; + switch (fpf) { + case FPF_SHORT: + if (s390_has_feat(S390_FEAT_VECTOR_ENH)) { + fn = gen_helper_gvec_vfd32; + } + break; + case FPF_LONG: + fn = gen_helper_gvec_vfd64; + break; + case FPF_EXT: + if (s390_has_feat(S390_FEAT_VECTOR_ENH)) { + fn = gen_helper_gvec_vfd128; + } + break; + default: + break; + } break; case 0xe7: - fn = se ? gen_helper_gvec_vfm64s : gen_helper_gvec_vfm64; + switch (fpf) { + case FPF_SHORT: + if (s390_has_feat(S390_FEAT_VECTOR_ENH)) { + fn = gen_helper_gvec_vfm32; + } + break; + case FPF_LONG: + fn = gen_helper_gvec_vfm64; + break; + case FPF_EXT: + if (s390_has_feat(S390_FEAT_VECTOR_ENH)) { + fn = gen_helper_gvec_vfm128; + } + break; + default: + break; + } break; case 0xe2: - fn = se ? gen_helper_gvec_vfs64s : gen_helper_gvec_vfs64; + switch (fpf) { + case FPF_SHORT: + if (s390_has_feat(S390_FEAT_VECTOR_ENH)) { + fn = gen_helper_gvec_vfs32; + } + break; + case FPF_LONG: + fn = gen_helper_gvec_vfs64; + break; + case FPF_EXT: + if (s390_has_feat(S390_FEAT_VECTOR_ENH)) { + fn = gen_helper_gvec_vfs128; + } + break; + default: + break; + } break; default: g_assert_not_reached(); } + + if (!fn || extract32(m5, 0, 3)) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + gen_gvec_3_ptr(get_field(s, v1), get_field(s, v2), - get_field(s, v3), cpu_env, 0, fn); + get_field(s, v3), cpu_env, m5, fn); return DISAS_NEXT; } @@ -2476,19 +2598,41 @@ static DisasJumpType op_wfc(DisasContext *s, DisasOps *o) { const uint8_t fpf = get_field(s, m3); const uint8_t m4 = get_field(s, m4); + gen_helper_gvec_2_ptr *fn = NULL; - if (fpf != FPF_LONG || m4) { + switch (fpf) { + case FPF_SHORT: + if (s390_has_feat(S390_FEAT_VECTOR_ENH)) { + fn = gen_helper_gvec_wfk32; + if (s->fields.op2 == 0xcb) { + fn = gen_helper_gvec_wfc32; + } + } + break; + case FPF_LONG: + fn = gen_helper_gvec_wfk64; + if (s->fields.op2 == 0xcb) { + fn = gen_helper_gvec_wfc64; + } + break; + case FPF_EXT: + if (s390_has_feat(S390_FEAT_VECTOR_ENH)) { + fn = gen_helper_gvec_wfk128; + if (s->fields.op2 == 0xcb) { + fn = gen_helper_gvec_wfc128; + } + } + break; + default: + break; + }; + + if (!fn || m4) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } - if (s->fields.op2 == 0xcb) { - gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2), - cpu_env, 0, gen_helper_gvec_wfc64); - } else { - gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2), - cpu_env, 0, gen_helper_gvec_wfk64); - } + gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2), cpu_env, 0, fn); set_cc_static(s); return DISAS_NEXT; } @@ -2498,46 +2642,68 @@ static DisasJumpType op_vfc(DisasContext *s, DisasOps *o) const uint8_t fpf = get_field(s, m4); const uint8_t m5 = get_field(s, m5); const uint8_t m6 = get_field(s, m6); - const bool se = extract32(m5, 3, 1); const bool cs = extract32(m6, 0, 1); - gen_helper_gvec_3_ptr *fn; + const bool sq = extract32(m5, 2, 1); + gen_helper_gvec_3_ptr *fn = NULL; - if (fpf != FPF_LONG || extract32(m5, 0, 3) || extract32(m6, 1, 3)) { - gen_program_exception(s, PGM_SPECIFICATION); - return DISAS_NORETURN; - } - - if (cs) { - switch (s->fields.op2) { - case 0xe8: - fn = se ? gen_helper_gvec_vfce64s_cc : gen_helper_gvec_vfce64_cc; + switch (s->fields.op2) { + case 0xe8: + switch (fpf) { + case FPF_SHORT: + fn = cs ? gen_helper_gvec_vfce32_cc : gen_helper_gvec_vfce32; break; - case 0xeb: - fn = se ? gen_helper_gvec_vfch64s_cc : gen_helper_gvec_vfch64_cc; + case FPF_LONG: + fn = cs ? 
gen_helper_gvec_vfce64_cc : gen_helper_gvec_vfce64; break; - case 0xea: - fn = se ? gen_helper_gvec_vfche64s_cc : gen_helper_gvec_vfche64_cc; + case FPF_EXT: + fn = cs ? gen_helper_gvec_vfce128_cc : gen_helper_gvec_vfce128; break; default: - g_assert_not_reached(); + break; } - } else { - switch (s->fields.op2) { - case 0xe8: - fn = se ? gen_helper_gvec_vfce64s : gen_helper_gvec_vfce64; + break; + case 0xeb: + switch (fpf) { + case FPF_SHORT: + fn = cs ? gen_helper_gvec_vfch32_cc : gen_helper_gvec_vfch32; break; - case 0xeb: - fn = se ? gen_helper_gvec_vfch64s : gen_helper_gvec_vfch64; + case FPF_LONG: + fn = cs ? gen_helper_gvec_vfch64_cc : gen_helper_gvec_vfch64; break; - case 0xea: - fn = se ? gen_helper_gvec_vfche64s : gen_helper_gvec_vfche64; + case FPF_EXT: + fn = cs ? gen_helper_gvec_vfch128_cc : gen_helper_gvec_vfch128; break; default: - g_assert_not_reached(); + break; + } + break; + case 0xea: + switch (fpf) { + case FPF_SHORT: + fn = cs ? gen_helper_gvec_vfche32_cc : gen_helper_gvec_vfche32; + break; + case FPF_LONG: + fn = cs ? gen_helper_gvec_vfche64_cc : gen_helper_gvec_vfche64; + break; + case FPF_EXT: + fn = cs ? gen_helper_gvec_vfche128_cc : gen_helper_gvec_vfche128; + break; + default: + break; } + break; + default: + g_assert_not_reached(); } - gen_gvec_3_ptr(get_field(s, v1), get_field(s, v2), - get_field(s, v3), cpu_env, 0, fn); + + if (!fn || extract32(m5, 0, 2) || extract32(m6, 1, 3) || + (!s390_has_feat(S390_FEAT_VECTOR_ENH) && (fpf != FPF_LONG || sq))) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + + gen_gvec_3_ptr(get_field(s, v1), get_field(s, v2), get_field(s, v3), + cpu_env, m5, fn); if (cs) { set_cc_static(s); } @@ -2549,36 +2715,72 @@ static DisasJumpType op_vcdg(DisasContext *s, DisasOps *o) const uint8_t fpf = get_field(s, m3); const uint8_t m4 = get_field(s, m4); const uint8_t erm = get_field(s, m5); - const bool se = extract32(m4, 3, 1); - gen_helper_gvec_2_ptr *fn; + gen_helper_gvec_2_ptr *fn = NULL; - if (fpf != FPF_LONG || extract32(m4, 0, 2) || erm > 7 || erm == 2) { - gen_program_exception(s, PGM_SPECIFICATION); - return DISAS_NORETURN; - } switch (s->fields.op2) { case 0xc3: - fn = se ? gen_helper_gvec_vcdg64s : gen_helper_gvec_vcdg64; + if (fpf == FPF_LONG) { + fn = gen_helper_gvec_vcdg64; + } break; case 0xc1: - fn = se ? gen_helper_gvec_vcdlg64s : gen_helper_gvec_vcdlg64; + if (fpf == FPF_LONG) { + fn = gen_helper_gvec_vcdlg64; + } break; case 0xc2: - fn = se ? gen_helper_gvec_vcgd64s : gen_helper_gvec_vcgd64; + if (fpf == FPF_LONG) { + fn = gen_helper_gvec_vcgd64; + } break; case 0xc0: - fn = se ? gen_helper_gvec_vclgd64s : gen_helper_gvec_vclgd64; + if (fpf == FPF_LONG) { + fn = gen_helper_gvec_vclgd64; + } break; case 0xc7: - fn = se ? gen_helper_gvec_vfi64s : gen_helper_gvec_vfi64; + switch (fpf) { + case FPF_SHORT: + if (s390_has_feat(S390_FEAT_VECTOR_ENH)) { + fn = gen_helper_gvec_vfi32; + } + break; + case FPF_LONG: + fn = gen_helper_gvec_vfi64; + break; + case FPF_EXT: + if (s390_has_feat(S390_FEAT_VECTOR_ENH)) { + fn = gen_helper_gvec_vfi128; + } + break; + default: + break; + } break; case 0xc5: - fn = se ? 
gen_helper_gvec_vflr64s : gen_helper_gvec_vflr64; + switch (fpf) { + case FPF_LONG: + fn = gen_helper_gvec_vflr64; + break; + case FPF_EXT: + if (s390_has_feat(S390_FEAT_VECTOR_ENH)) { + fn = gen_helper_gvec_vflr128; + } + break; + default: + break; + } break; default: g_assert_not_reached(); } + + if (!fn || extract32(m4, 0, 2) || erm > 7 || erm == 2) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2), cpu_env, deposit32(m4, 4, 4, erm), fn); return DISAS_NEXT; @@ -2588,18 +2790,71 @@ static DisasJumpType op_vfll(DisasContext *s, DisasOps *o) { const uint8_t fpf = get_field(s, m3); const uint8_t m4 = get_field(s, m4); - gen_helper_gvec_2_ptr *fn = gen_helper_gvec_vfll32; + gen_helper_gvec_2_ptr *fn = NULL; - if (fpf != FPF_SHORT || extract32(m4, 0, 3)) { + switch (fpf) { + case FPF_SHORT: + fn = gen_helper_gvec_vfll32; + break; + case FPF_LONG: + if (s390_has_feat(S390_FEAT_VECTOR_ENH)) { + fn = gen_helper_gvec_vfll64; + } + break; + default: + break; + } + + if (!fn || extract32(m4, 0, 3)) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } - if (extract32(m4, 3, 1)) { - fn = gen_helper_gvec_vfll32s; + gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2), cpu_env, m4, fn); + return DISAS_NEXT; +} + +static DisasJumpType op_vfmax(DisasContext *s, DisasOps *o) +{ + const uint8_t fpf = get_field(s, m4); + const uint8_t m6 = get_field(s, m6); + const uint8_t m5 = get_field(s, m5); + gen_helper_gvec_3_ptr *fn; + + if (m6 == 5 || m6 == 6 || m6 == 7 || m6 > 13) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; } - gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2), cpu_env, - 0, fn); + + switch (fpf) { + case FPF_SHORT: + if (s->fields.op2 == 0xef) { + fn = gen_helper_gvec_vfmax32; + } else { + fn = gen_helper_gvec_vfmin32; + } + break; + case FPF_LONG: + if (s->fields.op2 == 0xef) { + fn = gen_helper_gvec_vfmax64; + } else { + fn = gen_helper_gvec_vfmin64; + } + break; + case FPF_EXT: + if (s->fields.op2 == 0xef) { + fn = gen_helper_gvec_vfmax128; + } else { + fn = gen_helper_gvec_vfmin128; + } + break; + default: + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + + gen_gvec_3_ptr(get_field(s, v1), get_field(s, v2), get_field(s, v3), + cpu_env, deposit32(m5, 4, 4, m6), fn); return DISAS_NEXT; } @@ -2607,22 +2862,88 @@ static DisasJumpType op_vfma(DisasContext *s, DisasOps *o) { const uint8_t m5 = get_field(s, m5); const uint8_t fpf = get_field(s, m6); - const bool se = extract32(m5, 3, 1); - gen_helper_gvec_4_ptr *fn; + gen_helper_gvec_4_ptr *fn = NULL; - if (fpf != FPF_LONG || extract32(m5, 0, 3)) { + switch (s->fields.op2) { + case 0x8f: + switch (fpf) { + case FPF_SHORT: + if (s390_has_feat(S390_FEAT_VECTOR_ENH)) { + fn = gen_helper_gvec_vfma32; + } + break; + case FPF_LONG: + fn = gen_helper_gvec_vfma64; + break; + case FPF_EXT: + if (s390_has_feat(S390_FEAT_VECTOR_ENH)) { + fn = gen_helper_gvec_vfma128; + } + break; + default: + break; + } + break; + case 0x8e: + switch (fpf) { + case FPF_SHORT: + if (s390_has_feat(S390_FEAT_VECTOR_ENH)) { + fn = gen_helper_gvec_vfms32; + } + break; + case FPF_LONG: + fn = gen_helper_gvec_vfms64; + break; + case FPF_EXT: + if (s390_has_feat(S390_FEAT_VECTOR_ENH)) { + fn = gen_helper_gvec_vfms128; + } + break; + default: + break; + } + break; + case 0x9f: + switch (fpf) { + case FPF_SHORT: + fn = gen_helper_gvec_vfnma32; + break; + case FPF_LONG: + fn = gen_helper_gvec_vfnma64; + break; + case FPF_EXT: + fn = 
gen_helper_gvec_vfnma128; + break; + default: + break; + } + break; + case 0x9e: + switch (fpf) { + case FPF_SHORT: + fn = gen_helper_gvec_vfnms32; + break; + case FPF_LONG: + fn = gen_helper_gvec_vfnms64; + break; + case FPF_EXT: + fn = gen_helper_gvec_vfnms128; + break; + default: + break; + } + break; + default: + g_assert_not_reached(); + } + + if (!fn || extract32(m5, 0, 3)) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } - if (s->fields.op2 == 0x8f) { - fn = se ? gen_helper_gvec_vfma64s : gen_helper_gvec_vfma64; - } else { - fn = se ? gen_helper_gvec_vfms64s : gen_helper_gvec_vfms64; - } gen_gvec_4_ptr(get_field(s, v1), get_field(s, v2), - get_field(s, v3), get_field(s, v4), cpu_env, - 0, fn); + get_field(s, v3), get_field(s, v4), cpu_env, m5, fn); return DISAS_NEXT; } @@ -2633,48 +2954,88 @@ static DisasJumpType op_vfpso(DisasContext *s, DisasOps *o) const uint8_t fpf = get_field(s, m3); const uint8_t m4 = get_field(s, m4); const uint8_t m5 = get_field(s, m5); + const bool se = extract32(m4, 3, 1); TCGv_i64 tmp; - if (fpf != FPF_LONG || extract32(m4, 0, 3) || m5 > 2) { + if ((fpf != FPF_LONG && !s390_has_feat(S390_FEAT_VECTOR_ENH)) || + extract32(m4, 0, 3) || m5 > 2) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } - if (extract32(m4, 3, 1)) { - tmp = tcg_temp_new_i64(); - read_vec_element_i64(tmp, v2, 0, ES_64); - switch (m5) { - case 0: - /* sign bit is inverted (complement) */ - tcg_gen_xori_i64(tmp, tmp, 1ull << 63); - break; - case 1: - /* sign bit is set to one (negative) */ - tcg_gen_ori_i64(tmp, tmp, 1ull << 63); - break; - case 2: - /* sign bit is set to zero (positive) */ - tcg_gen_andi_i64(tmp, tmp, (1ull << 63) - 1); - break; + switch (fpf) { + case FPF_SHORT: + if (!se) { + switch (m5) { + case 0: + /* sign bit is inverted (complement) */ + gen_gvec_fn_2i(xori, ES_32, v1, v2, 1ull << 31); + break; + case 1: + /* sign bit is set to one (negative) */ + gen_gvec_fn_2i(ori, ES_32, v1, v2, 1ull << 31); + break; + case 2: + /* sign bit is set to zero (positive) */ + gen_gvec_fn_2i(andi, ES_32, v1, v2, (1ull << 31) - 1); + break; + } + return DISAS_NEXT; } - write_vec_element_i64(tmp, v1, 0, ES_64); - tcg_temp_free_i64(tmp); - } else { - switch (m5) { - case 0: - /* sign bit is inverted (complement) */ - gen_gvec_fn_2i(xori, ES_64, v1, v2, 1ull << 63); - break; - case 1: - /* sign bit is set to one (negative) */ - gen_gvec_fn_2i(ori, ES_64, v1, v2, 1ull << 63); - break; - case 2: - /* sign bit is set to zero (positive) */ - gen_gvec_fn_2i(andi, ES_64, v1, v2, (1ull << 63) - 1); - break; + break; + case FPF_LONG: + if (!se) { + switch (m5) { + case 0: + /* sign bit is inverted (complement) */ + gen_gvec_fn_2i(xori, ES_64, v1, v2, 1ull << 63); + break; + case 1: + /* sign bit is set to one (negative) */ + gen_gvec_fn_2i(ori, ES_64, v1, v2, 1ull << 63); + break; + case 2: + /* sign bit is set to zero (positive) */ + gen_gvec_fn_2i(andi, ES_64, v1, v2, (1ull << 63) - 1); + break; + } + return DISAS_NEXT; } + break; + case FPF_EXT: + /* Only a single element. */ + break; + default: + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + + /* With a single element, we are only interested in bit 0. 
*/ + tmp = tcg_temp_new_i64(); + read_vec_element_i64(tmp, v2, 0, ES_64); + switch (m5) { + case 0: + /* sign bit is inverted (complement) */ + tcg_gen_xori_i64(tmp, tmp, 1ull << 63); + break; + case 1: + /* sign bit is set to one (negative) */ + tcg_gen_ori_i64(tmp, tmp, 1ull << 63); + break; + case 2: + /* sign bit is set to zero (positive) */ + tcg_gen_andi_i64(tmp, tmp, (1ull << 63) - 1); + break; } + write_vec_element_i64(tmp, v1, 0, ES_64); + + if (fpf == FPF_EXT) { + read_vec_element_i64(tmp, v2, 1, ES_64); + write_vec_element_i64(tmp, v1, 1, ES_64); + } + + tcg_temp_free_i64(tmp); + return DISAS_NEXT; } @@ -2682,18 +3043,32 @@ static DisasJumpType op_vfsq(DisasContext *s, DisasOps *o) { const uint8_t fpf = get_field(s, m3); const uint8_t m4 = get_field(s, m4); - gen_helper_gvec_2_ptr *fn = gen_helper_gvec_vfsq64; + gen_helper_gvec_2_ptr *fn = NULL; - if (fpf != FPF_LONG || extract32(m4, 0, 3)) { + switch (fpf) { + case FPF_SHORT: + if (s390_has_feat(S390_FEAT_VECTOR_ENH)) { + fn = gen_helper_gvec_vfsq32; + } + break; + case FPF_LONG: + fn = gen_helper_gvec_vfsq64; + break; + case FPF_EXT: + if (s390_has_feat(S390_FEAT_VECTOR_ENH)) { + fn = gen_helper_gvec_vfsq128; + } + break; + default: + break; + } + + if (!fn || extract32(m4, 0, 3)) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } - if (extract32(m4, 3, 1)) { - fn = gen_helper_gvec_vfsq64s; - } - gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2), cpu_env, - 0, fn); + gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2), cpu_env, m4, fn); return DISAS_NEXT; } @@ -2702,17 +3077,33 @@ static DisasJumpType op_vftci(DisasContext *s, DisasOps *o) const uint16_t i3 = get_field(s, i3); const uint8_t fpf = get_field(s, m4); const uint8_t m5 = get_field(s, m5); - gen_helper_gvec_2_ptr *fn = gen_helper_gvec_vftci64; + gen_helper_gvec_2_ptr *fn = NULL; + + switch (fpf) { + case FPF_SHORT: + if (s390_has_feat(S390_FEAT_VECTOR_ENH)) { + fn = gen_helper_gvec_vftci32; + } + break; + case FPF_LONG: + fn = gen_helper_gvec_vftci64; + break; + case FPF_EXT: + if (s390_has_feat(S390_FEAT_VECTOR_ENH)) { + fn = gen_helper_gvec_vftci128; + } + break; + default: + break; + } - if (fpf != FPF_LONG || extract32(m5, 0, 3)) { + if (!fn || extract32(m5, 0, 3)) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } - if (extract32(m5, 3, 1)) { - fn = gen_helper_gvec_vftci64s; - } - gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2), cpu_env, i3, fn); + gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2), cpu_env, + deposit32(m5, 4, 12, i3), fn); set_cc_static(s); return DISAS_NEXT; } diff --git a/target/s390x/vec_fpu_helper.c b/target/s390x/vec_fpu_helper.c index c1564e819b..8e2b274547 100644 --- a/target/s390x/vec_fpu_helper.c +++ b/target/s390x/vec_fpu_helper.c @@ -78,9 +78,41 @@ static void handle_ieee_exc(CPUS390XState *env, uint8_t vxc, uint8_t vec_exc, } } -typedef uint64_t (*vop64_2_fn)(uint64_t a, float_status *s); -static void vop64_2(S390Vector *v1, const S390Vector *v2, CPUS390XState *env, - bool s, bool XxC, uint8_t erm, vop64_2_fn fn, +static float32 s390_vec_read_float32(const S390Vector *v, uint8_t enr) +{ + return make_float32(s390_vec_read_element32(v, enr)); +} + +static float64 s390_vec_read_float64(const S390Vector *v, uint8_t enr) +{ + return make_float64(s390_vec_read_element64(v, enr)); +} + +static float128 s390_vec_read_float128(const S390Vector *v) +{ + return make_float128(s390_vec_read_element64(v, 0), + s390_vec_read_element64(v, 1)); +} + +static void s390_vec_write_float32(S390Vector *v, uint8_t 
enr, float32 data) +{ + return s390_vec_write_element32(v, enr, data); +} + +static void s390_vec_write_float64(S390Vector *v, uint8_t enr, float64 data) +{ + return s390_vec_write_element64(v, enr, data); +} + +static void s390_vec_write_float128(S390Vector *v, float128 data) +{ + s390_vec_write_element64(v, 0, data.high); + s390_vec_write_element64(v, 1, data.low); +} + +typedef float32 (*vop32_2_fn)(float32 a, float_status *s); +static void vop32_2(S390Vector *v1, const S390Vector *v2, CPUS390XState *env, + bool s, bool XxC, uint8_t erm, vop32_2_fn fn, uintptr_t retaddr) { uint8_t vxc, vec_exc = 0; @@ -88,10 +120,10 @@ static void vop64_2(S390Vector *v1, const S390Vector *v2, CPUS390XState *env, int i, old_mode; old_mode = s390_swap_bfp_rounding_mode(env, erm); - for (i = 0; i < 2; i++) { - const uint64_t a = s390_vec_read_element64(v2, i); + for (i = 0; i < 4; i++) { + const float32 a = s390_vec_read_float32(v2, i); - s390_vec_write_element64(&tmp, i, fn(a, &env->fpu_status)); + s390_vec_write_float32(&tmp, i, fn(a, &env->fpu_status)); vxc = check_ieee_exc(env, i, XxC, &vec_exc); if (s || vxc) { break; @@ -102,317 +134,374 @@ static void vop64_2(S390Vector *v1, const S390Vector *v2, CPUS390XState *env, *v1 = tmp; } -typedef uint64_t (*vop64_3_fn)(uint64_t a, uint64_t b, float_status *s); -static void vop64_3(S390Vector *v1, const S390Vector *v2, const S390Vector *v3, - CPUS390XState *env, bool s, vop64_3_fn fn, +typedef float64 (*vop64_2_fn)(float64 a, float_status *s); +static void vop64_2(S390Vector *v1, const S390Vector *v2, CPUS390XState *env, + bool s, bool XxC, uint8_t erm, vop64_2_fn fn, uintptr_t retaddr) { uint8_t vxc, vec_exc = 0; S390Vector tmp = {}; - int i; + int i, old_mode; + old_mode = s390_swap_bfp_rounding_mode(env, erm); for (i = 0; i < 2; i++) { - const uint64_t a = s390_vec_read_element64(v2, i); - const uint64_t b = s390_vec_read_element64(v3, i); + const float64 a = s390_vec_read_float64(v2, i); - s390_vec_write_element64(&tmp, i, fn(a, b, &env->fpu_status)); - vxc = check_ieee_exc(env, i, false, &vec_exc); + s390_vec_write_float64(&tmp, i, fn(a, &env->fpu_status)); + vxc = check_ieee_exc(env, i, XxC, &vec_exc); if (s || vxc) { break; } } + s390_restore_bfp_rounding_mode(env, old_mode); handle_ieee_exc(env, vxc, vec_exc, retaddr); *v1 = tmp; } -static uint64_t vfa64(uint64_t a, uint64_t b, float_status *s) +typedef float128 (*vop128_2_fn)(float128 a, float_status *s); +static void vop128_2(S390Vector *v1, const S390Vector *v2, CPUS390XState *env, + bool s, bool XxC, uint8_t erm, vop128_2_fn fn, + uintptr_t retaddr) { - return float64_add(a, b, s); + const float128 a = s390_vec_read_float128(v2); + uint8_t vxc, vec_exc = 0; + S390Vector tmp = {}; + int old_mode; + + old_mode = s390_swap_bfp_rounding_mode(env, erm); + s390_vec_write_float128(&tmp, fn(a, &env->fpu_status)); + vxc = check_ieee_exc(env, 0, XxC, &vec_exc); + s390_restore_bfp_rounding_mode(env, old_mode); + handle_ieee_exc(env, vxc, vec_exc, retaddr); + *v1 = tmp; } -void HELPER(gvec_vfa64)(void *v1, const void *v2, const void *v3, - CPUS390XState *env, uint32_t desc) +static float64 vcdg64(float64 a, float_status *s) { - vop64_3(v1, v2, v3, env, false, vfa64, GETPC()); + return int64_to_float64(a, s); } -void HELPER(gvec_vfa64s)(void *v1, const void *v2, const void *v3, - CPUS390XState *env, uint32_t desc) +static float64 vcdlg64(float64 a, float_status *s) { - vop64_3(v1, v2, v3, env, true, vfa64, GETPC()); + return uint64_to_float64(a, s); } -static int wfc64(const S390Vector *v1, const S390Vector 
*v2, - CPUS390XState *env, bool signal, uintptr_t retaddr) +static float64 vcgd64(float64 a, float_status *s) { - /* only the zero-indexed elements are compared */ - const float64 a = s390_vec_read_element64(v1, 0); - const float64 b = s390_vec_read_element64(v2, 0); - uint8_t vxc, vec_exc = 0; - int cmp; + const float64 tmp = float64_to_int64(a, s); - if (signal) { - cmp = float64_compare(a, b, &env->fpu_status); - } else { - cmp = float64_compare_quiet(a, b, &env->fpu_status); - } - vxc = check_ieee_exc(env, 0, false, &vec_exc); - handle_ieee_exc(env, vxc, vec_exc, retaddr); - - return float_comp_to_cc(env, cmp); + return float64_is_any_nan(a) ? INT64_MIN : tmp; } -void HELPER(gvec_wfc64)(const void *v1, const void *v2, CPUS390XState *env, - uint32_t desc) +static float64 vclgd64(float64 a, float_status *s) { - env->cc_op = wfc64(v1, v2, env, false, GETPC()); + const float64 tmp = float64_to_uint64(a, s); + + return float64_is_any_nan(a) ? 0 : tmp; } -void HELPER(gvec_wfk64)(const void *v1, const void *v2, CPUS390XState *env, - uint32_t desc) -{ - env->cc_op = wfc64(v1, v2, env, true, GETPC()); +#define DEF_GVEC_VOP2_FN(NAME, FN, BITS) \ +void HELPER(gvec_##NAME##BITS)(void *v1, const void *v2, CPUS390XState *env, \ + uint32_t desc) \ +{ \ + const uint8_t erm = extract32(simd_data(desc), 4, 4); \ + const bool se = extract32(simd_data(desc), 3, 1); \ + const bool XxC = extract32(simd_data(desc), 2, 1); \ + \ + vop##BITS##_2(v1, v2, env, se, XxC, erm, FN, GETPC()); \ } -typedef bool (*vfc64_fn)(float64 a, float64 b, float_status *status); -static int vfc64(S390Vector *v1, const S390Vector *v2, const S390Vector *v3, - CPUS390XState *env, bool s, vfc64_fn fn, uintptr_t retaddr) +#define DEF_GVEC_VOP2_64(NAME) \ +DEF_GVEC_VOP2_FN(NAME, NAME##64, 64) + +#define DEF_GVEC_VOP2(NAME, OP) \ +DEF_GVEC_VOP2_FN(NAME, float32_##OP, 32) \ +DEF_GVEC_VOP2_FN(NAME, float64_##OP, 64) \ +DEF_GVEC_VOP2_FN(NAME, float128_##OP, 128) + +DEF_GVEC_VOP2_64(vcdg) +DEF_GVEC_VOP2_64(vcdlg) +DEF_GVEC_VOP2_64(vcgd) +DEF_GVEC_VOP2_64(vclgd) +DEF_GVEC_VOP2(vfi, round_to_int) +DEF_GVEC_VOP2(vfsq, sqrt) + +typedef float32 (*vop32_3_fn)(float32 a, float32 b, float_status *s); +static void vop32_3(S390Vector *v1, const S390Vector *v2, const S390Vector *v3, + CPUS390XState *env, bool s, vop32_3_fn fn, + uintptr_t retaddr) { uint8_t vxc, vec_exc = 0; S390Vector tmp = {}; - int match = 0; int i; - for (i = 0; i < 2; i++) { - const float64 a = s390_vec_read_element64(v2, i); - const float64 b = s390_vec_read_element64(v3, i); + for (i = 0; i < 4; i++) { + const float32 a = s390_vec_read_float32(v2, i); + const float32 b = s390_vec_read_float32(v3, i); - /* swap the order of the parameters, so we can use existing functions */ - if (fn(b, a, &env->fpu_status)) { - match++; - s390_vec_write_element64(&tmp, i, -1ull); - } + s390_vec_write_float32(&tmp, i, fn(a, b, &env->fpu_status)); vxc = check_ieee_exc(env, i, false, &vec_exc); if (s || vxc) { break; } } - handle_ieee_exc(env, vxc, vec_exc, retaddr); *v1 = tmp; - if (match) { - return s || match == 2 ? 
0 : 1; - } - return 3; -} - -void HELPER(gvec_vfce64)(void *v1, const void *v2, const void *v3, - CPUS390XState *env, uint32_t desc) -{ - vfc64(v1, v2, v3, env, false, float64_eq_quiet, GETPC()); } -void HELPER(gvec_vfce64s)(void *v1, const void *v2, const void *v3, - CPUS390XState *env, uint32_t desc) -{ - vfc64(v1, v2, v3, env, true, float64_eq_quiet, GETPC()); -} - -void HELPER(gvec_vfce64_cc)(void *v1, const void *v2, const void *v3, - CPUS390XState *env, uint32_t desc) -{ - env->cc_op = vfc64(v1, v2, v3, env, false, float64_eq_quiet, GETPC()); -} - -void HELPER(gvec_vfce64s_cc)(void *v1, const void *v2, const void *v3, - CPUS390XState *env, uint32_t desc) -{ - env->cc_op = vfc64(v1, v2, v3, env, true, float64_eq_quiet, GETPC()); -} - -void HELPER(gvec_vfch64)(void *v1, const void *v2, const void *v3, - CPUS390XState *env, uint32_t desc) -{ - vfc64(v1, v2, v3, env, false, float64_lt_quiet, GETPC()); -} - -void HELPER(gvec_vfch64s)(void *v1, const void *v2, const void *v3, - CPUS390XState *env, uint32_t desc) +typedef float64 (*vop64_3_fn)(float64 a, float64 b, float_status *s); +static void vop64_3(S390Vector *v1, const S390Vector *v2, const S390Vector *v3, + CPUS390XState *env, bool s, vop64_3_fn fn, + uintptr_t retaddr) { - vfc64(v1, v2, v3, env, true, float64_lt_quiet, GETPC()); -} + uint8_t vxc, vec_exc = 0; + S390Vector tmp = {}; + int i; -void HELPER(gvec_vfch64_cc)(void *v1, const void *v2, const void *v3, - CPUS390XState *env, uint32_t desc) -{ - env->cc_op = vfc64(v1, v2, v3, env, false, float64_lt_quiet, GETPC()); -} + for (i = 0; i < 2; i++) { + const float64 a = s390_vec_read_float64(v2, i); + const float64 b = s390_vec_read_float64(v3, i); -void HELPER(gvec_vfch64s_cc)(void *v1, const void *v2, const void *v3, - CPUS390XState *env, uint32_t desc) -{ - env->cc_op = vfc64(v1, v2, v3, env, true, float64_lt_quiet, GETPC()); + s390_vec_write_float64(&tmp, i, fn(a, b, &env->fpu_status)); + vxc = check_ieee_exc(env, i, false, &vec_exc); + if (s || vxc) { + break; + } + } + handle_ieee_exc(env, vxc, vec_exc, retaddr); + *v1 = tmp; } -void HELPER(gvec_vfche64)(void *v1, const void *v2, const void *v3, - CPUS390XState *env, uint32_t desc) +typedef float128 (*vop128_3_fn)(float128 a, float128 b, float_status *s); +static void vop128_3(S390Vector *v1, const S390Vector *v2, const S390Vector *v3, + CPUS390XState *env, bool s, vop128_3_fn fn, + uintptr_t retaddr) { - vfc64(v1, v2, v3, env, false, float64_le_quiet, GETPC()); -} + const float128 a = s390_vec_read_float128(v2); + const float128 b = s390_vec_read_float128(v3); + uint8_t vxc, vec_exc = 0; + S390Vector tmp = {}; -void HELPER(gvec_vfche64s)(void *v1, const void *v2, const void *v3, - CPUS390XState *env, uint32_t desc) -{ - vfc64(v1, v2, v3, env, true, float64_le_quiet, GETPC()); + s390_vec_write_float128(&tmp, fn(a, b, &env->fpu_status)); + vxc = check_ieee_exc(env, 0, false, &vec_exc); + handle_ieee_exc(env, vxc, vec_exc, retaddr); + *v1 = tmp; } -void HELPER(gvec_vfche64_cc)(void *v1, const void *v2, const void *v3, - CPUS390XState *env, uint32_t desc) -{ - env->cc_op = vfc64(v1, v2, v3, env, false, float64_le_quiet, GETPC()); +#define DEF_GVEC_VOP3_B(NAME, OP, BITS) \ +void HELPER(gvec_##NAME##BITS)(void *v1, const void *v2, const void *v3, \ + CPUS390XState *env, uint32_t desc) \ +{ \ + const bool se = extract32(simd_data(desc), 3, 1); \ + \ + vop##BITS##_3(v1, v2, v3, env, se, float##BITS##_##OP, GETPC()); \ } -void HELPER(gvec_vfche64s_cc)(void *v1, const void *v2, const void *v3, - CPUS390XState *env, uint32_t desc) -{ - 
env->cc_op = vfc64(v1, v2, v3, env, true, float64_le_quiet, GETPC()); -} +#define DEF_GVEC_VOP3(NAME, OP) \ +DEF_GVEC_VOP3_B(NAME, OP, 32) \ +DEF_GVEC_VOP3_B(NAME, OP, 64) \ +DEF_GVEC_VOP3_B(NAME, OP, 128) -static uint64_t vcdg64(uint64_t a, float_status *s) -{ - return int64_to_float64(a, s); -} +DEF_GVEC_VOP3(vfa, add) +DEF_GVEC_VOP3(vfs, sub) +DEF_GVEC_VOP3(vfd, div) +DEF_GVEC_VOP3(vfm, mul) -void HELPER(gvec_vcdg64)(void *v1, const void *v2, CPUS390XState *env, - uint32_t desc) +static int wfc32(const S390Vector *v1, const S390Vector *v2, + CPUS390XState *env, bool signal, uintptr_t retaddr) { - const uint8_t erm = extract32(simd_data(desc), 4, 4); - const bool XxC = extract32(simd_data(desc), 2, 1); - - vop64_2(v1, v2, env, false, XxC, erm, vcdg64, GETPC()); -} + /* only the zero-indexed elements are compared */ + const float32 a = s390_vec_read_float32(v1, 0); + const float32 b = s390_vec_read_float32(v2, 0); + uint8_t vxc, vec_exc = 0; + int cmp; -void HELPER(gvec_vcdg64s)(void *v1, const void *v2, CPUS390XState *env, - uint32_t desc) -{ - const uint8_t erm = extract32(simd_data(desc), 4, 4); - const bool XxC = extract32(simd_data(desc), 2, 1); + if (signal) { + cmp = float32_compare(a, b, &env->fpu_status); + } else { + cmp = float32_compare_quiet(a, b, &env->fpu_status); + } + vxc = check_ieee_exc(env, 0, false, &vec_exc); + handle_ieee_exc(env, vxc, vec_exc, retaddr); - vop64_2(v1, v2, env, true, XxC, erm, vcdg64, GETPC()); + return float_comp_to_cc(env, cmp); } -static uint64_t vcdlg64(uint64_t a, float_status *s) +static int wfc64(const S390Vector *v1, const S390Vector *v2, + CPUS390XState *env, bool signal, uintptr_t retaddr) { - return uint64_to_float64(a, s); -} + /* only the zero-indexed elements are compared */ + const float64 a = s390_vec_read_float64(v1, 0); + const float64 b = s390_vec_read_float64(v2, 0); + uint8_t vxc, vec_exc = 0; + int cmp; -void HELPER(gvec_vcdlg64)(void *v1, const void *v2, CPUS390XState *env, - uint32_t desc) -{ - const uint8_t erm = extract32(simd_data(desc), 4, 4); - const bool XxC = extract32(simd_data(desc), 2, 1); + if (signal) { + cmp = float64_compare(a, b, &env->fpu_status); + } else { + cmp = float64_compare_quiet(a, b, &env->fpu_status); + } + vxc = check_ieee_exc(env, 0, false, &vec_exc); + handle_ieee_exc(env, vxc, vec_exc, retaddr); - vop64_2(v1, v2, env, false, XxC, erm, vcdlg64, GETPC()); + return float_comp_to_cc(env, cmp); } -void HELPER(gvec_vcdlg64s)(void *v1, const void *v2, CPUS390XState *env, - uint32_t desc) +static int wfc128(const S390Vector *v1, const S390Vector *v2, + CPUS390XState *env, bool signal, uintptr_t retaddr) { - const uint8_t erm = extract32(simd_data(desc), 4, 4); - const bool XxC = extract32(simd_data(desc), 2, 1); + /* only the zero-indexed elements are compared */ + const float128 a = s390_vec_read_float128(v1); + const float128 b = s390_vec_read_float128(v2); + uint8_t vxc, vec_exc = 0; + int cmp; - vop64_2(v1, v2, env, true, XxC, erm, vcdlg64, GETPC()); -} + if (signal) { + cmp = float128_compare(a, b, &env->fpu_status); + } else { + cmp = float128_compare_quiet(a, b, &env->fpu_status); + } + vxc = check_ieee_exc(env, 0, false, &vec_exc); + handle_ieee_exc(env, vxc, vec_exc, retaddr); -static uint64_t vcgd64(uint64_t a, float_status *s) -{ - return float64_to_int64(a, s); + return float_comp_to_cc(env, cmp); } -void HELPER(gvec_vcgd64)(void *v1, const void *v2, CPUS390XState *env, - uint32_t desc) -{ - const uint8_t erm = extract32(simd_data(desc), 4, 4); - const bool XxC = extract32(simd_data(desc), 2, 
1); - - vop64_2(v1, v2, env, false, XxC, erm, vcgd64, GETPC()); +#define DEF_GVEC_WFC_B(NAME, SIGNAL, BITS) \ +void HELPER(gvec_##NAME##BITS)(const void *v1, const void *v2, \ + CPUS390XState *env, uint32_t desc) \ +{ \ + env->cc_op = wfc##BITS(v1, v2, env, SIGNAL, GETPC()); \ } -void HELPER(gvec_vcgd64s)(void *v1, const void *v2, CPUS390XState *env, - uint32_t desc) -{ - const uint8_t erm = extract32(simd_data(desc), 4, 4); - const bool XxC = extract32(simd_data(desc), 2, 1); - - vop64_2(v1, v2, env, true, XxC, erm, vcgd64, GETPC()); -} +#define DEF_GVEC_WFC(NAME, SIGNAL) \ + DEF_GVEC_WFC_B(NAME, SIGNAL, 32) \ + DEF_GVEC_WFC_B(NAME, SIGNAL, 64) \ + DEF_GVEC_WFC_B(NAME, SIGNAL, 128) -static uint64_t vclgd64(uint64_t a, float_status *s) -{ - return float64_to_uint64(a, s); -} +DEF_GVEC_WFC(wfc, false) +DEF_GVEC_WFC(wfk, true) -void HELPER(gvec_vclgd64)(void *v1, const void *v2, CPUS390XState *env, - uint32_t desc) +typedef bool (*vfc32_fn)(float32 a, float32 b, float_status *status); +static int vfc32(S390Vector *v1, const S390Vector *v2, const S390Vector *v3, + CPUS390XState *env, bool s, vfc32_fn fn, uintptr_t retaddr) { - const uint8_t erm = extract32(simd_data(desc), 4, 4); - const bool XxC = extract32(simd_data(desc), 2, 1); + uint8_t vxc, vec_exc = 0; + S390Vector tmp = {}; + int match = 0; + int i; - vop64_2(v1, v2, env, false, XxC, erm, vclgd64, GETPC()); -} + for (i = 0; i < 4; i++) { + const float32 a = s390_vec_read_float32(v2, i); + const float32 b = s390_vec_read_float32(v3, i); -void HELPER(gvec_vclgd64s)(void *v1, const void *v2, CPUS390XState *env, - uint32_t desc) -{ - const uint8_t erm = extract32(simd_data(desc), 4, 4); - const bool XxC = extract32(simd_data(desc), 2, 1); + /* swap the order of the parameters, so we can use existing functions */ + if (fn(b, a, &env->fpu_status)) { + match++; + s390_vec_write_element32(&tmp, i, -1u); + } + vxc = check_ieee_exc(env, i, false, &vec_exc); + if (s || vxc) { + break; + } + } - vop64_2(v1, v2, env, true, XxC, erm, vclgd64, GETPC()); + handle_ieee_exc(env, vxc, vec_exc, retaddr); + *v1 = tmp; + if (match) { + return s || match == 4 ? 0 : 1; + } + return 3; } -static uint64_t vfd64(uint64_t a, uint64_t b, float_status *s) +typedef bool (*vfc64_fn)(float64 a, float64 b, float_status *status); +static int vfc64(S390Vector *v1, const S390Vector *v2, const S390Vector *v3, + CPUS390XState *env, bool s, vfc64_fn fn, uintptr_t retaddr) { - return float64_div(a, b, s); -} + uint8_t vxc, vec_exc = 0; + S390Vector tmp = {}; + int match = 0; + int i; -void HELPER(gvec_vfd64)(void *v1, const void *v2, const void *v3, - CPUS390XState *env, uint32_t desc) -{ - vop64_3(v1, v2, v3, env, false, vfd64, GETPC()); -} + for (i = 0; i < 2; i++) { + const float64 a = s390_vec_read_float64(v2, i); + const float64 b = s390_vec_read_float64(v3, i); -void HELPER(gvec_vfd64s)(void *v1, const void *v2, const void *v3, - CPUS390XState *env, uint32_t desc) -{ - vop64_3(v1, v2, v3, env, true, vfd64, GETPC()); -} + /* swap the order of the parameters, so we can use existing functions */ + if (fn(b, a, &env->fpu_status)) { + match++; + s390_vec_write_element64(&tmp, i, -1ull); + } + vxc = check_ieee_exc(env, i, false, &vec_exc); + if (s || vxc) { + break; + } + } -static uint64_t vfi64(uint64_t a, float_status *s) -{ - return float64_round_to_int(a, s); + handle_ieee_exc(env, vxc, vec_exc, retaddr); + *v1 = tmp; + if (match) { + return s || match == 2 ? 
0 : 1; + } + return 3; } -void HELPER(gvec_vfi64)(void *v1, const void *v2, CPUS390XState *env, - uint32_t desc) +typedef bool (*vfc128_fn)(float128 a, float128 b, float_status *status); +static int vfc128(S390Vector *v1, const S390Vector *v2, const S390Vector *v3, + CPUS390XState *env, bool s, vfc128_fn fn, uintptr_t retaddr) { - const uint8_t erm = extract32(simd_data(desc), 4, 4); - const bool XxC = extract32(simd_data(desc), 2, 1); + const float128 a = s390_vec_read_float128(v2); + const float128 b = s390_vec_read_float128(v3); + uint8_t vxc, vec_exc = 0; + S390Vector tmp = {}; + bool match = false; - vop64_2(v1, v2, env, false, XxC, erm, vfi64, GETPC()); -} + /* swap the order of the parameters, so we can use existing functions */ + if (fn(b, a, &env->fpu_status)) { + match = true; + s390_vec_write_element64(&tmp, 0, -1ull); + s390_vec_write_element64(&tmp, 1, -1ull); + } + vxc = check_ieee_exc(env, 0, false, &vec_exc); + handle_ieee_exc(env, vxc, vec_exc, retaddr); + *v1 = tmp; + return match ? 0 : 3; +} + +#define DEF_GVEC_VFC_B(NAME, OP, BITS) \ +void HELPER(gvec_##NAME##BITS)(void *v1, const void *v2, const void *v3, \ + CPUS390XState *env, uint32_t desc) \ +{ \ + const bool se = extract32(simd_data(desc), 3, 1); \ + const bool sq = extract32(simd_data(desc), 2, 1); \ + vfc##BITS##_fn fn = sq ? float##BITS##_##OP : float##BITS##_##OP##_quiet; \ + \ + vfc##BITS(v1, v2, v3, env, se, fn, GETPC()); \ +} \ + \ +void HELPER(gvec_##NAME##BITS##_cc)(void *v1, const void *v2, const void *v3, \ + CPUS390XState *env, uint32_t desc) \ +{ \ + const bool se = extract32(simd_data(desc), 3, 1); \ + const bool sq = extract32(simd_data(desc), 2, 1); \ + vfc##BITS##_fn fn = sq ? float##BITS##_##OP : float##BITS##_##OP##_quiet; \ + \ + env->cc_op = vfc##BITS(v1, v2, v3, env, se, fn, GETPC()); \ +} + +#define DEF_GVEC_VFC(NAME, OP) \ +DEF_GVEC_VFC_B(NAME, OP, 32) \ +DEF_GVEC_VFC_B(NAME, OP, 64) \ +DEF_GVEC_VFC_B(NAME, OP, 128) \ + +DEF_GVEC_VFC(vfce, eq) +DEF_GVEC_VFC(vfch, lt) +DEF_GVEC_VFC(vfche, le) -void HELPER(gvec_vfi64s)(void *v1, const void *v2, CPUS390XState *env, +void HELPER(gvec_vfll32)(void *v1, const void *v2, CPUS390XState *env, uint32_t desc) { - const uint8_t erm = extract32(simd_data(desc), 4, 4); - const bool XxC = extract32(simd_data(desc), 2, 1); - - vop64_2(v1, v2, env, true, XxC, erm, vfi64, GETPC()); -} - -static void vfll32(S390Vector *v1, const S390Vector *v2, CPUS390XState *env, - bool s, uintptr_t retaddr) -{ + const bool s = extract32(simd_data(desc), 3, 1); uint8_t vxc, vec_exc = 0; S390Vector tmp = {}; int i; @@ -429,25 +518,29 @@ static void vfll32(S390Vector *v1, const S390Vector *v2, CPUS390XState *env, break; } } - handle_ieee_exc(env, vxc, vec_exc, retaddr); - *v1 = tmp; + handle_ieee_exc(env, vxc, vec_exc, GETPC()); + *(S390Vector *)v1 = tmp; } -void HELPER(gvec_vfll32)(void *v1, const void *v2, CPUS390XState *env, +void HELPER(gvec_vfll64)(void *v1, const void *v2, CPUS390XState *env, uint32_t desc) { - vfll32(v1, v2, env, false, GETPC()); -} + /* load from even element */ + const float128 ret = float64_to_float128(s390_vec_read_float64(v2, 0), + &env->fpu_status); + uint8_t vxc, vec_exc = 0; -void HELPER(gvec_vfll32s)(void *v1, const void *v2, CPUS390XState *env, - uint32_t desc) -{ - vfll32(v1, v2, env, true, GETPC()); + vxc = check_ieee_exc(env, 0, false, &vec_exc); + handle_ieee_exc(env, vxc, vec_exc, GETPC()); + s390_vec_write_float128(v1, ret); } -static void vflr64(S390Vector *v1, const S390Vector *v2, CPUS390XState *env, - bool s, bool XxC, uint8_t erm, 
uintptr_t retaddr) +void HELPER(gvec_vflr64)(void *v1, const void *v2, CPUS390XState *env, + uint32_t desc) { + const uint8_t erm = extract32(simd_data(desc), 4, 4); + const bool s = extract32(simd_data(desc), 3, 1); + const bool XxC = extract32(simd_data(desc), 2, 1); uint8_t vxc, vec_exc = 0; S390Vector tmp = {}; int i, old_mode; @@ -466,43 +559,51 @@ static void vflr64(S390Vector *v1, const S390Vector *v2, CPUS390XState *env, } } s390_restore_bfp_rounding_mode(env, old_mode); - handle_ieee_exc(env, vxc, vec_exc, retaddr); - *v1 = tmp; -} - -void HELPER(gvec_vflr64)(void *v1, const void *v2, CPUS390XState *env, - uint32_t desc) -{ - const uint8_t erm = extract32(simd_data(desc), 4, 4); - const bool XxC = extract32(simd_data(desc), 2, 1); - - vflr64(v1, v2, env, false, XxC, erm, GETPC()); + handle_ieee_exc(env, vxc, vec_exc, GETPC()); + *(S390Vector *)v1 = tmp; } -void HELPER(gvec_vflr64s)(void *v1, const void *v2, CPUS390XState *env, +void HELPER(gvec_vflr128)(void *v1, const void *v2, CPUS390XState *env, uint32_t desc) { const uint8_t erm = extract32(simd_data(desc), 4, 4); const bool XxC = extract32(simd_data(desc), 2, 1); + uint8_t vxc, vec_exc = 0; + int old_mode; + float64 ret; - vflr64(v1, v2, env, true, XxC, erm, GETPC()); -} + old_mode = s390_swap_bfp_rounding_mode(env, erm); + ret = float128_to_float64(s390_vec_read_float128(v2), &env->fpu_status); + vxc = check_ieee_exc(env, 0, XxC, &vec_exc); + s390_restore_bfp_rounding_mode(env, old_mode); + handle_ieee_exc(env, vxc, vec_exc, GETPC()); -static uint64_t vfm64(uint64_t a, uint64_t b, float_status *s) -{ - return float64_mul(a, b, s); + /* place at even element, odd element is unpredictable */ + s390_vec_write_float64(v1, 0, ret); } -void HELPER(gvec_vfm64)(void *v1, const void *v2, const void *v3, - CPUS390XState *env, uint32_t desc) +static void vfma32(S390Vector *v1, const S390Vector *v2, const S390Vector *v3, + const S390Vector *v4, CPUS390XState *env, bool s, int flags, + uintptr_t retaddr) { - vop64_3(v1, v2, v3, env, false, vfm64, GETPC()); -} + uint8_t vxc, vec_exc = 0; + S390Vector tmp = {}; + int i; -void HELPER(gvec_vfm64s)(void *v1, const void *v2, const void *v3, - CPUS390XState *env, uint32_t desc) -{ - vop64_3(v1, v2, v3, env, true, vfm64, GETPC()); + for (i = 0; i < 4; i++) { + const float32 a = s390_vec_read_float32(v2, i); + const float32 b = s390_vec_read_float32(v3, i); + const float32 c = s390_vec_read_float32(v4, i); + float32 ret = float32_muladd(a, b, c, flags, &env->fpu_status); + + s390_vec_write_float32(&tmp, i, ret); + vxc = check_ieee_exc(env, i, false, &vec_exc); + if (s || vxc) { + break; + } + } + handle_ieee_exc(env, vxc, vec_exc, retaddr); + *v1 = tmp; } static void vfma64(S390Vector *v1, const S390Vector *v2, const S390Vector *v3, @@ -514,12 +615,12 @@ static void vfma64(S390Vector *v1, const S390Vector *v2, const S390Vector *v3, int i; for (i = 0; i < 2; i++) { - const uint64_t a = s390_vec_read_element64(v2, i); - const uint64_t b = s390_vec_read_element64(v3, i); - const uint64_t c = s390_vec_read_element64(v4, i); - uint64_t ret = float64_muladd(a, b, c, flags, &env->fpu_status); + const float64 a = s390_vec_read_float64(v2, i); + const float64 b = s390_vec_read_float64(v3, i); + const float64 c = s390_vec_read_float64(v4, i); + const float64 ret = float64_muladd(a, b, c, flags, &env->fpu_status); - s390_vec_write_element64(&tmp, i, ret); + s390_vec_write_float64(&tmp, i, ret); vxc = check_ieee_exc(env, i, false, &vec_exc); if (s || vxc) { break; @@ -529,71 +630,81 @@ static void 
vfma64(S390Vector *v1, const S390Vector *v2, const S390Vector *v3, *v1 = tmp; } -void HELPER(gvec_vfma64)(void *v1, const void *v2, const void *v3, - const void *v4, CPUS390XState *env, uint32_t desc) +static void vfma128(S390Vector *v1, const S390Vector *v2, const S390Vector *v3, + const S390Vector *v4, CPUS390XState *env, bool s, int flags, + uintptr_t retaddr) { - vfma64(v1, v2, v3, v4, env, false, 0, GETPC()); -} + const float128 a = s390_vec_read_float128(v2); + const float128 b = s390_vec_read_float128(v3); + const float128 c = s390_vec_read_float128(v4); + uint8_t vxc, vec_exc = 0; + float128 ret; -void HELPER(gvec_vfma64s)(void *v1, const void *v2, const void *v3, - const void *v4, CPUS390XState *env, uint32_t desc) -{ - vfma64(v1, v2, v3, v4, env, true, 0, GETPC()); + ret = float128_muladd(a, b, c, flags, &env->fpu_status); + vxc = check_ieee_exc(env, 0, false, &vec_exc); + handle_ieee_exc(env, vxc, vec_exc, retaddr); + s390_vec_write_float128(v1, ret); } -void HELPER(gvec_vfms64)(void *v1, const void *v2, const void *v3, - const void *v4, CPUS390XState *env, uint32_t desc) -{ - vfma64(v1, v2, v3, v4, env, false, float_muladd_negate_c, GETPC()); +#define DEF_GVEC_VFMA_B(NAME, FLAGS, BITS) \ +void HELPER(gvec_##NAME##BITS)(void *v1, const void *v2, const void *v3, \ + const void *v4, CPUS390XState *env, \ + uint32_t desc) \ +{ \ + const bool se = extract32(simd_data(desc), 3, 1); \ + \ + vfma##BITS(v1, v2, v3, v4, env, se, FLAGS, GETPC()); \ } -void HELPER(gvec_vfms64s)(void *v1, const void *v2, const void *v3, - const void *v4, CPUS390XState *env, uint32_t desc) -{ - vfma64(v1, v2, v3, v4, env, true, float_muladd_negate_c, GETPC()); -} +#define DEF_GVEC_VFMA(NAME, FLAGS) \ + DEF_GVEC_VFMA_B(NAME, FLAGS, 32) \ + DEF_GVEC_VFMA_B(NAME, FLAGS, 64) \ + DEF_GVEC_VFMA_B(NAME, FLAGS, 128) -static uint64_t vfsq64(uint64_t a, float_status *s) -{ - return float64_sqrt(a, s); -} +DEF_GVEC_VFMA(vfma, 0) +DEF_GVEC_VFMA(vfms, float_muladd_negate_c) +DEF_GVEC_VFMA(vfnma, float_muladd_negate_result) +DEF_GVEC_VFMA(vfnms, float_muladd_negate_c | float_muladd_negate_result) -void HELPER(gvec_vfsq64)(void *v1, const void *v2, CPUS390XState *env, - uint32_t desc) -{ - vop64_2(v1, v2, env, false, false, 0, vfsq64, GETPC()); -} - -void HELPER(gvec_vfsq64s)(void *v1, const void *v2, CPUS390XState *env, +void HELPER(gvec_vftci32)(void *v1, const void *v2, CPUS390XState *env, uint32_t desc) { - vop64_2(v1, v2, env, true, false, 0, vfsq64, GETPC()); -} + uint16_t i3 = extract32(simd_data(desc), 4, 12); + bool s = extract32(simd_data(desc), 3, 1); + int i, match = 0; -static uint64_t vfs64(uint64_t a, uint64_t b, float_status *s) -{ - return float64_sub(a, b, s); -} + for (i = 0; i < 4; i++) { + float32 a = s390_vec_read_float32(v2, i); -void HELPER(gvec_vfs64)(void *v1, const void *v2, const void *v3, - CPUS390XState *env, uint32_t desc) -{ - vop64_3(v1, v2, v3, env, false, vfs64, GETPC()); -} + if (float32_dcmask(env, a) & i3) { + match++; + s390_vec_write_element32(v1, i, -1u); + } else { + s390_vec_write_element32(v1, i, 0); + } + if (s) { + break; + } + } -void HELPER(gvec_vfs64s)(void *v1, const void *v2, const void *v3, - CPUS390XState *env, uint32_t desc) -{ - vop64_3(v1, v2, v3, env, true, vfs64, GETPC()); + if (match == 4 || (s && match)) { + env->cc_op = 0; + } else if (match) { + env->cc_op = 1; + } else { + env->cc_op = 3; + } } -static int vftci64(S390Vector *v1, const S390Vector *v2, CPUS390XState *env, - bool s, uint16_t i3) +void HELPER(gvec_vftci64)(void *v1, const void *v2, CPUS390XState 
*env, + uint32_t desc) { + const uint16_t i3 = extract32(simd_data(desc), 4, 12); + const bool s = extract32(simd_data(desc), 3, 1); int i, match = 0; for (i = 0; i < 2; i++) { - float64 a = s390_vec_read_element64(v2, i); + const float64 a = s390_vec_read_float64(v2, i); if (float64_dcmask(env, a) & i3) { match++; @@ -606,20 +717,356 @@ static int vftci64(S390Vector *v1, const S390Vector *v2, CPUS390XState *env, } } - if (match) { - return s || match == 2 ? 0 : 1; + if (match == 2 || (s && match)) { + env->cc_op = 0; + } else if (match) { + env->cc_op = 1; + } else { + env->cc_op = 3; } - return 3; } -void HELPER(gvec_vftci64)(void *v1, const void *v2, CPUS390XState *env, - uint32_t desc) +void HELPER(gvec_vftci128)(void *v1, const void *v2, CPUS390XState *env, + uint32_t desc) { - env->cc_op = vftci64(v1, v2, env, false, simd_data(desc)); + const float128 a = s390_vec_read_float128(v2); + uint16_t i3 = extract32(simd_data(desc), 4, 12); + + if (float128_dcmask(env, a) & i3) { + env->cc_op = 0; + s390_vec_write_element64(v1, 0, -1ull); + s390_vec_write_element64(v1, 1, -1ull); + } else { + env->cc_op = 3; + s390_vec_write_element64(v1, 0, 0); + s390_vec_write_element64(v1, 1, 0); + } } -void HELPER(gvec_vftci64s)(void *v1, const void *v2, CPUS390XState *env, - uint32_t desc) +typedef enum S390MinMaxType { + S390_MINMAX_TYPE_IEEE = 0, + S390_MINMAX_TYPE_JAVA, + S390_MINMAX_TYPE_C_MACRO, + S390_MINMAX_TYPE_CPP, + S390_MINMAX_TYPE_F, +} S390MinMaxType; + +typedef enum S390MinMaxRes { + S390_MINMAX_RES_MINMAX = 0, + S390_MINMAX_RES_A, + S390_MINMAX_RES_B, + S390_MINMAX_RES_SILENCE_A, + S390_MINMAX_RES_SILENCE_B, +} S390MinMaxRes; + +static S390MinMaxRes vfmin_res(uint16_t dcmask_a, uint16_t dcmask_b, + S390MinMaxType type, float_status *s) +{ + const bool neg_a = dcmask_a & DCMASK_NEGATIVE; + const bool nan_a = dcmask_a & DCMASK_NAN; + const bool nan_b = dcmask_b & DCMASK_NAN; + + g_assert(type > S390_MINMAX_TYPE_IEEE && type <= S390_MINMAX_TYPE_F); + + if (unlikely((dcmask_a | dcmask_b) & DCMASK_NAN)) { + const bool sig_a = dcmask_a & DCMASK_SIGNALING_NAN; + const bool sig_b = dcmask_b & DCMASK_SIGNALING_NAN; + + if ((dcmask_a | dcmask_b) & DCMASK_SIGNALING_NAN) { + s->float_exception_flags |= float_flag_invalid; + } + switch (type) { + case S390_MINMAX_TYPE_JAVA: + if (sig_a) { + return S390_MINMAX_RES_SILENCE_A; + } else if (sig_b) { + return S390_MINMAX_RES_SILENCE_B; + } + return nan_a ? S390_MINMAX_RES_A : S390_MINMAX_RES_B; + case S390_MINMAX_TYPE_F: + return nan_b ? S390_MINMAX_RES_A : S390_MINMAX_RES_B; + case S390_MINMAX_TYPE_C_MACRO: + s->float_exception_flags |= float_flag_invalid; + return S390_MINMAX_RES_B; + case S390_MINMAX_TYPE_CPP: + s->float_exception_flags |= float_flag_invalid; + return S390_MINMAX_RES_A; + default: + g_assert_not_reached(); + } + } else if (unlikely(dcmask_a & dcmask_b & DCMASK_ZERO)) { + switch (type) { + case S390_MINMAX_TYPE_JAVA: + return neg_a ? S390_MINMAX_RES_A : S390_MINMAX_RES_B; + case S390_MINMAX_TYPE_C_MACRO: + return S390_MINMAX_RES_B; + case S390_MINMAX_TYPE_F: + return !neg_a ? 
S390_MINMAX_RES_B : S390_MINMAX_RES_A; + case S390_MINMAX_TYPE_CPP: + return S390_MINMAX_RES_A; + default: + g_assert_not_reached(); + } + } + return S390_MINMAX_RES_MINMAX; +} + +static S390MinMaxRes vfmax_res(uint16_t dcmask_a, uint16_t dcmask_b, + S390MinMaxType type, float_status *s) +{ + g_assert(type > S390_MINMAX_TYPE_IEEE && type <= S390_MINMAX_TYPE_F); + + if (unlikely((dcmask_a | dcmask_b) & DCMASK_NAN)) { + const bool sig_a = dcmask_a & DCMASK_SIGNALING_NAN; + const bool sig_b = dcmask_b & DCMASK_SIGNALING_NAN; + const bool nan_a = dcmask_a & DCMASK_NAN; + const bool nan_b = dcmask_b & DCMASK_NAN; + + if ((dcmask_a | dcmask_b) & DCMASK_SIGNALING_NAN) { + s->float_exception_flags |= float_flag_invalid; + } + switch (type) { + case S390_MINMAX_TYPE_JAVA: + if (sig_a) { + return S390_MINMAX_RES_SILENCE_A; + } else if (sig_b) { + return S390_MINMAX_RES_SILENCE_B; + } + return nan_a ? S390_MINMAX_RES_A : S390_MINMAX_RES_B; + case S390_MINMAX_TYPE_F: + return nan_b ? S390_MINMAX_RES_A : S390_MINMAX_RES_B; + case S390_MINMAX_TYPE_C_MACRO: + s->float_exception_flags |= float_flag_invalid; + return S390_MINMAX_RES_B; + case S390_MINMAX_TYPE_CPP: + s->float_exception_flags |= float_flag_invalid; + return S390_MINMAX_RES_A; + default: + g_assert_not_reached(); + } + } else if (unlikely(dcmask_a & dcmask_b & DCMASK_ZERO)) { + const bool neg_a = dcmask_a & DCMASK_NEGATIVE; + + switch (type) { + case S390_MINMAX_TYPE_JAVA: + case S390_MINMAX_TYPE_F: + return neg_a ? S390_MINMAX_RES_B : S390_MINMAX_RES_A; + case S390_MINMAX_TYPE_C_MACRO: + return S390_MINMAX_RES_B; + case S390_MINMAX_TYPE_CPP: + return S390_MINMAX_RES_A; + default: + g_assert_not_reached(); + } + } + return S390_MINMAX_RES_MINMAX; +} + +static S390MinMaxRes vfminmax_res(uint16_t dcmask_a, uint16_t dcmask_b, + S390MinMaxType type, bool is_min, + float_status *s) +{ + return is_min ? vfmin_res(dcmask_a, dcmask_b, type, s) : + vfmax_res(dcmask_a, dcmask_b, type, s); +} + +static void vfminmax32(S390Vector *v1, const S390Vector *v2, + const S390Vector *v3, CPUS390XState *env, + S390MinMaxType type, bool is_min, bool is_abs, bool se, + uintptr_t retaddr) +{ + float_status *s = &env->fpu_status; + uint8_t vxc, vec_exc = 0; + S390Vector tmp = {}; + int i; + + for (i = 0; i < 4; i++) { + float32 a = s390_vec_read_float32(v2, i); + float32 b = s390_vec_read_float32(v3, i); + float32 result; + + if (type != S390_MINMAX_TYPE_IEEE) { + S390MinMaxRes res; + + if (is_abs) { + a = float32_abs(a); + b = float32_abs(b); + } + + res = vfminmax_res(float32_dcmask(env, a), float32_dcmask(env, b), + type, is_min, s); + switch (res) { + case S390_MINMAX_RES_MINMAX: + result = is_min ? float32_min(a, b, s) : float32_max(a, b, s); + break; + case S390_MINMAX_RES_A: + result = a; + break; + case S390_MINMAX_RES_B: + result = b; + break; + case S390_MINMAX_RES_SILENCE_A: + result = float32_silence_nan(a, s); + break; + case S390_MINMAX_RES_SILENCE_B: + result = float32_silence_nan(b, s); + break; + default: + g_assert_not_reached(); + } + } else if (!is_abs) { + result = is_min ? float32_minnum(a, b, &env->fpu_status) : + float32_maxnum(a, b, &env->fpu_status); + } else { + result = is_min ? 
float32_minnummag(a, b, &env->fpu_status) : + float32_maxnummag(a, b, &env->fpu_status); + } + + s390_vec_write_float32(&tmp, i, result); + vxc = check_ieee_exc(env, i, false, &vec_exc); + if (se || vxc) { + break; + } + } + handle_ieee_exc(env, vxc, vec_exc, retaddr); + *v1 = tmp; +} + +static void vfminmax64(S390Vector *v1, const S390Vector *v2, + const S390Vector *v3, CPUS390XState *env, + S390MinMaxType type, bool is_min, bool is_abs, bool se, + uintptr_t retaddr) { - env->cc_op = vftci64(v1, v2, env, true, simd_data(desc)); + float_status *s = &env->fpu_status; + uint8_t vxc, vec_exc = 0; + S390Vector tmp = {}; + int i; + + for (i = 0; i < 2; i++) { + float64 a = s390_vec_read_float64(v2, i); + float64 b = s390_vec_read_float64(v3, i); + float64 result; + + if (type != S390_MINMAX_TYPE_IEEE) { + S390MinMaxRes res; + + if (is_abs) { + a = float64_abs(a); + b = float64_abs(b); + } + + res = vfminmax_res(float64_dcmask(env, a), float64_dcmask(env, b), + type, is_min, s); + switch (res) { + case S390_MINMAX_RES_MINMAX: + result = is_min ? float64_min(a, b, s) : float64_max(a, b, s); + break; + case S390_MINMAX_RES_A: + result = a; + break; + case S390_MINMAX_RES_B: + result = b; + break; + case S390_MINMAX_RES_SILENCE_A: + result = float64_silence_nan(a, s); + break; + case S390_MINMAX_RES_SILENCE_B: + result = float64_silence_nan(b, s); + break; + default: + g_assert_not_reached(); + } + } else if (!is_abs) { + result = is_min ? float64_minnum(a, b, &env->fpu_status) : + float64_maxnum(a, b, &env->fpu_status); + } else { + result = is_min ? float64_minnummag(a, b, &env->fpu_status) : + float64_maxnummag(a, b, &env->fpu_status); + } + + s390_vec_write_float64(&tmp, i, result); + vxc = check_ieee_exc(env, i, false, &vec_exc); + if (se || vxc) { + break; + } + } + handle_ieee_exc(env, vxc, vec_exc, retaddr); + *v1 = tmp; } + +static void vfminmax128(S390Vector *v1, const S390Vector *v2, + const S390Vector *v3, CPUS390XState *env, + S390MinMaxType type, bool is_min, bool is_abs, bool se, + uintptr_t retaddr) +{ + float128 a = s390_vec_read_float128(v2); + float128 b = s390_vec_read_float128(v3); + float_status *s = &env->fpu_status; + uint8_t vxc, vec_exc = 0; + float128 result; + + if (type != S390_MINMAX_TYPE_IEEE) { + S390MinMaxRes res; + + if (is_abs) { + a = float128_abs(a); + b = float128_abs(b); + } + + res = vfminmax_res(float128_dcmask(env, a), float128_dcmask(env, b), + type, is_min, s); + switch (res) { + case S390_MINMAX_RES_MINMAX: + result = is_min ? float128_min(a, b, s) : float128_max(a, b, s); + break; + case S390_MINMAX_RES_A: + result = a; + break; + case S390_MINMAX_RES_B: + result = b; + break; + case S390_MINMAX_RES_SILENCE_A: + result = float128_silence_nan(a, s); + break; + case S390_MINMAX_RES_SILENCE_B: + result = float128_silence_nan(b, s); + break; + default: + g_assert_not_reached(); + } + } else if (!is_abs) { + result = is_min ? float128_minnum(a, b, &env->fpu_status) : + float128_maxnum(a, b, &env->fpu_status); + } else { + result = is_min ? 
float128_minnummag(a, b, &env->fpu_status) : + float128_maxnummag(a, b, &env->fpu_status); + } + + vxc = check_ieee_exc(env, 0, false, &vec_exc); + handle_ieee_exc(env, vxc, vec_exc, retaddr); + s390_vec_write_float128(v1, result); +} + +#define DEF_GVEC_VFMINMAX_B(NAME, IS_MIN, BITS) \ +void HELPER(gvec_##NAME##BITS)(void *v1, const void *v2, const void *v3, \ + CPUS390XState *env, uint32_t desc) \ +{ \ + const bool se = extract32(simd_data(desc), 3, 1); \ + uint8_t type = extract32(simd_data(desc), 4, 4); \ + bool is_abs = false; \ + \ + if (type >= 8) { \ + is_abs = true; \ + type -= 8; \ + } \ + \ + vfminmax##BITS(v1, v2, v3, env, type, IS_MIN, is_abs, se, GETPC()); \ +} + +#define DEF_GVEC_VFMINMAX(NAME, IS_MIN) \ + DEF_GVEC_VFMINMAX_B(NAME, IS_MIN, 32) \ + DEF_GVEC_VFMINMAX_B(NAME, IS_MIN, 64) \ + DEF_GVEC_VFMINMAX_B(NAME, IS_MIN, 128) + +DEF_GVEC_VFMINMAX(vfmax, false) +DEF_GVEC_VFMINMAX(vfmin, true) diff --git a/target/s390x/vec_helper.c b/target/s390x/vec_helper.c index 986e7cc825..599bab06bd 100644 --- a/target/s390x/vec_helper.c +++ b/target/s390x/vec_helper.c @@ -19,6 +19,28 @@ #include "exec/cpu_ldst.h" #include "exec/exec-all.h" +void HELPER(gvec_vbperm)(void *v1, const void *v2, const void *v3, + uint32_t desc) +{ + S390Vector tmp = {}; + uint16_t result = 0; + int i; + + for (i = 0; i < 16; i++) { + const uint8_t bit_nr = s390_vec_read_element8(v3, i); + uint16_t bit; + + if (bit_nr >= 128) { + continue; + } + bit = (s390_vec_read_element8(v2, bit_nr / 8) + >> (7 - (bit_nr % 8))) & 1; + result |= (bit << (15 - i)); + } + s390_vec_write_element16(&tmp, 3, result); + *(S390Vector *)v1 = tmp; +} + void HELPER(vll)(CPUS390XState *env, void *v1, uint64_t addr, uint64_t bytes) { if (likely(bytes >= 16)) { diff --git a/tcg/meson.build b/tcg/meson.build index 5be3915529..c4c63b19d4 100644 --- a/tcg/meson.build +++ b/tcg/meson.build @@ -9,6 +9,12 @@ tcg_ss.add(files( 'tcg-op-gvec.c', 'tcg-op-vec.c', )) -tcg_ss.add(when: 'CONFIG_TCG_INTERPRETER', if_true: files('tci.c')) + +if get_option('tcg_interpreter') + libffi = dependency('libffi', version: '>=3.0', required: true, + method: 'pkg-config', kwargs: static_kwargs) + specific_ss.add(libffi) + specific_ss.add(files('tci.c')) +endif specific_ss.add_all(when: 'CONFIG_TCG', if_true: tcg_ss) diff --git a/tcg/optimize.c b/tcg/optimize.c index 37c902283e..211a4209a0 100644 --- a/tcg/optimize.c +++ b/tcg/optimize.c @@ -25,6 +25,7 @@ #include "qemu/osdep.h" #include "tcg/tcg-op.h" +#include "tcg-internal.h" #define CASE_OP_32_64(x) \ glue(glue(case INDEX_op_, x), _i32): \ @@ -1481,7 +1482,7 @@ void tcg_optimize(TCGContext *s) break; case INDEX_op_call: - if (!(op->args[nb_oargs + nb_iargs + 1] + if (!(tcg_call_flags(op) & (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) { for (i = 0; i < nb_globals; i++) { if (test_bit(i, temps_used.l)) { diff --git a/tcg/sparc/tcg-target.c.inc b/tcg/sparc/tcg-target.c.inc index ce39ac2d86..a6ec94a094 100644 --- a/tcg/sparc/tcg-target.c.inc +++ b/tcg/sparc/tcg-target.c.inc @@ -984,14 +984,18 @@ static void tcg_target_qemu_prologue(TCGContext *s) { int tmp_buf_size, frame_size; - /* The TCG temp buffer is at the top of the frame, immediately - below the frame pointer. */ + /* + * The TCG temp buffer is at the top of the frame, immediately + * below the frame pointer. Use the logical (aligned) offset here; + * the stack bias is applied in temp_allocate_frame(). 
+ */ tmp_buf_size = CPU_TEMP_BUF_NLONGS * (int)sizeof(long); - tcg_set_frame(s, TCG_REG_I6, TCG_TARGET_STACK_BIAS - tmp_buf_size, - tmp_buf_size); + tcg_set_frame(s, TCG_REG_I6, -tmp_buf_size, tmp_buf_size); - /* TCG_TARGET_CALL_STACK_OFFSET includes the stack bias, but is - otherwise the minimal frame usable by callees. */ + /* + * TCG_TARGET_CALL_STACK_OFFSET includes the stack bias, but is + * otherwise the minimal frame usable by callees. + */ frame_size = TCG_TARGET_CALL_STACK_OFFSET - TCG_TARGET_STACK_BIAS; frame_size += TCG_STATIC_CALL_ARGS_SIZE + tmp_buf_size; frame_size += TCG_TARGET_STACK_ALIGN - 1; diff --git a/tcg/tcg-internal.h b/tcg/tcg-internal.h index 181f86507a..92c91dcde9 100644 --- a/tcg/tcg-internal.h +++ b/tcg/tcg-internal.h @@ -27,6 +27,13 @@ #define TCG_HIGHWATER 1024 +typedef struct TCGHelperInfo { + void *func; + const char *name; + unsigned flags; + unsigned typemask; +} TCGHelperInfo; + extern TCGContext tcg_init_ctx; extern TCGContext **tcg_ctxs; extern unsigned int tcg_cur_ctxs; @@ -37,4 +44,19 @@ bool tcg_region_alloc(TCGContext *s); void tcg_region_initial_alloc(TCGContext *s); void tcg_region_prologue_set(TCGContext *s); +static inline void *tcg_call_func(TCGOp *op) +{ + return (void *)(uintptr_t)op->args[TCGOP_CALLO(op) + TCGOP_CALLI(op)]; +} + +static inline const TCGHelperInfo *tcg_call_info(TCGOp *op) +{ + return (void *)(uintptr_t)op->args[TCGOP_CALLO(op) + TCGOP_CALLI(op) + 1]; +} + +static inline unsigned tcg_call_flags(TCGOp *op) +{ + return tcg_call_info(op)->flags; +} + #endif /* TCG_INTERNAL_H */ @@ -60,6 +60,10 @@ #include "exec/log.h" #include "tcg-internal.h" +#ifdef CONFIG_TCG_INTERPRETER +#include <ffi.h> +#endif + /* Forward declarations for functions declared in tcg-target.c.inc and used here. */ static void tcg_target_init(TCGContext *s); @@ -143,7 +147,12 @@ static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1, intptr_t arg2); static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, TCGReg base, intptr_t ofs); +#ifdef CONFIG_TCG_INTERPRETER +static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target, + ffi_cif *cif); +#else static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target); +#endif static bool tcg_target_const_match(int64_t val, TCGType type, int ct); #ifdef TCG_TARGET_NEED_LDST_LABELS static int tcg_out_ldst_finalize(TCGContext *s); @@ -532,13 +541,6 @@ void tcg_pool_reset(TCGContext *s) s->pool_current = NULL; } -typedef struct TCGHelperInfo { - void *func; - const char *name; - unsigned flags; - unsigned sizemask; -} TCGHelperInfo; - #include "exec/helper-proto.h" static const TCGHelperInfo all_helpers[] = { @@ -546,6 +548,19 @@ static const TCGHelperInfo all_helpers[] = { }; static GHashTable *helper_table; +#ifdef CONFIG_TCG_INTERPRETER +static GHashTable *ffi_table; + +static ffi_type * const typecode_to_ffi[8] = { + [dh_typecode_void] = &ffi_type_void, + [dh_typecode_i32] = &ffi_type_uint32, + [dh_typecode_s32] = &ffi_type_sint32, + [dh_typecode_i64] = &ffi_type_uint64, + [dh_typecode_s64] = &ffi_type_sint64, + [dh_typecode_ptr] = &ffi_type_pointer, +}; +#endif + static int indirect_reg_alloc_order[ARRAY_SIZE(tcg_target_reg_alloc_order)]; static void process_op_defs(TCGContext *s); static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type, @@ -589,6 +604,47 @@ static void tcg_context_init(unsigned max_cpus) (gpointer)&all_helpers[i]); } +#ifdef CONFIG_TCG_INTERPRETER + /* g_direct_hash/equal for direct comparisons on uint32_t. 
*/ + ffi_table = g_hash_table_new(NULL, NULL); + for (i = 0; i < ARRAY_SIZE(all_helpers); ++i) { + struct { + ffi_cif cif; + ffi_type *args[]; + } *ca; + uint32_t typemask = all_helpers[i].typemask; + gpointer hash = (gpointer)(uintptr_t)typemask; + ffi_status status; + int nargs; + + if (g_hash_table_lookup(ffi_table, hash)) { + continue; + } + + /* Ignoring the return type, find the last non-zero field. */ + nargs = 32 - clz32(typemask >> 3); + nargs = DIV_ROUND_UP(nargs, 3); + + ca = g_malloc0(sizeof(*ca) + nargs * sizeof(ffi_type *)); + ca->cif.rtype = typecode_to_ffi[typemask & 7]; + ca->cif.nargs = nargs; + + if (nargs != 0) { + ca->cif.arg_types = ca->args; + for (i = 0; i < nargs; ++i) { + int typecode = extract32(typemask, (i + 1) * 3, 3); + ca->args[i] = typecode_to_ffi[typecode]; + } + } + + status = ffi_prep_cif(&ca->cif, FFI_DEFAULT_ABI, nargs, + ca->cif.rtype, ca->cif.arg_types); + assert(status == FFI_OK); + + g_hash_table_insert(ffi_table, hash, (gpointer)&ca->cif); + } +#endif + tcg_target_init(s); process_op_defs(s); @@ -729,10 +785,16 @@ void tcg_prologue_init(TCGContext *s) } #endif - /* Assert that goto_ptr is implemented completely. */ +#ifndef CONFIG_TCG_INTERPRETER + /* + * Assert that goto_ptr is implemented completely, setting an epilogue. + * For tci, we use NULL as the signal to return from the interpreter, + * so skip this check. + */ if (TCG_TARGET_HAS_goto_ptr) { tcg_debug_assert(tcg_code_gen_epilogue != NULL); } +#endif } void tcg_func_start(TCGContext *s) @@ -1395,13 +1457,12 @@ bool tcg_op_supported(TCGOpcode op) void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args) { int i, real_args, nb_rets, pi; - unsigned sizemask, flags; - TCGHelperInfo *info; + unsigned typemask; + const TCGHelperInfo *info; TCGOp *op; info = g_hash_table_lookup(helper_table, (gpointer)func); - flags = info->flags; - sizemask = info->sizemask; + typemask = info->typemask; #ifdef CONFIG_PLUGIN /* detect non-plugin helpers */ @@ -1414,36 +1475,41 @@ void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args) && !defined(CONFIG_TCG_INTERPRETER) /* We have 64-bit values in one register, but need to pass as two separate parameters. Split them. 
*/ - int orig_sizemask = sizemask; + int orig_typemask = typemask; int orig_nargs = nargs; TCGv_i64 retl, reth; TCGTemp *split_args[MAX_OPC_PARAM]; retl = NULL; reth = NULL; - if (sizemask != 0) { - for (i = real_args = 0; i < nargs; ++i) { - int is_64bit = sizemask & (1 << (i+1)*2); - if (is_64bit) { - TCGv_i64 orig = temp_tcgv_i64(args[i]); - TCGv_i32 h = tcg_temp_new_i32(); - TCGv_i32 l = tcg_temp_new_i32(); - tcg_gen_extr_i64_i32(l, h, orig); - split_args[real_args++] = tcgv_i32_temp(h); - split_args[real_args++] = tcgv_i32_temp(l); - } else { - split_args[real_args++] = args[i]; - } + typemask = 0; + for (i = real_args = 0; i < nargs; ++i) { + int argtype = extract32(orig_typemask, (i + 1) * 3, 3); + bool is_64bit = (argtype & ~1) == dh_typecode_i64; + + if (is_64bit) { + TCGv_i64 orig = temp_tcgv_i64(args[i]); + TCGv_i32 h = tcg_temp_new_i32(); + TCGv_i32 l = tcg_temp_new_i32(); + tcg_gen_extr_i64_i32(l, h, orig); + split_args[real_args++] = tcgv_i32_temp(h); + typemask |= dh_typecode_i32 << (real_args * 3); + split_args[real_args++] = tcgv_i32_temp(l); + typemask |= dh_typecode_i32 << (real_args * 3); + } else { + split_args[real_args++] = args[i]; + typemask |= argtype << (real_args * 3); } - nargs = real_args; - args = split_args; - sizemask = 0; } + nargs = real_args; + args = split_args; #elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64 for (i = 0; i < nargs; ++i) { - int is_64bit = sizemask & (1 << (i+1)*2); - int is_signed = sizemask & (2 << (i+1)*2); - if (!is_64bit) { + int argtype = extract32(typemask, (i + 1) * 3, 3); + bool is_32bit = (argtype & ~1) == dh_typecode_i32; + bool is_signed = argtype & 1; + + if (is_32bit) { TCGv_i64 temp = tcg_temp_new_i64(); TCGv_i64 orig = temp_tcgv_i64(args[i]); if (is_signed) { @@ -1462,7 +1528,7 @@ void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args) if (ret != NULL) { #if defined(__sparc__) && !defined(__arch64__) \ && !defined(CONFIG_TCG_INTERPRETER) - if (orig_sizemask & 1) { + if ((typemask & 6) == dh_typecode_i64) { /* The 32-bit ABI is going to return the 64-bit value in the %o0/%o1 register pair. Prepare for this by using two return temporaries, and reassemble below. */ @@ -1476,7 +1542,7 @@ void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args) nb_rets = 1; } #else - if (TCG_TARGET_REG_BITS < 64 && (sizemask & 1)) { + if (TCG_TARGET_REG_BITS < 64 && (typemask & 6) == dh_typecode_i64) { #ifdef HOST_WORDS_BIGENDIAN op->args[pi++] = temp_arg(ret + 1); op->args[pi++] = temp_arg(ret); @@ -1497,25 +1563,39 @@ void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args) real_args = 0; for (i = 0; i < nargs; i++) { - int is_64bit = sizemask & (1 << (i+1)*2); - if (TCG_TARGET_REG_BITS < 64 && is_64bit) { -#ifdef TCG_TARGET_CALL_ALIGN_ARGS - /* some targets want aligned 64 bit args */ - if (real_args & 1) { - op->args[pi++] = TCG_CALL_DUMMY_ARG; - real_args++; - } + int argtype = extract32(typemask, (i + 1) * 3, 3); + bool is_64bit = (argtype & ~1) == dh_typecode_i64; + bool want_align = false; + +#if defined(CONFIG_TCG_INTERPRETER) + /* + * Align all arguments, so that they land in predictable places + * for passing off to ffi_call. 
+ */ + want_align = true; +#elif defined(TCG_TARGET_CALL_ALIGN_ARGS) + /* Some targets want aligned 64 bit args */ + want_align = is_64bit; #endif - /* If stack grows up, then we will be placing successive - arguments at lower addresses, which means we need to - reverse the order compared to how we would normally - treat either big or little-endian. For those arguments - that will wind up in registers, this still works for - HPPA (the only current STACK_GROWSUP target) since the - argument registers are *also* allocated in decreasing - order. If another such target is added, this logic may - have to get more complicated to differentiate between - stack arguments and register arguments. */ + + if (TCG_TARGET_REG_BITS < 64 && want_align && (real_args & 1)) { + op->args[pi++] = TCG_CALL_DUMMY_ARG; + real_args++; + } + + if (TCG_TARGET_REG_BITS < 64 && is_64bit) { + /* + * If stack grows up, then we will be placing successive + * arguments at lower addresses, which means we need to + * reverse the order compared to how we would normally + * treat either big or little-endian. For those arguments + * that will wind up in registers, this still works for + * HPPA (the only current STACK_GROWSUP target) since the + * argument registers are *also* allocated in decreasing + * order. If another such target is added, this logic may + * have to get more complicated to differentiate between + * stack arguments and register arguments. + */ #if defined(HOST_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP) op->args[pi++] = temp_arg(args[i] + 1); op->args[pi++] = temp_arg(args[i]); @@ -1531,7 +1611,7 @@ void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args) real_args++; } op->args[pi++] = (uintptr_t)func; - op->args[pi++] = flags; + op->args[pi++] = (uintptr_t)info; TCGOP_CALLI(op) = real_args; /* Make sure the fields didn't overflow. */ @@ -1542,7 +1622,9 @@ void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args) && !defined(CONFIG_TCG_INTERPRETER) /* Free all of the parts we allocated above. */ for (i = real_args = 0; i < orig_nargs; ++i) { - int is_64bit = orig_sizemask & (1 << (i+1)*2); + int argtype = extract32(orig_typemask, (i + 1) * 3, 3); + bool is_64bit = (argtype & ~1) == dh_typecode_i64; + if (is_64bit) { tcg_temp_free_internal(args[real_args++]); tcg_temp_free_internal(args[real_args++]); @@ -1550,7 +1632,7 @@ void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args) real_args++; } } - if (orig_sizemask & 1) { + if ((orig_typemask & 6) == dh_typecode_i64) { /* The 32-bit ABI returned two 32-bit pieces. Re-assemble them. Note that describing these as TCGv_i64 eliminates an unnecessary zero-extension that tcg_gen_concat_i32_i64 would create. */ @@ -1560,8 +1642,10 @@ void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args) } #elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64 for (i = 0; i < nargs; ++i) { - int is_64bit = sizemask & (1 << (i+1)*2); - if (!is_64bit) { + int argtype = extract32(typemask, (i + 1) * 3, 3); + bool is_32bit = (argtype & ~1) == dh_typecode_i32; + + if (is_32bit) { tcg_temp_free_internal(args[i]); } } @@ -1646,19 +1730,6 @@ static char *tcg_get_arg_str(TCGContext *s, char *buf, return tcg_get_arg_str_ptr(s, buf, buf_size, arg_temp(arg)); } -/* Find helper name. 
*/ -static inline const char *tcg_find_helper(TCGContext *s, uintptr_t val) -{ - const char *ret = NULL; - if (helper_table) { - TCGHelperInfo *info = g_hash_table_lookup(helper_table, (gpointer)val); - if (info) { - ret = info->name; - } - } - return ret; -} - static const char * const cond_name[] = { [TCG_COND_NEVER] = "never", @@ -1749,15 +1820,28 @@ static void tcg_dump_ops(TCGContext *s, bool have_prefs) col += qemu_log(" " TARGET_FMT_lx, a); } } else if (c == INDEX_op_call) { + const TCGHelperInfo *info = tcg_call_info(op); + void *func = tcg_call_func(op); + /* variable number of arguments */ nb_oargs = TCGOP_CALLO(op); nb_iargs = TCGOP_CALLI(op); nb_cargs = def->nb_cargs; - /* function name, flags, out args */ - col += qemu_log(" %s %s,$0x%" TCG_PRIlx ",$%d", def->name, - tcg_find_helper(s, op->args[nb_oargs + nb_iargs]), - op->args[nb_oargs + nb_iargs + 1], nb_oargs); + col += qemu_log(" %s ", def->name); + + /* + * Print the function name from TCGHelperInfo, if available. + * Note that plugins have a template function for the info, + * but the actual function pointer comes from the plugin. + */ + if (func == info->func) { + col += qemu_log("%s", info->name); + } else { + col += qemu_log("plugin(%p)", func); + } + + col += qemu_log("$0x%x,$%d", info->flags, nb_oargs); for (i = 0; i < nb_oargs; i++) { col += qemu_log(",%s", tcg_get_arg_str(s, buf, sizeof(buf), op->args[i])); @@ -2144,7 +2228,6 @@ static void reachable_code_pass(TCGContext *s) QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) { bool remove = dead; TCGLabel *label; - int call_flags; switch (op->opc) { case INDEX_op_set_label: @@ -2189,8 +2272,7 @@ static void reachable_code_pass(TCGContext *s) case INDEX_op_call: /* Notice noreturn helper calls, raising exceptions. */ - call_flags = op->args[TCGOP_CALLO(op) + TCGOP_CALLI(op) + 1]; - if (call_flags & TCG_CALL_NO_RETURN) { + if (tcg_call_flags(op) & TCG_CALL_NO_RETURN) { dead = true; } break; @@ -2391,7 +2473,7 @@ static void liveness_pass_1(TCGContext *s) nb_oargs = TCGOP_CALLO(op); nb_iargs = TCGOP_CALLI(op); - call_flags = op->args[nb_oargs + nb_iargs + 1]; + call_flags = tcg_call_flags(op); /* pure functions can be removed if their result is unused */ if (call_flags & TCG_CALL_NO_SIDE_EFFECTS) { @@ -2706,7 +2788,7 @@ static bool liveness_pass_2(TCGContext *s) if (opc == INDEX_op_call) { nb_oargs = TCGOP_CALLO(op); nb_iargs = TCGOP_CALLI(op); - call_flags = op->args[nb_oargs + nb_iargs + 1]; + call_flags = tcg_call_flags(op); } else { nb_iargs = def->nb_iargs; nb_oargs = def->nb_oargs; @@ -2933,20 +3015,42 @@ static void check_regs(TCGContext *s) static void temp_allocate_frame(TCGContext *s, TCGTemp *ts) { -#if !(defined(__sparc__) && TCG_TARGET_REG_BITS == 64) - /* Sparc64 stack is accessed with offset of 2047 */ - s->current_frame_offset = (s->current_frame_offset + - (tcg_target_long)sizeof(tcg_target_long) - 1) & - ~(sizeof(tcg_target_long) - 1); -#endif - if (s->current_frame_offset + (tcg_target_long)sizeof(tcg_target_long) > - s->frame_end) { - tcg_abort(); + intptr_t off, size, align; + + switch (ts->type) { + case TCG_TYPE_I32: + size = align = 4; + break; + case TCG_TYPE_I64: + case TCG_TYPE_V64: + size = align = 8; + break; + case TCG_TYPE_V128: + size = align = 16; + break; + case TCG_TYPE_V256: + /* Note that we do not require aligned storage for V256. 
*/ + size = 32, align = 16; + break; + default: + g_assert_not_reached(); } - ts->mem_offset = s->current_frame_offset; + + assert(align <= TCG_TARGET_STACK_ALIGN); + off = ROUND_UP(s->current_frame_offset, align); + + /* If we've exhausted the stack frame, restart with a smaller TB. */ + if (off + size > s->frame_end) { + tcg_raise_tb_overflow(s); + } + s->current_frame_offset = off + size; + + ts->mem_offset = off; +#if defined(__sparc__) + ts->mem_offset += TCG_TARGET_STACK_BIAS; +#endif ts->mem_base = s->frame_temp; ts->mem_allocated = 1; - s->current_frame_offset += sizeof(tcg_target_long); } static void temp_load(TCGContext *, TCGTemp *, TCGRegSet, TCGRegSet, TCGRegSet); @@ -3777,6 +3881,7 @@ static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op) const int nb_oargs = TCGOP_CALLO(op); const int nb_iargs = TCGOP_CALLI(op); const TCGLifeData arg_life = op->life; + const TCGHelperInfo *info; int flags, nb_regs, i; TCGReg reg; TCGArg arg; @@ -3787,8 +3892,9 @@ static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op) int allocate_args; TCGRegSet allocated_regs; - func_addr = (tcg_insn_unit *)(intptr_t)op->args[nb_oargs + nb_iargs]; - flags = op->args[nb_oargs + nb_iargs + 1]; + func_addr = tcg_call_func(op); + info = tcg_call_info(op); + flags = info->flags; nb_regs = ARRAY_SIZE(tcg_target_call_iarg_regs); if (nb_regs > nb_iargs) { @@ -3880,7 +3986,16 @@ static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op) save_globals(s, allocated_regs); } +#ifdef CONFIG_TCG_INTERPRETER + { + gpointer hash = (gpointer)(uintptr_t)info->typemask; + ffi_cif *cif = g_hash_table_lookup(ffi_table, hash); + assert(cif != NULL); + tcg_out_call(s, func_addr, cif); + } +#else tcg_out_call(s, func_addr); +#endif /* assign output registers and emit moves if needed */ for(i = 0; i < nb_oargs; i++) { @@ -18,59 +18,31 @@ */ #include "qemu/osdep.h" - -/* Enable TCI assertions only when debugging TCG (and without NDEBUG defined). - * Without assertions, the interpreter runs much faster. */ -#if defined(CONFIG_DEBUG_TCG) -# define tci_assert(cond) assert(cond) -#else -# define tci_assert(cond) ((void)(cond)) -#endif - #include "qemu-common.h" #include "tcg/tcg.h" /* MAX_OPC_PARAM_IARGS */ #include "exec/cpu_ldst.h" #include "tcg/tcg-op.h" #include "qemu/compiler.h" +#include <ffi.h> -#if MAX_OPC_PARAM_IARGS != 6 -# error Fix needed, number of supported input arguments changed! -#endif -#if TCG_TARGET_REG_BITS == 32 -typedef uint64_t (*helper_function)(tcg_target_ulong, tcg_target_ulong, - tcg_target_ulong, tcg_target_ulong, - tcg_target_ulong, tcg_target_ulong, - tcg_target_ulong, tcg_target_ulong, - tcg_target_ulong, tcg_target_ulong, - tcg_target_ulong, tcg_target_ulong); + +/* + * Enable TCI assertions only when debugging TCG (and without NDEBUG defined). + * Without assertions, the interpreter runs much faster. 
+ */ +#if defined(CONFIG_DEBUG_TCG) +# define tci_assert(cond) assert(cond) #else -typedef uint64_t (*helper_function)(tcg_target_ulong, tcg_target_ulong, - tcg_target_ulong, tcg_target_ulong, - tcg_target_ulong, tcg_target_ulong); +# define tci_assert(cond) ((void)(cond)) #endif __thread uintptr_t tci_tb_ptr; -static tcg_target_ulong tci_read_reg(const tcg_target_ulong *regs, TCGReg index) -{ - tci_assert(index < TCG_TARGET_NB_REGS); - return regs[index]; -} - -static void -tci_write_reg(tcg_target_ulong *regs, TCGReg index, tcg_target_ulong value) -{ - tci_assert(index < TCG_TARGET_NB_REGS); - tci_assert(index != TCG_AREG0); - tci_assert(index != TCG_REG_CALL_STACK); - regs[index] = value; -} - static void tci_write_reg64(tcg_target_ulong *regs, uint32_t high_index, uint32_t low_index, uint64_t value) { - tci_write_reg(regs, low_index, value); - tci_write_reg(regs, high_index, value >> 32); + regs[low_index] = (uint32_t)value; + regs[high_index] = value >> 32; } /* Create a 64 bit value from two 32 bit values. */ @@ -79,49 +51,6 @@ static uint64_t tci_uint64(uint32_t high, uint32_t low) return ((uint64_t)high << 32) + low; } -/* Read constant byte from bytecode. */ -static uint8_t tci_read_b(const uint8_t **tb_ptr) -{ - return *(tb_ptr[0]++); -} - -/* Read register number from bytecode. */ -static TCGReg tci_read_r(const uint8_t **tb_ptr) -{ - uint8_t regno = tci_read_b(tb_ptr); - tci_assert(regno < TCG_TARGET_NB_REGS); - return regno; -} - -/* Read constant (native size) from bytecode. */ -static tcg_target_ulong tci_read_i(const uint8_t **tb_ptr) -{ - tcg_target_ulong value = *(const tcg_target_ulong *)(*tb_ptr); - *tb_ptr += sizeof(value); - return value; -} - -/* Read unsigned constant (32 bit) from bytecode. */ -static uint32_t tci_read_i32(const uint8_t **tb_ptr) -{ - uint32_t value = *(const uint32_t *)(*tb_ptr); - *tb_ptr += sizeof(value); - return value; -} - -/* Read signed constant (32 bit) from bytecode. */ -static int32_t tci_read_s32(const uint8_t **tb_ptr) -{ - int32_t value = *(const int32_t *)(*tb_ptr); - *tb_ptr += sizeof(value); - return value; -} - -static tcg_target_ulong tci_read_label(const uint8_t **tb_ptr) -{ - return tci_read_i(tb_ptr); -} - /* * Load sets of arguments all at once. The naming convention is: * tci_args_<arguments> @@ -133,223 +62,147 @@ static tcg_target_ulong tci_read_label(const uint8_t **tb_ptr) * I = immediate (tcg_target_ulong) * l = label or pointer * m = immediate (TCGMemOpIdx) + * n = immediate (call return length) * r = register * s = signed ldst offset */ -static void check_size(const uint8_t *start, const uint8_t **tb_ptr) +static void tci_args_l(uint32_t insn, const void *tb_ptr, void **l0) { - const uint8_t *old_code_ptr = start - 2; - uint8_t op_size = old_code_ptr[1]; - tci_assert(*tb_ptr == old_code_ptr + op_size); + int diff = sextract32(insn, 12, 20); + *l0 = diff ? 
(void *)tb_ptr + diff : NULL; } -static void tci_args_l(const uint8_t **tb_ptr, void **l0) +static void tci_args_r(uint32_t insn, TCGReg *r0) { - const uint8_t *start = *tb_ptr; - - *l0 = (void *)tci_read_label(tb_ptr); - - check_size(start, tb_ptr); + *r0 = extract32(insn, 8, 4); } -static void tci_args_rr(const uint8_t **tb_ptr, - TCGReg *r0, TCGReg *r1) +static void tci_args_nl(uint32_t insn, const void *tb_ptr, + uint8_t *n0, void **l1) { - const uint8_t *start = *tb_ptr; - - *r0 = tci_read_r(tb_ptr); - *r1 = tci_read_r(tb_ptr); - - check_size(start, tb_ptr); + *n0 = extract32(insn, 8, 4); + *l1 = sextract32(insn, 12, 20) + (void *)tb_ptr; } -static void tci_args_ri(const uint8_t **tb_ptr, - TCGReg *r0, tcg_target_ulong *i1) +static void tci_args_rl(uint32_t insn, const void *tb_ptr, + TCGReg *r0, void **l1) { - const uint8_t *start = *tb_ptr; - - *r0 = tci_read_r(tb_ptr); - *i1 = tci_read_i32(tb_ptr); - - check_size(start, tb_ptr); + *r0 = extract32(insn, 8, 4); + *l1 = sextract32(insn, 12, 20) + (void *)tb_ptr; } -#if TCG_TARGET_REG_BITS == 64 -static void tci_args_rI(const uint8_t **tb_ptr, - TCGReg *r0, tcg_target_ulong *i1) +static void tci_args_rr(uint32_t insn, TCGReg *r0, TCGReg *r1) { - const uint8_t *start = *tb_ptr; - - *r0 = tci_read_r(tb_ptr); - *i1 = tci_read_i(tb_ptr); - - check_size(start, tb_ptr); + *r0 = extract32(insn, 8, 4); + *r1 = extract32(insn, 12, 4); } -#endif -static void tci_args_rrm(const uint8_t **tb_ptr, - TCGReg *r0, TCGReg *r1, TCGMemOpIdx *m2) +static void tci_args_ri(uint32_t insn, TCGReg *r0, tcg_target_ulong *i1) { - const uint8_t *start = *tb_ptr; - - *r0 = tci_read_r(tb_ptr); - *r1 = tci_read_r(tb_ptr); - *m2 = tci_read_i32(tb_ptr); - - check_size(start, tb_ptr); + *r0 = extract32(insn, 8, 4); + *i1 = sextract32(insn, 12, 20); } -static void tci_args_rrr(const uint8_t **tb_ptr, - TCGReg *r0, TCGReg *r1, TCGReg *r2) +static void tci_args_rrm(uint32_t insn, TCGReg *r0, + TCGReg *r1, TCGMemOpIdx *m2) { - const uint8_t *start = *tb_ptr; - - *r0 = tci_read_r(tb_ptr); - *r1 = tci_read_r(tb_ptr); - *r2 = tci_read_r(tb_ptr); - - check_size(start, tb_ptr); + *r0 = extract32(insn, 8, 4); + *r1 = extract32(insn, 12, 4); + *m2 = extract32(insn, 20, 12); } -static void tci_args_rrs(const uint8_t **tb_ptr, - TCGReg *r0, TCGReg *r1, int32_t *i2) +static void tci_args_rrr(uint32_t insn, TCGReg *r0, TCGReg *r1, TCGReg *r2) { - const uint8_t *start = *tb_ptr; - - *r0 = tci_read_r(tb_ptr); - *r1 = tci_read_r(tb_ptr); - *i2 = tci_read_s32(tb_ptr); - - check_size(start, tb_ptr); + *r0 = extract32(insn, 8, 4); + *r1 = extract32(insn, 12, 4); + *r2 = extract32(insn, 16, 4); } -static void tci_args_rrcl(const uint8_t **tb_ptr, - TCGReg *r0, TCGReg *r1, TCGCond *c2, void **l3) +static void tci_args_rrs(uint32_t insn, TCGReg *r0, TCGReg *r1, int32_t *i2) { - const uint8_t *start = *tb_ptr; - - *r0 = tci_read_r(tb_ptr); - *r1 = tci_read_r(tb_ptr); - *c2 = tci_read_b(tb_ptr); - *l3 = (void *)tci_read_label(tb_ptr); + *r0 = extract32(insn, 8, 4); + *r1 = extract32(insn, 12, 4); + *i2 = sextract32(insn, 16, 16); +} - check_size(start, tb_ptr); +static void tci_args_rrbb(uint32_t insn, TCGReg *r0, TCGReg *r1, + uint8_t *i2, uint8_t *i3) +{ + *r0 = extract32(insn, 8, 4); + *r1 = extract32(insn, 12, 4); + *i2 = extract32(insn, 16, 6); + *i3 = extract32(insn, 22, 6); } -static void tci_args_rrrc(const uint8_t **tb_ptr, +static void tci_args_rrrc(uint32_t insn, TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGCond *c3) { - const uint8_t *start = *tb_ptr; - - *r0 = tci_read_r(tb_ptr); - *r1 = 
tci_read_r(tb_ptr); - *r2 = tci_read_r(tb_ptr); - *c3 = tci_read_b(tb_ptr); - - check_size(start, tb_ptr); + *r0 = extract32(insn, 8, 4); + *r1 = extract32(insn, 12, 4); + *r2 = extract32(insn, 16, 4); + *c3 = extract32(insn, 20, 4); } -static void tci_args_rrrm(const uint8_t **tb_ptr, +static void tci_args_rrrm(uint32_t insn, TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGMemOpIdx *m3) { - const uint8_t *start = *tb_ptr; - - *r0 = tci_read_r(tb_ptr); - *r1 = tci_read_r(tb_ptr); - *r2 = tci_read_r(tb_ptr); - *m3 = tci_read_i32(tb_ptr); - - check_size(start, tb_ptr); + *r0 = extract32(insn, 8, 4); + *r1 = extract32(insn, 12, 4); + *r2 = extract32(insn, 16, 4); + *m3 = extract32(insn, 20, 12); } -static void tci_args_rrrbb(const uint8_t **tb_ptr, TCGReg *r0, TCGReg *r1, +static void tci_args_rrrbb(uint32_t insn, TCGReg *r0, TCGReg *r1, TCGReg *r2, uint8_t *i3, uint8_t *i4) { - const uint8_t *start = *tb_ptr; - - *r0 = tci_read_r(tb_ptr); - *r1 = tci_read_r(tb_ptr); - *r2 = tci_read_r(tb_ptr); - *i3 = tci_read_b(tb_ptr); - *i4 = tci_read_b(tb_ptr); - - check_size(start, tb_ptr); + *r0 = extract32(insn, 8, 4); + *r1 = extract32(insn, 12, 4); + *r2 = extract32(insn, 16, 4); + *i3 = extract32(insn, 20, 6); + *i4 = extract32(insn, 26, 6); } -static void tci_args_rrrrm(const uint8_t **tb_ptr, TCGReg *r0, TCGReg *r1, - TCGReg *r2, TCGReg *r3, TCGMemOpIdx *m4) +static void tci_args_rrrrr(uint32_t insn, TCGReg *r0, TCGReg *r1, + TCGReg *r2, TCGReg *r3, TCGReg *r4) { - const uint8_t *start = *tb_ptr; - - *r0 = tci_read_r(tb_ptr); - *r1 = tci_read_r(tb_ptr); - *r2 = tci_read_r(tb_ptr); - *r3 = tci_read_r(tb_ptr); - *m4 = tci_read_i32(tb_ptr); - - check_size(start, tb_ptr); + *r0 = extract32(insn, 8, 4); + *r1 = extract32(insn, 12, 4); + *r2 = extract32(insn, 16, 4); + *r3 = extract32(insn, 20, 4); + *r4 = extract32(insn, 24, 4); } -#if TCG_TARGET_REG_BITS == 32 -static void tci_args_rrrr(const uint8_t **tb_ptr, +static void tci_args_rrrr(uint32_t insn, TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGReg *r3) { - const uint8_t *start = *tb_ptr; - - *r0 = tci_read_r(tb_ptr); - *r1 = tci_read_r(tb_ptr); - *r2 = tci_read_r(tb_ptr); - *r3 = tci_read_r(tb_ptr); - - check_size(start, tb_ptr); + *r0 = extract32(insn, 8, 4); + *r1 = extract32(insn, 12, 4); + *r2 = extract32(insn, 16, 4); + *r3 = extract32(insn, 20, 4); } -static void tci_args_rrrrcl(const uint8_t **tb_ptr, TCGReg *r0, TCGReg *r1, - TCGReg *r2, TCGReg *r3, TCGCond *c4, void **l5) -{ - const uint8_t *start = *tb_ptr; - - *r0 = tci_read_r(tb_ptr); - *r1 = tci_read_r(tb_ptr); - *r2 = tci_read_r(tb_ptr); - *r3 = tci_read_r(tb_ptr); - *c4 = tci_read_b(tb_ptr); - *l5 = (void *)tci_read_label(tb_ptr); - - check_size(start, tb_ptr); -} - -static void tci_args_rrrrrc(const uint8_t **tb_ptr, TCGReg *r0, TCGReg *r1, +static void tci_args_rrrrrc(uint32_t insn, TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGReg *r3, TCGReg *r4, TCGCond *c5) { - const uint8_t *start = *tb_ptr; - - *r0 = tci_read_r(tb_ptr); - *r1 = tci_read_r(tb_ptr); - *r2 = tci_read_r(tb_ptr); - *r3 = tci_read_r(tb_ptr); - *r4 = tci_read_r(tb_ptr); - *c5 = tci_read_b(tb_ptr); - - check_size(start, tb_ptr); + *r0 = extract32(insn, 8, 4); + *r1 = extract32(insn, 12, 4); + *r2 = extract32(insn, 16, 4); + *r3 = extract32(insn, 20, 4); + *r4 = extract32(insn, 24, 4); + *c5 = extract32(insn, 28, 4); } -static void tci_args_rrrrrr(const uint8_t **tb_ptr, TCGReg *r0, TCGReg *r1, +static void tci_args_rrrrrr(uint32_t insn, TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGReg *r3, TCGReg *r4, TCGReg *r5) { - const uint8_t *start = 
*tb_ptr; - - *r0 = tci_read_r(tb_ptr); - *r1 = tci_read_r(tb_ptr); - *r2 = tci_read_r(tb_ptr); - *r3 = tci_read_r(tb_ptr); - *r4 = tci_read_r(tb_ptr); - *r5 = tci_read_r(tb_ptr); - - check_size(start, tb_ptr); + *r0 = extract32(insn, 8, 4); + *r1 = extract32(insn, 12, 4); + *r2 = extract32(insn, 16, 4); + *r3 = extract32(insn, 20, 4); + *r4 = extract32(insn, 24, 4); + *r5 = extract32(insn, 28, 4); } -#endif static bool tci_compare32(uint32_t u0, uint32_t u1, TCGCond condition) { @@ -435,34 +288,155 @@ static bool tci_compare64(uint64_t u0, uint64_t u1, TCGCond condition) return result; } -#define qemu_ld_ub \ - cpu_ldub_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr) -#define qemu_ld_leuw \ - cpu_lduw_le_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr) -#define qemu_ld_leul \ - cpu_ldl_le_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr) -#define qemu_ld_leq \ - cpu_ldq_le_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr) -#define qemu_ld_beuw \ - cpu_lduw_be_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr) -#define qemu_ld_beul \ - cpu_ldl_be_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr) -#define qemu_ld_beq \ - cpu_ldq_be_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr) -#define qemu_st_b(X) \ - cpu_stb_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr) -#define qemu_st_lew(X) \ - cpu_stw_le_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr) -#define qemu_st_lel(X) \ - cpu_stl_le_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr) -#define qemu_st_leq(X) \ - cpu_stq_le_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr) -#define qemu_st_bew(X) \ - cpu_stw_be_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr) -#define qemu_st_bel(X) \ - cpu_stl_be_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr) -#define qemu_st_beq(X) \ - cpu_stq_be_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr) +static uint64_t tci_qemu_ld(CPUArchState *env, target_ulong taddr, + TCGMemOpIdx oi, const void *tb_ptr) +{ + MemOp mop = get_memop(oi) & (MO_BSWAP | MO_SSIZE); + uintptr_t ra = (uintptr_t)tb_ptr; + +#ifdef CONFIG_SOFTMMU + switch (mop) { + case MO_UB: + return helper_ret_ldub_mmu(env, taddr, oi, ra); + case MO_SB: + return helper_ret_ldsb_mmu(env, taddr, oi, ra); + case MO_LEUW: + return helper_le_lduw_mmu(env, taddr, oi, ra); + case MO_LESW: + return helper_le_ldsw_mmu(env, taddr, oi, ra); + case MO_LEUL: + return helper_le_ldul_mmu(env, taddr, oi, ra); + case MO_LESL: + return helper_le_ldsl_mmu(env, taddr, oi, ra); + case MO_LEQ: + return helper_le_ldq_mmu(env, taddr, oi, ra); + case MO_BEUW: + return helper_be_lduw_mmu(env, taddr, oi, ra); + case MO_BESW: + return helper_be_ldsw_mmu(env, taddr, oi, ra); + case MO_BEUL: + return helper_be_ldul_mmu(env, taddr, oi, ra); + case MO_BESL: + return helper_be_ldsl_mmu(env, taddr, oi, ra); + case MO_BEQ: + return helper_be_ldq_mmu(env, taddr, oi, ra); + default: + g_assert_not_reached(); + } +#else + void *haddr = g2h(env_cpu(env), taddr); + uint64_t ret; + + set_helper_retaddr(ra); + switch (mop) { + case MO_UB: + ret = ldub_p(haddr); + break; + case MO_SB: + ret = ldsb_p(haddr); + break; + case MO_LEUW: + ret = lduw_le_p(haddr); + break; + case MO_LESW: + ret = ldsw_le_p(haddr); + break; + case MO_LEUL: + ret = (uint32_t)ldl_le_p(haddr); + break; + case MO_LESL: + ret = (int32_t)ldl_le_p(haddr); + break; + case MO_LEQ: + ret = ldq_le_p(haddr); + break; + case MO_BEUW: + ret = lduw_be_p(haddr); + break; + case MO_BESW: + ret = 
ldsw_be_p(haddr); + break; + case MO_BEUL: + ret = (uint32_t)ldl_be_p(haddr); + break; + case MO_BESL: + ret = (int32_t)ldl_be_p(haddr); + break; + case MO_BEQ: + ret = ldq_be_p(haddr); + break; + default: + g_assert_not_reached(); + } + clear_helper_retaddr(); + return ret; +#endif +} + +static void tci_qemu_st(CPUArchState *env, target_ulong taddr, uint64_t val, + TCGMemOpIdx oi, const void *tb_ptr) +{ + MemOp mop = get_memop(oi) & (MO_BSWAP | MO_SSIZE); + uintptr_t ra = (uintptr_t)tb_ptr; + +#ifdef CONFIG_SOFTMMU + switch (mop) { + case MO_UB: + helper_ret_stb_mmu(env, taddr, val, oi, ra); + break; + case MO_LEUW: + helper_le_stw_mmu(env, taddr, val, oi, ra); + break; + case MO_LEUL: + helper_le_stl_mmu(env, taddr, val, oi, ra); + break; + case MO_LEQ: + helper_le_stq_mmu(env, taddr, val, oi, ra); + break; + case MO_BEUW: + helper_be_stw_mmu(env, taddr, val, oi, ra); + break; + case MO_BEUL: + helper_be_stl_mmu(env, taddr, val, oi, ra); + break; + case MO_BEQ: + helper_be_stq_mmu(env, taddr, val, oi, ra); + break; + default: + g_assert_not_reached(); + } +#else + void *haddr = g2h(env_cpu(env), taddr); + + set_helper_retaddr(ra); + switch (mop) { + case MO_UB: + stb_p(haddr, val); + break; + case MO_LEUW: + stw_le_p(haddr, val); + break; + case MO_LEUL: + stl_le_p(haddr, val); + break; + case MO_LEQ: + stq_le_p(haddr, val); + break; + case MO_BEUW: + stw_be_p(haddr, val); + break; + case MO_BEUL: + stl_be_p(haddr, val); + break; + case MO_BEQ: + stq_be_p(haddr, val); + break; + default: + g_assert_not_reached(); + } + clear_helper_retaddr(); +#endif +} #if TCG_TARGET_REG_BITS == 64 # define CASE_32_64(x) \ @@ -485,135 +459,171 @@ static bool tci_compare64(uint64_t u0, uint64_t u1, TCGCond condition) uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, const void *v_tb_ptr) { - const uint8_t *tb_ptr = v_tb_ptr; + const uint32_t *tb_ptr = v_tb_ptr; tcg_target_ulong regs[TCG_TARGET_NB_REGS]; - long tcg_temps[CPU_TEMP_BUF_NLONGS]; - uintptr_t sp_value = (uintptr_t)(tcg_temps + CPU_TEMP_BUF_NLONGS); + uint64_t stack[(TCG_STATIC_CALL_ARGS_SIZE + TCG_STATIC_FRAME_SIZE) + / sizeof(uint64_t)]; + void *call_slots[TCG_STATIC_CALL_ARGS_SIZE / sizeof(uint64_t)]; regs[TCG_AREG0] = (tcg_target_ulong)env; - regs[TCG_REG_CALL_STACK] = sp_value; + regs[TCG_REG_CALL_STACK] = (uintptr_t)stack; + /* Other call_slots entries initialized at first use (see below). */ + call_slots[0] = NULL; tci_assert(tb_ptr); for (;;) { - TCGOpcode opc = tb_ptr[0]; - TCGReg r0, r1, r2, r3; + uint32_t insn; + TCGOpcode opc; + TCGReg r0, r1, r2, r3, r4, r5; tcg_target_ulong t1; TCGCond condition; target_ulong taddr; uint8_t pos, len; uint32_t tmp32; uint64_t tmp64; -#if TCG_TARGET_REG_BITS == 32 - TCGReg r4, r5; uint64_t T1, T2; -#endif TCGMemOpIdx oi; int32_t ofs; void *ptr; - /* Skip opcode and size entry. */ - tb_ptr += 2; + insn = *tb_ptr++; + opc = extract32(insn, 0, 8); switch (opc) { case INDEX_op_call: - tci_args_l(&tb_ptr, &ptr); + /* + * Set up the ffi_avalue array once, delayed until now + * because many TB's do not make any calls. In tcg_gen_callN, + * we arranged for every real argument to be "left-aligned" + * in each 64-bit slot. 
+ */ + if (unlikely(call_slots[0] == NULL)) { + for (int i = 0; i < ARRAY_SIZE(call_slots); ++i) { + call_slots[i] = &stack[i]; + } + } + + tci_args_nl(insn, tb_ptr, &len, &ptr); + + /* Helper functions may need to access the "return address" */ tci_tb_ptr = (uintptr_t)tb_ptr; -#if TCG_TARGET_REG_BITS == 32 - tmp64 = ((helper_function)ptr)(tci_read_reg(regs, TCG_REG_R0), - tci_read_reg(regs, TCG_REG_R1), - tci_read_reg(regs, TCG_REG_R2), - tci_read_reg(regs, TCG_REG_R3), - tci_read_reg(regs, TCG_REG_R4), - tci_read_reg(regs, TCG_REG_R5), - tci_read_reg(regs, TCG_REG_R6), - tci_read_reg(regs, TCG_REG_R7), - tci_read_reg(regs, TCG_REG_R8), - tci_read_reg(regs, TCG_REG_R9), - tci_read_reg(regs, TCG_REG_R10), - tci_read_reg(regs, TCG_REG_R11)); - tci_write_reg(regs, TCG_REG_R0, tmp64); - tci_write_reg(regs, TCG_REG_R1, tmp64 >> 32); -#else - tmp64 = ((helper_function)ptr)(tci_read_reg(regs, TCG_REG_R0), - tci_read_reg(regs, TCG_REG_R1), - tci_read_reg(regs, TCG_REG_R2), - tci_read_reg(regs, TCG_REG_R3), - tci_read_reg(regs, TCG_REG_R4), - tci_read_reg(regs, TCG_REG_R5)); - tci_write_reg(regs, TCG_REG_R0, tmp64); -#endif + + { + void **pptr = ptr; + ffi_call(pptr[1], pptr[0], stack, call_slots); + } + + /* Any result winds up "left-aligned" in the stack[0] slot. */ + switch (len) { + case 0: /* void */ + break; + case 1: /* uint32_t */ + /* + * Note that libffi has an odd special case in that it will + * always widen an integral result to ffi_arg. + */ + if (sizeof(ffi_arg) == 4) { + regs[TCG_REG_R0] = *(uint32_t *)stack; + break; + } + /* fall through */ + case 2: /* uint64_t */ + if (TCG_TARGET_REG_BITS == 32) { + tci_write_reg64(regs, TCG_REG_R1, TCG_REG_R0, stack[0]); + } else { + regs[TCG_REG_R0] = stack[0]; + } + break; + default: + g_assert_not_reached(); + } break; + case INDEX_op_br: - tci_args_l(&tb_ptr, &ptr); + tci_args_l(insn, tb_ptr, &ptr); tb_ptr = ptr; continue; case INDEX_op_setcond_i32: - tci_args_rrrc(&tb_ptr, &r0, &r1, &r2, &condition); + tci_args_rrrc(insn, &r0, &r1, &r2, &condition); regs[r0] = tci_compare32(regs[r1], regs[r2], condition); break; + case INDEX_op_movcond_i32: + tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition); + tmp32 = tci_compare32(regs[r1], regs[r2], condition); + regs[r0] = regs[tmp32 ? r3 : r4]; + break; #if TCG_TARGET_REG_BITS == 32 case INDEX_op_setcond2_i32: - tci_args_rrrrrc(&tb_ptr, &r0, &r1, &r2, &r3, &r4, &condition); + tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition); T1 = tci_uint64(regs[r2], regs[r1]); T2 = tci_uint64(regs[r4], regs[r3]); regs[r0] = tci_compare64(T1, T2, condition); break; #elif TCG_TARGET_REG_BITS == 64 case INDEX_op_setcond_i64: - tci_args_rrrc(&tb_ptr, &r0, &r1, &r2, &condition); + tci_args_rrrc(insn, &r0, &r1, &r2, &condition); regs[r0] = tci_compare64(regs[r1], regs[r2], condition); break; + case INDEX_op_movcond_i64: + tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition); + tmp32 = tci_compare64(regs[r1], regs[r2], condition); + regs[r0] = regs[tmp32 ? r3 : r4]; + break; #endif CASE_32_64(mov) - tci_args_rr(&tb_ptr, &r0, &r1); + tci_args_rr(insn, &r0, &r1); regs[r0] = regs[r1]; break; - case INDEX_op_tci_movi_i32: - tci_args_ri(&tb_ptr, &r0, &t1); + case INDEX_op_tci_movi: + tci_args_ri(insn, &r0, &t1); regs[r0] = t1; break; + case INDEX_op_tci_movl: + tci_args_rl(insn, tb_ptr, &r0, &ptr); + regs[r0] = *(tcg_target_ulong *)ptr; + break; /* Load/store operations (32 bit). 
*/ CASE_32_64(ld8u) - tci_args_rrs(&tb_ptr, &r0, &r1, &ofs); + tci_args_rrs(insn, &r0, &r1, &ofs); ptr = (void *)(regs[r1] + ofs); regs[r0] = *(uint8_t *)ptr; break; CASE_32_64(ld8s) - tci_args_rrs(&tb_ptr, &r0, &r1, &ofs); + tci_args_rrs(insn, &r0, &r1, &ofs); ptr = (void *)(regs[r1] + ofs); regs[r0] = *(int8_t *)ptr; break; CASE_32_64(ld16u) - tci_args_rrs(&tb_ptr, &r0, &r1, &ofs); + tci_args_rrs(insn, &r0, &r1, &ofs); ptr = (void *)(regs[r1] + ofs); regs[r0] = *(uint16_t *)ptr; break; CASE_32_64(ld16s) - tci_args_rrs(&tb_ptr, &r0, &r1, &ofs); + tci_args_rrs(insn, &r0, &r1, &ofs); ptr = (void *)(regs[r1] + ofs); regs[r0] = *(int16_t *)ptr; break; case INDEX_op_ld_i32: CASE_64(ld32u) - tci_args_rrs(&tb_ptr, &r0, &r1, &ofs); + tci_args_rrs(insn, &r0, &r1, &ofs); ptr = (void *)(regs[r1] + ofs); regs[r0] = *(uint32_t *)ptr; break; CASE_32_64(st8) - tci_args_rrs(&tb_ptr, &r0, &r1, &ofs); + tci_args_rrs(insn, &r0, &r1, &ofs); ptr = (void *)(regs[r1] + ofs); *(uint8_t *)ptr = regs[r0]; break; CASE_32_64(st16) - tci_args_rrs(&tb_ptr, &r0, &r1, &ofs); + tci_args_rrs(insn, &r0, &r1, &ofs); ptr = (void *)(regs[r1] + ofs); *(uint16_t *)ptr = regs[r0]; break; case INDEX_op_st_i32: CASE_64(st32) - tci_args_rrs(&tb_ptr, &r0, &r1, &ofs); + tci_args_rrs(insn, &r0, &r1, &ofs); ptr = (void *)(regs[r1] + ofs); *(uint32_t *)ptr = regs[r0]; break; @@ -621,180 +631,240 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, /* Arithmetic operations (mixed 32/64 bit). */ CASE_32_64(add) - tci_args_rrr(&tb_ptr, &r0, &r1, &r2); + tci_args_rrr(insn, &r0, &r1, &r2); regs[r0] = regs[r1] + regs[r2]; break; CASE_32_64(sub) - tci_args_rrr(&tb_ptr, &r0, &r1, &r2); + tci_args_rrr(insn, &r0, &r1, &r2); regs[r0] = regs[r1] - regs[r2]; break; CASE_32_64(mul) - tci_args_rrr(&tb_ptr, &r0, &r1, &r2); + tci_args_rrr(insn, &r0, &r1, &r2); regs[r0] = regs[r1] * regs[r2]; break; CASE_32_64(and) - tci_args_rrr(&tb_ptr, &r0, &r1, &r2); + tci_args_rrr(insn, &r0, &r1, &r2); regs[r0] = regs[r1] & regs[r2]; break; CASE_32_64(or) - tci_args_rrr(&tb_ptr, &r0, &r1, &r2); + tci_args_rrr(insn, &r0, &r1, &r2); regs[r0] = regs[r1] | regs[r2]; break; CASE_32_64(xor) - tci_args_rrr(&tb_ptr, &r0, &r1, &r2); + tci_args_rrr(insn, &r0, &r1, &r2); regs[r0] = regs[r1] ^ regs[r2]; break; +#if TCG_TARGET_HAS_andc_i32 || TCG_TARGET_HAS_andc_i64 + CASE_32_64(andc) + tci_args_rrr(insn, &r0, &r1, &r2); + regs[r0] = regs[r1] & ~regs[r2]; + break; +#endif +#if TCG_TARGET_HAS_orc_i32 || TCG_TARGET_HAS_orc_i64 + CASE_32_64(orc) + tci_args_rrr(insn, &r0, &r1, &r2); + regs[r0] = regs[r1] | ~regs[r2]; + break; +#endif +#if TCG_TARGET_HAS_eqv_i32 || TCG_TARGET_HAS_eqv_i64 + CASE_32_64(eqv) + tci_args_rrr(insn, &r0, &r1, &r2); + regs[r0] = ~(regs[r1] ^ regs[r2]); + break; +#endif +#if TCG_TARGET_HAS_nand_i32 || TCG_TARGET_HAS_nand_i64 + CASE_32_64(nand) + tci_args_rrr(insn, &r0, &r1, &r2); + regs[r0] = ~(regs[r1] & regs[r2]); + break; +#endif +#if TCG_TARGET_HAS_nor_i32 || TCG_TARGET_HAS_nor_i64 + CASE_32_64(nor) + tci_args_rrr(insn, &r0, &r1, &r2); + regs[r0] = ~(regs[r1] | regs[r2]); + break; +#endif /* Arithmetic operations (32 bit). 
*/ case INDEX_op_div_i32: - tci_args_rrr(&tb_ptr, &r0, &r1, &r2); + tci_args_rrr(insn, &r0, &r1, &r2); regs[r0] = (int32_t)regs[r1] / (int32_t)regs[r2]; break; case INDEX_op_divu_i32: - tci_args_rrr(&tb_ptr, &r0, &r1, &r2); + tci_args_rrr(insn, &r0, &r1, &r2); regs[r0] = (uint32_t)regs[r1] / (uint32_t)regs[r2]; break; case INDEX_op_rem_i32: - tci_args_rrr(&tb_ptr, &r0, &r1, &r2); + tci_args_rrr(insn, &r0, &r1, &r2); regs[r0] = (int32_t)regs[r1] % (int32_t)regs[r2]; break; case INDEX_op_remu_i32: - tci_args_rrr(&tb_ptr, &r0, &r1, &r2); + tci_args_rrr(insn, &r0, &r1, &r2); regs[r0] = (uint32_t)regs[r1] % (uint32_t)regs[r2]; break; +#if TCG_TARGET_HAS_clz_i32 + case INDEX_op_clz_i32: + tci_args_rrr(insn, &r0, &r1, &r2); + tmp32 = regs[r1]; + regs[r0] = tmp32 ? clz32(tmp32) : regs[r2]; + break; +#endif +#if TCG_TARGET_HAS_ctz_i32 + case INDEX_op_ctz_i32: + tci_args_rrr(insn, &r0, &r1, &r2); + tmp32 = regs[r1]; + regs[r0] = tmp32 ? ctz32(tmp32) : regs[r2]; + break; +#endif +#if TCG_TARGET_HAS_ctpop_i32 + case INDEX_op_ctpop_i32: + tci_args_rr(insn, &r0, &r1); + regs[r0] = ctpop32(regs[r1]); + break; +#endif /* Shift/rotate operations (32 bit). */ case INDEX_op_shl_i32: - tci_args_rrr(&tb_ptr, &r0, &r1, &r2); + tci_args_rrr(insn, &r0, &r1, &r2); regs[r0] = (uint32_t)regs[r1] << (regs[r2] & 31); break; case INDEX_op_shr_i32: - tci_args_rrr(&tb_ptr, &r0, &r1, &r2); + tci_args_rrr(insn, &r0, &r1, &r2); regs[r0] = (uint32_t)regs[r1] >> (regs[r2] & 31); break; case INDEX_op_sar_i32: - tci_args_rrr(&tb_ptr, &r0, &r1, &r2); + tci_args_rrr(insn, &r0, &r1, &r2); regs[r0] = (int32_t)regs[r1] >> (regs[r2] & 31); break; #if TCG_TARGET_HAS_rot_i32 case INDEX_op_rotl_i32: - tci_args_rrr(&tb_ptr, &r0, &r1, &r2); + tci_args_rrr(insn, &r0, &r1, &r2); regs[r0] = rol32(regs[r1], regs[r2] & 31); break; case INDEX_op_rotr_i32: - tci_args_rrr(&tb_ptr, &r0, &r1, &r2); + tci_args_rrr(insn, &r0, &r1, &r2); regs[r0] = ror32(regs[r1], regs[r2] & 31); break; #endif #if TCG_TARGET_HAS_deposit_i32 case INDEX_op_deposit_i32: - tci_args_rrrbb(&tb_ptr, &r0, &r1, &r2, &pos, &len); + tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len); regs[r0] = deposit32(regs[r1], pos, len, regs[r2]); break; #endif +#if TCG_TARGET_HAS_extract_i32 + case INDEX_op_extract_i32: + tci_args_rrbb(insn, &r0, &r1, &pos, &len); + regs[r0] = extract32(regs[r1], pos, len); + break; +#endif +#if TCG_TARGET_HAS_sextract_i32 + case INDEX_op_sextract_i32: + tci_args_rrbb(insn, &r0, &r1, &pos, &len); + regs[r0] = sextract32(regs[r1], pos, len); + break; +#endif case INDEX_op_brcond_i32: - tci_args_rrcl(&tb_ptr, &r0, &r1, &condition, &ptr); - if (tci_compare32(regs[r0], regs[r1], condition)) { + tci_args_rl(insn, tb_ptr, &r0, &ptr); + if ((uint32_t)regs[r0]) { tb_ptr = ptr; } break; -#if TCG_TARGET_REG_BITS == 32 +#if TCG_TARGET_REG_BITS == 32 || TCG_TARGET_HAS_add2_i32 case INDEX_op_add2_i32: - tci_args_rrrrrr(&tb_ptr, &r0, &r1, &r2, &r3, &r4, &r5); + tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5); T1 = tci_uint64(regs[r3], regs[r2]); T2 = tci_uint64(regs[r5], regs[r4]); tci_write_reg64(regs, r1, r0, T1 + T2); break; +#endif +#if TCG_TARGET_REG_BITS == 32 || TCG_TARGET_HAS_sub2_i32 case INDEX_op_sub2_i32: - tci_args_rrrrrr(&tb_ptr, &r0, &r1, &r2, &r3, &r4, &r5); + tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5); T1 = tci_uint64(regs[r3], regs[r2]); T2 = tci_uint64(regs[r5], regs[r4]); tci_write_reg64(regs, r1, r0, T1 - T2); break; - case INDEX_op_brcond2_i32: - tci_args_rrrrcl(&tb_ptr, &r0, &r1, &r2, &r3, &condition, &ptr); - T1 = tci_uint64(regs[r1], 
regs[r0]); - T2 = tci_uint64(regs[r3], regs[r2]); - if (tci_compare64(T1, T2, condition)) { - tb_ptr = ptr; - continue; - } - break; +#endif +#if TCG_TARGET_HAS_mulu2_i32 case INDEX_op_mulu2_i32: - tci_args_rrrr(&tb_ptr, &r0, &r1, &r2, &r3); - tci_write_reg64(regs, r1, r0, (uint64_t)regs[r2] * regs[r3]); + tci_args_rrrr(insn, &r0, &r1, &r2, &r3); + tmp64 = (uint64_t)(uint32_t)regs[r2] * (uint32_t)regs[r3]; + tci_write_reg64(regs, r1, r0, tmp64); break; -#endif /* TCG_TARGET_REG_BITS == 32 */ +#endif +#if TCG_TARGET_HAS_muls2_i32 + case INDEX_op_muls2_i32: + tci_args_rrrr(insn, &r0, &r1, &r2, &r3); + tmp64 = (int64_t)(int32_t)regs[r2] * (int32_t)regs[r3]; + tci_write_reg64(regs, r1, r0, tmp64); + break; +#endif #if TCG_TARGET_HAS_ext8s_i32 || TCG_TARGET_HAS_ext8s_i64 CASE_32_64(ext8s) - tci_args_rr(&tb_ptr, &r0, &r1); + tci_args_rr(insn, &r0, &r1); regs[r0] = (int8_t)regs[r1]; break; #endif #if TCG_TARGET_HAS_ext16s_i32 || TCG_TARGET_HAS_ext16s_i64 CASE_32_64(ext16s) - tci_args_rr(&tb_ptr, &r0, &r1); + tci_args_rr(insn, &r0, &r1); regs[r0] = (int16_t)regs[r1]; break; #endif #if TCG_TARGET_HAS_ext8u_i32 || TCG_TARGET_HAS_ext8u_i64 CASE_32_64(ext8u) - tci_args_rr(&tb_ptr, &r0, &r1); + tci_args_rr(insn, &r0, &r1); regs[r0] = (uint8_t)regs[r1]; break; #endif #if TCG_TARGET_HAS_ext16u_i32 || TCG_TARGET_HAS_ext16u_i64 CASE_32_64(ext16u) - tci_args_rr(&tb_ptr, &r0, &r1); + tci_args_rr(insn, &r0, &r1); regs[r0] = (uint16_t)regs[r1]; break; #endif #if TCG_TARGET_HAS_bswap16_i32 || TCG_TARGET_HAS_bswap16_i64 CASE_32_64(bswap16) - tci_args_rr(&tb_ptr, &r0, &r1); + tci_args_rr(insn, &r0, &r1); regs[r0] = bswap16(regs[r1]); break; #endif #if TCG_TARGET_HAS_bswap32_i32 || TCG_TARGET_HAS_bswap32_i64 CASE_32_64(bswap32) - tci_args_rr(&tb_ptr, &r0, &r1); + tci_args_rr(insn, &r0, &r1); regs[r0] = bswap32(regs[r1]); break; #endif #if TCG_TARGET_HAS_not_i32 || TCG_TARGET_HAS_not_i64 CASE_32_64(not) - tci_args_rr(&tb_ptr, &r0, &r1); + tci_args_rr(insn, &r0, &r1); regs[r0] = ~regs[r1]; break; #endif #if TCG_TARGET_HAS_neg_i32 || TCG_TARGET_HAS_neg_i64 CASE_32_64(neg) - tci_args_rr(&tb_ptr, &r0, &r1); + tci_args_rr(insn, &r0, &r1); regs[r0] = -regs[r1]; break; #endif #if TCG_TARGET_REG_BITS == 64 - case INDEX_op_tci_movi_i64: - tci_args_rI(&tb_ptr, &r0, &t1); - regs[r0] = t1; - break; - /* Load/store operations (64 bit). */ case INDEX_op_ld32s_i64: - tci_args_rrs(&tb_ptr, &r0, &r1, &ofs); + tci_args_rrs(insn, &r0, &r1, &ofs); ptr = (void *)(regs[r1] + ofs); regs[r0] = *(int32_t *)ptr; break; case INDEX_op_ld_i64: - tci_args_rrs(&tb_ptr, &r0, &r1, &ofs); + tci_args_rrs(insn, &r0, &r1, &ofs); ptr = (void *)(regs[r1] + ofs); regs[r0] = *(uint64_t *)ptr; break; case INDEX_op_st_i64: - tci_args_rrs(&tb_ptr, &r0, &r1, &ofs); + tci_args_rrs(insn, &r0, &r1, &ofs); ptr = (void *)(regs[r1] + ofs); *(uint64_t *)ptr = regs[r0]; break; @@ -802,71 +872,131 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, /* Arithmetic operations (64 bit). 
*/ case INDEX_op_div_i64: - tci_args_rrr(&tb_ptr, &r0, &r1, &r2); + tci_args_rrr(insn, &r0, &r1, &r2); regs[r0] = (int64_t)regs[r1] / (int64_t)regs[r2]; break; case INDEX_op_divu_i64: - tci_args_rrr(&tb_ptr, &r0, &r1, &r2); + tci_args_rrr(insn, &r0, &r1, &r2); regs[r0] = (uint64_t)regs[r1] / (uint64_t)regs[r2]; break; case INDEX_op_rem_i64: - tci_args_rrr(&tb_ptr, &r0, &r1, &r2); + tci_args_rrr(insn, &r0, &r1, &r2); regs[r0] = (int64_t)regs[r1] % (int64_t)regs[r2]; break; case INDEX_op_remu_i64: - tci_args_rrr(&tb_ptr, &r0, &r1, &r2); + tci_args_rrr(insn, &r0, &r1, &r2); regs[r0] = (uint64_t)regs[r1] % (uint64_t)regs[r2]; break; +#if TCG_TARGET_HAS_clz_i64 + case INDEX_op_clz_i64: + tci_args_rrr(insn, &r0, &r1, &r2); + regs[r0] = regs[r1] ? clz64(regs[r1]) : regs[r2]; + break; +#endif +#if TCG_TARGET_HAS_ctz_i64 + case INDEX_op_ctz_i64: + tci_args_rrr(insn, &r0, &r1, &r2); + regs[r0] = regs[r1] ? ctz64(regs[r1]) : regs[r2]; + break; +#endif +#if TCG_TARGET_HAS_ctpop_i64 + case INDEX_op_ctpop_i64: + tci_args_rr(insn, &r0, &r1); + regs[r0] = ctpop64(regs[r1]); + break; +#endif +#if TCG_TARGET_HAS_mulu2_i64 + case INDEX_op_mulu2_i64: + tci_args_rrrr(insn, &r0, &r1, &r2, &r3); + mulu64(®s[r0], ®s[r1], regs[r2], regs[r3]); + break; +#endif +#if TCG_TARGET_HAS_muls2_i64 + case INDEX_op_muls2_i64: + tci_args_rrrr(insn, &r0, &r1, &r2, &r3); + muls64(®s[r0], ®s[r1], regs[r2], regs[r3]); + break; +#endif +#if TCG_TARGET_HAS_add2_i64 + case INDEX_op_add2_i64: + tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5); + T1 = regs[r2] + regs[r4]; + T2 = regs[r3] + regs[r5] + (T1 < regs[r2]); + regs[r0] = T1; + regs[r1] = T2; + break; +#endif +#if TCG_TARGET_HAS_add2_i64 + case INDEX_op_sub2_i64: + tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5); + T1 = regs[r2] - regs[r4]; + T2 = regs[r3] - regs[r5] - (regs[r2] < regs[r4]); + regs[r0] = T1; + regs[r1] = T2; + break; +#endif /* Shift/rotate operations (64 bit). 
*/ case INDEX_op_shl_i64: - tci_args_rrr(&tb_ptr, &r0, &r1, &r2); + tci_args_rrr(insn, &r0, &r1, &r2); regs[r0] = regs[r1] << (regs[r2] & 63); break; case INDEX_op_shr_i64: - tci_args_rrr(&tb_ptr, &r0, &r1, &r2); + tci_args_rrr(insn, &r0, &r1, &r2); regs[r0] = regs[r1] >> (regs[r2] & 63); break; case INDEX_op_sar_i64: - tci_args_rrr(&tb_ptr, &r0, &r1, &r2); + tci_args_rrr(insn, &r0, &r1, &r2); regs[r0] = (int64_t)regs[r1] >> (regs[r2] & 63); break; #if TCG_TARGET_HAS_rot_i64 case INDEX_op_rotl_i64: - tci_args_rrr(&tb_ptr, &r0, &r1, &r2); + tci_args_rrr(insn, &r0, &r1, &r2); regs[r0] = rol64(regs[r1], regs[r2] & 63); break; case INDEX_op_rotr_i64: - tci_args_rrr(&tb_ptr, &r0, &r1, &r2); + tci_args_rrr(insn, &r0, &r1, &r2); regs[r0] = ror64(regs[r1], regs[r2] & 63); break; #endif #if TCG_TARGET_HAS_deposit_i64 case INDEX_op_deposit_i64: - tci_args_rrrbb(&tb_ptr, &r0, &r1, &r2, &pos, &len); + tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len); regs[r0] = deposit64(regs[r1], pos, len, regs[r2]); break; #endif +#if TCG_TARGET_HAS_extract_i64 + case INDEX_op_extract_i64: + tci_args_rrbb(insn, &r0, &r1, &pos, &len); + regs[r0] = extract64(regs[r1], pos, len); + break; +#endif +#if TCG_TARGET_HAS_sextract_i64 + case INDEX_op_sextract_i64: + tci_args_rrbb(insn, &r0, &r1, &pos, &len); + regs[r0] = sextract64(regs[r1], pos, len); + break; +#endif case INDEX_op_brcond_i64: - tci_args_rrcl(&tb_ptr, &r0, &r1, &condition, &ptr); - if (tci_compare64(regs[r0], regs[r1], condition)) { + tci_args_rl(insn, tb_ptr, &r0, &ptr); + if (regs[r0]) { tb_ptr = ptr; } break; case INDEX_op_ext32s_i64: case INDEX_op_ext_i32_i64: - tci_args_rr(&tb_ptr, &r0, &r1); + tci_args_rr(insn, &r0, &r1); regs[r0] = (int32_t)regs[r1]; break; case INDEX_op_ext32u_i64: case INDEX_op_extu_i32_i64: - tci_args_rr(&tb_ptr, &r0, &r1); + tci_args_rr(insn, &r0, &r1); regs[r0] = (uint32_t)regs[r1]; break; #if TCG_TARGET_HAS_bswap64_i64 case INDEX_op_bswap64_i64: - tci_args_rr(&tb_ptr, &r0, &r1); + tci_args_rr(insn, &r0, &r1); regs[r0] = bswap64(regs[r1]); break; #endif @@ -875,104 +1005,48 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, /* QEMU specific operations. 
*/ case INDEX_op_exit_tb: - tci_args_l(&tb_ptr, &ptr); + tci_args_l(insn, tb_ptr, &ptr); return (uintptr_t)ptr; case INDEX_op_goto_tb: - tci_args_l(&tb_ptr, &ptr); + tci_args_l(insn, tb_ptr, &ptr); tb_ptr = *(void **)ptr; break; + case INDEX_op_goto_ptr: + tci_args_r(insn, &r0); + ptr = (void *)regs[r0]; + if (!ptr) { + return 0; + } + tb_ptr = ptr; + break; + case INDEX_op_qemu_ld_i32: if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) { - tci_args_rrm(&tb_ptr, &r0, &r1, &oi); + tci_args_rrm(insn, &r0, &r1, &oi); taddr = regs[r1]; } else { - tci_args_rrrm(&tb_ptr, &r0, &r1, &r2, &oi); + tci_args_rrrm(insn, &r0, &r1, &r2, &oi); taddr = tci_uint64(regs[r2], regs[r1]); } - switch (get_memop(oi) & (MO_BSWAP | MO_SSIZE)) { - case MO_UB: - tmp32 = qemu_ld_ub; - break; - case MO_SB: - tmp32 = (int8_t)qemu_ld_ub; - break; - case MO_LEUW: - tmp32 = qemu_ld_leuw; - break; - case MO_LESW: - tmp32 = (int16_t)qemu_ld_leuw; - break; - case MO_LEUL: - tmp32 = qemu_ld_leul; - break; - case MO_BEUW: - tmp32 = qemu_ld_beuw; - break; - case MO_BESW: - tmp32 = (int16_t)qemu_ld_beuw; - break; - case MO_BEUL: - tmp32 = qemu_ld_beul; - break; - default: - g_assert_not_reached(); - } + tmp32 = tci_qemu_ld(env, taddr, oi, tb_ptr); regs[r0] = tmp32; break; case INDEX_op_qemu_ld_i64: if (TCG_TARGET_REG_BITS == 64) { - tci_args_rrm(&tb_ptr, &r0, &r1, &oi); + tci_args_rrm(insn, &r0, &r1, &oi); taddr = regs[r1]; } else if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) { - tci_args_rrrm(&tb_ptr, &r0, &r1, &r2, &oi); + tci_args_rrrm(insn, &r0, &r1, &r2, &oi); taddr = regs[r2]; } else { - tci_args_rrrrm(&tb_ptr, &r0, &r1, &r2, &r3, &oi); + tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4); taddr = tci_uint64(regs[r3], regs[r2]); + oi = regs[r4]; } - switch (get_memop(oi) & (MO_BSWAP | MO_SSIZE)) { - case MO_UB: - tmp64 = qemu_ld_ub; - break; - case MO_SB: - tmp64 = (int8_t)qemu_ld_ub; - break; - case MO_LEUW: - tmp64 = qemu_ld_leuw; - break; - case MO_LESW: - tmp64 = (int16_t)qemu_ld_leuw; - break; - case MO_LEUL: - tmp64 = qemu_ld_leul; - break; - case MO_LESL: - tmp64 = (int32_t)qemu_ld_leul; - break; - case MO_LEQ: - tmp64 = qemu_ld_leq; - break; - case MO_BEUW: - tmp64 = qemu_ld_beuw; - break; - case MO_BESW: - tmp64 = (int16_t)qemu_ld_beuw; - break; - case MO_BEUL: - tmp64 = qemu_ld_beul; - break; - case MO_BESL: - tmp64 = (int32_t)qemu_ld_beul; - break; - case MO_BEQ: - tmp64 = qemu_ld_beq; - break; - default: - g_assert_not_reached(); - } + tmp64 = tci_qemu_ld(env, taddr, oi, tb_ptr); if (TCG_TARGET_REG_BITS == 32) { tci_write_reg64(regs, r1, r0, tmp64); } else { @@ -982,74 +1056,33 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, case INDEX_op_qemu_st_i32: if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) { - tci_args_rrm(&tb_ptr, &r0, &r1, &oi); + tci_args_rrm(insn, &r0, &r1, &oi); taddr = regs[r1]; } else { - tci_args_rrrm(&tb_ptr, &r0, &r1, &r2, &oi); + tci_args_rrrm(insn, &r0, &r1, &r2, &oi); taddr = tci_uint64(regs[r2], regs[r1]); } tmp32 = regs[r0]; - switch (get_memop(oi) & (MO_BSWAP | MO_SIZE)) { - case MO_UB: - qemu_st_b(tmp32); - break; - case MO_LEUW: - qemu_st_lew(tmp32); - break; - case MO_LEUL: - qemu_st_lel(tmp32); - break; - case MO_BEUW: - qemu_st_bew(tmp32); - break; - case MO_BEUL: - qemu_st_bel(tmp32); - break; - default: - g_assert_not_reached(); - } + tci_qemu_st(env, taddr, tmp32, oi, tb_ptr); break; case INDEX_op_qemu_st_i64: if (TCG_TARGET_REG_BITS == 64) { - tci_args_rrm(&tb_ptr, &r0, &r1, &oi); + tci_args_rrm(insn, &r0, &r1, &oi); taddr = regs[r1]; tmp64 = regs[r0]; } else { if 
(TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) { - tci_args_rrrm(&tb_ptr, &r0, &r1, &r2, &oi); + tci_args_rrrm(insn, &r0, &r1, &r2, &oi); taddr = regs[r2]; } else { - tci_args_rrrrm(&tb_ptr, &r0, &r1, &r2, &r3, &oi); + tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4); taddr = tci_uint64(regs[r3], regs[r2]); + oi = regs[r4]; } tmp64 = tci_uint64(regs[r1], regs[r0]); } - switch (get_memop(oi) & (MO_BSWAP | MO_SIZE)) { - case MO_UB: - qemu_st_b(tmp64); - break; - case MO_LEUW: - qemu_st_lew(tmp64); - break; - case MO_LEUL: - qemu_st_lel(tmp64); - break; - case MO_LEQ: - qemu_st_leq(tmp64); - break; - case MO_BEUW: - qemu_st_bew(tmp64); - break; - case MO_BEUL: - qemu_st_bel(tmp64); - break; - case MO_BEQ: - qemu_st_beq(tmp64); - break; - default: - g_assert_not_reached(); - } + tci_qemu_st(env, taddr, tmp64, oi, tb_ptr); break; case INDEX_op_mb: @@ -1105,82 +1138,71 @@ static const char *str_c(TCGCond c) /* Disassemble TCI bytecode. */ int print_insn_tci(bfd_vma addr, disassemble_info *info) { - uint8_t buf[256]; - int length, status; + const uint32_t *tb_ptr = (const void *)(uintptr_t)addr; const TCGOpDef *def; const char *op_name; + uint32_t insn; TCGOpcode op; - TCGReg r0, r1, r2, r3; -#if TCG_TARGET_REG_BITS == 32 - TCGReg r4, r5; -#endif + TCGReg r0, r1, r2, r3, r4, r5; tcg_target_ulong i1; int32_t s2; TCGCond c; TCGMemOpIdx oi; uint8_t pos, len; void *ptr; - const uint8_t *tb_ptr; - - status = info->read_memory_func(addr, buf, 2, info); - if (status != 0) { - info->memory_error_func(status, addr, info); - return -1; - } - op = buf[0]; - length = buf[1]; - if (length < 2) { - info->fprintf_func(info->stream, "invalid length %d", length); - return 1; - } + /* TCI is always the host, so we don't need to load indirect. */ + insn = *tb_ptr++; - status = info->read_memory_func(addr + 2, buf + 2, length - 2, info); - if (status != 0) { - info->memory_error_func(status, addr + 2, info); - return -1; - } + info->fprintf_func(info->stream, "%08x ", insn); + op = extract32(insn, 0, 8); def = &tcg_op_defs[op]; op_name = def->name; - tb_ptr = buf + 2; switch (op) { case INDEX_op_br: - case INDEX_op_call: case INDEX_op_exit_tb: case INDEX_op_goto_tb: - tci_args_l(&tb_ptr, &ptr); + tci_args_l(insn, tb_ptr, &ptr); info->fprintf_func(info->stream, "%-12s %p", op_name, ptr); break; + case INDEX_op_goto_ptr: + tci_args_r(insn, &r0); + info->fprintf_func(info->stream, "%-12s %s", op_name, str_r(r0)); + break; + + case INDEX_op_call: + tci_args_nl(insn, tb_ptr, &len, &ptr); + info->fprintf_func(info->stream, "%-12s %d, %p", op_name, len, ptr); + break; + case INDEX_op_brcond_i32: case INDEX_op_brcond_i64: - tci_args_rrcl(&tb_ptr, &r0, &r1, &c, &ptr); - info->fprintf_func(info->stream, "%-12s %s, %s, %s, %p", - op_name, str_r(r0), str_r(r1), str_c(c), ptr); + tci_args_rl(insn, tb_ptr, &r0, &ptr); + info->fprintf_func(info->stream, "%-12s %s, 0, ne, %p", + op_name, str_r(r0), ptr); break; case INDEX_op_setcond_i32: case INDEX_op_setcond_i64: - tci_args_rrrc(&tb_ptr, &r0, &r1, &r2, &c); + tci_args_rrrc(insn, &r0, &r1, &r2, &c); info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s", op_name, str_r(r0), str_r(r1), str_r(r2), str_c(c)); break; - case INDEX_op_tci_movi_i32: - tci_args_ri(&tb_ptr, &r0, &i1); + case INDEX_op_tci_movi: + tci_args_ri(insn, &r0, &i1); info->fprintf_func(info->stream, "%-12s %s, 0x%" TCG_PRIlx, op_name, str_r(r0), i1); break; -#if TCG_TARGET_REG_BITS == 64 - case INDEX_op_tci_movi_i64: - tci_args_rI(&tb_ptr, &r0, &i1); - info->fprintf_func(info->stream, "%-12s %s, 0x%" TCG_PRIlx, - op_name, 
str_r(r0), i1); + case INDEX_op_tci_movl: + tci_args_rl(insn, tb_ptr, &r0, &ptr); + info->fprintf_func(info->stream, "%-12s %s, %p", + op_name, str_r(r0), ptr); break; -#endif case INDEX_op_ld8u_i32: case INDEX_op_ld8u_i64: @@ -1201,7 +1223,7 @@ int print_insn_tci(bfd_vma addr, disassemble_info *info) case INDEX_op_st32_i64: case INDEX_op_st_i32: case INDEX_op_st_i64: - tci_args_rrs(&tb_ptr, &r0, &r1, &s2); + tci_args_rrs(insn, &r0, &r1, &s2); info->fprintf_func(info->stream, "%-12s %s, %s, %d", op_name, str_r(r0), str_r(r1), s2); break; @@ -1228,7 +1250,9 @@ int print_insn_tci(bfd_vma addr, disassemble_info *info) case INDEX_op_not_i64: case INDEX_op_neg_i32: case INDEX_op_neg_i64: - tci_args_rr(&tb_ptr, &r0, &r1); + case INDEX_op_ctpop_i32: + case INDEX_op_ctpop_i64: + tci_args_rr(insn, &r0, &r1); info->fprintf_func(info->stream, "%-12s %s, %s", op_name, str_r(r0), str_r(r1)); break; @@ -1245,6 +1269,16 @@ int print_insn_tci(bfd_vma addr, disassemble_info *info) case INDEX_op_or_i64: case INDEX_op_xor_i32: case INDEX_op_xor_i64: + case INDEX_op_andc_i32: + case INDEX_op_andc_i64: + case INDEX_op_orc_i32: + case INDEX_op_orc_i64: + case INDEX_op_eqv_i32: + case INDEX_op_eqv_i64: + case INDEX_op_nand_i32: + case INDEX_op_nand_i64: + case INDEX_op_nor_i32: + case INDEX_op_nor_i64: case INDEX_op_div_i32: case INDEX_op_div_i64: case INDEX_op_rem_i32: @@ -1263,48 +1297,59 @@ int print_insn_tci(bfd_vma addr, disassemble_info *info) case INDEX_op_rotl_i64: case INDEX_op_rotr_i32: case INDEX_op_rotr_i64: - tci_args_rrr(&tb_ptr, &r0, &r1, &r2); + case INDEX_op_clz_i32: + case INDEX_op_clz_i64: + case INDEX_op_ctz_i32: + case INDEX_op_ctz_i64: + tci_args_rrr(insn, &r0, &r1, &r2); info->fprintf_func(info->stream, "%-12s %s, %s, %s", op_name, str_r(r0), str_r(r1), str_r(r2)); break; case INDEX_op_deposit_i32: case INDEX_op_deposit_i64: - tci_args_rrrbb(&tb_ptr, &r0, &r1, &r2, &pos, &len); + tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len); info->fprintf_func(info->stream, "%-12s %s, %s, %s, %d, %d", op_name, str_r(r0), str_r(r1), str_r(r2), pos, len); break; -#if TCG_TARGET_REG_BITS == 32 + case INDEX_op_extract_i32: + case INDEX_op_extract_i64: + case INDEX_op_sextract_i32: + case INDEX_op_sextract_i64: + tci_args_rrbb(insn, &r0, &r1, &pos, &len); + info->fprintf_func(info->stream, "%-12s %s,%s,%d,%d", + op_name, str_r(r0), str_r(r1), pos, len); + break; + + case INDEX_op_movcond_i32: + case INDEX_op_movcond_i64: case INDEX_op_setcond2_i32: - tci_args_rrrrrc(&tb_ptr, &r0, &r1, &r2, &r3, &r4, &c); + tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &c); info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s, %s", op_name, str_r(r0), str_r(r1), str_r(r2), str_r(r3), str_r(r4), str_c(c)); break; - case INDEX_op_brcond2_i32: - tci_args_rrrrcl(&tb_ptr, &r0, &r1, &r2, &r3, &c, &ptr); - info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s, %p", - op_name, str_r(r0), str_r(r1), - str_r(r2), str_r(r3), str_c(c), ptr); - break; - case INDEX_op_mulu2_i32: - tci_args_rrrr(&tb_ptr, &r0, &r1, &r2, &r3); + case INDEX_op_mulu2_i64: + case INDEX_op_muls2_i32: + case INDEX_op_muls2_i64: + tci_args_rrrr(insn, &r0, &r1, &r2, &r3); info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s", op_name, str_r(r0), str_r(r1), str_r(r2), str_r(r3)); break; case INDEX_op_add2_i32: + case INDEX_op_add2_i64: case INDEX_op_sub2_i32: - tci_args_rrrrrr(&tb_ptr, &r0, &r1, &r2, &r3, &r4, &r5); + case INDEX_op_sub2_i64: + tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5); info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, 
%s, %s", op_name, str_r(r0), str_r(r1), str_r(r2), str_r(r3), str_r(r4), str_r(r5)); break; -#endif case INDEX_op_qemu_ld_i64: case INDEX_op_qemu_st_i64: @@ -1317,30 +1362,38 @@ int print_insn_tci(bfd_vma addr, disassemble_info *info) len += DIV_ROUND_UP(TARGET_LONG_BITS, TCG_TARGET_REG_BITS); switch (len) { case 2: - tci_args_rrm(&tb_ptr, &r0, &r1, &oi); + tci_args_rrm(insn, &r0, &r1, &oi); info->fprintf_func(info->stream, "%-12s %s, %s, %x", op_name, str_r(r0), str_r(r1), oi); break; case 3: - tci_args_rrrm(&tb_ptr, &r0, &r1, &r2, &oi); + tci_args_rrrm(insn, &r0, &r1, &r2, &oi); info->fprintf_func(info->stream, "%-12s %s, %s, %s, %x", op_name, str_r(r0), str_r(r1), str_r(r2), oi); break; case 4: - tci_args_rrrrm(&tb_ptr, &r0, &r1, &r2, &r3, &oi); - info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %x", + tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4); + info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s", op_name, str_r(r0), str_r(r1), - str_r(r2), str_r(r3), oi); + str_r(r2), str_r(r3), str_r(r4)); break; default: g_assert_not_reached(); } break; + case 0: + /* tcg_out_nop_fill uses zeros */ + if (insn == 0) { + info->fprintf_func(info->stream, "align"); + break; + } + /* fall through */ + default: info->fprintf_func(info->stream, "illegal opcode %d", op); break; } - return length; + return sizeof(insn); } diff --git a/tcg/tci/README b/tcg/tci/README index 9bb7d7a5d3..f72a40a395 100644 --- a/tcg/tci/README +++ b/tcg/tci/README @@ -23,10 +23,12 @@ This is what TCI (Tiny Code Interpreter) does. Like each TCG host frontend, TCI implements the code generator in tcg-target.c.inc, tcg-target.h. Both files are in directory tcg/tci. -The additional file tcg/tci.c adds the interpreter. +The additional file tcg/tci.c adds the interpreter and disassembler. -The bytecode consists of opcodes (same numeric values as those used by -TCG), command length and arguments of variable size and number. +The bytecode consists of opcodes (with only a few exceptions, with +the same same numeric values and semantics as used by TCG), and up +to six arguments packed into a 32-bit integer. See comments in tci.c +for details on the encoding. 3) Usage @@ -39,11 +41,6 @@ suggest using this option. Setting it automatically would need additional code in configure which must be fixed when new native TCG implementations are added. -System emulation should work on any 32 or 64 bit host. -User mode emulation might work. Maybe a new linker script (*.ld) -is needed. Byte order might be wrong (on big endian hosts) -and need fixes in configure. - For hosts with native TCG, the interpreter TCI can be enabled by configure --enable-tcg-interpreter @@ -118,13 +115,6 @@ u1 = linux-user-test works in the interpreter. These opcodes raise a runtime exception, so it is possible to see where code must be added. -* The pseudo code is not optimized and still ugly. For hosts with special - alignment requirements, it needs some fixes (maybe aligned bytecode - would also improve speed for hosts which support byte alignment). - -* A better disassembler for the pseudo code would be nice (a very primitive - disassembler is included in tcg-target.c.inc). - * It might be useful to have a runtime option which selects the native TCG or TCI, so QEMU would have to include two TCGs. Today, selecting TCI is a configure option, so you need two compilations of QEMU. 
diff --git a/tcg/tci/tcg-target-con-set.h b/tcg/tci/tcg-target-con-set.h index 316730f32c..ae2dc3b844 100644 --- a/tcg/tci/tcg-target-con-set.h +++ b/tcg/tci/tcg-target-con-set.h @@ -9,6 +9,7 @@ * Each operand should be a sequence of constraint letters as defined by * tcg-target-con-str.h; the constraint combination is inclusive or. */ +C_O0_I1(r) C_O0_I2(r, r) C_O0_I3(r, r, r) C_O0_I4(r, r, r, r) diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc index 823ecd5d35..9651e7a8f1 100644 --- a/tcg/tci/tcg-target.c.inc +++ b/tcg/tci/tcg-target.c.inc @@ -22,24 +22,14 @@ * THE SOFTWARE. */ -/* TODO list: - * - See TODO comments in code. - */ - -/* Marker for missing code. */ -#define TODO() \ - do { \ - fprintf(stderr, "TODO %s:%u: %s()\n", \ - __FILE__, __LINE__, __func__); \ - tcg_abort(); \ - } while (0) - -/* Bitfield n...m (in 32 bit value). */ -#define BITS(n, m) (((0xffffffffU << (31 - n)) >> (31 - n + m)) << m) +#include "../tcg-pool.c.inc" static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) { switch (op) { + case INDEX_op_goto_ptr: + return C_O0_I1(r); + case INDEX_op_ld8u_i32: case INDEX_op_ld8s_i32: case INDEX_op_ld16u_i32: @@ -73,6 +63,12 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) case INDEX_op_bswap32_i32: case INDEX_op_bswap32_i64: case INDEX_op_bswap64_i64: + case INDEX_op_extract_i32: + case INDEX_op_extract_i64: + case INDEX_op_sextract_i32: + case INDEX_op_sextract_i64: + case INDEX_op_ctpop_i32: + case INDEX_op_ctpop_i64: return C_O1_I1(r, r); case INDEX_op_st8_i32: @@ -128,24 +124,37 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) case INDEX_op_setcond_i64: case INDEX_op_deposit_i32: case INDEX_op_deposit_i64: + case INDEX_op_clz_i32: + case INDEX_op_clz_i64: + case INDEX_op_ctz_i32: + case INDEX_op_ctz_i64: return C_O1_I2(r, r, r); case INDEX_op_brcond_i32: case INDEX_op_brcond_i64: return C_O0_I2(r, r); -#if TCG_TARGET_REG_BITS == 32 - /* TODO: Support R, R, R, R, RI, RI? Will it be faster? */ case INDEX_op_add2_i32: + case INDEX_op_add2_i64: case INDEX_op_sub2_i32: + case INDEX_op_sub2_i64: return C_O2_I4(r, r, r, r, r, r); + +#if TCG_TARGET_REG_BITS == 32 case INDEX_op_brcond2_i32: return C_O0_I4(r, r, r, r); +#endif + case INDEX_op_mulu2_i32: + case INDEX_op_mulu2_i64: + case INDEX_op_muls2_i32: + case INDEX_op_muls2_i64: return C_O2_I2(r, r, r, r); + + case INDEX_op_movcond_i32: + case INDEX_op_movcond_i64: case INDEX_op_setcond2_i32: return C_O1_I4(r, r, r, r, r); -#endif case INDEX_op_qemu_ld_i32: return (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS @@ -170,8 +179,6 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) } static const int tcg_target_reg_alloc_order[] = { - TCG_REG_R0, - TCG_REG_R1, TCG_REG_R2, TCG_REG_R3, TCG_REG_R4, @@ -186,29 +193,16 @@ static const int tcg_target_reg_alloc_order[] = { TCG_REG_R13, TCG_REG_R14, TCG_REG_R15, + TCG_REG_R1, + TCG_REG_R0, }; #if MAX_OPC_PARAM_IARGS != 6 # error Fix needed, number of supported input arguments changed! #endif -static const int tcg_target_call_iarg_regs[] = { - TCG_REG_R0, - TCG_REG_R1, - TCG_REG_R2, - TCG_REG_R3, - TCG_REG_R4, - TCG_REG_R5, -#if TCG_TARGET_REG_BITS == 32 - /* 32 bit hosts need 2 * MAX_OPC_PARAM_IARGS registers. */ - TCG_REG_R6, - TCG_REG_R7, - TCG_REG_R8, - TCG_REG_R9, - TCG_REG_R10, - TCG_REG_R11, -#endif -}; +/* No call arguments via registers. All will be stored on the "stack". 
*/ +static const int tcg_target_call_iarg_regs[] = { }; static const int tcg_target_call_oarg_regs[] = { TCG_REG_R0, @@ -241,317 +235,281 @@ static const char *const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { static bool patch_reloc(tcg_insn_unit *code_ptr, int type, intptr_t value, intptr_t addend) { - /* tcg_out_reloc always uses the same type, addend. */ - tcg_debug_assert(type == sizeof(tcg_target_long)); - tcg_debug_assert(addend == 0); - tcg_debug_assert(value != 0); - if (TCG_TARGET_REG_BITS == 32) { - tcg_patch32(code_ptr, value); - } else { - tcg_patch64(code_ptr, value); - } - return true; -} + intptr_t diff = value - (intptr_t)(code_ptr + 1); -/* Write value (native size). */ -static void tcg_out_i(TCGContext *s, tcg_target_ulong v) -{ - if (TCG_TARGET_REG_BITS == 32) { - tcg_out32(s, v); - } else { - tcg_out64(s, v); - } -} - -/* Write opcode. */ -static void tcg_out_op_t(TCGContext *s, TCGOpcode op) -{ - tcg_out8(s, op); - tcg_out8(s, 0); -} - -/* Write register. */ -static void tcg_out_r(TCGContext *s, TCGArg t0) -{ - tcg_debug_assert(t0 < TCG_TARGET_NB_REGS); - tcg_out8(s, t0); -} + tcg_debug_assert(addend == 0); + tcg_debug_assert(type == 20); -/* Write label. */ -static void tci_out_label(TCGContext *s, TCGLabel *label) -{ - if (label->has_value) { - tcg_out_i(s, label->u.value); - tcg_debug_assert(label->u.value); - } else { - tcg_out_reloc(s, s->code_ptr, sizeof(tcg_target_ulong), label, 0); - s->code_ptr += sizeof(tcg_target_ulong); + if (diff == sextract32(diff, 0, type)) { + tcg_patch32(code_ptr, deposit32(*code_ptr, 32 - type, type, diff)); + return true; } + return false; } static void stack_bounds_check(TCGReg base, target_long offset) { if (base == TCG_REG_CALL_STACK) { - tcg_debug_assert(offset < 0); - tcg_debug_assert(offset >= -(CPU_TEMP_BUF_NLONGS * sizeof(long))); + tcg_debug_assert(offset >= 0); + tcg_debug_assert(offset < (TCG_STATIC_CALL_ARGS_SIZE + + TCG_STATIC_FRAME_SIZE)); } } static void tcg_out_op_l(TCGContext *s, TCGOpcode op, TCGLabel *l0) { - uint8_t *old_code_ptr = s->code_ptr; + tcg_insn_unit insn = 0; - tcg_out_op_t(s, op); - tci_out_label(s, l0); - - old_code_ptr[1] = s->code_ptr - old_code_ptr; + tcg_out_reloc(s, s->code_ptr, 20, l0, 0); + insn = deposit32(insn, 0, 8, op); + tcg_out32(s, insn); } static void tcg_out_op_p(TCGContext *s, TCGOpcode op, void *p0) { - uint8_t *old_code_ptr = s->code_ptr; - - tcg_out_op_t(s, op); - tcg_out_i(s, (uintptr_t)p0); + tcg_insn_unit insn = 0; + intptr_t diff; - old_code_ptr[1] = s->code_ptr - old_code_ptr; + /* Special case for exit_tb: map null -> 0. 
*/ + if (p0 == NULL) { + diff = 0; + } else { + diff = p0 - (void *)(s->code_ptr + 1); + tcg_debug_assert(diff != 0); + if (diff != sextract32(diff, 0, 20)) { + tcg_raise_tb_overflow(s); + } + } + insn = deposit32(insn, 0, 8, op); + insn = deposit32(insn, 12, 20, diff); + tcg_out32(s, insn); } -static void tcg_out_op_v(TCGContext *s, TCGOpcode op) +static void tcg_out_op_r(TCGContext *s, TCGOpcode op, TCGReg r0) { - uint8_t *old_code_ptr = s->code_ptr; + tcg_insn_unit insn = 0; - tcg_out_op_t(s, op); + insn = deposit32(insn, 0, 8, op); + insn = deposit32(insn, 8, 4, r0); + tcg_out32(s, insn); +} - old_code_ptr[1] = s->code_ptr - old_code_ptr; +static void tcg_out_op_v(TCGContext *s, TCGOpcode op) +{ + tcg_out32(s, (uint8_t)op); } static void tcg_out_op_ri(TCGContext *s, TCGOpcode op, TCGReg r0, int32_t i1) { - uint8_t *old_code_ptr = s->code_ptr; - - tcg_out_op_t(s, op); - tcg_out_r(s, r0); - tcg_out32(s, i1); + tcg_insn_unit insn = 0; - old_code_ptr[1] = s->code_ptr - old_code_ptr; + tcg_debug_assert(i1 == sextract32(i1, 0, 20)); + insn = deposit32(insn, 0, 8, op); + insn = deposit32(insn, 8, 4, r0); + insn = deposit32(insn, 12, 20, i1); + tcg_out32(s, insn); } -#if TCG_TARGET_REG_BITS == 64 -static void tcg_out_op_rI(TCGContext *s, TCGOpcode op, - TCGReg r0, uint64_t i1) +static void tcg_out_op_rl(TCGContext *s, TCGOpcode op, TCGReg r0, TCGLabel *l1) { - uint8_t *old_code_ptr = s->code_ptr; + tcg_insn_unit insn = 0; - tcg_out_op_t(s, op); - tcg_out_r(s, r0); - tcg_out64(s, i1); - - old_code_ptr[1] = s->code_ptr - old_code_ptr; + tcg_out_reloc(s, s->code_ptr, 20, l1, 0); + insn = deposit32(insn, 0, 8, op); + insn = deposit32(insn, 8, 4, r0); + tcg_out32(s, insn); } -#endif static void tcg_out_op_rr(TCGContext *s, TCGOpcode op, TCGReg r0, TCGReg r1) { - uint8_t *old_code_ptr = s->code_ptr; + tcg_insn_unit insn = 0; - tcg_out_op_t(s, op); - tcg_out_r(s, r0); - tcg_out_r(s, r1); - - old_code_ptr[1] = s->code_ptr - old_code_ptr; + insn = deposit32(insn, 0, 8, op); + insn = deposit32(insn, 8, 4, r0); + insn = deposit32(insn, 12, 4, r1); + tcg_out32(s, insn); } static void tcg_out_op_rrm(TCGContext *s, TCGOpcode op, TCGReg r0, TCGReg r1, TCGArg m2) { - uint8_t *old_code_ptr = s->code_ptr; - - tcg_out_op_t(s, op); - tcg_out_r(s, r0); - tcg_out_r(s, r1); - tcg_out32(s, m2); + tcg_insn_unit insn = 0; - old_code_ptr[1] = s->code_ptr - old_code_ptr; + tcg_debug_assert(m2 == extract32(m2, 0, 12)); + insn = deposit32(insn, 0, 8, op); + insn = deposit32(insn, 8, 4, r0); + insn = deposit32(insn, 12, 4, r1); + insn = deposit32(insn, 20, 12, m2); + tcg_out32(s, insn); } static void tcg_out_op_rrr(TCGContext *s, TCGOpcode op, TCGReg r0, TCGReg r1, TCGReg r2) { - uint8_t *old_code_ptr = s->code_ptr; + tcg_insn_unit insn = 0; - tcg_out_op_t(s, op); - tcg_out_r(s, r0); - tcg_out_r(s, r1); - tcg_out_r(s, r2); - - old_code_ptr[1] = s->code_ptr - old_code_ptr; + insn = deposit32(insn, 0, 8, op); + insn = deposit32(insn, 8, 4, r0); + insn = deposit32(insn, 12, 4, r1); + insn = deposit32(insn, 16, 4, r2); + tcg_out32(s, insn); } static void tcg_out_op_rrs(TCGContext *s, TCGOpcode op, TCGReg r0, TCGReg r1, intptr_t i2) { - uint8_t *old_code_ptr = s->code_ptr; - - tcg_out_op_t(s, op); - tcg_out_r(s, r0); - tcg_out_r(s, r1); - tcg_debug_assert(i2 == (int32_t)i2); - tcg_out32(s, i2); + tcg_insn_unit insn = 0; - old_code_ptr[1] = s->code_ptr - old_code_ptr; + tcg_debug_assert(i2 == sextract32(i2, 0, 16)); + insn = deposit32(insn, 0, 8, op); + insn = deposit32(insn, 8, 4, r0); + insn = deposit32(insn, 12, 4, r1); + insn 
= deposit32(insn, 16, 16, i2); + tcg_out32(s, insn); } -static void tcg_out_op_rrcl(TCGContext *s, TCGOpcode op, - TCGReg r0, TCGReg r1, TCGCond c2, TCGLabel *l3) +static void tcg_out_op_rrbb(TCGContext *s, TCGOpcode op, TCGReg r0, + TCGReg r1, uint8_t b2, uint8_t b3) { - uint8_t *old_code_ptr = s->code_ptr; + tcg_insn_unit insn = 0; - tcg_out_op_t(s, op); - tcg_out_r(s, r0); - tcg_out_r(s, r1); - tcg_out8(s, c2); - tci_out_label(s, l3); - - old_code_ptr[1] = s->code_ptr - old_code_ptr; + tcg_debug_assert(b2 == extract32(b2, 0, 6)); + tcg_debug_assert(b3 == extract32(b3, 0, 6)); + insn = deposit32(insn, 0, 8, op); + insn = deposit32(insn, 8, 4, r0); + insn = deposit32(insn, 12, 4, r1); + insn = deposit32(insn, 16, 6, b2); + insn = deposit32(insn, 22, 6, b3); + tcg_out32(s, insn); } static void tcg_out_op_rrrc(TCGContext *s, TCGOpcode op, TCGReg r0, TCGReg r1, TCGReg r2, TCGCond c3) { - uint8_t *old_code_ptr = s->code_ptr; - - tcg_out_op_t(s, op); - tcg_out_r(s, r0); - tcg_out_r(s, r1); - tcg_out_r(s, r2); - tcg_out8(s, c3); + tcg_insn_unit insn = 0; - old_code_ptr[1] = s->code_ptr - old_code_ptr; + insn = deposit32(insn, 0, 8, op); + insn = deposit32(insn, 8, 4, r0); + insn = deposit32(insn, 12, 4, r1); + insn = deposit32(insn, 16, 4, r2); + insn = deposit32(insn, 20, 4, c3); + tcg_out32(s, insn); } static void tcg_out_op_rrrm(TCGContext *s, TCGOpcode op, TCGReg r0, TCGReg r1, TCGReg r2, TCGArg m3) { - uint8_t *old_code_ptr = s->code_ptr; + tcg_insn_unit insn = 0; - tcg_out_op_t(s, op); - tcg_out_r(s, r0); - tcg_out_r(s, r1); - tcg_out_r(s, r2); - tcg_out32(s, m3); - - old_code_ptr[1] = s->code_ptr - old_code_ptr; + tcg_debug_assert(m3 == extract32(m3, 0, 12)); + insn = deposit32(insn, 0, 8, op); + insn = deposit32(insn, 8, 4, r0); + insn = deposit32(insn, 12, 4, r1); + insn = deposit32(insn, 16, 4, r2); + insn = deposit32(insn, 20, 12, m3); + tcg_out32(s, insn); } static void tcg_out_op_rrrbb(TCGContext *s, TCGOpcode op, TCGReg r0, TCGReg r1, TCGReg r2, uint8_t b3, uint8_t b4) { - uint8_t *old_code_ptr = s->code_ptr; - - tcg_out_op_t(s, op); - tcg_out_r(s, r0); - tcg_out_r(s, r1); - tcg_out_r(s, r2); - tcg_out8(s, b3); - tcg_out8(s, b4); + tcg_insn_unit insn = 0; - old_code_ptr[1] = s->code_ptr - old_code_ptr; + tcg_debug_assert(b3 == extract32(b3, 0, 6)); + tcg_debug_assert(b4 == extract32(b4, 0, 6)); + insn = deposit32(insn, 0, 8, op); + insn = deposit32(insn, 8, 4, r0); + insn = deposit32(insn, 12, 4, r1); + insn = deposit32(insn, 16, 4, r2); + insn = deposit32(insn, 20, 6, b3); + insn = deposit32(insn, 26, 6, b4); + tcg_out32(s, insn); } -static void tcg_out_op_rrrrm(TCGContext *s, TCGOpcode op, TCGReg r0, - TCGReg r1, TCGReg r2, TCGReg r3, TCGArg m4) +static void tcg_out_op_rrrrr(TCGContext *s, TCGOpcode op, TCGReg r0, + TCGReg r1, TCGReg r2, TCGReg r3, TCGReg r4) { - uint8_t *old_code_ptr = s->code_ptr; + tcg_insn_unit insn = 0; - tcg_out_op_t(s, op); - tcg_out_r(s, r0); - tcg_out_r(s, r1); - tcg_out_r(s, r2); - tcg_out_r(s, r3); - tcg_out32(s, m4); - - old_code_ptr[1] = s->code_ptr - old_code_ptr; + insn = deposit32(insn, 0, 8, op); + insn = deposit32(insn, 8, 4, r0); + insn = deposit32(insn, 12, 4, r1); + insn = deposit32(insn, 16, 4, r2); + insn = deposit32(insn, 20, 4, r3); + insn = deposit32(insn, 24, 4, r4); + tcg_out32(s, insn); } -#if TCG_TARGET_REG_BITS == 32 static void tcg_out_op_rrrr(TCGContext *s, TCGOpcode op, TCGReg r0, TCGReg r1, TCGReg r2, TCGReg r3) { - uint8_t *old_code_ptr = s->code_ptr; - - tcg_out_op_t(s, op); - tcg_out_r(s, r0); - tcg_out_r(s, r1); - 
tcg_out_r(s, r2); - tcg_out_r(s, r3); - - old_code_ptr[1] = s->code_ptr - old_code_ptr; -} - -static void tcg_out_op_rrrrcl(TCGContext *s, TCGOpcode op, - TCGReg r0, TCGReg r1, TCGReg r2, TCGReg r3, - TCGCond c4, TCGLabel *l5) -{ - uint8_t *old_code_ptr = s->code_ptr; + tcg_insn_unit insn = 0; - tcg_out_op_t(s, op); - tcg_out_r(s, r0); - tcg_out_r(s, r1); - tcg_out_r(s, r2); - tcg_out_r(s, r3); - tcg_out8(s, c4); - tci_out_label(s, l5); - - old_code_ptr[1] = s->code_ptr - old_code_ptr; + insn = deposit32(insn, 0, 8, op); + insn = deposit32(insn, 8, 4, r0); + insn = deposit32(insn, 12, 4, r1); + insn = deposit32(insn, 16, 4, r2); + insn = deposit32(insn, 20, 4, r3); + tcg_out32(s, insn); } static void tcg_out_op_rrrrrc(TCGContext *s, TCGOpcode op, TCGReg r0, TCGReg r1, TCGReg r2, TCGReg r3, TCGReg r4, TCGCond c5) { - uint8_t *old_code_ptr = s->code_ptr; - - tcg_out_op_t(s, op); - tcg_out_r(s, r0); - tcg_out_r(s, r1); - tcg_out_r(s, r2); - tcg_out_r(s, r3); - tcg_out_r(s, r4); - tcg_out8(s, c5); + tcg_insn_unit insn = 0; - old_code_ptr[1] = s->code_ptr - old_code_ptr; + insn = deposit32(insn, 0, 8, op); + insn = deposit32(insn, 8, 4, r0); + insn = deposit32(insn, 12, 4, r1); + insn = deposit32(insn, 16, 4, r2); + insn = deposit32(insn, 20, 4, r3); + insn = deposit32(insn, 24, 4, r4); + insn = deposit32(insn, 28, 4, c5); + tcg_out32(s, insn); } static void tcg_out_op_rrrrrr(TCGContext *s, TCGOpcode op, TCGReg r0, TCGReg r1, TCGReg r2, TCGReg r3, TCGReg r4, TCGReg r5) { - uint8_t *old_code_ptr = s->code_ptr; + tcg_insn_unit insn = 0; - tcg_out_op_t(s, op); - tcg_out_r(s, r0); - tcg_out_r(s, r1); - tcg_out_r(s, r2); - tcg_out_r(s, r3); - tcg_out_r(s, r4); - tcg_out_r(s, r5); + insn = deposit32(insn, 0, 8, op); + insn = deposit32(insn, 8, 4, r0); + insn = deposit32(insn, 12, 4, r1); + insn = deposit32(insn, 16, 4, r2); + insn = deposit32(insn, 20, 4, r3); + insn = deposit32(insn, 24, 4, r4); + insn = deposit32(insn, 28, 4, r5); + tcg_out32(s, insn); +} - old_code_ptr[1] = s->code_ptr - old_code_ptr; +static void tcg_out_ldst(TCGContext *s, TCGOpcode op, TCGReg val, + TCGReg base, intptr_t offset) +{ + stack_bounds_check(base, offset); + if (offset != sextract32(offset, 0, 16)) { + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, offset); + tcg_out_op_rrr(s, (TCG_TARGET_REG_BITS == 32 + ? 
INDEX_op_add_i32 : INDEX_op_add_i64), + TCG_REG_TMP, TCG_REG_TMP, base); + base = TCG_REG_TMP; + offset = 0; + } + tcg_out_op_rrs(s, op, val, base, offset); } -#endif static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg val, TCGReg base, intptr_t offset) { - stack_bounds_check(base, offset); switch (type) { case TCG_TYPE_I32: - tcg_out_op_rrs(s, INDEX_op_ld_i32, val, base, offset); + tcg_out_ldst(s, INDEX_op_ld_i32, val, base, offset); break; #if TCG_TARGET_REG_BITS == 64 case TCG_TYPE_I64: - tcg_out_op_rrs(s, INDEX_op_ld_i64, val, base, offset); + tcg_out_ldst(s, INDEX_op_ld_i64, val, base, offset); break; #endif default: @@ -581,24 +539,46 @@ static void tcg_out_movi(TCGContext *s, TCGType type, { switch (type) { case TCG_TYPE_I32: - tcg_out_op_ri(s, INDEX_op_tci_movi_i32, ret, arg); - break; #if TCG_TARGET_REG_BITS == 64 + arg = (int32_t)arg; + /* fall through */ case TCG_TYPE_I64: - tcg_out_op_rI(s, INDEX_op_tci_movi_i64, ret, arg); - break; #endif + break; default: g_assert_not_reached(); } + + if (arg == sextract32(arg, 0, 20)) { + tcg_out_op_ri(s, INDEX_op_tci_movi, ret, arg); + } else { + tcg_insn_unit insn = 0; + + new_pool_label(s, arg, 20, s->code_ptr, 0); + insn = deposit32(insn, 0, 8, INDEX_op_tci_movl); + insn = deposit32(insn, 8, 4, ret); + tcg_out32(s, insn); + } } -static inline void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg) +static void tcg_out_call(TCGContext *s, const tcg_insn_unit *func, + ffi_cif *cif) { - uint8_t *old_code_ptr = s->code_ptr; - tcg_out_op_t(s, INDEX_op_call); - tcg_out_i(s, (uintptr_t)arg); - old_code_ptr[1] = s->code_ptr - old_code_ptr; + tcg_insn_unit insn = 0; + uint8_t which; + + if (cif->rtype == &ffi_type_void) { + which = 0; + } else if (cif->rtype->size == 4) { + which = 1; + } else { + tcg_debug_assert(cif->rtype->size == 8); + which = 2; + } + new_pool_l2(s, 20, s->code_ptr, 0, (uintptr_t)func, (uintptr_t)cif); + insn = deposit32(insn, 0, 8, INDEX_op_call); + insn = deposit32(insn, 8, 4, which); + tcg_out32(s, insn); } #if TCG_TARGET_REG_BITS == 64 @@ -629,6 +609,10 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, set_jmp_reset_offset(s, args[0]); break; + case INDEX_op_goto_ptr: + tcg_out_op_r(s, opc, args[0]); + break; + case INDEX_op_br: tcg_out_op_l(s, opc, arg_label(args[0])); break; @@ -637,12 +621,11 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, tcg_out_op_rrrc(s, opc, args[0], args[1], args[2], args[3]); break; -#if TCG_TARGET_REG_BITS == 32 + CASE_32_64(movcond) case INDEX_op_setcond2_i32: tcg_out_op_rrrrrc(s, opc, args[0], args[1], args[2], args[3], args[4], args[5]); break; -#endif CASE_32_64(ld8u) CASE_32_64(ld8s) @@ -657,8 +640,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, case INDEX_op_st_i32: CASE_64(st32) CASE_64(st) - stack_bounds_check(args[1], args[2]); - tcg_out_op_rrs(s, opc, args[0], args[1], args[2]); + tcg_out_ldst(s, opc, args[0], args[1], args[2]); break; CASE_32_64(add) @@ -681,6 +663,8 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, CASE_32_64(divu) /* Optional (TCG_TARGET_HAS_div_*). */ CASE_32_64(rem) /* Optional (TCG_TARGET_HAS_div_*). */ CASE_32_64(remu) /* Optional (TCG_TARGET_HAS_div_*). */ + CASE_32_64(clz) /* Optional (TCG_TARGET_HAS_clz_*). */ + CASE_32_64(ctz) /* Optional (TCG_TARGET_HAS_ctz_*). */ tcg_out_op_rrr(s, opc, args[0], args[1], args[2]); break; @@ -696,8 +680,24 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, } break; + CASE_32_64(extract) /* Optional (TCG_TARGET_HAS_extract_*). 
*/ + CASE_32_64(sextract) /* Optional (TCG_TARGET_HAS_sextract_*). */ + { + TCGArg pos = args[2], len = args[3]; + TCGArg max = tcg_op_defs[opc].flags & TCG_OPF_64BIT ? 64 : 32; + + tcg_debug_assert(pos < max); + tcg_debug_assert(pos + len <= max); + + tcg_out_op_rrbb(s, opc, args[0], args[1], pos, len); + } + break; + CASE_32_64(brcond) - tcg_out_op_rrcl(s, opc, args[0], args[1], args[2], arg_label(args[3])); + tcg_out_op_rrrc(s, (opc == INDEX_op_brcond_i32 + ? INDEX_op_setcond_i32 : INDEX_op_setcond_i64), + TCG_REG_TMP, args[0], args[1], args[2]); + tcg_out_op_rl(s, opc, TCG_REG_TMP, arg_label(args[3])); break; CASE_32_64(neg) /* Optional (TCG_TARGET_HAS_neg_*). */ @@ -713,23 +713,28 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, CASE_32_64(bswap16) /* Optional (TCG_TARGET_HAS_bswap16_*). */ CASE_32_64(bswap32) /* Optional (TCG_TARGET_HAS_bswap32_*). */ CASE_64(bswap64) /* Optional (TCG_TARGET_HAS_bswap64_i64). */ + CASE_32_64(ctpop) /* Optional (TCG_TARGET_HAS_ctpop_*). */ tcg_out_op_rr(s, opc, args[0], args[1]); break; -#if TCG_TARGET_REG_BITS == 32 - case INDEX_op_add2_i32: - case INDEX_op_sub2_i32: + CASE_32_64(add2) + CASE_32_64(sub2) tcg_out_op_rrrrrr(s, opc, args[0], args[1], args[2], args[3], args[4], args[5]); break; + +#if TCG_TARGET_REG_BITS == 32 case INDEX_op_brcond2_i32: - tcg_out_op_rrrrcl(s, opc, args[0], args[1], args[2], - args[3], args[4], arg_label(args[5])); + tcg_out_op_rrrrrc(s, INDEX_op_setcond2_i32, TCG_REG_TMP, + args[0], args[1], args[2], args[3], args[4]); + tcg_out_op_rl(s, INDEX_op_brcond_i32, TCG_REG_TMP, arg_label(args[5])); break; - case INDEX_op_mulu2_i32: +#endif + + CASE_32_64(mulu2) + CASE_32_64(muls2) tcg_out_op_rrrr(s, opc, args[0], args[1], args[2], args[3]); break; -#endif case INDEX_op_qemu_ld_i32: case INDEX_op_qemu_st_i32: @@ -747,8 +752,9 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, } else if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) { tcg_out_op_rrrm(s, opc, args[0], args[1], args[2], args[3]); } else { - tcg_out_op_rrrrm(s, opc, args[0], args[1], - args[2], args[3], args[4]); + tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, args[4]); + tcg_out_op_rrrrr(s, opc, args[0], args[1], + args[2], args[3], TCG_REG_TMP); } break; @@ -794,6 +800,11 @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct) return ct & TCG_CT_CONST; } +static void tcg_out_nop_fill(tcg_insn_unit *p, int count) +{ + memset(p, 0, sizeof(*p) * count); +} + static void tcg_target_init(TCGContext *s) { #if defined(CONFIG_DEBUG_TCG_INTERPRETER) @@ -810,17 +821,22 @@ static void tcg_target_init(TCGContext *s) tcg_target_available_regs[TCG_TYPE_I32] = BIT(TCG_TARGET_NB_REGS) - 1; /* Registers available for 64 bit operations. */ tcg_target_available_regs[TCG_TYPE_I64] = BIT(TCG_TARGET_NB_REGS) - 1; - /* TODO: Which registers should be set here? */ - tcg_target_call_clobber_regs = BIT(TCG_TARGET_NB_REGS) - 1; + /* + * The interpreter "registers" are in the local stack frame and + * cannot be clobbered by the called helper functions. However, + * the interpreter assumes a 64-bit return value and assigns to + * the return value registers. + */ + tcg_target_call_clobber_regs = + MAKE_64BIT_MASK(TCG_REG_R0, 64 / TCG_TARGET_REG_BITS); s->reserved_regs = 0; + tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP); tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK); - /* We use negative offsets from "sp" so that we can distinguish - stores that might pretend to be call arguments. 
*/ - tcg_set_frame(s, TCG_REG_CALL_STACK, - -CPU_TEMP_BUF_NLONGS * sizeof(long), - CPU_TEMP_BUF_NLONGS * sizeof(long)); + /* The call arguments come first, followed by the temp storage. */ + tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE, + TCG_STATIC_FRAME_SIZE); } /* Generate global QEMU prologue and epilogue code. */ diff --git a/tcg/tci/tcg-target.h b/tcg/tci/tcg-target.h index d0b5f3fa64..7b6089f304 100644 --- a/tcg/tci/tcg-target.h +++ b/tcg/tci/tcg-target.h @@ -41,7 +41,7 @@ #define TCG_TARGET_H #define TCG_TARGET_INTERPRETER 1 -#define TCG_TARGET_INSN_UNIT_SIZE 1 +#define TCG_TARGET_INSN_UNIT_SIZE 4 #define TCG_TARGET_TLB_DISPLACEMENT_BITS 32 #define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1) @@ -68,26 +68,26 @@ #define TCG_TARGET_HAS_ext16s_i32 1 #define TCG_TARGET_HAS_ext8u_i32 1 #define TCG_TARGET_HAS_ext16u_i32 1 -#define TCG_TARGET_HAS_andc_i32 0 +#define TCG_TARGET_HAS_andc_i32 1 #define TCG_TARGET_HAS_deposit_i32 1 -#define TCG_TARGET_HAS_extract_i32 0 -#define TCG_TARGET_HAS_sextract_i32 0 +#define TCG_TARGET_HAS_extract_i32 1 +#define TCG_TARGET_HAS_sextract_i32 1 #define TCG_TARGET_HAS_extract2_i32 0 -#define TCG_TARGET_HAS_eqv_i32 0 -#define TCG_TARGET_HAS_nand_i32 0 -#define TCG_TARGET_HAS_nor_i32 0 -#define TCG_TARGET_HAS_clz_i32 0 -#define TCG_TARGET_HAS_ctz_i32 0 -#define TCG_TARGET_HAS_ctpop_i32 0 +#define TCG_TARGET_HAS_eqv_i32 1 +#define TCG_TARGET_HAS_nand_i32 1 +#define TCG_TARGET_HAS_nor_i32 1 +#define TCG_TARGET_HAS_clz_i32 1 +#define TCG_TARGET_HAS_ctz_i32 1 +#define TCG_TARGET_HAS_ctpop_i32 1 #define TCG_TARGET_HAS_neg_i32 1 #define TCG_TARGET_HAS_not_i32 1 -#define TCG_TARGET_HAS_orc_i32 0 +#define TCG_TARGET_HAS_orc_i32 1 #define TCG_TARGET_HAS_rot_i32 1 -#define TCG_TARGET_HAS_movcond_i32 0 -#define TCG_TARGET_HAS_muls2_i32 0 +#define TCG_TARGET_HAS_movcond_i32 1 +#define TCG_TARGET_HAS_muls2_i32 1 #define TCG_TARGET_HAS_muluh_i32 0 #define TCG_TARGET_HAS_mulsh_i32 0 -#define TCG_TARGET_HAS_goto_ptr 0 +#define TCG_TARGET_HAS_goto_ptr 1 #define TCG_TARGET_HAS_direct_jump 0 #define TCG_TARGET_HAS_qemu_st8_i32 0 @@ -98,8 +98,8 @@ #define TCG_TARGET_HAS_bswap32_i64 1 #define TCG_TARGET_HAS_bswap64_i64 1 #define TCG_TARGET_HAS_deposit_i64 1 -#define TCG_TARGET_HAS_extract_i64 0 -#define TCG_TARGET_HAS_sextract_i64 0 +#define TCG_TARGET_HAS_extract_i64 1 +#define TCG_TARGET_HAS_sextract_i64 1 #define TCG_TARGET_HAS_extract2_i64 0 #define TCG_TARGET_HAS_div_i64 1 #define TCG_TARGET_HAS_rem_i64 1 @@ -109,25 +109,25 @@ #define TCG_TARGET_HAS_ext8u_i64 1 #define TCG_TARGET_HAS_ext16u_i64 1 #define TCG_TARGET_HAS_ext32u_i64 1 -#define TCG_TARGET_HAS_andc_i64 0 -#define TCG_TARGET_HAS_eqv_i64 0 -#define TCG_TARGET_HAS_nand_i64 0 -#define TCG_TARGET_HAS_nor_i64 0 -#define TCG_TARGET_HAS_clz_i64 0 -#define TCG_TARGET_HAS_ctz_i64 0 -#define TCG_TARGET_HAS_ctpop_i64 0 +#define TCG_TARGET_HAS_andc_i64 1 +#define TCG_TARGET_HAS_eqv_i64 1 +#define TCG_TARGET_HAS_nand_i64 1 +#define TCG_TARGET_HAS_nor_i64 1 +#define TCG_TARGET_HAS_clz_i64 1 +#define TCG_TARGET_HAS_ctz_i64 1 +#define TCG_TARGET_HAS_ctpop_i64 1 #define TCG_TARGET_HAS_neg_i64 1 #define TCG_TARGET_HAS_not_i64 1 -#define TCG_TARGET_HAS_orc_i64 0 +#define TCG_TARGET_HAS_orc_i64 1 #define TCG_TARGET_HAS_rot_i64 1 -#define TCG_TARGET_HAS_movcond_i64 0 -#define TCG_TARGET_HAS_muls2_i64 0 -#define TCG_TARGET_HAS_add2_i32 0 -#define TCG_TARGET_HAS_sub2_i32 0 -#define TCG_TARGET_HAS_mulu2_i32 0 -#define TCG_TARGET_HAS_add2_i64 0 -#define TCG_TARGET_HAS_sub2_i64 0 -#define TCG_TARGET_HAS_mulu2_i64 0 +#define 
TCG_TARGET_HAS_movcond_i64 1 +#define TCG_TARGET_HAS_muls2_i64 1 +#define TCG_TARGET_HAS_add2_i32 1 +#define TCG_TARGET_HAS_sub2_i32 1 +#define TCG_TARGET_HAS_mulu2_i32 1 +#define TCG_TARGET_HAS_add2_i64 1 +#define TCG_TARGET_HAS_sub2_i64 1 +#define TCG_TARGET_HAS_mulu2_i64 1 #define TCG_TARGET_HAS_muluh_i64 0 #define TCG_TARGET_HAS_mulsh_i64 0 #else @@ -156,15 +156,17 @@ typedef enum { TCG_REG_R14, TCG_REG_R15, + TCG_REG_TMP = TCG_REG_R13, TCG_AREG0 = TCG_REG_R14, TCG_REG_CALL_STACK = TCG_REG_R15, } TCGReg; /* Used for function call generation. */ #define TCG_TARGET_CALL_STACK_OFFSET 0 -#define TCG_TARGET_STACK_ALIGN 16 +#define TCG_TARGET_STACK_ALIGN 8 #define HAVE_TCG_QEMU_TB_EXEC +#define TCG_TARGET_NEED_POOL_LABELS /* We could notice __i386__ or __s390x__ and reduce the barriers depending on the host. But if you want performance, you use the normal backend. diff --git a/tests/docker/dockerfiles/alpine.docker b/tests/docker/dockerfiles/alpine.docker index 7eeecacc46..7e6997e301 100644 --- a/tests/docker/dockerfiles/alpine.docker +++ b/tests/docker/dockerfiles/alpine.docker @@ -22,6 +22,7 @@ ENV PACKAGES \ libaio-dev \ libbpf-dev \ libcap-ng-dev \ + libffi-dev \ libjpeg-turbo-dev \ libnfs-dev \ libpng-dev \ diff --git a/tests/docker/dockerfiles/centos8.docker b/tests/docker/dockerfiles/centos8.docker index efc1349cc8..03e0440e03 100644 --- a/tests/docker/dockerfiles/centos8.docker +++ b/tests/docker/dockerfiles/centos8.docker @@ -17,6 +17,7 @@ ENV PACKAGES \ libbpf-devel \ libepoxy-devel \ libfdt-devel \ + libffi-devel \ libgcrypt-devel \ lzo-devel \ make \ diff --git a/tests/docker/dockerfiles/debian10.docker b/tests/docker/dockerfiles/debian10.docker index 63cf835ec5..4ffe47671e 100644 --- a/tests/docker/dockerfiles/debian10.docker +++ b/tests/docker/dockerfiles/debian10.docker @@ -26,6 +26,7 @@ RUN apt update && \ gdb-multiarch \ gettext \ git \ + libffi-dev \ libncurses5-dev \ ninja-build \ pkg-config \ diff --git a/tests/docker/dockerfiles/fedora-i386-cross.docker b/tests/docker/dockerfiles/fedora-i386-cross.docker index 66cdb06c19..8004fd8ee5 100644 --- a/tests/docker/dockerfiles/fedora-i386-cross.docker +++ b/tests/docker/dockerfiles/fedora-i386-cross.docker @@ -6,6 +6,7 @@ ENV PACKAGES \ findutils \ gcc \ git \ + libffi-devel.i686 \ libtasn1-devel.i686 \ libzstd-devel.i686 \ make \ diff --git a/tests/docker/dockerfiles/fedora-win32-cross.docker b/tests/docker/dockerfiles/fedora-win32-cross.docker index 3733df63e9..a638afb525 100644 --- a/tests/docker/dockerfiles/fedora-win32-cross.docker +++ b/tests/docker/dockerfiles/fedora-win32-cross.docker @@ -19,6 +19,7 @@ ENV PACKAGES \ mingw32-gmp \ mingw32-gnutls \ mingw32-gtk3 \ + mingw32-libffi \ mingw32-libjpeg-turbo \ mingw32-libpng \ mingw32-libtasn1 \ diff --git a/tests/docker/dockerfiles/fedora-win64-cross.docker b/tests/docker/dockerfiles/fedora-win64-cross.docker index 2564ce4979..f53007ac86 100644 --- a/tests/docker/dockerfiles/fedora-win64-cross.docker +++ b/tests/docker/dockerfiles/fedora-win64-cross.docker @@ -18,6 +18,7 @@ ENV PACKAGES \ mingw64-glib2 \ mingw64-gmp \ mingw64-gtk3 \ + mingw64-libffi \ mingw64-libjpeg-turbo \ mingw64-libpng \ mingw64-libtasn1 \ diff --git a/tests/docker/dockerfiles/fedora.docker b/tests/docker/dockerfiles/fedora.docker index 0979c0e1f4..00cac5d61c 100644 --- a/tests/docker/dockerfiles/fedora.docker +++ b/tests/docker/dockerfiles/fedora.docker @@ -33,6 +33,7 @@ ENV PACKAGES \ libepoxy-devel \ libfdt-devel \ libbpf-devel \ + libffi-devel \ libiscsi-devel \ libjpeg-devel \ libpmem-devel \ diff 
--git a/tests/docker/dockerfiles/ubuntu.docker b/tests/docker/dockerfiles/ubuntu.docker index 98a527361c..24d1647a65 100644 --- a/tests/docker/dockerfiles/ubuntu.docker +++ b/tests/docker/dockerfiles/ubuntu.docker @@ -28,6 +28,7 @@ ENV PACKAGES \ libdrm-dev \ libepoxy-dev \ libfdt-dev \ + libffi-dev \ libgbm-dev \ libgnutls28-dev \ libgtk-3-dev \ diff --git a/tests/docker/dockerfiles/ubuntu1804.docker b/tests/docker/dockerfiles/ubuntu1804.docker index c0d3642507..2f1ec7c42b 100644 --- a/tests/docker/dockerfiles/ubuntu1804.docker +++ b/tests/docker/dockerfiles/ubuntu1804.docker @@ -16,6 +16,7 @@ ENV PACKAGES \ libdrm-dev \ libepoxy-dev \ libfdt-dev \ + libffi-dev \ libgbm-dev \ libgtk-3-dev \ libibverbs-dev \ diff --git a/tests/docker/dockerfiles/ubuntu2004.docker b/tests/docker/dockerfiles/ubuntu2004.docker index f1e0ebad49..fe993fe2a3 100644 --- a/tests/docker/dockerfiles/ubuntu2004.docker +++ b/tests/docker/dockerfiles/ubuntu2004.docker @@ -19,6 +19,7 @@ ENV PACKAGES flex bison \ libdrm-dev \ libepoxy-dev \ libfdt-dev \ + libffi-dev \ libgbm-dev \ libgtk-3-dev \ libibverbs-dev \ diff --git a/tests/qtest/fuzz/generic_fuzz.c b/tests/qtest/fuzz/generic_fuzz.c index cea7d4058e..6c67522717 100644 --- a/tests/qtest/fuzz/generic_fuzz.c +++ b/tests/qtest/fuzz/generic_fuzz.c @@ -841,9 +841,9 @@ static void generic_pre_fuzz(QTestState *s) g_hash_table_iter_init(&iter, fuzzable_memoryregions); while (g_hash_table_iter_next(&iter, (gpointer)&mr, NULL)) { - printf(" * %s (size %lx)\n", + printf(" * %s (size 0x%" PRIx64 ")\n", object_get_canonical_path_component(&(mr->parent_obj)), - (uint64_t)mr->size); + memory_region_size(mr)); } if (!g_hash_table_size(fuzzable_memoryregions)) { diff --git a/tests/qtest/fuzz/qos_fuzz.h b/tests/qtest/fuzz/qos_fuzz.h index 477f11b02b..63d8459b71 100644 --- a/tests/qtest/fuzz/qos_fuzz.h +++ b/tests/qtest/fuzz/qos_fuzz.h @@ -10,8 +10,8 @@ * See the COPYING file in the top-level directory. 
*/ -#ifndef _QOS_FUZZ_H_ -#define _QOS_FUZZ_H_ +#ifndef QOS_FUZZ_H +#define QOS_FUZZ_H #include "tests/qtest/fuzz/fuzz.h" #include "tests/qtest/libqos/qgraph.h" diff --git a/tests/tcg/Makefile.target b/tests/tcg/Makefile.target index b29fae4630..63cf1b2573 100644 --- a/tests/tcg/Makefile.target +++ b/tests/tcg/Makefile.target @@ -81,8 +81,10 @@ LDFLAGS= QEMU_OPTS= -# If TCG debugging is enabled things are a lot slower -ifeq ($(CONFIG_DEBUG_TCG),y) +# If TCG debugging, or TCI is enabled things are a lot slower +ifneq ($(CONFIG_TCG_INTERPRETER),) +TIMEOUT=90 +else ifneq ($(CONFIG_DEBUG_TCG),) TIMEOUT=60 else TIMEOUT=15 diff --git a/tests/tcg/minilib/minilib.h b/tests/tcg/minilib/minilib.h index e23361380a..17d0f2f314 100644 --- a/tests/tcg/minilib/minilib.h +++ b/tests/tcg/minilib/minilib.h @@ -9,8 +9,8 @@ * SPDX-License-Identifier: GPL-2.0-only */ -#ifndef _MINILIB_H_ -#define _MINILIB_H_ +#ifndef MINILIB_H +#define MINILIB_H /* * Provided by the individual arch diff --git a/tests/tcg/multiarch/linux-test.c b/tests/tcg/multiarch/linux-test.c index 96bbad5823..c8c6aeddeb 100644 --- a/tests/tcg/multiarch/linux-test.c +++ b/tests/tcg/multiarch/linux-test.c @@ -496,6 +496,15 @@ static void test_signal(void) sigemptyset(&act.sa_mask); act.sa_flags = 0; chk_error(sigaction(SIGSEGV, &act, NULL)); + + if (sigaction(SIGKILL, &act, NULL) == 0) { + error("sigaction(SIGKILL, &act, NULL) must not succeed"); + } + if (sigaction(SIGSTOP, &act, NULL) == 0) { + error("sigaction(SIGSTOP, &act, NULL) must not succeed"); + } + chk_error(sigaction(SIGKILL, NULL, &act)); + chk_error(sigaction(SIGSTOP, NULL, &act)); } #define SHM_SIZE 32768 diff --git a/util/oslib-win32.c b/util/oslib-win32.c index ee3a3692d8..af559ef339 100644 --- a/util/oslib-win32.c +++ b/util/oslib-win32.c @@ -58,7 +58,11 @@ void *qemu_try_memalign(size_t alignment, size_t size) void *ptr; g_assert(size != 0); - g_assert(is_power_of_2(alignment)); + if (alignment < sizeof(void *)) { + alignment = sizeof(void *); + } else { + g_assert(is_power_of_2(alignment)); + } ptr = _aligned_malloc(size, alignment); trace_qemu_memalign(alignment, size, ptr); return ptr; |
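
The qemu_try_memalign() change amounts to the following policy, shown here as a stand-alone sketch (normalize_alignment() is a made-up name for illustration, not a QEMU function): alignment requests smaller than a pointer are rounded up to sizeof(void *), and everything else must already be a power of two, as _aligned_malloc() requires.

    #include <assert.h>
    #include <stdbool.h>
    #include <stddef.h>

    /* The power-of-two test that QEMU's is_power_of_2() performs. */
    static bool pow2(size_t n)
    {
        return n != 0 && (n & (n - 1)) == 0;
    }

    /* New behaviour: small alignments are accepted and bumped up;
     * larger ones must already be a power of two. */
    static size_t normalize_alignment(size_t alignment)
    {
        if (alignment < sizeof(void *)) {
            alignment = sizeof(void *);
        } else {
            assert(pow2(alignment));
        }
        return alignment;
    }

    int main(void)
    {
        /* 0 would previously have failed the is_power_of_2() assertion. */
        assert(normalize_alignment(0) == sizeof(void *));
        /* A valid power-of-two request is passed through unchanged. */
        assert(normalize_alignment(64) == 64);
        return 0;
    }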