Diffstat (limited to 'target')
-rw-r--r--  target/arm/Makefile.objs              24
-rw-r--r--  target/arm/cpu.c                     232
-rw-r--r--  target/arm/cpu.h                       2
-rw-r--r--  target/arm/helper.c                  498
-rw-r--r--  target/arm/internals.h                69
-rw-r--r--  target/arm/op_helper.c               262
-rw-r--r--  target/arm/tlb_helper.c              200
-rw-r--r--  target/arm/translate-a64.c           128
-rw-r--r--  target/arm/translate.c                91
-rw-r--r--  target/arm/translate.h                 5
-rw-r--r--  target/arm/vfp_helper.c              199
-rw-r--r--  target/ppc/fpu_helper.c              841
-rw-r--r--  target/ppc/helper.h                  320
-rw-r--r--  target/ppc/int_helper.c               26
-rw-r--r--  target/ppc/internal.h                 12
-rw-r--r--  target/ppc/kvm.c                      11
-rw-r--r--  target/ppc/kvm_ppc.h                  10
-rw-r--r--  target/ppc/machine.c                   2
-rw-r--r--  target/ppc/mem_helper.c               25
-rw-r--r--  target/ppc/translate/vsx-impl.inc.c  567
-rw-r--r--  target/ppc/translate/vsx-ops.inc.c    70
-rw-r--r--  target/s390x/cpu_features.c          352
-rw-r--r--  target/s390x/cpu_features_def.h      352
-rw-r--r--  target/s390x/cpu_features_def.inc.h  369
-rw-r--r--  target/s390x/gen-features.c           30
25 files changed, 2323 insertions, 2374 deletions
diff --git a/target/arm/Makefile.objs b/target/arm/Makefile.objs
index dfa736a375..5c154f01c5 100644
--- a/target/arm/Makefile.objs
+++ b/target/arm/Makefile.objs
@@ -1,16 +1,15 @@
obj-y += arm-semi.o
-obj-$(CONFIG_SOFTMMU) += machine.o psci.o arch_dump.o monitor.o
+obj-y += helper.o vfp_helper.o
+obj-y += cpu.o gdbstub.o
+obj-$(TARGET_AARCH64) += cpu64.o gdbstub64.o
+
+obj-$(CONFIG_SOFTMMU) += machine.o arch_dump.o monitor.o
+obj-$(CONFIG_SOFTMMU) += arm-powerctl.o
+
obj-$(CONFIG_KVM) += kvm.o
obj-$(call land,$(CONFIG_KVM),$(call lnot,$(TARGET_AARCH64))) += kvm32.o
obj-$(call land,$(CONFIG_KVM),$(TARGET_AARCH64)) += kvm64.o
obj-$(call lnot,$(CONFIG_KVM)) += kvm-stub.o
-obj-y += translate.o op_helper.o helper.o cpu.o
-obj-y += neon_helper.o iwmmxt_helper.o vec_helper.o vfp_helper.o
-obj-y += gdbstub.o
-obj-$(TARGET_AARCH64) += cpu64.o translate-a64.o helper-a64.o gdbstub64.o
-obj-$(TARGET_AARCH64) += pauth_helper.o
-obj-y += crypto_helper.o
-obj-$(CONFIG_SOFTMMU) += arm-powerctl.o
DECODETREE = $(SRC_PATH)/scripts/decodetree.py
@@ -33,4 +32,13 @@ target/arm/translate-sve.o: target/arm/decode-sve.inc.c
target/arm/translate.o: target/arm/decode-vfp.inc.c
target/arm/translate.o: target/arm/decode-vfp-uncond.inc.c
+obj-y += tlb_helper.o
+obj-y += translate.o op_helper.o
+obj-y += crypto_helper.o
+obj-y += iwmmxt_helper.o vec_helper.o neon_helper.o
+
+obj-$(CONFIG_SOFTMMU) += psci.o
+
+obj-$(TARGET_AARCH64) += translate-a64.o helper-a64.o
obj-$(TARGET_AARCH64) += translate-sve.o sve_helper.o
+obj-$(TARGET_AARCH64) += pauth_helper.o
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index 376db154f0..f21261c8ff 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -19,6 +19,7 @@
*/
#include "qemu/osdep.h"
+#include "qemu/qemu-print.h"
#include "qemu-common.h"
#include "target/arm/idau.h"
#include "qemu/module.h"
@@ -676,6 +677,231 @@ static void arm_disas_set_info(CPUState *cpu, disassemble_info *info)
#endif
}
+#ifdef TARGET_AARCH64
+
+static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ uint32_t psr = pstate_read(env);
+ int i;
+ int el = arm_current_el(env);
+ const char *ns_status;
+
+ qemu_fprintf(f, " PC=%016" PRIx64 " ", env->pc);
+ for (i = 0; i < 32; i++) {
+ if (i == 31) {
+ qemu_fprintf(f, " SP=%016" PRIx64 "\n", env->xregs[i]);
+ } else {
+ qemu_fprintf(f, "X%02d=%016" PRIx64 "%s", i, env->xregs[i],
+ (i + 2) % 3 ? " " : "\n");
+ }
+ }
+
+ if (arm_feature(env, ARM_FEATURE_EL3) && el != 3) {
+ ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
+ } else {
+ ns_status = "";
+ }
+ qemu_fprintf(f, "PSTATE=%08x %c%c%c%c %sEL%d%c",
+ psr,
+ psr & PSTATE_N ? 'N' : '-',
+ psr & PSTATE_Z ? 'Z' : '-',
+ psr & PSTATE_C ? 'C' : '-',
+ psr & PSTATE_V ? 'V' : '-',
+ ns_status,
+ el,
+ psr & PSTATE_SP ? 'h' : 't');
+
+ if (cpu_isar_feature(aa64_bti, cpu)) {
+ qemu_fprintf(f, " BTYPE=%d", (psr & PSTATE_BTYPE) >> 10);
+ }
+ if (!(flags & CPU_DUMP_FPU)) {
+ qemu_fprintf(f, "\n");
+ return;
+ }
+ if (fp_exception_el(env, el) != 0) {
+ qemu_fprintf(f, " FPU disabled\n");
+ return;
+ }
+ qemu_fprintf(f, " FPCR=%08x FPSR=%08x\n",
+ vfp_get_fpcr(env), vfp_get_fpsr(env));
+
+ if (cpu_isar_feature(aa64_sve, cpu) && sve_exception_el(env, el) == 0) {
+ int j, zcr_len = sve_zcr_len_for_el(env, el);
+
+ for (i = 0; i <= FFR_PRED_NUM; i++) {
+ bool eol;
+ if (i == FFR_PRED_NUM) {
+ qemu_fprintf(f, "FFR=");
+ /* It's last, so end the line. */
+ eol = true;
+ } else {
+ qemu_fprintf(f, "P%02d=", i);
+ switch (zcr_len) {
+ case 0:
+ eol = i % 8 == 7;
+ break;
+ case 1:
+ eol = i % 6 == 5;
+ break;
+ case 2:
+ case 3:
+ eol = i % 3 == 2;
+ break;
+ default:
+ /* More than one quadword per predicate. */
+ eol = true;
+ break;
+ }
+ }
+ for (j = zcr_len / 4; j >= 0; j--) {
+ int digits;
+ if (j * 4 + 4 <= zcr_len + 1) {
+ digits = 16;
+ } else {
+ digits = (zcr_len % 4 + 1) * 4;
+ }
+ qemu_fprintf(f, "%0*" PRIx64 "%s", digits,
+ env->vfp.pregs[i].p[j],
+ j ? ":" : eol ? "\n" : " ");
+ }
+ }
+
+ for (i = 0; i < 32; i++) {
+ if (zcr_len == 0) {
+ qemu_fprintf(f, "Z%02d=%016" PRIx64 ":%016" PRIx64 "%s",
+ i, env->vfp.zregs[i].d[1],
+ env->vfp.zregs[i].d[0], i & 1 ? "\n" : " ");
+ } else if (zcr_len == 1) {
+ qemu_fprintf(f, "Z%02d=%016" PRIx64 ":%016" PRIx64
+ ":%016" PRIx64 ":%016" PRIx64 "\n",
+ i, env->vfp.zregs[i].d[3], env->vfp.zregs[i].d[2],
+ env->vfp.zregs[i].d[1], env->vfp.zregs[i].d[0]);
+ } else {
+ for (j = zcr_len; j >= 0; j--) {
+ bool odd = (zcr_len - j) % 2 != 0;
+ if (j == zcr_len) {
+ qemu_fprintf(f, "Z%02d[%x-%x]=", i, j, j - 1);
+ } else if (!odd) {
+ if (j > 0) {
+ qemu_fprintf(f, " [%x-%x]=", j, j - 1);
+ } else {
+ qemu_fprintf(f, " [%x]=", j);
+ }
+ }
+ qemu_fprintf(f, "%016" PRIx64 ":%016" PRIx64 "%s",
+ env->vfp.zregs[i].d[j * 2 + 1],
+ env->vfp.zregs[i].d[j * 2],
+ odd || j == 0 ? "\n" : ":");
+ }
+ }
+ }
+ } else {
+ for (i = 0; i < 32; i++) {
+ uint64_t *q = aa64_vfp_qreg(env, i);
+ qemu_fprintf(f, "Q%02d=%016" PRIx64 ":%016" PRIx64 "%s",
+ i, q[1], q[0], (i & 1 ? "\n" : " "));
+ }
+ }
+}
+
+#else
+
+static inline void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
+{
+ g_assert_not_reached();
+}
+
+#endif
+
+static void arm_cpu_dump_state(CPUState *cs, FILE *f, int flags)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ int i;
+
+ if (is_a64(env)) {
+ aarch64_cpu_dump_state(cs, f, flags);
+ return;
+ }
+
+ for (i = 0; i < 16; i++) {
+ qemu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
+ if ((i % 4) == 3) {
+ qemu_fprintf(f, "\n");
+ } else {
+ qemu_fprintf(f, " ");
+ }
+ }
+
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ uint32_t xpsr = xpsr_read(env);
+ const char *mode;
+ const char *ns_status = "";
+
+ if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+ ns_status = env->v7m.secure ? "S " : "NS ";
+ }
+
+ if (xpsr & XPSR_EXCP) {
+ mode = "handler";
+ } else {
+ if (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_NPRIV_MASK) {
+ mode = "unpriv-thread";
+ } else {
+ mode = "priv-thread";
+ }
+ }
+
+ qemu_fprintf(f, "XPSR=%08x %c%c%c%c %c %s%s\n",
+ xpsr,
+ xpsr & XPSR_N ? 'N' : '-',
+ xpsr & XPSR_Z ? 'Z' : '-',
+ xpsr & XPSR_C ? 'C' : '-',
+ xpsr & XPSR_V ? 'V' : '-',
+ xpsr & XPSR_T ? 'T' : 'A',
+ ns_status,
+ mode);
+ } else {
+ uint32_t psr = cpsr_read(env);
+ const char *ns_status = "";
+
+ if (arm_feature(env, ARM_FEATURE_EL3) &&
+ (psr & CPSR_M) != ARM_CPU_MODE_MON) {
+ ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
+ }
+
+ qemu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%s%d\n",
+ psr,
+ psr & CPSR_N ? 'N' : '-',
+ psr & CPSR_Z ? 'Z' : '-',
+ psr & CPSR_C ? 'C' : '-',
+ psr & CPSR_V ? 'V' : '-',
+ psr & CPSR_T ? 'T' : 'A',
+ ns_status,
+ aarch32_mode_name(psr), (psr & 0x10) ? 32 : 26);
+ }
+
+ if (flags & CPU_DUMP_FPU) {
+ int numvfpregs = 0;
+ if (arm_feature(env, ARM_FEATURE_VFP)) {
+ numvfpregs += 16;
+ }
+ if (arm_feature(env, ARM_FEATURE_VFP3)) {
+ numvfpregs += 16;
+ }
+ for (i = 0; i < numvfpregs; i++) {
+ uint64_t v = *aa32_vfp_dreg(env, i);
+ qemu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
+ i * 2, (uint32_t)v,
+ i * 2 + 1, (uint32_t)(v >> 32),
+ i, v);
+ }
+ qemu_fprintf(f, "FPSCR: %08x\n", vfp_get_fpscr(env));
+ }
+}
+
uint64_t arm_cpu_mp_affinity(int idx, uint8_t clustersz)
{
uint32_t Aff1 = idx / clustersz;
@@ -2340,8 +2566,6 @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)
cc->gdb_write_register = arm_cpu_gdb_write_register;
#ifndef CONFIG_USER_ONLY
cc->do_interrupt = arm_cpu_do_interrupt;
- cc->do_unaligned_access = arm_cpu_do_unaligned_access;
- cc->do_transaction_failed = arm_cpu_do_transaction_failed;
cc->get_phys_page_attrs_debug = arm_cpu_get_phys_page_attrs_debug;
cc->asidx_from_attrs = arm_asidx_from_attrs;
cc->vmsd = &vmstate_arm_cpu;
@@ -2364,6 +2588,10 @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)
#ifdef CONFIG_TCG
cc->tcg_initialize = arm_translate_init;
cc->tlb_fill = arm_cpu_tlb_fill;
+#if !defined(CONFIG_USER_ONLY)
+ cc->do_unaligned_access = arm_cpu_do_unaligned_access;
+ cc->do_transaction_failed = arm_cpu_do_transaction_failed;
+#endif /* CONFIG_TCG && !CONFIG_USER_ONLY */
#endif
}
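
The dump routines added above are reached through the generic CPUClass::dump_state hook; the cc->dump_state assignment in arm_cpu_class_init() is presumably unchanged by this patch, which is why it does not appear in the class-init hunks. Roughly, the core dispatcher looks like the following sketch (paraphrased from the QEMU 4.x sources, not part of this diff):

/*
 * Sketch of the generic dispatcher that ends up calling the new
 * (now static) arm_cpu_dump_state() for ARM CPUs.
 */
void cpu_dump_state(CPUState *cpu, FILE *f, int flags)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->dump_state) {
        cpu_synchronize_state(cpu);      /* pull registers in from the accelerator (KVM etc.) */
        cc->dump_state(cpu, f, flags);   /* -> arm_cpu_dump_state() above for ARM */
    }
}
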
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index f9da672be5..a9be18660f 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -929,8 +929,6 @@ void arm_cpu_do_interrupt(CPUState *cpu);
void arm_v7m_cpu_do_interrupt(CPUState *cpu);
bool arm_cpu_exec_interrupt(CPUState *cpu, int int_req);
-void arm_cpu_dump_state(CPUState *cs, FILE *f, int flags);
-
hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
MemTxAttrs *attrs);
diff --git a/target/arm/helper.c b/target/arm/helper.c
index 7caea52561..9a1fe3b72e 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -1,3 +1,10 @@
+/*
+ * ARM generic helpers.
+ *
+ * This code is licensed under the GNU GPL v2 or later.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "target/arm/idau.h"
@@ -7,7 +14,6 @@
#include "exec/gdbstub.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
-#include "sysemu/arch_init.h"
#include "sysemu/sysemu.h"
#include "qemu/bitops.h"
#include "qemu/crc32c.h"
@@ -19,7 +25,6 @@
#include "hw/semihosting/semihost.h"
#include "sysemu/cpus.h"
#include "sysemu/kvm.h"
-#include "fpu/softfloat.h"
#include "qemu/range.h"
#include "qapi/qapi-commands-machine-target.h"
#include "qapi/error.h"
@@ -28,38 +33,12 @@
#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */
#ifndef CONFIG_USER_ONLY
-/* Cacheability and shareability attributes for a memory access */
-typedef struct ARMCacheAttrs {
- unsigned int attrs:8; /* as in the MAIR register encoding */
- unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
-} ARMCacheAttrs;
-
-static bool get_phys_addr(CPUARMState *env, target_ulong address,
- MMUAccessType access_type, ARMMMUIdx mmu_idx,
- hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
- target_ulong *page_size,
- ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);
static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
MMUAccessType access_type, ARMMMUIdx mmu_idx,
hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
target_ulong *page_size_ptr,
ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);
-
-/* Security attributes for an address, as returned by v8m_security_lookup. */
-typedef struct V8M_SAttributes {
- bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */
- bool ns;
- bool nsc;
- uint8_t sregion;
- bool srvalid;
- uint8_t iregion;
- bool irvalid;
-} V8M_SAttributes;
-
-static void v8m_security_lookup(CPUARMState *env, uint32_t address,
- MMUAccessType access_type, ARMMMUIdx mmu_idx,
- V8M_SAttributes *sattrs);
#endif
static void switch_mode(CPUARMState *env, int mode);
@@ -7524,7 +7503,8 @@ void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr)
uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
{
- /* The TT instructions can be used by unprivileged code, but in
+ /*
+ * The TT instructions can be used by unprivileged code, but in
* user-only emulation we don't have the MPU.
* Luckily since we know we are NonSecure unprivileged (and that in
* turn means that the A flag wasn't specified), all the bits in the
@@ -7700,22 +7680,41 @@ uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
return target_el;
}
-/*
- * Return true if the v7M CPACR permits access to the FPU for the specified
- * security state and privilege level.
- */
-static bool v7m_cpacr_pass(CPUARMState *env, bool is_secure, bool is_priv)
+void arm_log_exception(int idx)
{
- switch (extract32(env->v7m.cpacr[is_secure], 20, 2)) {
- case 0:
- case 2: /* UNPREDICTABLE: we treat like 0 */
- return false;
- case 1:
- return is_priv;
- case 3:
- return true;
- default:
- g_assert_not_reached();
+ if (qemu_loglevel_mask(CPU_LOG_INT)) {
+ const char *exc = NULL;
+ static const char * const excnames[] = {
+ [EXCP_UDEF] = "Undefined Instruction",
+ [EXCP_SWI] = "SVC",
+ [EXCP_PREFETCH_ABORT] = "Prefetch Abort",
+ [EXCP_DATA_ABORT] = "Data Abort",
+ [EXCP_IRQ] = "IRQ",
+ [EXCP_FIQ] = "FIQ",
+ [EXCP_BKPT] = "Breakpoint",
+ [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
+ [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
+ [EXCP_HVC] = "Hypervisor Call",
+ [EXCP_HYP_TRAP] = "Hypervisor Trap",
+ [EXCP_SMC] = "Secure Monitor Call",
+ [EXCP_VIRQ] = "Virtual IRQ",
+ [EXCP_VFIQ] = "Virtual FIQ",
+ [EXCP_SEMIHOST] = "Semihosting call",
+ [EXCP_NOCP] = "v7M NOCP UsageFault",
+ [EXCP_INVSTATE] = "v7M INVSTATE UsageFault",
+ [EXCP_STKOF] = "v8M STKOF UsageFault",
+ [EXCP_LAZYFP] = "v7M exception during lazy FP stacking",
+ [EXCP_LSERR] = "v8M LSERR UsageFault",
+ [EXCP_UNALIGNED] = "v7M UNALIGNED UsageFault",
+ };
+
+ if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
+ exc = excnames[idx];
+ }
+ if (!exc) {
+ exc = "unknown";
+ }
+ qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s]\n", idx, exc);
}
}
@@ -7796,7 +7795,8 @@ static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value,
return true;
pend_fault:
- /* By pending the exception at this point we are making
+ /*
+ * By pending the exception at this point we are making
* the IMPDEF choice "overridden exceptions pended" (see the
* MergeExcInfo() pseudocode). The other choice would be to not
* pend them now and then make a choice about which to throw away
@@ -7871,7 +7871,8 @@ static bool v7m_stack_read(ARMCPU *cpu, uint32_t *dest, uint32_t addr,
return true;
pend_fault:
- /* By pending the exception at this point we are making
+ /*
+ * By pending the exception at this point we are making
* the IMPDEF choice "overridden exceptions pended" (see the
* MergeExcInfo() pseudocode). The other choice would be to not
* pend them now and then make a choice about which to throw away
@@ -7972,7 +7973,8 @@ void HELPER(v7m_preserve_fp_state)(CPUARMState *env)
*/
}
-/* Write to v7M CONTROL.SPSEL bit for the specified security bank.
+/*
+ * Write to v7M CONTROL.SPSEL bit for the specified security bank.
* This may change the current stack pointer between Main and Process
* stack pointers if it is done for the CONTROL register for the current
* security state.
@@ -8000,7 +8002,8 @@ static void write_v7m_control_spsel_for_secstate(CPUARMState *env,
}
}
-/* Write to v7M CONTROL.SPSEL bit. This may change the current
+/*
+ * Write to v7M CONTROL.SPSEL bit. This may change the current
* stack pointer between Main and Process stack pointers.
*/
static void write_v7m_control_spsel(CPUARMState *env, bool new_spsel)
@@ -8010,7 +8013,8 @@ static void write_v7m_control_spsel(CPUARMState *env, bool new_spsel)
void write_v7m_exception(CPUARMState *env, uint32_t new_exc)
{
- /* Write a new value to v7m.exception, thus transitioning into or out
+ /*
+ * Write a new value to v7m.exception, thus transitioning into or out
* of Handler mode; this may result in a change of active stack pointer.
*/
bool new_is_psp, old_is_psp = v7m_using_psp(env);
@@ -8036,7 +8040,8 @@ static void switch_v7m_security_state(CPUARMState *env, bool new_secstate)
return;
}
- /* All the banked state is accessed by looking at env->v7m.secure
+ /*
+ * All the banked state is accessed by looking at env->v7m.secure
* except for the stack pointer; rearrange the SP appropriately.
*/
new_ss_msp = env->v7m.other_ss_msp;
@@ -8063,7 +8068,8 @@ static void switch_v7m_security_state(CPUARMState *env, bool new_secstate)
void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
{
- /* Handle v7M BXNS:
+ /*
+ * Handle v7M BXNS:
* - if the return value is a magic value, do exception return (like BX)
* - otherwise bit 0 of the return value is the target security state
*/
@@ -8078,7 +8084,8 @@ void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
}
if (dest >= min_magic) {
- /* This is an exception return magic value; put it where
+ /*
+ * This is an exception return magic value; put it where
* do_v7m_exception_exit() expects and raise EXCEPTION_EXIT.
* Note that if we ever add gen_ss_advance() singlestep support to
* M profile this should count as an "instruction execution complete"
@@ -8103,7 +8110,8 @@ void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
{
- /* Handle v7M BLXNS:
+ /*
+ * Handle v7M BLXNS:
* - bit 0 of the destination address is the target security state
*/
@@ -8116,7 +8124,8 @@ void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
assert(env->v7m.secure);
if (dest & 1) {
- /* target is Secure, so this is just a normal BLX,
+ /*
+ * Target is Secure, so this is just a normal BLX,
* except that the low bit doesn't indicate Thumb/not.
*/
env->regs[14] = nextinst;
@@ -8147,7 +8156,8 @@ void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
env->regs[13] = sp;
env->regs[14] = 0xfeffffff;
if (arm_v7m_is_handler_mode(env)) {
- /* Write a dummy value to IPSR, to avoid leaking the current secure
+ /*
+ * Write a dummy value to IPSR, to avoid leaking the current secure
* exception number to non-secure code. This is guaranteed not
* to cause write_v7m_exception() to actually change stacks.
*/
@@ -8162,7 +8172,8 @@ void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
static uint32_t *get_v7m_sp_ptr(CPUARMState *env, bool secure, bool threadmode,
bool spsel)
{
- /* Return a pointer to the location where we currently store the
+ /*
+ * Return a pointer to the location where we currently store the
* stack pointer for the requested security state and thread mode.
* This pointer will become invalid if the CPU state is updated
* such that the stack pointers are switched around (eg changing
@@ -8208,7 +8219,8 @@ static bool arm_v7m_load_vector(ARMCPU *cpu, int exc, bool targets_secure,
mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targets_secure, true);
- /* We don't do a get_phys_addr() here because the rules for vector
+ /*
+ * We don't do a get_phys_addr() here because the rules for vector
* loads are special: they always use the default memory map, and
* the default memory map permits reads from all addresses.
* Since there's no easy way to pass through to pmsav8_mpu_lookup()
@@ -8239,7 +8251,8 @@ static bool arm_v7m_load_vector(ARMCPU *cpu, int exc, bool targets_secure,
return true;
load_fail:
- /* All vector table fetch fails are reported as HardFault, with
+ /*
+ * All vector table fetch fails are reported as HardFault, with
* HFSR.VECTTBL and .FORCED set. (FORCED is set because
* technically the underlying exception is a MemManage or BusFault
* that is escalated to HardFault.) This is a terminal exception,
@@ -8271,7 +8284,8 @@ static uint32_t v7m_integrity_sig(CPUARMState *env, uint32_t lr)
static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain,
bool ignore_faults)
{
- /* For v8M, push the callee-saves register part of the stack frame.
+ /*
+ * For v8M, push the callee-saves register part of the stack frame.
* Compare the v8M pseudocode PushCalleeStack().
* In the tailchaining case this may not be the current stack.
*/
@@ -8322,7 +8336,8 @@ static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain,
return true;
}
- /* Write as much of the stack frame as we can. A write failure may
+ /*
+ * Write as much of the stack frame as we can. A write failure may
* cause us to pend a derived exception.
*/
sig = v7m_integrity_sig(env, lr);
@@ -8346,7 +8361,8 @@ static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain,
static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
bool ignore_stackfaults)
{
- /* Do the "take the exception" parts of exception entry,
+ /*
+ * Do the "take the exception" parts of exception entry,
* but not the pushing of state to the stack. This is
* similar to the pseudocode ExceptionTaken() function.
*/
@@ -8371,13 +8387,15 @@ static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
if (arm_feature(env, ARM_FEATURE_V8)) {
if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
(lr & R_V7M_EXCRET_S_MASK)) {
- /* The background code (the owner of the registers in the
+ /*
+ * The background code (the owner of the registers in the
* exception frame) is Secure. This means it may either already
* have or now needs to push callee-saves registers.
*/
if (targets_secure) {
if (dotailchain && !(lr & R_V7M_EXCRET_ES_MASK)) {
- /* We took an exception from Secure to NonSecure
+ /*
+ * We took an exception from Secure to NonSecure
* (which means the callee-saved registers got stacked)
* and are now tailchaining to a Secure exception.
* Clear DCRS so eventual return from this Secure
@@ -8386,7 +8404,8 @@ static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
lr &= ~R_V7M_EXCRET_DCRS_MASK;
}
} else {
- /* We're going to a non-secure exception; push the
+ /*
+ * We're going to a non-secure exception; push the
* callee-saves registers to the stack now, if they're
* not already saved.
*/
@@ -8408,14 +8427,16 @@ static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
lr |= R_V7M_EXCRET_SPSEL_MASK;
}
- /* Clear registers if necessary to prevent non-secure exception
+ /*
+ * Clear registers if necessary to prevent non-secure exception
* code being able to see register values from secure code.
* Where register values become architecturally UNKNOWN we leave
* them with their previous values.
*/
if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
if (!targets_secure) {
- /* Always clear the caller-saved registers (they have been
+ /*
+ * Always clear the caller-saved registers (they have been
* pushed to the stack earlier in v7m_push_stack()).
* Clear callee-saved registers if the background code is
* Secure (in which case these regs were saved in
@@ -8436,7 +8457,8 @@ static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
}
if (push_failed && !ignore_stackfaults) {
- /* Derived exception on callee-saves register stacking:
+ /*
+ * Derived exception on callee-saves register stacking:
* we might now want to take a different exception which
* targets a different security state, so try again from the top.
*/
@@ -8453,7 +8475,8 @@ static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
return;
}
- /* Now we've done everything that might cause a derived exception
+ /*
+ * Now we've done everything that might cause a derived exception
* we can go ahead and activate whichever exception we're going to
* take (which might now be the derived exception).
*/
@@ -8656,7 +8679,8 @@ void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr)
static bool v7m_push_stack(ARMCPU *cpu)
{
- /* Do the "set up stack frame" part of exception entry,
+ /*
+ * Do the "set up stack frame" part of exception entry,
* similar to pseudocode PushStack().
* Return true if we generate a derived exception (and so
* should ignore further stack faults trying to process
@@ -8724,7 +8748,8 @@ static bool v7m_push_stack(ARMCPU *cpu)
}
}
- /* Write as much of the stack frame as we can. If we fail a stack
+ /*
+ * Write as much of the stack frame as we can. If we fail a stack
* write this will result in a derived exception being pended
* (which may be taken in preference to the one we started with
* if it has higher priority).
@@ -8841,7 +8866,8 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
bool ftype;
bool restore_s16_s31;
- /* If we're not in Handler mode then jumps to magic exception-exit
+ /*
+ * If we're not in Handler mode then jumps to magic exception-exit
* addresses don't have magic behaviour. However for the v8M
* security extensions the magic secure-function-return has to
* work in thread mode too, so to avoid doing an extra check in
@@ -8855,7 +8881,8 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
return;
}
- /* In the spec pseudocode ExceptionReturn() is called directly
+ /*
+ * In the spec pseudocode ExceptionReturn() is called directly
* from BXWritePC() and gets the full target PC value including
* bit zero. In QEMU's implementation we treat it as a normal
* jump-to-register (which is then caught later on), and so split
@@ -8888,7 +8915,8 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
}
if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
- /* EXC_RETURN.ES validation check (R_SMFL). We must do this before
+ /*
+ * EXC_RETURN.ES validation check (R_SMFL). We must do this before
* we pick which FAULTMASK to clear.
*/
if (!env->v7m.secure &&
@@ -8902,7 +8930,8 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
}
if (env->v7m.exception != ARMV7M_EXCP_NMI) {
- /* Auto-clear FAULTMASK on return from other than NMI.
+ /*
+ * Auto-clear FAULTMASK on return from other than NMI.
* If the security extension is implemented then this only
* happens if the raw execution priority is >= 0; the
* value of the ES bit in the exception return value indicates
@@ -8927,7 +8956,8 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
/* still an irq active now */
break;
case 1:
- /* we returned to base exception level, no nesting.
+ /*
+ * We returned to base exception level, no nesting.
* (In the pseudocode this is written using "NestedActivation != 1"
* where we have 'rettobase == false'.)
*/
@@ -8944,7 +8974,8 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
if (arm_feature(env, ARM_FEATURE_V8)) {
if (!arm_feature(env, ARM_FEATURE_M_SECURITY)) {
- /* UNPREDICTABLE if S == 1 or DCRS == 0 or ES == 1 (R_XLCP);
+ /*
+ * UNPREDICTABLE if S == 1 or DCRS == 0 or ES == 1 (R_XLCP);
* we choose to take the UsageFault.
*/
if ((excret & R_V7M_EXCRET_S_MASK) ||
@@ -8963,7 +8994,8 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
break;
case 13: /* Return to Thread using Process stack */
case 9: /* Return to Thread using Main stack */
- /* We only need to check NONBASETHRDENA for v7M, because in
+ /*
+ * We only need to check NONBASETHRDENA for v7M, because in
* v8M this bit does not exist (it is RES1).
*/
if (!rettobase &&
@@ -9021,7 +9053,8 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
}
if (ufault) {
- /* Bad exception return: instead of popping the exception
+ /*
+ * Bad exception return: instead of popping the exception
* stack, directly take a usage fault on the current stack.
*/
env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
@@ -9051,7 +9084,8 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
switch_v7m_security_state(env, return_to_secure);
{
- /* The stack pointer we should be reading the exception frame from
+ /*
+ * The stack pointer we should be reading the exception frame from
* depends on bits in the magic exception return type value (and
* for v8M isn't necessarily the stack pointer we will eventually
* end up resuming execution with). Get a pointer to the location
@@ -9124,7 +9158,8 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
v7m_stack_read(cpu, &xpsr, frameptr + 0x1c, mmu_idx);
if (!pop_ok) {
- /* v7m_stack_read() pended a fault, so take it (as a tail
+ /*
+ * v7m_stack_read() pended a fault, so take it (as a tail
* chained exception on the same stack frame)
*/
qemu_log_mask(CPU_LOG_INT, "...derived exception on unstacking\n");
@@ -9132,7 +9167,8 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
return;
}
- /* Returning from an exception with a PC with bit 0 set is defined
+ /*
+ * Returning from an exception with a PC with bit 0 set is defined
* behaviour on v8M (bit 0 is ignored), but for v7M it was specified
* to be UNPREDICTABLE. In practice actual v7M hardware seems to ignore
* the lsbit, and there are several RTOSes out there which incorrectly
@@ -9150,13 +9186,15 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
}
if (arm_feature(env, ARM_FEATURE_V8)) {
- /* For v8M we have to check whether the xPSR exception field
+ /*
+ * For v8M we have to check whether the xPSR exception field
* matches the EXCRET value for return to handler/thread
* before we commit to changing the SP and xPSR.
*/
bool will_be_handler = (xpsr & XPSR_EXCP) != 0;
if (return_to_handler != will_be_handler) {
- /* Take an INVPC UsageFault on the current stack.
+ /*
+ * Take an INVPC UsageFault on the current stack.
* By this point we will have switched to the security state
* for the background state, so this UsageFault will target
* that state.
@@ -9271,7 +9309,8 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
frameptr += 0x40;
}
}
- /* Undo stack alignment (the SPREALIGN bit indicates that the original
+ /*
+ * Undo stack alignment (the SPREALIGN bit indicates that the original
* pre-exception SP was not 8-aligned and we added a padding word to
* align it, so we undo this by ORing in the bit that increases it
* from the current 8-aligned value to the 8-unaligned value. (Adding 4
@@ -9297,13 +9336,15 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
V7M_CONTROL, SFPA, sfpa);
}
- /* The restored xPSR exception field will be zero if we're
+ /*
+ * The restored xPSR exception field will be zero if we're
* resuming in Thread mode. If that doesn't match what the
* exception return excret specified then this is a UsageFault.
* v7M requires we make this check here; v8M did it earlier.
*/
if (return_to_handler != arm_v7m_is_handler_mode(env)) {
- /* Take an INVPC UsageFault by pushing the stack again;
+ /*
+ * Take an INVPC UsageFault by pushing the stack again;
* we know we're v7M so this is never a Secure UsageFault.
*/
bool ignore_stackfaults;
@@ -9325,7 +9366,8 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
static bool do_v7m_function_return(ARMCPU *cpu)
{
- /* v8M security extensions magic function return.
+ /*
+ * v8M security extensions magic function return.
* We may either:
* (1) throw an exception (longjump)
* (2) return true if we successfully handled the function return
@@ -9355,7 +9397,8 @@ static bool do_v7m_function_return(ARMCPU *cpu)
frame_sp_p = get_v7m_sp_ptr(env, true, threadmode, spsel);
frameptr = *frame_sp_p;
- /* These loads may throw an exception (for MPU faults). We want to
+ /*
+ * These loads may throw an exception (for MPU faults). We want to
* do them as secure, so work out what MMU index that is.
*/
mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
@@ -9395,48 +9438,11 @@ static bool do_v7m_function_return(ARMCPU *cpu)
return true;
}
-static void arm_log_exception(int idx)
-{
- if (qemu_loglevel_mask(CPU_LOG_INT)) {
- const char *exc = NULL;
- static const char * const excnames[] = {
- [EXCP_UDEF] = "Undefined Instruction",
- [EXCP_SWI] = "SVC",
- [EXCP_PREFETCH_ABORT] = "Prefetch Abort",
- [EXCP_DATA_ABORT] = "Data Abort",
- [EXCP_IRQ] = "IRQ",
- [EXCP_FIQ] = "FIQ",
- [EXCP_BKPT] = "Breakpoint",
- [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
- [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
- [EXCP_HVC] = "Hypervisor Call",
- [EXCP_HYP_TRAP] = "Hypervisor Trap",
- [EXCP_SMC] = "Secure Monitor Call",
- [EXCP_VIRQ] = "Virtual IRQ",
- [EXCP_VFIQ] = "Virtual FIQ",
- [EXCP_SEMIHOST] = "Semihosting call",
- [EXCP_NOCP] = "v7M NOCP UsageFault",
- [EXCP_INVSTATE] = "v7M INVSTATE UsageFault",
- [EXCP_STKOF] = "v8M STKOF UsageFault",
- [EXCP_LAZYFP] = "v7M exception during lazy FP stacking",
- [EXCP_LSERR] = "v8M LSERR UsageFault",
- [EXCP_UNALIGNED] = "v7M UNALIGNED UsageFault",
- };
-
- if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
- exc = excnames[idx];
- }
- if (!exc) {
- exc = "unknown";
- }
- qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s]\n", idx, exc);
- }
-}
-
static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx,
uint32_t addr, uint16_t *insn)
{
- /* Load a 16-bit portion of a v7M instruction, returning true on success,
+ /*
+ * Load a 16-bit portion of a v7M instruction, returning true on success,
* or false on failure (in which case we will have pended the appropriate
* exception).
* We need to do the instruction fetch's MPU and SAU checks
@@ -9459,7 +9465,8 @@ static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx,
v8m_security_lookup(env, addr, MMU_INST_FETCH, mmu_idx, &sattrs);
if (!sattrs.nsc || sattrs.ns) {
- /* This must be the second half of the insn, and it straddles a
+ /*
+ * This must be the second half of the insn, and it straddles a
* region boundary with the second half not being S&NSC.
*/
env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
@@ -9489,7 +9496,8 @@ static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx,
static bool v7m_handle_execute_nsc(ARMCPU *cpu)
{
- /* Check whether this attempt to execute code in a Secure & NS-Callable
+ /*
+ * Check whether this attempt to execute code in a Secure & NS-Callable
* memory region is for an SG instruction; if so, then emulate the
* effect of the SG instruction and return true. Otherwise pend
* the correct kind of exception and return false.
@@ -9498,7 +9506,8 @@ static bool v7m_handle_execute_nsc(ARMCPU *cpu)
ARMMMUIdx mmu_idx;
uint16_t insn;
- /* We should never get here unless get_phys_addr_pmsav8() caused
+ /*
+ * We should never get here unless get_phys_addr_pmsav8() caused
* an exception for NS executing in S&NSC memory.
*/
assert(!env->v7m.secure);
@@ -9516,7 +9525,8 @@ static bool v7m_handle_execute_nsc(ARMCPU *cpu)
}
if (insn != 0xe97f) {
- /* Not an SG instruction first half (we choose the IMPDEF
+ /*
+ * Not an SG instruction first half (we choose the IMPDEF
* early-SG-check option).
*/
goto gen_invep;
@@ -9527,13 +9537,15 @@ static bool v7m_handle_execute_nsc(ARMCPU *cpu)
}
if (insn != 0xe97f) {
- /* Not an SG instruction second half (yes, both halves of the SG
+ /*
+ * Not an SG instruction second half (yes, both halves of the SG
* insn have the same hex value)
*/
goto gen_invep;
}
- /* OK, we have confirmed that we really have an SG instruction.
+ /*
+ * OK, we have confirmed that we really have an SG instruction.
* We know we're NS in S memory so don't need to repeat those checks.
*/
qemu_log_mask(CPU_LOG_INT, "...really an SG instruction at 0x%08" PRIx32
@@ -9562,8 +9574,10 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
arm_log_exception(cs->exception_index);
- /* For exceptions we just mark as pending on the NVIC, and let that
- handle it. */
+ /*
+ * For exceptions we just mark as pending on the NVIC, and let that
+ * handle it.
+ */
switch (cs->exception_index) {
case EXCP_UDEF:
armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
@@ -9609,13 +9623,15 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
break;
case EXCP_PREFETCH_ABORT:
case EXCP_DATA_ABORT:
- /* Note that for M profile we don't have a guest facing FSR, but
+ /*
+ * Note that for M profile we don't have a guest facing FSR, but
* the env->exception.fsr will be populated by the code that
* raises the fault, in the A profile short-descriptor format.
*/
switch (env->exception.fsr & 0xf) {
case M_FAKE_FSR_NSC_EXEC:
- /* Exception generated when we try to execute code at an address
+ /*
+ * Exception generated when we try to execute code at an address
* which is marked as Secure & Non-Secure Callable and the CPU
* is in the Non-Secure state. The only instruction which can
* be executed like this is SG (and that only if both halves of
@@ -9628,7 +9644,8 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
}
break;
case M_FAKE_FSR_SFAULT:
- /* Various flavours of SecureFault for attempts to execute or
+ /*
+ * Various flavours of SecureFault for attempts to execute or
* access data in the wrong security state.
*/
switch (cs->exception_index) {
@@ -9670,7 +9687,8 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
break;
default:
- /* All other FSR values are either MPU faults or "can't happen
+ /*
+ * All other FSR values are either MPU faults or "can't happen
* for M profile" cases.
*/
switch (cs->exception_index) {
@@ -9736,7 +9754,8 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
if (arm_feature(env, ARM_FEATURE_V8)) {
lr = R_V7M_EXCRET_RES1_MASK |
R_V7M_EXCRET_DCRS_MASK;
- /* The S bit indicates whether we should return to Secure
+ /*
+ * The S bit indicates whether we should return to Secure
* or NonSecure (ie our current state).
* The ES bit indicates whether we're taking this exception
* to Secure or NonSecure (ie our target state). We set it
@@ -9771,7 +9790,8 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
v7m_exception_taken(cpu, lr, false, ignore_stackfaults);
}
-/* Function used to synchronize QEMU's AArch64 register set with AArch32
+/*
+ * Function used to synchronize QEMU's AArch64 register set with AArch32
* register set. This is necessary when switching between AArch32 and AArch64
* execution state.
*/
@@ -9785,7 +9805,8 @@ void aarch64_sync_32_to_64(CPUARMState *env)
env->xregs[i] = env->regs[i];
}
- /* Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12.
+ /*
+ * Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12.
* Otherwise, they come from the banked user regs.
*/
if (mode == ARM_CPU_MODE_FIQ) {
@@ -9798,7 +9819,8 @@ void aarch64_sync_32_to_64(CPUARMState *env)
}
}
- /* Registers x13-x23 are the various mode SP and FP registers. Registers
+ /*
+ * Registers x13-x23 are the various mode SP and FP registers. Registers
* r13 and r14 are only copied if we are in that mode, otherwise we copy
* from the mode banked register.
*/
@@ -9853,7 +9875,8 @@ void aarch64_sync_32_to_64(CPUARMState *env)
env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)];
}
- /* Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ
+ /*
+ * Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ
* mode, then we can copy from r8-r14. Otherwise, we copy from the
* FIQ bank for r8-r14.
*/
@@ -9872,7 +9895,8 @@ void aarch64_sync_32_to_64(CPUARMState *env)
env->pc = env->regs[15];
}
-/* Function used to synchronize QEMU's AArch32 register set with AArch64
+/*
+ * Function used to synchronize QEMU's AArch32 register set with AArch64
* register set. This is necessary when switching between AArch32 and AArch64
* execution state.
*/
@@ -9886,7 +9910,8 @@ void aarch64_sync_64_to_32(CPUARMState *env)
env->regs[i] = env->xregs[i];
}
- /* Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12.
+ /*
+ * Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12.
* Otherwise, we copy x8-x12 into the banked user regs.
*/
if (mode == ARM_CPU_MODE_FIQ) {
@@ -9899,7 +9924,8 @@ void aarch64_sync_64_to_32(CPUARMState *env)
}
}
- /* Registers r13 & r14 depend on the current mode.
+ /*
+ * Registers r13 & r14 depend on the current mode.
* If we are in a given mode, we copy the corresponding x registers to r13
* and r14. Otherwise, we copy the x register to the banked r13 and r14
* for the mode.
@@ -9910,7 +9936,8 @@ void aarch64_sync_64_to_32(CPUARMState *env)
} else {
env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13];
- /* HYP is an exception in that it does not have its own banked r14 but
+ /*
+ * HYP is an exception in that it does not have its own banked r14 but
* shares the USR r14
*/
if (mode == ARM_CPU_MODE_HYP) {
@@ -12056,7 +12083,7 @@ static bool v8m_is_sau_exempt(CPUARMState *env,
(address >= 0xe00ff000 && address <= 0xe00fffff);
}
-static void v8m_security_lookup(CPUARMState *env, uint32_t address,
+void v8m_security_lookup(CPUARMState *env, uint32_t address,
MMUAccessType access_type, ARMMMUIdx mmu_idx,
V8M_SAttributes *sattrs)
{
@@ -12163,7 +12190,7 @@ static void v8m_security_lookup(CPUARMState *env, uint32_t address,
}
}
-static bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
+bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
MMUAccessType access_type, ARMMMUIdx mmu_idx,
hwaddr *phys_ptr, MemTxAttrs *txattrs,
int *prot, bool *is_subpage,
@@ -12567,11 +12594,11 @@ static ARMCacheAttrs combine_cacheattrs(ARMCacheAttrs s1, ARMCacheAttrs s2)
* @fi: set to fault info if the translation fails
* @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
*/
-static bool get_phys_addr(CPUARMState *env, target_ulong address,
- MMUAccessType access_type, ARMMMUIdx mmu_idx,
- hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
- target_ulong *page_size,
- ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
+bool get_phys_addr(CPUARMState *env, target_ulong address,
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
+ hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
+ target_ulong *page_size,
+ ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
{
if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
/* Call ourselves recursively to do the stage 1 and then stage 2
@@ -12753,7 +12780,8 @@ uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
return value;
}
case 0x94: /* CONTROL_NS */
- /* We have to handle this here because unprivileged Secure code
+ /*
+ * We have to handle this here because unprivileged Secure code
* can read the NS CONTROL register.
*/
if (!env->v7m.secure) {
@@ -12806,7 +12834,8 @@ uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
return env->v7m.faultmask[M_REG_NS];
case 0x98: /* SP_NS */
{
- /* This gives the non-secure SP selected based on whether we're
+ /*
+ * This gives the non-secure SP selected based on whether we're
* currently in handler mode or not, using the NS CONTROL.SPSEL.
*/
bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
@@ -12857,7 +12886,8 @@ uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
{
- /* We're passed bits [11..0] of the instruction; extract
+ /*
+ * We're passed bits [11..0] of the instruction; extract
* SYSm and the mask bits.
* Invalid combinations of SYSm and mask are UNPREDICTABLE;
* we choose to treat them as if the mask bits were valid.
@@ -12943,7 +12973,8 @@ void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
return;
case 0x98: /* SP_NS */
{
- /* This gives the non-secure SP selected based on whether we're
+ /*
+ * This gives the non-secure SP selected based on whether we're
* currently in handler mode or not, using the NS CONTROL.SPSEL.
*/
bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
@@ -13104,7 +13135,8 @@ uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
bool targetsec = env->v7m.secure;
bool is_subpage;
- /* Work out what the security state and privilege level we're
+ /*
+ * Work out what the security state and privilege level we're
* interested in is...
*/
if (alt) {
@@ -13121,12 +13153,14 @@ uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
/* ...and then figure out which MMU index this is */
mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targetsec, targetpriv);
- /* We know that the MPU and SAU don't care about the access type
+ /*
+ * We know that the MPU and SAU don't care about the access type
* for our purposes beyond that we don't want to claim to be
* an insn fetch, so we arbitrarily call this a read.
*/
- /* MPU region info only available for privileged or if
+ /*
+ * MPU region info only available for privileged or if
* inspecting the other MPU state.
*/
if (arm_current_el(env) != 0 || alt) {
@@ -13176,146 +13210,6 @@ uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
#endif
-bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
- MMUAccessType access_type, int mmu_idx,
- bool probe, uintptr_t retaddr)
-{
- ARMCPU *cpu = ARM_CPU(cs);
-
-#ifdef CONFIG_USER_ONLY
- cpu->env.exception.vaddress = address;
- if (access_type == MMU_INST_FETCH) {
- cs->exception_index = EXCP_PREFETCH_ABORT;
- } else {
- cs->exception_index = EXCP_DATA_ABORT;
- }
- cpu_loop_exit_restore(cs, retaddr);
-#else
- hwaddr phys_addr;
- target_ulong page_size;
- int prot, ret;
- MemTxAttrs attrs = {};
- ARMMMUFaultInfo fi = {};
-
- /*
- * Walk the page table and (if the mapping exists) add the page
- * to the TLB. On success, return true. Otherwise, if probing,
- * return false. Otherwise populate fsr with ARM DFSR/IFSR fault
- * register format, and signal the fault.
- */
- ret = get_phys_addr(&cpu->env, address, access_type,
- core_to_arm_mmu_idx(&cpu->env, mmu_idx),
- &phys_addr, &attrs, &prot, &page_size, &fi, NULL);
- if (likely(!ret)) {
- /*
- * Map a single [sub]page. Regions smaller than our declared
- * target page size are handled specially, so for those we
- * pass in the exact addresses.
- */
- if (page_size >= TARGET_PAGE_SIZE) {
- phys_addr &= TARGET_PAGE_MASK;
- address &= TARGET_PAGE_MASK;
- }
- tlb_set_page_with_attrs(cs, address, phys_addr, attrs,
- prot, mmu_idx, page_size);
- return true;
- } else if (probe) {
- return false;
- } else {
- /* now we have a real cpu fault */
- cpu_restore_state(cs, retaddr, true);
- arm_deliver_fault(cpu, address, access_type, mmu_idx, &fi);
- }
-#endif
-}
-
-void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in)
-{
- /* Implement DC ZVA, which zeroes a fixed-length block of memory.
- * Note that we do not implement the (architecturally mandated)
- * alignment fault for attempts to use this on Device memory
- * (which matches the usual QEMU behaviour of not implementing either
- * alignment faults or any memory attribute handling).
- */
-
- ARMCPU *cpu = env_archcpu(env);
- uint64_t blocklen = 4 << cpu->dcz_blocksize;
- uint64_t vaddr = vaddr_in & ~(blocklen - 1);
-
-#ifndef CONFIG_USER_ONLY
- {
- /* Slightly awkwardly, QEMU's TARGET_PAGE_SIZE may be less than
- * the block size so we might have to do more than one TLB lookup.
- * We know that in fact for any v8 CPU the page size is at least 4K
- * and the block size must be 2K or less, but TARGET_PAGE_SIZE is only
- * 1K as an artefact of legacy v5 subpage support being present in the
- * same QEMU executable. So in practice the hostaddr[] array has
- * two entries, given the current setting of TARGET_PAGE_BITS_MIN.
- */
- int maxidx = DIV_ROUND_UP(blocklen, TARGET_PAGE_SIZE);
- void *hostaddr[DIV_ROUND_UP(2 * KiB, 1 << TARGET_PAGE_BITS_MIN)];
- int try, i;
- unsigned mmu_idx = cpu_mmu_index(env, false);
- TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
-
- assert(maxidx <= ARRAY_SIZE(hostaddr));
-
- for (try = 0; try < 2; try++) {
-
- for (i = 0; i < maxidx; i++) {
- hostaddr[i] = tlb_vaddr_to_host(env,
- vaddr + TARGET_PAGE_SIZE * i,
- 1, mmu_idx);
- if (!hostaddr[i]) {
- break;
- }
- }
- if (i == maxidx) {
- /* If it's all in the TLB it's fair game for just writing to;
- * we know we don't need to update dirty status, etc.
- */
- for (i = 0; i < maxidx - 1; i++) {
- memset(hostaddr[i], 0, TARGET_PAGE_SIZE);
- }
- memset(hostaddr[i], 0, blocklen - (i * TARGET_PAGE_SIZE));
- return;
- }
- /* OK, try a store and see if we can populate the tlb. This
- * might cause an exception if the memory isn't writable,
- * in which case we will longjmp out of here. We must for
- * this purpose use the actual register value passed to us
- * so that we get the fault address right.
- */
- helper_ret_stb_mmu(env, vaddr_in, 0, oi, GETPC());
- /* Now we can populate the other TLB entries, if any */
- for (i = 0; i < maxidx; i++) {
- uint64_t va = vaddr + TARGET_PAGE_SIZE * i;
- if (va != (vaddr_in & TARGET_PAGE_MASK)) {
- helper_ret_stb_mmu(env, va, 0, oi, GETPC());
- }
- }
- }
-
- /* Slow path (probably attempt to do this to an I/O device or
- * similar, or clearing of a block of code we have translations
- * cached for). Just do a series of byte writes as the architecture
- * demands. It's not worth trying to use a cpu_physical_memory_map(),
- * memset(), unmap() sequence here because:
- * + we'd need to account for the blocksize being larger than a page
- * + the direct-RAM access case is almost always going to be dealt
- * with in the fastpath code above, so there's no speed benefit
- * + we would have to deal with the map returning NULL because the
- * bounce buffer was in use
- */
- for (i = 0; i < blocklen; i++) {
- helper_ret_stb_mmu(env, vaddr + i, 0, oi, GETPC());
- }
- }
-#else
- memset(g2h(vaddr), 0, blocklen);
-#endif
-}
-
/* Note that signed overflow is undefined in C. The following routines are
careful to use unsigned types where modulo arithmetic is required.
Failure to do so _will_ break on newer gcc. */
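
That comment is what the saturating-arithmetic helpers later in this file rely on: the addition is done in unsigned types, where wrap-around is well defined, and overflow is detected from the sign bits afterwards. A minimal illustration of the idiom (illustrative only, not a helper from this patch):

static inline int32_t sat_add32(int32_t a, int32_t b)
{
    uint32_t res = (uint32_t)a + (uint32_t)b;   /* unsigned wrap-around is defined */

    /* Overflow occurred iff a and b share a sign and res has the opposite one. */
    if (((res ^ (uint32_t)a) & (res ^ (uint32_t)b)) & 0x80000000u) {
        res = (a < 0) ? 0x80000000u : 0x7fffffffu;   /* INT32_MIN : INT32_MAX */
    }
    return (int32_t)res;
}
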
diff --git a/target/arm/internals.h b/target/arm/internals.h
index 5a02f458f3..232d963875 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -529,11 +529,15 @@ vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len);
/* Callback function for when a watchpoint or breakpoint triggers. */
void arm_debug_excp_handler(CPUState *cs);
-#ifdef CONFIG_USER_ONLY
+#if defined(CONFIG_USER_ONLY) || !defined(CONFIG_TCG)
static inline bool arm_is_psci_call(ARMCPU *cpu, int excp_type)
{
return false;
}
+static inline void arm_handle_psci_call(ARMCPU *cpu)
+{
+ g_assert_not_reached();
+}
#else
/* Return true if the r0/x0 value indicates that this SMC/HVC is a PSCI call. */
bool arm_is_psci_call(ARMCPU *cpu, int excp_type);
@@ -765,9 +769,6 @@ bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr);
-void arm_deliver_fault(ARMCPU *cpu, vaddr addr, MMUAccessType access_type,
- int mmu_idx, ARMMMUFaultInfo *fi) QEMU_NORETURN;
-
/* Return true if the stage 1 translation regime is using LPAE format page
* tables */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx);
@@ -892,6 +893,27 @@ static inline uint32_t v7m_sp_limit(CPUARMState *env)
}
/**
+ * v7m_cpacr_pass:
+ * Return true if the v7M CPACR permits access to the FPU for the specified
+ * security state and privilege level.
+ */
+static inline bool v7m_cpacr_pass(CPUARMState *env,
+ bool is_secure, bool is_priv)
+{
+ switch (extract32(env->v7m.cpacr[is_secure], 20, 2)) {
+ case 0:
+ case 2: /* UNPREDICTABLE: we treat like 0 */
+ return false;
+ case 1:
+ return is_priv;
+ case 3:
+ return true;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+/**
* aarch32_mode_name(): Return name of the AArch32 CPU mode
* @psr: Program Status Register indicating CPU mode
*
@@ -985,4 +1007,43 @@ static inline int exception_target_el(CPUARMState *env)
return target_el;
}
+#ifndef CONFIG_USER_ONLY
+
+/* Security attributes for an address, as returned by v8m_security_lookup. */
+typedef struct V8M_SAttributes {
+ bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */
+ bool ns;
+ bool nsc;
+ uint8_t sregion;
+ bool srvalid;
+ uint8_t iregion;
+ bool irvalid;
+} V8M_SAttributes;
+
+void v8m_security_lookup(CPUARMState *env, uint32_t address,
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
+ V8M_SAttributes *sattrs);
+
+bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
+ hwaddr *phys_ptr, MemTxAttrs *txattrs,
+ int *prot, bool *is_subpage,
+ ARMMMUFaultInfo *fi, uint32_t *mregion);
+
+/* Cacheability and shareability attributes for a memory access */
+typedef struct ARMCacheAttrs {
+ unsigned int attrs:8; /* as in the MAIR register encoding */
+ unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
+} ARMCacheAttrs;
+
+bool get_phys_addr(CPUARMState *env, target_ulong address,
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
+ hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
+ target_ulong *page_size,
+ ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);
+
+void arm_log_exception(int idx);
+
+#endif /* !CONFIG_USER_ONLY */
+
#endif
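
Exporting get_phys_addr() and the MPU/SAU lookups here is what lets the TLB-fill path removed from helper.c live in another translation unit (the new tlb_helper.c, going by the diffstat). A rough sketch of that usage, mirroring the removed arm_cpu_tlb_fill() body; the name fill_tlb_entry is made up for illustration:

static bool fill_tlb_entry(CPUState *cs, vaddr address,
                           MMUAccessType access_type, int mmu_idx)
{
    ARMCPU *cpu = ARM_CPU(cs);
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    MemTxAttrs attrs = {};
    ARMMMUFaultInfo fi = {};

    /* Walk the translation tables (or MPU/SAU for M profile). */
    if (get_phys_addr(&cpu->env, address, access_type,
                      core_to_arm_mmu_idx(&cpu->env, mmu_idx),
                      &phys_addr, &attrs, &prot, &page_size, &fi, NULL)) {
        return false;    /* caller delivers the fault described by 'fi' */
    }

    /* Regions smaller than a target page keep their exact addresses. */
    if (page_size >= TARGET_PAGE_SIZE) {
        phys_addr &= TARGET_PAGE_MASK;
        address &= TARGET_PAGE_MASK;
    }
    tlb_set_page_with_attrs(cs, address, phys_addr, attrs,
                            prot, mmu_idx, page_size);
    return true;
}
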
diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c
index 4db254876d..9850993c11 100644
--- a/target/arm/op_helper.c
+++ b/target/arm/op_helper.c
@@ -17,6 +17,7 @@
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
+#include "qemu/units.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "cpu.h"
@@ -87,136 +88,6 @@ uint32_t HELPER(neon_tbl)(uint32_t ireg, uint32_t def, void *vn,
return val;
}
-#if !defined(CONFIG_USER_ONLY)
-
-static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
- unsigned int target_el,
- bool same_el, bool ea,
- bool s1ptw, bool is_write,
- int fsc)
-{
- uint32_t syn;
-
- /* ISV is only set for data aborts routed to EL2 and
- * never for stage-1 page table walks faulting on stage 2.
- *
- * Furthermore, ISV is only set for certain kinds of load/stores.
- * If the template syndrome does not have ISV set, we should leave
- * it cleared.
- *
- * See ARMv8 specs, D7-1974:
- * ISS encoding for an exception from a Data Abort, the
- * ISV field.
- */
- if (!(template_syn & ARM_EL_ISV) || target_el != 2 || s1ptw) {
- syn = syn_data_abort_no_iss(same_el,
- ea, 0, s1ptw, is_write, fsc);
- } else {
- /* Fields: IL, ISV, SAS, SSE, SRT, SF and AR come from the template
- * syndrome created at translation time.
- * Now we create the runtime syndrome with the remaining fields.
- */
- syn = syn_data_abort_with_iss(same_el,
- 0, 0, 0, 0, 0,
- ea, 0, s1ptw, is_write, fsc,
- false);
- /* Merge the runtime syndrome with the template syndrome. */
- syn |= template_syn;
- }
- return syn;
-}
-
-void arm_deliver_fault(ARMCPU *cpu, vaddr addr, MMUAccessType access_type,
- int mmu_idx, ARMMMUFaultInfo *fi)
-{
- CPUARMState *env = &cpu->env;
- int target_el;
- bool same_el;
- uint32_t syn, exc, fsr, fsc;
- ARMMMUIdx arm_mmu_idx = core_to_arm_mmu_idx(env, mmu_idx);
-
- target_el = exception_target_el(env);
- if (fi->stage2) {
- target_el = 2;
- env->cp15.hpfar_el2 = extract64(fi->s2addr, 12, 47) << 4;
- }
- same_el = (arm_current_el(env) == target_el);
-
- if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
- arm_s1_regime_using_lpae_format(env, arm_mmu_idx)) {
- /* LPAE format fault status register : bottom 6 bits are
- * status code in the same form as needed for syndrome
- */
- fsr = arm_fi_to_lfsc(fi);
- fsc = extract32(fsr, 0, 6);
- } else {
- fsr = arm_fi_to_sfsc(fi);
- /* Short format FSR : this fault will never actually be reported
- * to an EL that uses a syndrome register. Use a (currently)
- * reserved FSR code in case the constructed syndrome does leak
- * into the guest somehow.
- */
- fsc = 0x3f;
- }
-
- if (access_type == MMU_INST_FETCH) {
- syn = syn_insn_abort(same_el, fi->ea, fi->s1ptw, fsc);
- exc = EXCP_PREFETCH_ABORT;
- } else {
- syn = merge_syn_data_abort(env->exception.syndrome, target_el,
- same_el, fi->ea, fi->s1ptw,
- access_type == MMU_DATA_STORE,
- fsc);
- if (access_type == MMU_DATA_STORE
- && arm_feature(env, ARM_FEATURE_V6)) {
- fsr |= (1 << 11);
- }
- exc = EXCP_DATA_ABORT;
- }
-
- env->exception.vaddress = addr;
- env->exception.fsr = fsr;
- raise_exception(env, exc, syn, target_el);
-}
-
-/* Raise a data fault alignment exception for the specified virtual address */
-void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
- MMUAccessType access_type,
- int mmu_idx, uintptr_t retaddr)
-{
- ARMCPU *cpu = ARM_CPU(cs);
- ARMMMUFaultInfo fi = {};
-
- /* now we have a real cpu fault */
- cpu_restore_state(cs, retaddr, true);
-
- fi.type = ARMFault_Alignment;
- arm_deliver_fault(cpu, vaddr, access_type, mmu_idx, &fi);
-}
-
-/* arm_cpu_do_transaction_failed: handle a memory system error response
- * (eg "no device/memory present at address") by raising an external abort
- * exception
- */
-void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
- vaddr addr, unsigned size,
- MMUAccessType access_type,
- int mmu_idx, MemTxAttrs attrs,
- MemTxResult response, uintptr_t retaddr)
-{
- ARMCPU *cpu = ARM_CPU(cs);
- ARMMMUFaultInfo fi = {};
-
- /* now we have a real cpu fault */
- cpu_restore_state(cs, retaddr, true);
-
- fi.ea = arm_extabort_type(response);
- fi.type = ARMFault_SyncExternal;
- arm_deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
-}
-
-#endif /* !defined(CONFIG_USER_ONLY) */
-
void HELPER(v8m_stackcheck)(CPUARMState *env, uint32_t newvalue)
{
/*
@@ -970,7 +841,8 @@ static bool linked_bp_matches(ARMCPU *cpu, int lbn)
int bt;
uint32_t contextidr;
- /* Links to unimplemented or non-context aware breakpoints are
+ /*
+ * Links to unimplemented or non-context aware breakpoints are
* CONSTRAINED UNPREDICTABLE: either behave as if disabled, or
* as if linked to an UNKNOWN context-aware breakpoint (in which
* case DBGWCR<n>_EL1.LBN must indicate that breakpoint).
@@ -989,7 +861,8 @@ static bool linked_bp_matches(ARMCPU *cpu, int lbn)
bt = extract64(bcr, 20, 4);
- /* We match the whole register even if this is AArch32 using the
+ /*
+ * We match the whole register even if this is AArch32 using the
* short descriptor format (in which case it holds both PROCID and ASID),
* since we don't implement the optional v7 context ID masking.
*/
@@ -1006,7 +879,8 @@ static bool linked_bp_matches(ARMCPU *cpu, int lbn)
case 9: /* linked VMID match (reserved if no EL2) */
case 11: /* linked context ID and VMID match (reserved if no EL2) */
default:
- /* Links to Unlinked context breakpoints must generate no
+ /*
+ * Links to Unlinked context breakpoints must generate no
* events; we choose to do the same for reserved values too.
*/
return false;
@@ -1020,7 +894,8 @@ static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
CPUARMState *env = &cpu->env;
uint64_t cr;
int pac, hmc, ssc, wt, lbn;
- /* Note that for watchpoints the check is against the CPU security
+ /*
+ * Note that for watchpoints the check is against the CPU security
* state, not the S/NS attribute on the offending data access.
*/
bool is_secure = arm_is_secure(env);
@@ -1034,7 +909,8 @@ static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
}
cr = env->cp15.dbgwcr[n];
if (wp->hitattrs.user) {
- /* The LDRT/STRT/LDT/STT "unprivileged access" instructions should
+ /*
+ * The LDRT/STRT/LDT/STT "unprivileged access" instructions should
* match watchpoints as if they were accesses done at EL0, even if
* the CPU is at EL1 or higher.
*/
@@ -1048,7 +924,8 @@ static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
}
cr = env->cp15.dbgbcr[n];
}
- /* The WATCHPOINT_HIT flag guarantees us that the watchpoint is
+ /*
+ * The WATCHPOINT_HIT flag guarantees us that the watchpoint is
* enabled and that the address and access type match; for breakpoints
* we know the address matched; check the remaining fields, including
* linked breakpoints. We rely on WCR and BCR having the same layout
@@ -1116,7 +993,8 @@ static bool check_watchpoints(ARMCPU *cpu)
CPUARMState *env = &cpu->env;
int n;
- /* If watchpoints are disabled globally or we can't take debug
+ /*
+ * If watchpoints are disabled globally or we can't take debug
* exceptions here then watchpoint firings are ignored.
*/
if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
@@ -1137,7 +1015,8 @@ static bool check_breakpoints(ARMCPU *cpu)
CPUARMState *env = &cpu->env;
int n;
- /* If breakpoints are disabled globally or we can't take debug
+ /*
+ * If breakpoints are disabled globally or we can't take debug
* exceptions here then breakpoint firings are ignored.
*/
if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
@@ -1164,7 +1043,8 @@ void HELPER(check_breakpoints)(CPUARMState *env)
bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
{
- /* Called by core code when a CPU watchpoint fires; need to check if this
+ /*
+ * Called by core code when a CPU watchpoint fires; need to check if this
* is also an architectural watchpoint match.
*/
ARMCPU *cpu = ARM_CPU(cs);
@@ -1177,7 +1057,8 @@ vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len)
ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
- /* In BE32 system mode, target memory is stored byteswapped (on a
+ /*
+ * In BE32 system mode, target memory is stored byteswapped (on a
* little-endian host system), and by the time we reach here (via an
* opcode helper) the addresses of subword accesses have been adjusted
* to account for that, which means that watchpoints will not match.
@@ -1196,7 +1077,8 @@ vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len)
void arm_debug_excp_handler(CPUState *cs)
{
- /* Called by core code when a watchpoint or breakpoint fires;
+ /*
+ * Called by core code when a watchpoint or breakpoint fires;
* need to check which one and raise the appropriate exception.
*/
ARMCPU *cpu = ARM_CPU(cs);
@@ -1220,7 +1102,8 @@ void arm_debug_excp_handler(CPUState *cs)
uint64_t pc = is_a64(env) ? env->pc : env->regs[15];
bool same_el = (arm_debug_target_el(env) == arm_current_el(env));
- /* (1) GDB breakpoints should be handled first.
+ /*
+ * (1) GDB breakpoints should be handled first.
* (2) Do not raise a CPU exception if no CPU breakpoint has fired,
* since singlestep is also done by generating a debug internal
* exception.
@@ -1231,7 +1114,8 @@ void arm_debug_excp_handler(CPUState *cs)
}
env->exception.fsr = arm_debug_exception_fsr(env);
- /* FAR is UNKNOWN: clear vaddress to avoid potentially exposing
+ /*
+ * FAR is UNKNOWN: clear vaddress to avoid potentially exposing
* values to the guest that it shouldn't be able to see at its
* exception/security level.
*/
@@ -1307,3 +1191,95 @@ uint32_t HELPER(ror_cc)(CPUARMState *env, uint32_t x, uint32_t i)
return ((uint32_t)x >> shift) | (x << (32 - shift));
}
}
+
+void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in)
+{
+ /*
+ * Implement DC ZVA, which zeroes a fixed-length block of memory.
+ * Note that we do not implement the (architecturally mandated)
+ * alignment fault for attempts to use this on Device memory
+ * (which matches the usual QEMU behaviour of not implementing either
+ * alignment faults or any memory attribute handling).
+ */
+
+ ARMCPU *cpu = env_archcpu(env);
+ uint64_t blocklen = 4 << cpu->dcz_blocksize;
+ uint64_t vaddr = vaddr_in & ~(blocklen - 1);
+
+#ifndef CONFIG_USER_ONLY
+ {
+ /*
+ * Slightly awkwardly, QEMU's TARGET_PAGE_SIZE may be less than
+ * the block size so we might have to do more than one TLB lookup.
+ * We know that in fact for any v8 CPU the page size is at least 4K
+ * and the block size must be 2K or less, but TARGET_PAGE_SIZE is only
+ * 1K as an artefact of legacy v5 subpage support being present in the
+ * same QEMU executable. So in practice the hostaddr[] array has
+ * two entries, given the current setting of TARGET_PAGE_BITS_MIN.
+ */
+ int maxidx = DIV_ROUND_UP(blocklen, TARGET_PAGE_SIZE);
+ void *hostaddr[DIV_ROUND_UP(2 * KiB, 1 << TARGET_PAGE_BITS_MIN)];
+ int try, i;
+ unsigned mmu_idx = cpu_mmu_index(env, false);
+ TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
+
+ assert(maxidx <= ARRAY_SIZE(hostaddr));
+
+ for (try = 0; try < 2; try++) {
+
+ for (i = 0; i < maxidx; i++) {
+ hostaddr[i] = tlb_vaddr_to_host(env,
+ vaddr + TARGET_PAGE_SIZE * i,
+ 1, mmu_idx);
+ if (!hostaddr[i]) {
+ break;
+ }
+ }
+ if (i == maxidx) {
+ /*
+ * If it's all in the TLB it's fair game for just writing to;
+ * we know we don't need to update dirty status, etc.
+ */
+ for (i = 0; i < maxidx - 1; i++) {
+ memset(hostaddr[i], 0, TARGET_PAGE_SIZE);
+ }
+ memset(hostaddr[i], 0, blocklen - (i * TARGET_PAGE_SIZE));
+ return;
+ }
+ /*
+ * OK, try a store and see if we can populate the tlb. This
+ * might cause an exception if the memory isn't writable,
+ * in which case we will longjmp out of here. We must for
+ * this purpose use the actual register value passed to us
+ * so that we get the fault address right.
+ */
+ helper_ret_stb_mmu(env, vaddr_in, 0, oi, GETPC());
+ /* Now we can populate the other TLB entries, if any */
+ for (i = 0; i < maxidx; i++) {
+ uint64_t va = vaddr + TARGET_PAGE_SIZE * i;
+ if (va != (vaddr_in & TARGET_PAGE_MASK)) {
+ helper_ret_stb_mmu(env, va, 0, oi, GETPC());
+ }
+ }
+ }
+
+ /*
+ * Slow path (probably attempt to do this to an I/O device or
+ * similar, or clearing of a block of code we have translations
+ * cached for). Just do a series of byte writes as the architecture
+ * demands. It's not worth trying to use a cpu_physical_memory_map(),
+ * memset(), unmap() sequence here because:
+ * + we'd need to account for the blocksize being larger than a page
+ * + the direct-RAM access case is almost always going to be dealt
+ * with in the fastpath code above, so there's no speed benefit
+ * + we would have to deal with the map returning NULL because the
+ * bounce buffer was in use
+ */
+ for (i = 0; i < blocklen; i++) {
+ helper_ret_stb_mmu(env, vaddr + i, 0, oi, GETPC());
+ }
+ }
+#else
+ memset(g2h(vaddr), 0, blocklen);
+#endif
+}
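A short worked example of the block-size arithmetic described in the comment inside HELPER(dc_zva) above; the numbers below are the architectural limits the comment cites, not what any particular CPU model actually reports:

    /*
     * Illustrative values only (hypothetical):
     *   dcz_blocksize = 9   ->  blocklen = 4 << 9 = 2048 bytes (the 2K ceiling)
     *   TARGET_PAGE_SIZE = 1024   (the legacy 1K minimum mentioned above)
     *   maxidx = DIV_ROUND_UP(2048, 1024) = 2
     * so hostaddr[] never needs more than two entries, which is exactly what
     * the assert(maxidx <= ARRAY_SIZE(hostaddr)) above checks.
     */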
diff --git a/target/arm/tlb_helper.c b/target/arm/tlb_helper.c
new file mode 100644
index 0000000000..5feb312941
--- /dev/null
+++ b/target/arm/tlb_helper.c
@@ -0,0 +1,200 @@
+/*
+ * ARM TLB (Translation lookaside buffer) helpers.
+ *
+ * This code is licensed under the GNU GPL v2 or later.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "internals.h"
+#include "exec/exec-all.h"
+
+#if !defined(CONFIG_USER_ONLY)
+
+static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
+ unsigned int target_el,
+ bool same_el, bool ea,
+ bool s1ptw, bool is_write,
+ int fsc)
+{
+ uint32_t syn;
+
+ /*
+ * ISV is only set for data aborts routed to EL2 and
+ * never for stage-1 page table walks faulting on stage 2.
+ *
+ * Furthermore, ISV is only set for certain kinds of load/stores.
+ * If the template syndrome does not have ISV set, we should leave
+ * it cleared.
+ *
+ * See ARMv8 specs, D7-1974:
+ * ISS encoding for an exception from a Data Abort, the
+ * ISV field.
+ */
+ if (!(template_syn & ARM_EL_ISV) || target_el != 2 || s1ptw) {
+ syn = syn_data_abort_no_iss(same_el,
+ ea, 0, s1ptw, is_write, fsc);
+ } else {
+ /*
+ * Fields: IL, ISV, SAS, SSE, SRT, SF and AR come from the template
+ * syndrome created at translation time.
+ * Now we create the runtime syndrome with the remaining fields.
+ */
+ syn = syn_data_abort_with_iss(same_el,
+ 0, 0, 0, 0, 0,
+ ea, 0, s1ptw, is_write, fsc,
+ false);
+ /* Merge the runtime syndrome with the template syndrome. */
+ syn |= template_syn;
+ }
+ return syn;
+}
+
+static void QEMU_NORETURN arm_deliver_fault(ARMCPU *cpu, vaddr addr,
+ MMUAccessType access_type,
+ int mmu_idx, ARMMMUFaultInfo *fi)
+{
+ CPUARMState *env = &cpu->env;
+ int target_el;
+ bool same_el;
+ uint32_t syn, exc, fsr, fsc;
+ ARMMMUIdx arm_mmu_idx = core_to_arm_mmu_idx(env, mmu_idx);
+
+ target_el = exception_target_el(env);
+ if (fi->stage2) {
+ target_el = 2;
+ env->cp15.hpfar_el2 = extract64(fi->s2addr, 12, 47) << 4;
+ }
+ same_el = (arm_current_el(env) == target_el);
+
+ if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
+ arm_s1_regime_using_lpae_format(env, arm_mmu_idx)) {
+ /*
+ * LPAE format fault status register: bottom 6 bits are
+ * status code in the same form as needed for syndrome
+ */
+ fsr = arm_fi_to_lfsc(fi);
+ fsc = extract32(fsr, 0, 6);
+ } else {
+ fsr = arm_fi_to_sfsc(fi);
+ /*
+ * Short format FSR: this fault will never actually be reported
+ * to an EL that uses a syndrome register. Use a (currently)
+ * reserved FSR code in case the constructed syndrome does leak
+ * into the guest somehow.
+ */
+ fsc = 0x3f;
+ }
+
+ if (access_type == MMU_INST_FETCH) {
+ syn = syn_insn_abort(same_el, fi->ea, fi->s1ptw, fsc);
+ exc = EXCP_PREFETCH_ABORT;
+ } else {
+ syn = merge_syn_data_abort(env->exception.syndrome, target_el,
+ same_el, fi->ea, fi->s1ptw,
+ access_type == MMU_DATA_STORE,
+ fsc);
+ if (access_type == MMU_DATA_STORE
+ && arm_feature(env, ARM_FEATURE_V6)) {
+ fsr |= (1 << 11);
+ }
+ exc = EXCP_DATA_ABORT;
+ }
+
+ env->exception.vaddress = addr;
+ env->exception.fsr = fsr;
+ raise_exception(env, exc, syn, target_el);
+}
+
+/* Raise a data fault alignment exception for the specified virtual address */
+void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
+ MMUAccessType access_type,
+ int mmu_idx, uintptr_t retaddr)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ ARMMMUFaultInfo fi = {};
+
+ /* now we have a real cpu fault */
+ cpu_restore_state(cs, retaddr, true);
+
+ fi.type = ARMFault_Alignment;
+ arm_deliver_fault(cpu, vaddr, access_type, mmu_idx, &fi);
+}
+
+/*
+ * arm_cpu_do_transaction_failed: handle a memory system error response
+ * (eg "no device/memory present at address") by raising an external abort
+ * exception
+ */
+void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
+ vaddr addr, unsigned size,
+ MMUAccessType access_type,
+ int mmu_idx, MemTxAttrs attrs,
+ MemTxResult response, uintptr_t retaddr)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ ARMMMUFaultInfo fi = {};
+
+ /* now we have a real cpu fault */
+ cpu_restore_state(cs, retaddr, true);
+
+ fi.ea = arm_extabort_type(response);
+ fi.type = ARMFault_SyncExternal;
+ arm_deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
+}
+
+#endif /* !defined(CONFIG_USER_ONLY) */
+
+bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+
+#ifdef CONFIG_USER_ONLY
+ cpu->env.exception.vaddress = address;
+ if (access_type == MMU_INST_FETCH) {
+ cs->exception_index = EXCP_PREFETCH_ABORT;
+ } else {
+ cs->exception_index = EXCP_DATA_ABORT;
+ }
+ cpu_loop_exit_restore(cs, retaddr);
+#else
+ hwaddr phys_addr;
+ target_ulong page_size;
+ int prot, ret;
+ MemTxAttrs attrs = {};
+ ARMMMUFaultInfo fi = {};
+
+ /*
+ * Walk the page table and (if the mapping exists) add the page
+ * to the TLB. On success, return true. Otherwise, if probing,
+ * return false; if not probing, fill in the FSR in ARM DFSR/IFSR
+ * fault register format and signal the fault.
+ */
+ ret = get_phys_addr(&cpu->env, address, access_type,
+ core_to_arm_mmu_idx(&cpu->env, mmu_idx),
+ &phys_addr, &attrs, &prot, &page_size, &fi, NULL);
+ if (likely(!ret)) {
+ /*
+ * Map a single [sub]page. Regions smaller than our declared
+ * target page size are handled specially, so for those we
+ * pass in the exact addresses.
+ */
+ if (page_size >= TARGET_PAGE_SIZE) {
+ phys_addr &= TARGET_PAGE_MASK;
+ address &= TARGET_PAGE_MASK;
+ }
+ tlb_set_page_with_attrs(cs, address, phys_addr, attrs,
+ prot, mmu_idx, page_size);
+ return true;
+ } else if (probe) {
+ return false;
+ } else {
+ /* now we have a real cpu fault */
+ cpu_restore_state(cs, retaddr, true);
+ arm_deliver_fault(cpu, address, access_type, mmu_idx, &fi);
+ }
+#endif
+}
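As a rough illustration of the masking logic at the end of arm_cpu_tlb_fill() above (the page sizes are example values, not a statement about what get_phys_addr() returns for any particular translation regime):

    /*
     * Hypothetical example, assuming TARGET_PAGE_SIZE is 0x1000:
     *   page_size = 0x10000 (64K block): address and phys_addr are rounded
     *       down to a page boundary before tlb_set_page_with_attrs().
     *   page_size = 0x400 (1K subpage): the exact addresses are passed
     *       through, so the core TLB code treats it as a sub-page mapping
     *       and only that region is covered.
     */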
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index 97f4164fbb..d3231477a2 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -27,7 +27,6 @@
#include "translate.h"
#include "internals.h"
#include "qemu/host-utils.h"
-#include "qemu/qemu-print.h"
#include "hw/semihosting/semihost.h"
#include "exec/gen-icount.h"
@@ -152,133 +151,6 @@ static void set_btype(DisasContext *s, int val)
s->btype = -1;
}
-void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
-{
- ARMCPU *cpu = ARM_CPU(cs);
- CPUARMState *env = &cpu->env;
- uint32_t psr = pstate_read(env);
- int i;
- int el = arm_current_el(env);
- const char *ns_status;
-
- qemu_fprintf(f, " PC=%016" PRIx64 " ", env->pc);
- for (i = 0; i < 32; i++) {
- if (i == 31) {
- qemu_fprintf(f, " SP=%016" PRIx64 "\n", env->xregs[i]);
- } else {
- qemu_fprintf(f, "X%02d=%016" PRIx64 "%s", i, env->xregs[i],
- (i + 2) % 3 ? " " : "\n");
- }
- }
-
- if (arm_feature(env, ARM_FEATURE_EL3) && el != 3) {
- ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
- } else {
- ns_status = "";
- }
- qemu_fprintf(f, "PSTATE=%08x %c%c%c%c %sEL%d%c",
- psr,
- psr & PSTATE_N ? 'N' : '-',
- psr & PSTATE_Z ? 'Z' : '-',
- psr & PSTATE_C ? 'C' : '-',
- psr & PSTATE_V ? 'V' : '-',
- ns_status,
- el,
- psr & PSTATE_SP ? 'h' : 't');
-
- if (cpu_isar_feature(aa64_bti, cpu)) {
- qemu_fprintf(f, " BTYPE=%d", (psr & PSTATE_BTYPE) >> 10);
- }
- if (!(flags & CPU_DUMP_FPU)) {
- qemu_fprintf(f, "\n");
- return;
- }
- if (fp_exception_el(env, el) != 0) {
- qemu_fprintf(f, " FPU disabled\n");
- return;
- }
- qemu_fprintf(f, " FPCR=%08x FPSR=%08x\n",
- vfp_get_fpcr(env), vfp_get_fpsr(env));
-
- if (cpu_isar_feature(aa64_sve, cpu) && sve_exception_el(env, el) == 0) {
- int j, zcr_len = sve_zcr_len_for_el(env, el);
-
- for (i = 0; i <= FFR_PRED_NUM; i++) {
- bool eol;
- if (i == FFR_PRED_NUM) {
- qemu_fprintf(f, "FFR=");
- /* It's last, so end the line. */
- eol = true;
- } else {
- qemu_fprintf(f, "P%02d=", i);
- switch (zcr_len) {
- case 0:
- eol = i % 8 == 7;
- break;
- case 1:
- eol = i % 6 == 5;
- break;
- case 2:
- case 3:
- eol = i % 3 == 2;
- break;
- default:
- /* More than one quadword per predicate. */
- eol = true;
- break;
- }
- }
- for (j = zcr_len / 4; j >= 0; j--) {
- int digits;
- if (j * 4 + 4 <= zcr_len + 1) {
- digits = 16;
- } else {
- digits = (zcr_len % 4 + 1) * 4;
- }
- qemu_fprintf(f, "%0*" PRIx64 "%s", digits,
- env->vfp.pregs[i].p[j],
- j ? ":" : eol ? "\n" : " ");
- }
- }
-
- for (i = 0; i < 32; i++) {
- if (zcr_len == 0) {
- qemu_fprintf(f, "Z%02d=%016" PRIx64 ":%016" PRIx64 "%s",
- i, env->vfp.zregs[i].d[1],
- env->vfp.zregs[i].d[0], i & 1 ? "\n" : " ");
- } else if (zcr_len == 1) {
- qemu_fprintf(f, "Z%02d=%016" PRIx64 ":%016" PRIx64
- ":%016" PRIx64 ":%016" PRIx64 "\n",
- i, env->vfp.zregs[i].d[3], env->vfp.zregs[i].d[2],
- env->vfp.zregs[i].d[1], env->vfp.zregs[i].d[0]);
- } else {
- for (j = zcr_len; j >= 0; j--) {
- bool odd = (zcr_len - j) % 2 != 0;
- if (j == zcr_len) {
- qemu_fprintf(f, "Z%02d[%x-%x]=", i, j, j - 1);
- } else if (!odd) {
- if (j > 0) {
- qemu_fprintf(f, " [%x-%x]=", j, j - 1);
- } else {
- qemu_fprintf(f, " [%x]=", j);
- }
- }
- qemu_fprintf(f, "%016" PRIx64 ":%016" PRIx64 "%s",
- env->vfp.zregs[i].d[j * 2 + 1],
- env->vfp.zregs[i].d[j * 2],
- odd || j == 0 ? "\n" : ":");
- }
- }
- }
- } else {
- for (i = 0; i < 32; i++) {
- uint64_t *q = aa64_vfp_qreg(env, i);
- qemu_fprintf(f, "Q%02d=%016" PRIx64 ":%016" PRIx64 "%s",
- i, q[1], q[0], (i & 1 ? "\n" : " "));
- }
- }
-}
-
void gen_a64_set_pc_im(uint64_t val)
{
tcg_gen_movi_i64(cpu_pc, val);
diff --git a/target/arm/translate.c b/target/arm/translate.c
index 4750b9fa1b..a5d7723423 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -28,7 +28,6 @@
#include "tcg-op-gvec.h"
#include "qemu/log.h"
#include "qemu/bitops.h"
-#include "qemu/qemu-print.h"
#include "arm_ldst.h"
#include "hw/semihosting/semihost.h"
@@ -9109,7 +9108,7 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
loaded_base = 0;
loaded_var = NULL;
n = 0;
- for(i=0;i<16;i++) {
+ for (i = 0; i < 16; i++) {
if (insn & (1 << i))
n++;
}
@@ -9132,7 +9131,7 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
}
}
j = 0;
- for(i=0;i<16;i++) {
+ for (i = 0; i < 16; i++) {
if (insn & (1 << i)) {
if (is_load) {
/* load */
@@ -12342,92 +12341,6 @@ void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
translator_loop(ops, &dc.base, cpu, tb, max_insns);
}
-void arm_cpu_dump_state(CPUState *cs, FILE *f, int flags)
-{
- ARMCPU *cpu = ARM_CPU(cs);
- CPUARMState *env = &cpu->env;
- int i;
-
- if (is_a64(env)) {
- aarch64_cpu_dump_state(cs, f, flags);
- return;
- }
-
- for(i=0;i<16;i++) {
- qemu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
- if ((i % 4) == 3)
- qemu_fprintf(f, "\n");
- else
- qemu_fprintf(f, " ");
- }
-
- if (arm_feature(env, ARM_FEATURE_M)) {
- uint32_t xpsr = xpsr_read(env);
- const char *mode;
- const char *ns_status = "";
-
- if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
- ns_status = env->v7m.secure ? "S " : "NS ";
- }
-
- if (xpsr & XPSR_EXCP) {
- mode = "handler";
- } else {
- if (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_NPRIV_MASK) {
- mode = "unpriv-thread";
- } else {
- mode = "priv-thread";
- }
- }
-
- qemu_fprintf(f, "XPSR=%08x %c%c%c%c %c %s%s\n",
- xpsr,
- xpsr & XPSR_N ? 'N' : '-',
- xpsr & XPSR_Z ? 'Z' : '-',
- xpsr & XPSR_C ? 'C' : '-',
- xpsr & XPSR_V ? 'V' : '-',
- xpsr & XPSR_T ? 'T' : 'A',
- ns_status,
- mode);
- } else {
- uint32_t psr = cpsr_read(env);
- const char *ns_status = "";
-
- if (arm_feature(env, ARM_FEATURE_EL3) &&
- (psr & CPSR_M) != ARM_CPU_MODE_MON) {
- ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
- }
-
- qemu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%s%d\n",
- psr,
- psr & CPSR_N ? 'N' : '-',
- psr & CPSR_Z ? 'Z' : '-',
- psr & CPSR_C ? 'C' : '-',
- psr & CPSR_V ? 'V' : '-',
- psr & CPSR_T ? 'T' : 'A',
- ns_status,
- aarch32_mode_name(psr), (psr & 0x10) ? 32 : 26);
- }
-
- if (flags & CPU_DUMP_FPU) {
- int numvfpregs = 0;
- if (arm_feature(env, ARM_FEATURE_VFP)) {
- numvfpregs += 16;
- }
- if (arm_feature(env, ARM_FEATURE_VFP3)) {
- numvfpregs += 16;
- }
- for (i = 0; i < numvfpregs; i++) {
- uint64_t v = *aa32_vfp_dreg(env, i);
- qemu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
- i * 2, (uint32_t)v,
- i * 2 + 1, (uint32_t)(v >> 32),
- i, v);
- }
- qemu_fprintf(f, "FPSCR: %08x\n", vfp_get_fpscr(env));
- }
-}
-
void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
target_ulong *data)
{
diff --git a/target/arm/translate.h b/target/arm/translate.h
index bc1617809d..a20f6e2056 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -169,7 +169,6 @@ static inline void disas_set_insn_syndrome(DisasContext *s, uint32_t syn)
#ifdef TARGET_AARCH64
void a64_translate_init(void);
void gen_a64_set_pc_im(uint64_t val);
-void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags);
extern const TranslatorOps aarch64_translator_ops;
#else
static inline void a64_translate_init(void)
@@ -179,10 +178,6 @@ static inline void a64_translate_init(void)
static inline void gen_a64_set_pc_im(uint64_t val)
{
}
-
-static inline void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
-{
-}
#endif
void arm_test_cc(DisasCompare *cmp, int cc);
diff --git a/target/arm/vfp_helper.c b/target/arm/vfp_helper.c
index d3e83b627b..46041e3294 100644
--- a/target/arm/vfp_helper.c
+++ b/target/arm/vfp_helper.c
@@ -18,121 +18,88 @@
*/
#include "qemu/osdep.h"
-#include "qemu/log.h"
#include "cpu.h"
#include "exec/helper-proto.h"
-#include "fpu/softfloat.h"
#include "internals.h"
-
+#ifdef CONFIG_TCG
+#include "qemu/log.h"
+#include "fpu/softfloat.h"
+#endif
/* VFP support. We follow the convention used for VFP instructions:
Single precision routines have a "s" suffix, double precision a
"d" suffix. */
+#ifdef CONFIG_TCG
+
/* Convert host exception flags to vfp form. */
static inline int vfp_exceptbits_from_host(int host_bits)
{
int target_bits = 0;
- if (host_bits & float_flag_invalid)
+ if (host_bits & float_flag_invalid) {
target_bits |= 1;
- if (host_bits & float_flag_divbyzero)
+ }
+ if (host_bits & float_flag_divbyzero) {
target_bits |= 2;
- if (host_bits & float_flag_overflow)
+ }
+ if (host_bits & float_flag_overflow) {
target_bits |= 4;
- if (host_bits & (float_flag_underflow | float_flag_output_denormal))
+ }
+ if (host_bits & (float_flag_underflow | float_flag_output_denormal)) {
target_bits |= 8;
- if (host_bits & float_flag_inexact)
+ }
+ if (host_bits & float_flag_inexact) {
target_bits |= 0x10;
- if (host_bits & float_flag_input_denormal)
+ }
+ if (host_bits & float_flag_input_denormal) {
target_bits |= 0x80;
+ }
return target_bits;
}
-uint32_t HELPER(vfp_get_fpscr)(CPUARMState *env)
-{
- uint32_t i, fpscr;
-
- fpscr = env->vfp.xregs[ARM_VFP_FPSCR]
- | (env->vfp.vec_len << 16)
- | (env->vfp.vec_stride << 20);
-
- i = get_float_exception_flags(&env->vfp.fp_status);
- i |= get_float_exception_flags(&env->vfp.standard_fp_status);
- /* FZ16 does not generate an input denormal exception. */
- i |= (get_float_exception_flags(&env->vfp.fp_status_f16)
- & ~float_flag_input_denormal);
- fpscr |= vfp_exceptbits_from_host(i);
-
- i = env->vfp.qc[0] | env->vfp.qc[1] | env->vfp.qc[2] | env->vfp.qc[3];
- fpscr |= i ? FPCR_QC : 0;
-
- return fpscr;
-}
-
-uint32_t vfp_get_fpscr(CPUARMState *env)
-{
- return HELPER(vfp_get_fpscr)(env);
-}
-
/* Convert vfp exception flags to target form. */
static inline int vfp_exceptbits_to_host(int target_bits)
{
int host_bits = 0;
- if (target_bits & 1)
+ if (target_bits & 1) {
host_bits |= float_flag_invalid;
- if (target_bits & 2)
+ }
+ if (target_bits & 2) {
host_bits |= float_flag_divbyzero;
- if (target_bits & 4)
+ }
+ if (target_bits & 4) {
host_bits |= float_flag_overflow;
- if (target_bits & 8)
+ }
+ if (target_bits & 8) {
host_bits |= float_flag_underflow;
- if (target_bits & 0x10)
+ }
+ if (target_bits & 0x10) {
host_bits |= float_flag_inexact;
- if (target_bits & 0x80)
+ }
+ if (target_bits & 0x80) {
host_bits |= float_flag_input_denormal;
+ }
return host_bits;
}
-void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
+static uint32_t vfp_get_fpscr_from_host(CPUARMState *env)
{
- int i;
- uint32_t changed = env->vfp.xregs[ARM_VFP_FPSCR];
-
- /* When ARMv8.2-FP16 is not supported, FZ16 is RES0. */
- if (!cpu_isar_feature(aa64_fp16, env_archcpu(env))) {
- val &= ~FPCR_FZ16;
- }
-
- if (arm_feature(env, ARM_FEATURE_M)) {
- /*
- * M profile FPSCR is RES0 for the QC, STRIDE, FZ16, LEN bits
- * and also for the trapped-exception-handling bits IxE.
- */
- val &= 0xf7c0009f;
- }
+ uint32_t i;
- /*
- * We don't implement trapped exception handling, so the
- * trap enable bits, IDE|IXE|UFE|OFE|DZE|IOE are all RAZ/WI (not RES0!)
- *
- * If we exclude the exception flags, IOC|DZC|OFC|UFC|IXC|IDC
- * (which are stored in fp_status), and the other RES0 bits
- * in between, then we clear all of the low 16 bits.
- */
- env->vfp.xregs[ARM_VFP_FPSCR] = val & 0xf7c80000;
- env->vfp.vec_len = (val >> 16) & 7;
- env->vfp.vec_stride = (val >> 20) & 3;
+ i = get_float_exception_flags(&env->vfp.fp_status);
+ i |= get_float_exception_flags(&env->vfp.standard_fp_status);
+ /* FZ16 does not generate an input denormal exception. */
+ i |= (get_float_exception_flags(&env->vfp.fp_status_f16)
+ & ~float_flag_input_denormal);
+ return vfp_exceptbits_from_host(i);
+}
- /*
- * The bit we set within fpscr_q is arbitrary; the register as a
- * whole being zero/non-zero is what counts.
- */
- env->vfp.qc[0] = val & FPCR_QC;
- env->vfp.qc[1] = 0;
- env->vfp.qc[2] = 0;
- env->vfp.qc[3] = 0;
+static void vfp_set_fpscr_to_host(CPUARMState *env, uint32_t val)
+{
+ int i;
+ uint32_t changed = env->vfp.xregs[ARM_VFP_FPSCR];
changed ^= val;
if (changed & (3 << 22)) {
@@ -170,7 +137,8 @@ void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
set_default_nan_mode(dnan_enabled, &env->vfp.fp_status_f16);
}
- /* The exception flags are ORed together when we read fpscr so we
+ /*
+ * The exception flags are ORed together when we read fpscr so we
* only need to preserve the current state in one of our
* float_status values.
*/
@@ -180,11 +148,86 @@ void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
set_float_exception_flags(0, &env->vfp.standard_fp_status);
}
+#else
+
+static uint32_t vfp_get_fpscr_from_host(CPUARMState *env)
+{
+ return 0;
+}
+
+static void vfp_set_fpscr_to_host(CPUARMState *env, uint32_t val)
+{
+}
+
+#endif
+
+uint32_t HELPER(vfp_get_fpscr)(CPUARMState *env)
+{
+ uint32_t i, fpscr;
+
+ fpscr = env->vfp.xregs[ARM_VFP_FPSCR]
+ | (env->vfp.vec_len << 16)
+ | (env->vfp.vec_stride << 20);
+
+ fpscr |= vfp_get_fpscr_from_host(env);
+
+ i = env->vfp.qc[0] | env->vfp.qc[1] | env->vfp.qc[2] | env->vfp.qc[3];
+ fpscr |= i ? FPCR_QC : 0;
+
+ return fpscr;
+}
+
+uint32_t vfp_get_fpscr(CPUARMState *env)
+{
+ return HELPER(vfp_get_fpscr)(env);
+}
+
+void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
+{
+ /* When ARMv8.2-FP16 is not supported, FZ16 is RES0. */
+ if (!cpu_isar_feature(aa64_fp16, env_archcpu(env))) {
+ val &= ~FPCR_FZ16;
+ }
+
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ /*
+ * M profile FPSCR is RES0 for the QC, STRIDE, FZ16, LEN bits
+ * and also for the trapped-exception-handling bits IxE.
+ */
+ val &= 0xf7c0009f;
+ }
+
+ /*
+ * We don't implement trapped exception handling, so the
+ * trap enable bits, IDE|IXE|UFE|OFE|DZE|IOE are all RAZ/WI (not RES0!)
+ *
+ * If we exclude the exception flags, IOC|DZC|OFC|UFC|IXC|IDC
+ * (which are stored in fp_status), and the other RES0 bits
+ * in between, then we clear all of the low 16 bits.
+ */
+ env->vfp.xregs[ARM_VFP_FPSCR] = val & 0xf7c80000;
+ env->vfp.vec_len = (val >> 16) & 7;
+ env->vfp.vec_stride = (val >> 20) & 3;
+
+ /*
+ * The bit we set within fpscr_q is arbitrary; the register as a
+ * whole being zero/non-zero is what counts.
+ */
+ env->vfp.qc[0] = val & FPCR_QC;
+ env->vfp.qc[1] = 0;
+ env->vfp.qc[2] = 0;
+ env->vfp.qc[3] = 0;
+
+ vfp_set_fpscr_to_host(env, val);
+}
+
void vfp_set_fpscr(CPUARMState *env, uint32_t val)
{
HELPER(vfp_set_fpscr)(env, val);
}
+#ifdef CONFIG_TCG
+
#define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p))
#define VFP_BINOP(name) \
@@ -1278,3 +1321,5 @@ float64 HELPER(frint64_d)(float64 f, void *fpst)
{
return frint_d(f, fpst, 64);
}
+
+#endif
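A small worked example of how HELPER(vfp_get_fpscr) above composes its result; the bit values come straight from vfp_exceptbits_from_host(), but the scenario itself is hypothetical:

    /*
     * Hypothetical scenario:
     *   fp_status has float_flag_overflow | float_flag_inexact pending
     *       -> vfp_get_fpscr_from_host() returns 0x4 | 0x10 (OFC | IXC)
     *   vec_len = 1, vec_stride = 0
     *       -> contributes (1 << 16)
     *   any non-zero word in vfp.qc[]
     *       -> FPCR_QC is ORed in
     * The returned FPSCR is env->vfp.xregs[ARM_VFP_FPSCR] ORed with all of
     * the above.
     */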
diff --git a/target/ppc/fpu_helper.c b/target/ppc/fpu_helper.c
index ffbd19afa1..f437c88aad 100644
--- a/target/ppc/fpu_helper.c
+++ b/target/ppc/fpu_helper.c
@@ -1801,37 +1801,35 @@ uint32_t helper_efdcmpeq(CPUPPCState *env, uint64_t op1, uint64_t op2)
* sfprf - set FPRF
*/
#define VSX_ADD_SUB(name, op, nels, tp, fld, sfprf, r2sp) \
-void helper_##name(CPUPPCState *env, uint32_t opcode) \
+void helper_##name(CPUPPCState *env, ppc_vsr_t *xt, \
+ ppc_vsr_t *xa, ppc_vsr_t *xb) \
{ \
- ppc_vsr_t xt, xa, xb; \
+ ppc_vsr_t t = *xt; \
int i; \
\
- getVSR(xA(opcode), &xa, env); \
- getVSR(xB(opcode), &xb, env); \
- getVSR(xT(opcode), &xt, env); \
helper_reset_fpstatus(env); \
\
for (i = 0; i < nels; i++) { \
float_status tstat = env->fp_status; \
set_float_exception_flags(0, &tstat); \
- xt.fld = tp##_##op(xa.fld, xb.fld, &tstat); \
+ t.fld = tp##_##op(xa->fld, xb->fld, &tstat); \
env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
\
if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
float_invalid_op_addsub(env, sfprf, GETPC(), \
- tp##_classify(xa.fld) | \
- tp##_classify(xb.fld)); \
+ tp##_classify(xa->fld) | \
+ tp##_classify(xb->fld)); \
} \
\
if (r2sp) { \
- xt.fld = helper_frsp(env, xt.fld); \
+ t.fld = helper_frsp(env, t.fld); \
} \
\
if (sfprf) { \
- helper_compute_fprf_float64(env, xt.fld); \
+ helper_compute_fprf_float64(env, t.fld); \
} \
} \
- putVSR(xT(opcode), &xt, env); \
+ *xt = t; \
do_float_check_status(env, GETPC()); \
}
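For orientation, after this change an instantiation such as VSX_ADD_SUB(xssubsp, sub, 1, float64, VsrD(0), 1, 1) (visible as unchanged context below) expands to a helper of roughly the following shape; this is a sketch of the signature and behaviour, not the literal preprocessor output:

    void helper_xssubsp(CPUPPCState *env, ppc_vsr_t *xt,
                        ppc_vsr_t *xa, ppc_vsr_t *xb)
    {
        /* One float64_sub() on VsrD(0), rounded back to single precision
         * (r2sp = 1) and with FPRF set (sfprf = 1). */
    }

so callers now hand the helper pointers to the VSRs directly instead of having it re-extract register numbers from the opcode.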
@@ -1844,14 +1842,12 @@ VSX_ADD_SUB(xssubsp, sub, 1, float64, VsrD(0), 1, 1)
VSX_ADD_SUB(xvsubdp, sub, 2, float64, VsrD(i), 0, 0)
VSX_ADD_SUB(xvsubsp, sub, 4, float32, VsrW(i), 0, 0)
-void helper_xsaddqp(CPUPPCState *env, uint32_t opcode)
+void helper_xsaddqp(CPUPPCState *env, uint32_t opcode,
+ ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)
{
- ppc_vsr_t xt, xa, xb;
+ ppc_vsr_t t = *xt;
float_status tstat;
- getVSR(rA(opcode) + 32, &xa, env);
- getVSR(rB(opcode) + 32, &xb, env);
- getVSR(rD(opcode) + 32, &xt, env);
helper_reset_fpstatus(env);
tstat = env->fp_status;
@@ -1860,18 +1856,18 @@ void helper_xsaddqp(CPUPPCState *env, uint32_t opcode)
}
set_float_exception_flags(0, &tstat);
- xt.f128 = float128_add(xa.f128, xb.f128, &tstat);
+ t.f128 = float128_add(xa->f128, xb->f128, &tstat);
env->fp_status.float_exception_flags |= tstat.float_exception_flags;
if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
float_invalid_op_addsub(env, 1, GETPC(),
- float128_classify(xa.f128) |
- float128_classify(xb.f128));
+ float128_classify(xa->f128) |
+ float128_classify(xb->f128));
}
- helper_compute_fprf_float128(env, xt.f128);
+ helper_compute_fprf_float128(env, t.f128);
- putVSR(rD(opcode) + 32, &xt, env);
+ *xt = t;
do_float_check_status(env, GETPC());
}
@@ -1884,38 +1880,36 @@ void helper_xsaddqp(CPUPPCState *env, uint32_t opcode)
* sfprf - set FPRF
*/
#define VSX_MUL(op, nels, tp, fld, sfprf, r2sp) \
-void helper_##op(CPUPPCState *env, uint32_t opcode) \
+void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \
+ ppc_vsr_t *xa, ppc_vsr_t *xb) \
{ \
- ppc_vsr_t xt, xa, xb; \
+ ppc_vsr_t t = *xt; \
int i; \
\
- getVSR(xA(opcode), &xa, env); \
- getVSR(xB(opcode), &xb, env); \
- getVSR(xT(opcode), &xt, env); \
helper_reset_fpstatus(env); \
\
for (i = 0; i < nels; i++) { \
float_status tstat = env->fp_status; \
set_float_exception_flags(0, &tstat); \
- xt.fld = tp##_mul(xa.fld, xb.fld, &tstat); \
+ t.fld = tp##_mul(xa->fld, xb->fld, &tstat); \
env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
\
if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
float_invalid_op_mul(env, sfprf, GETPC(), \
- tp##_classify(xa.fld) | \
- tp##_classify(xb.fld)); \
+ tp##_classify(xa->fld) | \
+ tp##_classify(xb->fld)); \
} \
\
if (r2sp) { \
- xt.fld = helper_frsp(env, xt.fld); \
+ t.fld = helper_frsp(env, t.fld); \
} \
\
if (sfprf) { \
- helper_compute_fprf_float64(env, xt.fld); \
+ helper_compute_fprf_float64(env, t.fld); \
} \
} \
\
- putVSR(xT(opcode), &xt, env); \
+ *xt = t; \
do_float_check_status(env, GETPC()); \
}
@@ -1924,15 +1918,12 @@ VSX_MUL(xsmulsp, 1, float64, VsrD(0), 1, 1)
VSX_MUL(xvmuldp, 2, float64, VsrD(i), 0, 0)
VSX_MUL(xvmulsp, 4, float32, VsrW(i), 0, 0)
-void helper_xsmulqp(CPUPPCState *env, uint32_t opcode)
+void helper_xsmulqp(CPUPPCState *env, uint32_t opcode,
+ ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)
{
- ppc_vsr_t xt, xa, xb;
+ ppc_vsr_t t = *xt;
float_status tstat;
- getVSR(rA(opcode) + 32, &xa, env);
- getVSR(rB(opcode) + 32, &xb, env);
- getVSR(rD(opcode) + 32, &xt, env);
-
helper_reset_fpstatus(env);
tstat = env->fp_status;
if (unlikely(Rc(opcode) != 0)) {
@@ -1940,17 +1931,17 @@ void helper_xsmulqp(CPUPPCState *env, uint32_t opcode)
}
set_float_exception_flags(0, &tstat);
- xt.f128 = float128_mul(xa.f128, xb.f128, &tstat);
+ t.f128 = float128_mul(xa->f128, xb->f128, &tstat);
env->fp_status.float_exception_flags |= tstat.float_exception_flags;
if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
float_invalid_op_mul(env, 1, GETPC(),
- float128_classify(xa.f128) |
- float128_classify(xb.f128));
+ float128_classify(xa->f128) |
+ float128_classify(xb->f128));
}
- helper_compute_fprf_float128(env, xt.f128);
+ helper_compute_fprf_float128(env, t.f128);
- putVSR(rD(opcode) + 32, &xt, env);
+ *xt = t;
do_float_check_status(env, GETPC());
}
@@ -1963,41 +1954,39 @@ void helper_xsmulqp(CPUPPCState *env, uint32_t opcode)
* sfprf - set FPRF
*/
#define VSX_DIV(op, nels, tp, fld, sfprf, r2sp) \
-void helper_##op(CPUPPCState *env, uint32_t opcode) \
+void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \
+ ppc_vsr_t *xa, ppc_vsr_t *xb) \
{ \
- ppc_vsr_t xt, xa, xb; \
+ ppc_vsr_t t = *xt; \
int i; \
\
- getVSR(xA(opcode), &xa, env); \
- getVSR(xB(opcode), &xb, env); \
- getVSR(xT(opcode), &xt, env); \
helper_reset_fpstatus(env); \
\
for (i = 0; i < nels; i++) { \
float_status tstat = env->fp_status; \
set_float_exception_flags(0, &tstat); \
- xt.fld = tp##_div(xa.fld, xb.fld, &tstat); \
+ t.fld = tp##_div(xa->fld, xb->fld, &tstat); \
env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
\
if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
float_invalid_op_div(env, sfprf, GETPC(), \
- tp##_classify(xa.fld) | \
- tp##_classify(xb.fld)); \
+ tp##_classify(xa->fld) | \
+ tp##_classify(xb->fld)); \
} \
if (unlikely(tstat.float_exception_flags & float_flag_divbyzero)) { \
float_zero_divide_excp(env, GETPC()); \
} \
\
if (r2sp) { \
- xt.fld = helper_frsp(env, xt.fld); \
+ t.fld = helper_frsp(env, t.fld); \
} \
\
if (sfprf) { \
- helper_compute_fprf_float64(env, xt.fld); \
+ helper_compute_fprf_float64(env, t.fld); \
} \
} \
\
- putVSR(xT(opcode), &xt, env); \
+ *xt = t; \
do_float_check_status(env, GETPC()); \
}
@@ -2006,15 +1995,12 @@ VSX_DIV(xsdivsp, 1, float64, VsrD(0), 1, 1)
VSX_DIV(xvdivdp, 2, float64, VsrD(i), 0, 0)
VSX_DIV(xvdivsp, 4, float32, VsrW(i), 0, 0)
-void helper_xsdivqp(CPUPPCState *env, uint32_t opcode)
+void helper_xsdivqp(CPUPPCState *env, uint32_t opcode,
+ ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)
{
- ppc_vsr_t xt, xa, xb;
+ ppc_vsr_t t = *xt;
float_status tstat;
- getVSR(rA(opcode) + 32, &xa, env);
- getVSR(rB(opcode) + 32, &xb, env);
- getVSR(rD(opcode) + 32, &xt, env);
-
helper_reset_fpstatus(env);
tstat = env->fp_status;
if (unlikely(Rc(opcode) != 0)) {
@@ -2022,20 +2008,20 @@ void helper_xsdivqp(CPUPPCState *env, uint32_t opcode)
}
set_float_exception_flags(0, &tstat);
- xt.f128 = float128_div(xa.f128, xb.f128, &tstat);
+ t.f128 = float128_div(xa->f128, xb->f128, &tstat);
env->fp_status.float_exception_flags |= tstat.float_exception_flags;
if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
float_invalid_op_div(env, 1, GETPC(),
- float128_classify(xa.f128) |
- float128_classify(xb.f128));
+ float128_classify(xa->f128) |
+ float128_classify(xb->f128));
}
if (unlikely(tstat.float_exception_flags & float_flag_divbyzero)) {
float_zero_divide_excp(env, GETPC());
}
- helper_compute_fprf_float128(env, xt.f128);
- putVSR(rD(opcode) + 32, &xt, env);
+ helper_compute_fprf_float128(env, t.f128);
+ *xt = t;
do_float_check_status(env, GETPC());
}
@@ -2048,31 +2034,29 @@ void helper_xsdivqp(CPUPPCState *env, uint32_t opcode)
* sfprf - set FPRF
*/
#define VSX_RE(op, nels, tp, fld, sfprf, r2sp) \
-void helper_##op(CPUPPCState *env, uint32_t opcode) \
+void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
{ \
- ppc_vsr_t xt, xb; \
+ ppc_vsr_t t = *xt; \
int i; \
\
- getVSR(xB(opcode), &xb, env); \
- getVSR(xT(opcode), &xt, env); \
helper_reset_fpstatus(env); \
\
for (i = 0; i < nels; i++) { \
- if (unlikely(tp##_is_signaling_nan(xb.fld, &env->fp_status))) { \
+ if (unlikely(tp##_is_signaling_nan(xb->fld, &env->fp_status))) { \
float_invalid_op_vxsnan(env, GETPC()); \
} \
- xt.fld = tp##_div(tp##_one, xb.fld, &env->fp_status); \
+ t.fld = tp##_div(tp##_one, xb->fld, &env->fp_status); \
\
if (r2sp) { \
- xt.fld = helper_frsp(env, xt.fld); \
+ t.fld = helper_frsp(env, t.fld); \
} \
\
if (sfprf) { \
- helper_compute_fprf_float64(env, xt.fld); \
+ helper_compute_fprf_float64(env, t.fld); \
} \
} \
\
- putVSR(xT(opcode), &xt, env); \
+ *xt = t; \
do_float_check_status(env, GETPC()); \
}
@@ -2090,39 +2074,37 @@ VSX_RE(xvresp, 4, float32, VsrW(i), 0, 0)
* sfprf - set FPRF
*/
#define VSX_SQRT(op, nels, tp, fld, sfprf, r2sp) \
-void helper_##op(CPUPPCState *env, uint32_t opcode) \
+void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
{ \
- ppc_vsr_t xt, xb; \
+ ppc_vsr_t t = *xt; \
int i; \
\
- getVSR(xB(opcode), &xb, env); \
- getVSR(xT(opcode), &xt, env); \
helper_reset_fpstatus(env); \
\
for (i = 0; i < nels; i++) { \
float_status tstat = env->fp_status; \
set_float_exception_flags(0, &tstat); \
- xt.fld = tp##_sqrt(xb.fld, &tstat); \
+ t.fld = tp##_sqrt(xb->fld, &tstat); \
env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
\
if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
- if (tp##_is_neg(xb.fld) && !tp##_is_zero(xb.fld)) { \
+ if (tp##_is_neg(xb->fld) && !tp##_is_zero(xb->fld)) { \
float_invalid_op_vxsqrt(env, sfprf, GETPC()); \
- } else if (tp##_is_signaling_nan(xb.fld, &tstat)) { \
+ } else if (tp##_is_signaling_nan(xb->fld, &tstat)) { \
float_invalid_op_vxsnan(env, GETPC()); \
} \
} \
\
if (r2sp) { \
- xt.fld = helper_frsp(env, xt.fld); \
+ t.fld = helper_frsp(env, t.fld); \
} \
\
if (sfprf) { \
- helper_compute_fprf_float64(env, xt.fld); \
+ helper_compute_fprf_float64(env, t.fld); \
} \
} \
\
- putVSR(xT(opcode), &xt, env); \
+ *xt = t; \
do_float_check_status(env, GETPC()); \
}
@@ -2140,40 +2122,38 @@ VSX_SQRT(xvsqrtsp, 4, float32, VsrW(i), 0, 0)
* sfprf - set FPRF
*/
#define VSX_RSQRTE(op, nels, tp, fld, sfprf, r2sp) \
-void helper_##op(CPUPPCState *env, uint32_t opcode) \
+void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
{ \
- ppc_vsr_t xt, xb; \
+ ppc_vsr_t t = *xt; \
int i; \
\
- getVSR(xB(opcode), &xb, env); \
- getVSR(xT(opcode), &xt, env); \
helper_reset_fpstatus(env); \
\
for (i = 0; i < nels; i++) { \
float_status tstat = env->fp_status; \
set_float_exception_flags(0, &tstat); \
- xt.fld = tp##_sqrt(xb.fld, &tstat); \
- xt.fld = tp##_div(tp##_one, xt.fld, &tstat); \
+ t.fld = tp##_sqrt(xb->fld, &tstat); \
+ t.fld = tp##_div(tp##_one, t.fld, &tstat); \
env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
\
if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
- if (tp##_is_neg(xb.fld) && !tp##_is_zero(xb.fld)) { \
+ if (tp##_is_neg(xb->fld) && !tp##_is_zero(xb->fld)) { \
float_invalid_op_vxsqrt(env, sfprf, GETPC()); \
- } else if (tp##_is_signaling_nan(xb.fld, &tstat)) { \
+ } else if (tp##_is_signaling_nan(xb->fld, &tstat)) { \
float_invalid_op_vxsnan(env, GETPC()); \
} \
} \
\
if (r2sp) { \
- xt.fld = helper_frsp(env, xt.fld); \
+ t.fld = helper_frsp(env, t.fld); \
} \
\
if (sfprf) { \
- helper_compute_fprf_float64(env, xt.fld); \
+ helper_compute_fprf_float64(env, t.fld); \
} \
} \
\
- putVSR(xT(opcode), &xt, env); \
+ *xt = t; \
do_float_check_status(env, GETPC()); \
}
@@ -2193,39 +2173,36 @@ VSX_RSQRTE(xvrsqrtesp, 4, float32, VsrW(i), 0, 0)
* nbits - number of fraction bits
*/
#define VSX_TDIV(op, nels, tp, fld, emin, emax, nbits) \
-void helper_##op(CPUPPCState *env, uint32_t opcode) \
+void helper_##op(CPUPPCState *env, uint32_t opcode, \
+ ppc_vsr_t *xa, ppc_vsr_t *xb) \
{ \
- ppc_vsr_t xa, xb; \
int i; \
int fe_flag = 0; \
int fg_flag = 0; \
\
- getVSR(xA(opcode), &xa, env); \
- getVSR(xB(opcode), &xb, env); \
- \
for (i = 0; i < nels; i++) { \
- if (unlikely(tp##_is_infinity(xa.fld) || \
- tp##_is_infinity(xb.fld) || \
- tp##_is_zero(xb.fld))) { \
+ if (unlikely(tp##_is_infinity(xa->fld) || \
+ tp##_is_infinity(xb->fld) || \
+ tp##_is_zero(xb->fld))) { \
fe_flag = 1; \
fg_flag = 1; \
} else { \
- int e_a = ppc_##tp##_get_unbiased_exp(xa.fld); \
- int e_b = ppc_##tp##_get_unbiased_exp(xb.fld); \
+ int e_a = ppc_##tp##_get_unbiased_exp(xa->fld); \
+ int e_b = ppc_##tp##_get_unbiased_exp(xb->fld); \
\
- if (unlikely(tp##_is_any_nan(xa.fld) || \
- tp##_is_any_nan(xb.fld))) { \
+ if (unlikely(tp##_is_any_nan(xa->fld) || \
+ tp##_is_any_nan(xb->fld))) { \
fe_flag = 1; \
} else if ((e_b <= emin) || (e_b >= (emax - 2))) { \
fe_flag = 1; \
- } else if (!tp##_is_zero(xa.fld) && \
+ } else if (!tp##_is_zero(xa->fld) && \
(((e_a - e_b) >= emax) || \
((e_a - e_b) <= (emin + 1)) || \
(e_a <= (emin + nbits)))) { \
fe_flag = 1; \
} \
\
- if (unlikely(tp##_is_zero_or_denormal(xb.fld))) { \
+ if (unlikely(tp##_is_zero_or_denormal(xb->fld))) { \
/* \
* XB is not zero because of the above check and so \
* must be denormalized. \
@@ -2253,36 +2230,32 @@ VSX_TDIV(xvtdivsp, 4, float32, VsrW(i), -126, 127, 23)
* nbits - number of fraction bits
*/
#define VSX_TSQRT(op, nels, tp, fld, emin, nbits) \
-void helper_##op(CPUPPCState *env, uint32_t opcode) \
+void helper_##op(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xb) \
{ \
- ppc_vsr_t xa, xb; \
int i; \
int fe_flag = 0; \
int fg_flag = 0; \
\
- getVSR(xA(opcode), &xa, env); \
- getVSR(xB(opcode), &xb, env); \
- \
for (i = 0; i < nels; i++) { \
- if (unlikely(tp##_is_infinity(xb.fld) || \
- tp##_is_zero(xb.fld))) { \
+ if (unlikely(tp##_is_infinity(xb->fld) || \
+ tp##_is_zero(xb->fld))) { \
fe_flag = 1; \
fg_flag = 1; \
} else { \
- int e_b = ppc_##tp##_get_unbiased_exp(xb.fld); \
+ int e_b = ppc_##tp##_get_unbiased_exp(xb->fld); \
\
- if (unlikely(tp##_is_any_nan(xb.fld))) { \
+ if (unlikely(tp##_is_any_nan(xb->fld))) { \
fe_flag = 1; \
- } else if (unlikely(tp##_is_zero(xb.fld))) { \
+ } else if (unlikely(tp##_is_zero(xb->fld))) { \
fe_flag = 1; \
- } else if (unlikely(tp##_is_neg(xb.fld))) { \
+ } else if (unlikely(tp##_is_neg(xb->fld))) { \
fe_flag = 1; \
- } else if (!tp##_is_zero(xb.fld) && \
+ } else if (!tp##_is_zero(xb->fld) && \
(e_b <= (emin + nbits))) { \
fe_flag = 1; \
} \
\
- if (unlikely(tp##_is_zero_or_denormal(xb.fld))) { \
+ if (unlikely(tp##_is_zero_or_denormal(xb->fld))) { \
/* \
* XB is not zero because of the above check and \
* therefore must be denormalized. \
@@ -2307,30 +2280,15 @@ VSX_TSQRT(xvtsqrtsp, 4, float32, VsrW(i), -126, 23)
* fld - vsr_t field (VsrD(*) or VsrW(*))
* maddflgs - flags for the float*muladd routine that control the
* various forms (madd, msub, nmadd, nmsub)
- * afrm - A form (1=A, 0=M)
* sfprf - set FPRF
*/
-#define VSX_MADD(op, nels, tp, fld, maddflgs, afrm, sfprf, r2sp) \
-void helper_##op(CPUPPCState *env, uint32_t opcode) \
+#define VSX_MADD(op, nels, tp, fld, maddflgs, sfprf, r2sp) \
+void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \
+ ppc_vsr_t *xa, ppc_vsr_t *b, ppc_vsr_t *c) \
{ \
- ppc_vsr_t xt_in, xa, xb, xt_out; \
- ppc_vsr_t *b, *c; \
+ ppc_vsr_t t = *xt; \
int i; \
\
- if (afrm) { /* AxB + T */ \
- b = &xb; \
- c = &xt_in; \
- } else { /* AxT + B */ \
- b = &xt_in; \
- c = &xb; \
- } \
- \
- getVSR(xA(opcode), &xa, env); \
- getVSR(xB(opcode), &xb, env); \
- getVSR(xT(opcode), &xt_in, env); \
- \
- xt_out = xt_in; \
- \
helper_reset_fpstatus(env); \
\
for (i = 0; i < nels; i++) { \
@@ -2342,68 +2300,51 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
* result to odd. \
*/ \
set_float_rounding_mode(float_round_to_zero, &tstat); \
- xt_out.fld = tp##_muladd(xa.fld, b->fld, c->fld, \
- maddflgs, &tstat); \
- xt_out.fld |= (get_float_exception_flags(&tstat) & \
- float_flag_inexact) != 0; \
+ t.fld = tp##_muladd(xa->fld, b->fld, c->fld, \
+ maddflgs, &tstat); \
+ t.fld |= (get_float_exception_flags(&tstat) & \
+ float_flag_inexact) != 0; \
} else { \
- xt_out.fld = tp##_muladd(xa.fld, b->fld, c->fld, \
- maddflgs, &tstat); \
+ t.fld = tp##_muladd(xa->fld, b->fld, c->fld, \
+ maddflgs, &tstat); \
} \
env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
\
if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
- tp##_maddsub_update_excp(env, xa.fld, b->fld, \
+ tp##_maddsub_update_excp(env, xa->fld, b->fld, \
c->fld, maddflgs, GETPC()); \
} \
\
if (r2sp) { \
- xt_out.fld = helper_frsp(env, xt_out.fld); \
+ t.fld = helper_frsp(env, t.fld); \
} \
\
if (sfprf) { \
- helper_compute_fprf_float64(env, xt_out.fld); \
+ helper_compute_fprf_float64(env, t.fld); \
} \
} \
- putVSR(xT(opcode), &xt_out, env); \
+ *xt = t; \
do_float_check_status(env, GETPC()); \
}
-VSX_MADD(xsmaddadp, 1, float64, VsrD(0), MADD_FLGS, 1, 1, 0)
-VSX_MADD(xsmaddmdp, 1, float64, VsrD(0), MADD_FLGS, 0, 1, 0)
-VSX_MADD(xsmsubadp, 1, float64, VsrD(0), MSUB_FLGS, 1, 1, 0)
-VSX_MADD(xsmsubmdp, 1, float64, VsrD(0), MSUB_FLGS, 0, 1, 0)
-VSX_MADD(xsnmaddadp, 1, float64, VsrD(0), NMADD_FLGS, 1, 1, 0)
-VSX_MADD(xsnmaddmdp, 1, float64, VsrD(0), NMADD_FLGS, 0, 1, 0)
-VSX_MADD(xsnmsubadp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 1, 0)
-VSX_MADD(xsnmsubmdp, 1, float64, VsrD(0), NMSUB_FLGS, 0, 1, 0)
-
-VSX_MADD(xsmaddasp, 1, float64, VsrD(0), MADD_FLGS, 1, 1, 1)
-VSX_MADD(xsmaddmsp, 1, float64, VsrD(0), MADD_FLGS, 0, 1, 1)
-VSX_MADD(xsmsubasp, 1, float64, VsrD(0), MSUB_FLGS, 1, 1, 1)
-VSX_MADD(xsmsubmsp, 1, float64, VsrD(0), MSUB_FLGS, 0, 1, 1)
-VSX_MADD(xsnmaddasp, 1, float64, VsrD(0), NMADD_FLGS, 1, 1, 1)
-VSX_MADD(xsnmaddmsp, 1, float64, VsrD(0), NMADD_FLGS, 0, 1, 1)
-VSX_MADD(xsnmsubasp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 1, 1)
-VSX_MADD(xsnmsubmsp, 1, float64, VsrD(0), NMSUB_FLGS, 0, 1, 1)
-
-VSX_MADD(xvmaddadp, 2, float64, VsrD(i), MADD_FLGS, 1, 0, 0)
-VSX_MADD(xvmaddmdp, 2, float64, VsrD(i), MADD_FLGS, 0, 0, 0)
-VSX_MADD(xvmsubadp, 2, float64, VsrD(i), MSUB_FLGS, 1, 0, 0)
-VSX_MADD(xvmsubmdp, 2, float64, VsrD(i), MSUB_FLGS, 0, 0, 0)
-VSX_MADD(xvnmaddadp, 2, float64, VsrD(i), NMADD_FLGS, 1, 0, 0)
-VSX_MADD(xvnmaddmdp, 2, float64, VsrD(i), NMADD_FLGS, 0, 0, 0)
-VSX_MADD(xvnmsubadp, 2, float64, VsrD(i), NMSUB_FLGS, 1, 0, 0)
-VSX_MADD(xvnmsubmdp, 2, float64, VsrD(i), NMSUB_FLGS, 0, 0, 0)
-
-VSX_MADD(xvmaddasp, 4, float32, VsrW(i), MADD_FLGS, 1, 0, 0)
-VSX_MADD(xvmaddmsp, 4, float32, VsrW(i), MADD_FLGS, 0, 0, 0)
-VSX_MADD(xvmsubasp, 4, float32, VsrW(i), MSUB_FLGS, 1, 0, 0)
-VSX_MADD(xvmsubmsp, 4, float32, VsrW(i), MSUB_FLGS, 0, 0, 0)
-VSX_MADD(xvnmaddasp, 4, float32, VsrW(i), NMADD_FLGS, 1, 0, 0)
-VSX_MADD(xvnmaddmsp, 4, float32, VsrW(i), NMADD_FLGS, 0, 0, 0)
-VSX_MADD(xvnmsubasp, 4, float32, VsrW(i), NMSUB_FLGS, 1, 0, 0)
-VSX_MADD(xvnmsubmsp, 4, float32, VsrW(i), NMSUB_FLGS, 0, 0, 0)
+VSX_MADD(xsmadddp, 1, float64, VsrD(0), MADD_FLGS, 1, 0)
+VSX_MADD(xsmsubdp, 1, float64, VsrD(0), MSUB_FLGS, 1, 0)
+VSX_MADD(xsnmadddp, 1, float64, VsrD(0), NMADD_FLGS, 1, 0)
+VSX_MADD(xsnmsubdp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 0)
+VSX_MADD(xsmaddsp, 1, float64, VsrD(0), MADD_FLGS, 1, 1)
+VSX_MADD(xsmsubsp, 1, float64, VsrD(0), MSUB_FLGS, 1, 1)
+VSX_MADD(xsnmaddsp, 1, float64, VsrD(0), NMADD_FLGS, 1, 1)
+VSX_MADD(xsnmsubsp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 1)
+
+VSX_MADD(xvmadddp, 2, float64, VsrD(i), MADD_FLGS, 0, 0)
+VSX_MADD(xvmsubdp, 2, float64, VsrD(i), MSUB_FLGS, 0, 0)
+VSX_MADD(xvnmadddp, 2, float64, VsrD(i), NMADD_FLGS, 0, 0)
+VSX_MADD(xvnmsubdp, 2, float64, VsrD(i), NMSUB_FLGS, 0, 0)
+
+VSX_MADD(xvmaddsp, 4, float32, VsrW(i), MADD_FLGS, 0, 0)
+VSX_MADD(xvmsubsp, 4, float32, VsrW(i), MSUB_FLGS, 0, 0)
+VSX_MADD(xvnmaddsp, 4, float32, VsrW(i), NMADD_FLGS, 0, 0)
+VSX_MADD(xvnmsubsp, 4, float32, VsrW(i), NMSUB_FLGS, 0, 0)
/*
* VSX_SCALAR_CMP_DP - VSX scalar floating point compare double precision
@@ -2413,24 +2354,21 @@ VSX_MADD(xvnmsubmsp, 4, float32, VsrW(i), NMSUB_FLGS, 0, 0, 0)
* svxvc - set VXVC bit
*/
#define VSX_SCALAR_CMP_DP(op, cmp, exp, svxvc) \
-void helper_##op(CPUPPCState *env, uint32_t opcode) \
+void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \
+ ppc_vsr_t *xa, ppc_vsr_t *xb) \
{ \
- ppc_vsr_t xt, xa, xb; \
+ ppc_vsr_t t = *xt; \
bool vxsnan_flag = false, vxvc_flag = false, vex_flag = false; \
\
- getVSR(xA(opcode), &xa, env); \
- getVSR(xB(opcode), &xb, env); \
- getVSR(xT(opcode), &xt, env); \
- \
- if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status) || \
- float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) { \
+ if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status) || \
+ float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) { \
vxsnan_flag = true; \
if (fpscr_ve == 0 && svxvc) { \
vxvc_flag = true; \
} \
} else if (svxvc) { \
- vxvc_flag = float64_is_quiet_nan(xa.VsrD(0), &env->fp_status) || \
- float64_is_quiet_nan(xb.VsrD(0), &env->fp_status); \
+ vxvc_flag = float64_is_quiet_nan(xa->VsrD(0), &env->fp_status) || \
+ float64_is_quiet_nan(xb->VsrD(0), &env->fp_status); \
} \
if (vxsnan_flag) { \
float_invalid_op_vxsnan(env, GETPC()); \
@@ -2441,15 +2379,16 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
vex_flag = fpscr_ve && (vxvc_flag || vxsnan_flag); \
\
if (!vex_flag) { \
- if (float64_##cmp(xb.VsrD(0), xa.VsrD(0), &env->fp_status) == exp) { \
- xt.VsrD(0) = -1; \
- xt.VsrD(1) = 0; \
+ if (float64_##cmp(xb->VsrD(0), xa->VsrD(0), \
+ &env->fp_status) == exp) { \
+ t.VsrD(0) = -1; \
+ t.VsrD(1) = 0; \
} else { \
- xt.VsrD(0) = 0; \
- xt.VsrD(1) = 0; \
+ t.VsrD(0) = 0; \
+ t.VsrD(1) = 0; \
} \
} \
- putVSR(xT(opcode), &xt, env); \
+ *xt = t; \
do_float_check_status(env, GETPC()); \
}
@@ -2458,20 +2397,17 @@ VSX_SCALAR_CMP_DP(xscmpgedp, le, 1, 1)
VSX_SCALAR_CMP_DP(xscmpgtdp, lt, 1, 1)
VSX_SCALAR_CMP_DP(xscmpnedp, eq, 0, 0)
-void helper_xscmpexpdp(CPUPPCState *env, uint32_t opcode)
+void helper_xscmpexpdp(CPUPPCState *env, uint32_t opcode,
+ ppc_vsr_t *xa, ppc_vsr_t *xb)
{
- ppc_vsr_t xa, xb;
int64_t exp_a, exp_b;
uint32_t cc;
- getVSR(xA(opcode), &xa, env);
- getVSR(xB(opcode), &xb, env);
-
- exp_a = extract64(xa.VsrD(0), 52, 11);
- exp_b = extract64(xb.VsrD(0), 52, 11);
+ exp_a = extract64(xa->VsrD(0), 52, 11);
+ exp_b = extract64(xb->VsrD(0), 52, 11);
- if (unlikely(float64_is_any_nan(xa.VsrD(0)) ||
- float64_is_any_nan(xb.VsrD(0)))) {
+ if (unlikely(float64_is_any_nan(xa->VsrD(0)) ||
+ float64_is_any_nan(xb->VsrD(0)))) {
cc = CRF_SO;
} else {
if (exp_a < exp_b) {
@@ -2490,20 +2426,17 @@ void helper_xscmpexpdp(CPUPPCState *env, uint32_t opcode)
do_float_check_status(env, GETPC());
}
-void helper_xscmpexpqp(CPUPPCState *env, uint32_t opcode)
+void helper_xscmpexpqp(CPUPPCState *env, uint32_t opcode,
+ ppc_vsr_t *xa, ppc_vsr_t *xb)
{
- ppc_vsr_t xa, xb;
int64_t exp_a, exp_b;
uint32_t cc;
- getVSR(rA(opcode) + 32, &xa, env);
- getVSR(rB(opcode) + 32, &xb, env);
+ exp_a = extract64(xa->VsrD(0), 48, 15);
+ exp_b = extract64(xb->VsrD(0), 48, 15);
- exp_a = extract64(xa.VsrD(0), 48, 15);
- exp_b = extract64(xb.VsrD(0), 48, 15);
-
- if (unlikely(float128_is_any_nan(xa.f128) ||
- float128_is_any_nan(xb.f128))) {
+ if (unlikely(float128_is_any_nan(xa->f128) ||
+ float128_is_any_nan(xb->f128))) {
cc = CRF_SO;
} else {
if (exp_a < exp_b) {
@@ -2523,25 +2456,23 @@ void helper_xscmpexpqp(CPUPPCState *env, uint32_t opcode)
}
#define VSX_SCALAR_CMP(op, ordered) \
-void helper_##op(CPUPPCState *env, uint32_t opcode) \
+void helper_##op(CPUPPCState *env, uint32_t opcode, \
+ ppc_vsr_t *xa, ppc_vsr_t *xb) \
{ \
- ppc_vsr_t xa, xb; \
uint32_t cc = 0; \
bool vxsnan_flag = false, vxvc_flag = false; \
\
helper_reset_fpstatus(env); \
- getVSR(xA(opcode), &xa, env); \
- getVSR(xB(opcode), &xb, env); \
\
- if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status) || \
- float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) { \
+ if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status) || \
+ float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) { \
vxsnan_flag = true; \
cc = CRF_SO; \
if (fpscr_ve == 0 && ordered) { \
vxvc_flag = true; \
} \
- } else if (float64_is_quiet_nan(xa.VsrD(0), &env->fp_status) || \
- float64_is_quiet_nan(xb.VsrD(0), &env->fp_status)) { \
+ } else if (float64_is_quiet_nan(xa->VsrD(0), &env->fp_status) || \
+ float64_is_quiet_nan(xb->VsrD(0), &env->fp_status)) { \
cc = CRF_SO; \
if (ordered) { \
vxvc_flag = true; \
@@ -2554,9 +2485,9 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
float_invalid_op_vxvc(env, 0, GETPC()); \
} \
\
- if (float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) { \
+ if (float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status)) { \
cc |= CRF_LT; \
- } else if (!float64_le(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) { \
+ } else if (!float64_le(xa->VsrD(0), xb->VsrD(0), &env->fp_status)) { \
cc |= CRF_GT; \
} else { \
cc |= CRF_EQ; \
@@ -2573,25 +2504,23 @@ VSX_SCALAR_CMP(xscmpodp, 1)
VSX_SCALAR_CMP(xscmpudp, 0)
#define VSX_SCALAR_CMPQ(op, ordered) \
-void helper_##op(CPUPPCState *env, uint32_t opcode) \
+void helper_##op(CPUPPCState *env, uint32_t opcode, \
+ ppc_vsr_t *xa, ppc_vsr_t *xb) \
{ \
- ppc_vsr_t xa, xb; \
uint32_t cc = 0; \
bool vxsnan_flag = false, vxvc_flag = false; \
\
helper_reset_fpstatus(env); \
- getVSR(rA(opcode) + 32, &xa, env); \
- getVSR(rB(opcode) + 32, &xb, env); \
\
- if (float128_is_signaling_nan(xa.f128, &env->fp_status) || \
- float128_is_signaling_nan(xb.f128, &env->fp_status)) { \
+ if (float128_is_signaling_nan(xa->f128, &env->fp_status) || \
+ float128_is_signaling_nan(xb->f128, &env->fp_status)) { \
vxsnan_flag = true; \
cc = CRF_SO; \
if (fpscr_ve == 0 && ordered) { \
vxvc_flag = true; \
} \
- } else if (float128_is_quiet_nan(xa.f128, &env->fp_status) || \
- float128_is_quiet_nan(xb.f128, &env->fp_status)) { \
+ } else if (float128_is_quiet_nan(xa->f128, &env->fp_status) || \
+ float128_is_quiet_nan(xb->f128, &env->fp_status)) { \
cc = CRF_SO; \
if (ordered) { \
vxvc_flag = true; \
@@ -2604,9 +2533,9 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
float_invalid_op_vxvc(env, 0, GETPC()); \
} \
\
- if (float128_lt(xa.f128, xb.f128, &env->fp_status)) { \
+ if (float128_lt(xa->f128, xb->f128, &env->fp_status)) { \
cc |= CRF_LT; \
- } else if (!float128_le(xa.f128, xb.f128, &env->fp_status)) { \
+ } else if (!float128_le(xa->f128, xb->f128, &env->fp_status)) { \
cc |= CRF_GT; \
} else { \
cc |= CRF_EQ; \
@@ -2631,24 +2560,21 @@ VSX_SCALAR_CMPQ(xscmpuqp, 0)
* fld - vsr_t field (VsrD(*) or VsrW(*))
*/
#define VSX_MAX_MIN(name, op, nels, tp, fld) \
-void helper_##name(CPUPPCState *env, uint32_t opcode) \
+void helper_##name(CPUPPCState *env, ppc_vsr_t *xt, \
+ ppc_vsr_t *xa, ppc_vsr_t *xb) \
{ \
- ppc_vsr_t xt, xa, xb; \
+ ppc_vsr_t t = *xt; \
int i; \
\
- getVSR(xA(opcode), &xa, env); \
- getVSR(xB(opcode), &xb, env); \
- getVSR(xT(opcode), &xt, env); \
- \
for (i = 0; i < nels; i++) { \
- xt.fld = tp##_##op(xa.fld, xb.fld, &env->fp_status); \
- if (unlikely(tp##_is_signaling_nan(xa.fld, &env->fp_status) || \
- tp##_is_signaling_nan(xb.fld, &env->fp_status))) { \
+ t.fld = tp##_##op(xa->fld, xb->fld, &env->fp_status); \
+ if (unlikely(tp##_is_signaling_nan(xa->fld, &env->fp_status) || \
+ tp##_is_signaling_nan(xb->fld, &env->fp_status))) { \
float_invalid_op_vxsnan(env, GETPC()); \
} \
} \
\
- putVSR(xT(opcode), &xt, env); \
+ *xt = t; \
do_float_check_status(env, GETPC()); \
}
@@ -2660,29 +2586,26 @@ VSX_MAX_MIN(xvmindp, minnum, 2, float64, VsrD(i))
VSX_MAX_MIN(xvminsp, minnum, 4, float32, VsrW(i))
#define VSX_MAX_MINC(name, max) \
-void helper_##name(CPUPPCState *env, uint32_t opcode) \
+void helper_##name(CPUPPCState *env, uint32_t opcode, \
+ ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb) \
{ \
- ppc_vsr_t xt, xa, xb; \
+ ppc_vsr_t t = *xt; \
bool vxsnan_flag = false, vex_flag = false; \
\
- getVSR(rA(opcode) + 32, &xa, env); \
- getVSR(rB(opcode) + 32, &xb, env); \
- getVSR(rD(opcode) + 32, &xt, env); \
- \
- if (unlikely(float64_is_any_nan(xa.VsrD(0)) || \
- float64_is_any_nan(xb.VsrD(0)))) { \
- if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status) || \
- float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) { \
+ if (unlikely(float64_is_any_nan(xa->VsrD(0)) || \
+ float64_is_any_nan(xb->VsrD(0)))) { \
+ if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status) || \
+ float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) { \
vxsnan_flag = true; \
} \
- xt.VsrD(0) = xb.VsrD(0); \
+ t.VsrD(0) = xb->VsrD(0); \
} else if ((max && \
- !float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) || \
+ !float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status)) || \
(!max && \
- float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status))) { \
- xt.VsrD(0) = xa.VsrD(0); \
+ float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status))) { \
+ t.VsrD(0) = xa->VsrD(0); \
} else { \
- xt.VsrD(0) = xb.VsrD(0); \
+ t.VsrD(0) = xb->VsrD(0); \
} \
\
vex_flag = fpscr_ve & vxsnan_flag; \
@@ -2690,7 +2613,7 @@ void helper_##name(CPUPPCState *env, uint32_t opcode) \
float_invalid_op_vxsnan(env, GETPC()); \
} \
if (!vex_flag) { \
- putVSR(rD(opcode) + 32, &xt, env); \
+ *xt = t; \
} \
} \
@@ -2698,46 +2621,46 @@ VSX_MAX_MINC(xsmaxcdp, 1);
VSX_MAX_MINC(xsmincdp, 0);
#define VSX_MAX_MINJ(name, max) \
-void helper_##name(CPUPPCState *env, uint32_t opcode) \
+void helper_##name(CPUPPCState *env, uint32_t opcode, \
+ ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb) \
{ \
- ppc_vsr_t xt, xa, xb; \
+ ppc_vsr_t t = *xt; \
bool vxsnan_flag = false, vex_flag = false; \
\
- getVSR(rA(opcode) + 32, &xa, env); \
- getVSR(rB(opcode) + 32, &xb, env); \
- getVSR(rD(opcode) + 32, &xt, env); \
- \
- if (unlikely(float64_is_any_nan(xa.VsrD(0)))) { \
- if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status)) { \
+ if (unlikely(float64_is_any_nan(xa->VsrD(0)))) { \
+ if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status)) { \
vxsnan_flag = true; \
} \
- xt.VsrD(0) = xa.VsrD(0); \
- } else if (unlikely(float64_is_any_nan(xb.VsrD(0)))) { \
- if (float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) { \
+ t.VsrD(0) = xa->VsrD(0); \
+ } else if (unlikely(float64_is_any_nan(xb->VsrD(0)))) { \
+ if (float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) { \
vxsnan_flag = true; \
} \
- xt.VsrD(0) = xb.VsrD(0); \
- } else if (float64_is_zero(xa.VsrD(0)) && float64_is_zero(xb.VsrD(0))) { \
+ t.VsrD(0) = xb->VsrD(0); \
+ } else if (float64_is_zero(xa->VsrD(0)) && \
+ float64_is_zero(xb->VsrD(0))) { \
if (max) { \
- if (!float64_is_neg(xa.VsrD(0)) || !float64_is_neg(xb.VsrD(0))) { \
- xt.VsrD(0) = 0ULL; \
+ if (!float64_is_neg(xa->VsrD(0)) || \
+ !float64_is_neg(xb->VsrD(0))) { \
+ t.VsrD(0) = 0ULL; \
} else { \
- xt.VsrD(0) = 0x8000000000000000ULL; \
+ t.VsrD(0) = 0x8000000000000000ULL; \
} \
} else { \
- if (float64_is_neg(xa.VsrD(0)) || float64_is_neg(xb.VsrD(0))) { \
- xt.VsrD(0) = 0x8000000000000000ULL; \
+ if (float64_is_neg(xa->VsrD(0)) || \
+ float64_is_neg(xb->VsrD(0))) { \
+ t.VsrD(0) = 0x8000000000000000ULL; \
} else { \
- xt.VsrD(0) = 0ULL; \
+ t.VsrD(0) = 0ULL; \
} \
} \
} else if ((max && \
- !float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) || \
+ !float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status)) || \
(!max && \
- float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status))) { \
- xt.VsrD(0) = xa.VsrD(0); \
+ float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status))) { \
+ t.VsrD(0) = xa->VsrD(0); \
} else { \
- xt.VsrD(0) = xb.VsrD(0); \
+ t.VsrD(0) = xb->VsrD(0); \
} \
\
vex_flag = fpscr_ve & vxsnan_flag; \
@@ -2745,7 +2668,7 @@ void helper_##name(CPUPPCState *env, uint32_t opcode) \
float_invalid_op_vxsnan(env, GETPC()); \
} \
if (!vex_flag) { \
- putVSR(rD(opcode) + 32, &xt, env); \
+ *xt = t; \
} \
} \
@@ -2763,46 +2686,42 @@ VSX_MAX_MINJ(xsminjdp, 0);
* exp - expected result of comparison
*/
#define VSX_CMP(op, nels, tp, fld, cmp, svxvc, exp) \
-void helper_##op(CPUPPCState *env, uint32_t opcode) \
+uint32_t helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \
+ ppc_vsr_t *xa, ppc_vsr_t *xb) \
{ \
- ppc_vsr_t xt, xa, xb; \
+ ppc_vsr_t t = *xt; \
+ uint32_t crf6 = 0; \
int i; \
int all_true = 1; \
int all_false = 1; \
\
- getVSR(xA(opcode), &xa, env); \
- getVSR(xB(opcode), &xb, env); \
- getVSR(xT(opcode), &xt, env); \
- \
for (i = 0; i < nels; i++) { \
- if (unlikely(tp##_is_any_nan(xa.fld) || \
- tp##_is_any_nan(xb.fld))) { \
- if (tp##_is_signaling_nan(xa.fld, &env->fp_status) || \
- tp##_is_signaling_nan(xb.fld, &env->fp_status)) { \
+ if (unlikely(tp##_is_any_nan(xa->fld) || \
+ tp##_is_any_nan(xb->fld))) { \
+ if (tp##_is_signaling_nan(xa->fld, &env->fp_status) || \
+ tp##_is_signaling_nan(xb->fld, &env->fp_status)) { \
float_invalid_op_vxsnan(env, GETPC()); \
} \
if (svxvc) { \
float_invalid_op_vxvc(env, 0, GETPC()); \
} \
- xt.fld = 0; \
+ t.fld = 0; \
all_true = 0; \
} else { \
- if (tp##_##cmp(xb.fld, xa.fld, &env->fp_status) == exp) { \
- xt.fld = -1; \
+ if (tp##_##cmp(xb->fld, xa->fld, &env->fp_status) == exp) { \
+ t.fld = -1; \
all_false = 0; \
} else { \
- xt.fld = 0; \
+ t.fld = 0; \
all_true = 0; \
} \
} \
} \
\
- putVSR(xT(opcode), &xt, env); \
- if ((opcode >> (31 - 21)) & 1) { \
- env->crf[6] = (all_true ? 0x8 : 0) | (all_false ? 0x2 : 0); \
- } \
- do_float_check_status(env, GETPC()); \
- }
+ *xt = t; \
+ crf6 = (all_true ? 0x8 : 0) | (all_false ? 0x2 : 0); \
+ return crf6; \
+}
VSX_CMP(xvcmpeqdp, 2, float64, VsrD(i), eq, 0, 1)
VSX_CMP(xvcmpgedp, 2, float64, VsrD(i), le, 1, 1)
@@ -2824,27 +2743,24 @@ VSX_CMP(xvcmpnesp, 4, float32, VsrW(i), eq, 0, 0)
* sfprf - set FPRF
*/
#define VSX_CVT_FP_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf) \
-void helper_##op(CPUPPCState *env, uint32_t opcode) \
+void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
{ \
- ppc_vsr_t xt, xb; \
+ ppc_vsr_t t = *xt; \
int i; \
\
- getVSR(xB(opcode), &xb, env); \
- getVSR(xT(opcode), &xt, env); \
- \
for (i = 0; i < nels; i++) { \
- xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status); \
- if (unlikely(stp##_is_signaling_nan(xb.sfld, \
+ t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status); \
+ if (unlikely(stp##_is_signaling_nan(xb->sfld, \
&env->fp_status))) { \
float_invalid_op_vxsnan(env, GETPC()); \
- xt.tfld = ttp##_snan_to_qnan(xt.tfld); \
+ t.tfld = ttp##_snan_to_qnan(t.tfld); \
} \
if (sfprf) { \
- helper_compute_fprf_##ttp(env, xt.tfld); \
+ helper_compute_fprf_##ttp(env, t.tfld); \
} \
} \
\
- putVSR(xT(opcode), &xt, env); \
+ *xt = t; \
do_float_check_status(env, GETPC()); \
}
@@ -2864,27 +2780,25 @@ VSX_CVT_FP_TO_FP(xvcvspdp, 2, float32, float64, VsrW(2 * i), VsrD(i), 0)
* sfprf - set FPRF
*/
#define VSX_CVT_FP_TO_FP_VECTOR(op, nels, stp, ttp, sfld, tfld, sfprf) \
-void helper_##op(CPUPPCState *env, uint32_t opcode) \
+void helper_##op(CPUPPCState *env, uint32_t opcode, \
+ ppc_vsr_t *xt, ppc_vsr_t *xb) \
{ \
- ppc_vsr_t xt, xb; \
+ ppc_vsr_t t = *xt; \
int i; \
\
- getVSR(rB(opcode) + 32, &xb, env); \
- getVSR(rD(opcode) + 32, &xt, env); \
- \
for (i = 0; i < nels; i++) { \
- xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status); \
- if (unlikely(stp##_is_signaling_nan(xb.sfld, \
+ t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status); \
+ if (unlikely(stp##_is_signaling_nan(xb->sfld, \
&env->fp_status))) { \
float_invalid_op_vxsnan(env, GETPC()); \
- xt.tfld = ttp##_snan_to_qnan(xt.tfld); \
+ t.tfld = ttp##_snan_to_qnan(t.tfld); \
} \
if (sfprf) { \
- helper_compute_fprf_##ttp(env, xt.tfld); \
+ helper_compute_fprf_##ttp(env, t.tfld); \
} \
} \
\
- putVSR(rD(opcode) + 32, &xt, env); \
+ *xt = t; \
do_float_check_status(env, GETPC()); \
}
@@ -2902,27 +2816,24 @@ VSX_CVT_FP_TO_FP_VECTOR(xscvdpqp, 1, float64, float128, VsrD(0), f128, 1)
* sfprf - set FPRF
*/
#define VSX_CVT_FP_TO_FP_HP(op, nels, stp, ttp, sfld, tfld, sfprf) \
-void helper_##op(CPUPPCState *env, uint32_t opcode) \
+void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
{ \
- ppc_vsr_t xt, xb; \
+ ppc_vsr_t t = { }; \
int i; \
\
- getVSR(xB(opcode), &xb, env); \
- memset(&xt, 0, sizeof(xt)); \
- \
for (i = 0; i < nels; i++) { \
- xt.tfld = stp##_to_##ttp(xb.sfld, 1, &env->fp_status); \
- if (unlikely(stp##_is_signaling_nan(xb.sfld, \
+ t.tfld = stp##_to_##ttp(xb->sfld, 1, &env->fp_status); \
+ if (unlikely(stp##_is_signaling_nan(xb->sfld, \
&env->fp_status))) { \
float_invalid_op_vxsnan(env, GETPC()); \
- xt.tfld = ttp##_snan_to_qnan(xt.tfld); \
+ t.tfld = ttp##_snan_to_qnan(t.tfld); \
} \
if (sfprf) { \
- helper_compute_fprf_##ttp(env, xt.tfld); \
+ helper_compute_fprf_##ttp(env, t.tfld); \
} \
} \
\
- putVSR(xT(opcode), &xt, env); \
+ *xt = t; \
do_float_check_status(env, GETPC()); \
}
@@ -2935,28 +2846,26 @@ VSX_CVT_FP_TO_FP_HP(xvcvhpsp, 4, float16, float32, VsrH(2 * i + 1), VsrW(i), 0)
* xscvqpdp isn't using VSX_CVT_FP_TO_FP() because xscvqpdpo will be
* added to this later.
*/
-void helper_xscvqpdp(CPUPPCState *env, uint32_t opcode)
+void helper_xscvqpdp(CPUPPCState *env, uint32_t opcode,
+ ppc_vsr_t *xt, ppc_vsr_t *xb)
{
- ppc_vsr_t xt, xb;
+ ppc_vsr_t t = { };
float_status tstat;
- getVSR(rB(opcode) + 32, &xb, env);
- memset(&xt, 0, sizeof(xt));
-
tstat = env->fp_status;
if (unlikely(Rc(opcode) != 0)) {
tstat.float_rounding_mode = float_round_to_odd;
}
- xt.VsrD(0) = float128_to_float64(xb.f128, &tstat);
+ t.VsrD(0) = float128_to_float64(xb->f128, &tstat);
env->fp_status.float_exception_flags |= tstat.float_exception_flags;
- if (unlikely(float128_is_signaling_nan(xb.f128, &tstat))) {
+ if (unlikely(float128_is_signaling_nan(xb->f128, &tstat))) {
float_invalid_op_vxsnan(env, GETPC());
- xt.VsrD(0) = float64_snan_to_qnan(xt.VsrD(0));
+ t.VsrD(0) = float64_snan_to_qnan(t.VsrD(0));
}
- helper_compute_fprf_float64(env, xt.VsrD(0));
+ helper_compute_fprf_float64(env, t.VsrD(0));
- putVSR(rD(opcode) + 32, &xt, env);
+ *xt = t;
do_float_check_status(env, GETPC());
}
@@ -2987,27 +2896,24 @@ uint64_t helper_xscvspdpn(CPUPPCState *env, uint64_t xb)
* rnan - resulting NaN
*/
#define VSX_CVT_FP_TO_INT(op, nels, stp, ttp, sfld, tfld, rnan) \
-void helper_##op(CPUPPCState *env, uint32_t opcode) \
+void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
{ \
int all_flags = env->fp_status.float_exception_flags, flags; \
- ppc_vsr_t xt, xb; \
+ ppc_vsr_t t = *xt; \
int i; \
\
- getVSR(xB(opcode), &xb, env); \
- getVSR(xT(opcode), &xt, env); \
- \
for (i = 0; i < nels; i++) { \
env->fp_status.float_exception_flags = 0; \
- xt.tfld = stp##_to_##ttp##_round_to_zero(xb.sfld, &env->fp_status); \
+ t.tfld = stp##_to_##ttp##_round_to_zero(xb->sfld, &env->fp_status); \
flags = env->fp_status.float_exception_flags; \
if (unlikely(flags & float_flag_invalid)) { \
- float_invalid_cvt(env, 0, GETPC(), stp##_classify(xb.sfld)); \
- xt.tfld = rnan; \
+ float_invalid_cvt(env, 0, GETPC(), stp##_classify(xb->sfld)); \
+ t.tfld = rnan; \
} \
all_flags |= flags; \
} \
\
- putVSR(xT(opcode), &xt, env); \
+ *xt = t; \
env->fp_status.float_exception_flags = all_flags; \
do_float_check_status(env, GETPC()); \
}
@@ -3040,20 +2946,18 @@ VSX_CVT_FP_TO_INT(xvcvspuxws, 4, float32, uint32, VsrW(i), VsrW(i), 0U)
* rnan - resulting NaN
*/
#define VSX_CVT_FP_TO_INT_VECTOR(op, stp, ttp, sfld, tfld, rnan) \
-void helper_##op(CPUPPCState *env, uint32_t opcode) \
+void helper_##op(CPUPPCState *env, uint32_t opcode, \
+ ppc_vsr_t *xt, ppc_vsr_t *xb) \
{ \
- ppc_vsr_t xt, xb; \
+ ppc_vsr_t t = { }; \
\
- getVSR(rB(opcode) + 32, &xb, env); \
- memset(&xt, 0, sizeof(xt)); \
- \
- xt.tfld = stp##_to_##ttp##_round_to_zero(xb.sfld, &env->fp_status); \
+ t.tfld = stp##_to_##ttp##_round_to_zero(xb->sfld, &env->fp_status); \
if (env->fp_status.float_exception_flags & float_flag_invalid) { \
- float_invalid_cvt(env, 0, GETPC(), stp##_classify(xb.sfld)); \
- xt.tfld = rnan; \
+ float_invalid_cvt(env, 0, GETPC(), stp##_classify(xb->sfld)); \
+ t.tfld = rnan; \
} \
\
- putVSR(rD(opcode) + 32, &xt, env); \
+ *xt = t; \
do_float_check_status(env, GETPC()); \
}
@@ -3077,25 +2981,22 @@ VSX_CVT_FP_TO_INT_VECTOR(xscvqpuwz, float128, uint32, f128, VsrD(0), 0x0ULL)
* sfprf - set FPRF
*/
#define VSX_CVT_INT_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf, r2sp) \
-void helper_##op(CPUPPCState *env, uint32_t opcode) \
+void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
{ \
- ppc_vsr_t xt, xb; \
+ ppc_vsr_t t = *xt; \
int i; \
\
- getVSR(xB(opcode), &xb, env); \
- getVSR(xT(opcode), &xt, env); \
- \
for (i = 0; i < nels; i++) { \
- xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status); \
+ t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status); \
if (r2sp) { \
- xt.tfld = helper_frsp(env, xt.tfld); \
+ t.tfld = helper_frsp(env, t.tfld); \
} \
if (sfprf) { \
- helper_compute_fprf_float64(env, xt.tfld); \
+ helper_compute_fprf_float64(env, t.tfld); \
} \
} \
\
- putVSR(xT(opcode), &xt, env); \
+ *xt = t; \
do_float_check_status(env, GETPC()); \
}
@@ -3121,17 +3022,15 @@ VSX_CVT_INT_TO_FP(xvcvuxwsp, 4, uint32, float32, VsrW(i), VsrW(i), 0, 0)
* tfld - target vsr_t field
*/
#define VSX_CVT_INT_TO_FP_VECTOR(op, stp, ttp, sfld, tfld) \
-void helper_##op(CPUPPCState *env, uint32_t opcode) \
+void helper_##op(CPUPPCState *env, uint32_t opcode, \
+ ppc_vsr_t *xt, ppc_vsr_t *xb) \
{ \
- ppc_vsr_t xt, xb; \
+ ppc_vsr_t t = *xt; \
\
- getVSR(rB(opcode) + 32, &xb, env); \
- getVSR(rD(opcode) + 32, &xt, env); \
+ t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status); \
+ helper_compute_fprf_##ttp(env, t.tfld); \
\
- xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status); \
- helper_compute_fprf_##ttp(env, xt.tfld); \
- \
- putVSR(xT(opcode) + 32, &xt, env); \
+ *xt = t; \
do_float_check_status(env, GETPC()); \
}
@@ -3155,27 +3054,25 @@ VSX_CVT_INT_TO_FP_VECTOR(xscvudqp, uint64, float128, VsrD(0), f128)
* sfprf - set FPRF
*/
#define VSX_ROUND(op, nels, tp, fld, rmode, sfprf) \
-void helper_##op(CPUPPCState *env, uint32_t opcode) \
+void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
{ \
- ppc_vsr_t xt, xb; \
+ ppc_vsr_t t = *xt; \
int i; \
- getVSR(xB(opcode), &xb, env); \
- getVSR(xT(opcode), &xt, env); \
\
if (rmode != FLOAT_ROUND_CURRENT) { \
set_float_rounding_mode(rmode, &env->fp_status); \
} \
\
for (i = 0; i < nels; i++) { \
- if (unlikely(tp##_is_signaling_nan(xb.fld, \
+ if (unlikely(tp##_is_signaling_nan(xb->fld, \
&env->fp_status))) { \
float_invalid_op_vxsnan(env, GETPC()); \
- xt.fld = tp##_snan_to_qnan(xb.fld); \
+ t.fld = tp##_snan_to_qnan(xb->fld); \
} else { \
- xt.fld = tp##_round_to_int(xb.fld, &env->fp_status); \
+ t.fld = tp##_round_to_int(xb->fld, &env->fp_status); \
} \
if (sfprf) { \
- helper_compute_fprf_float64(env, xt.fld); \
+ helper_compute_fprf_float64(env, t.fld); \
} \
} \
\
@@ -3189,7 +3086,7 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
env->fp_status.float_exception_flags &= ~float_flag_inexact; \
} \
\
- putVSR(xT(opcode), &xt, env); \
+ *xt = t; \
do_float_check_status(env, GETPC()); \
}
@@ -3223,46 +3120,41 @@ uint64_t helper_xsrsp(CPUPPCState *env, uint64_t xb)
}
#define VSX_XXPERM(op, indexed) \
-void helper_##op(CPUPPCState *env, uint32_t opcode) \
+void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \
+ ppc_vsr_t *xa, ppc_vsr_t *pcv) \
{ \
- ppc_vsr_t xt, xa, pcv, xto; \
+ ppc_vsr_t t = *xt; \
int i, idx; \
\
- getVSR(xA(opcode), &xa, env); \
- getVSR(xT(opcode), &xt, env); \
- getVSR(xB(opcode), &pcv, env); \
- \
for (i = 0; i < 16; i++) { \
- idx = pcv.VsrB(i) & 0x1F; \
+ idx = pcv->VsrB(i) & 0x1F; \
if (indexed) { \
idx = 31 - idx; \
} \
- xto.VsrB(i) = (idx <= 15) ? xa.VsrB(idx) : xt.VsrB(idx - 16); \
+ t.VsrB(i) = (idx <= 15) ? xa->VsrB(idx) \
+ : xt->VsrB(idx - 16); \
} \
- putVSR(xT(opcode), &xto, env); \
+ *xt = t; \
}
VSX_XXPERM(xxperm, 0)
VSX_XXPERM(xxpermr, 1)
-void helper_xvxsigsp(CPUPPCState *env, uint32_t opcode)
+void helper_xvxsigsp(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)
{
- ppc_vsr_t xt, xb;
+ ppc_vsr_t t = { };
uint32_t exp, i, fraction;
- getVSR(xB(opcode), &xb, env);
- memset(&xt, 0, sizeof(xt));
-
for (i = 0; i < 4; i++) {
- exp = (xb.VsrW(i) >> 23) & 0xFF;
- fraction = xb.VsrW(i) & 0x7FFFFF;
+ exp = (xb->VsrW(i) >> 23) & 0xFF;
+ fraction = xb->VsrW(i) & 0x7FFFFF;
if (exp != 0 && exp != 255) {
- xt.VsrW(i) = fraction | 0x00800000;
+ t.VsrW(i) = fraction | 0x00800000;
} else {
- xt.VsrW(i) = fraction;
+ t.VsrW(i) = fraction;
}
}
- putVSR(xT(opcode), &xt, env);
+ *xt = t;
}
/*
@@ -3279,27 +3171,28 @@ void helper_xvxsigsp(CPUPPCState *env, uint32_t opcode)
#define VSX_TEST_DC(op, nels, xbn, tp, fld, tfld, fld_max, scrf) \
void helper_##op(CPUPPCState *env, uint32_t opcode) \
{ \
- ppc_vsr_t xt, xb; \
+ ppc_vsr_t *xt = &env->vsr[xT(opcode)]; \
+ ppc_vsr_t *xb = &env->vsr[xbn]; \
+ ppc_vsr_t t = { }; \
uint32_t i, sign, dcmx; \
uint32_t cc, match = 0; \
\
- getVSR(xbn, &xb, env); \
if (!scrf) { \
- memset(&xt, 0, sizeof(xt)); \
dcmx = DCMX_XV(opcode); \
} else { \
+ t = *xt; \
dcmx = DCMX(opcode); \
} \
\
for (i = 0; i < nels; i++) { \
- sign = tp##_is_neg(xb.fld); \
- if (tp##_is_any_nan(xb.fld)) { \
+ sign = tp##_is_neg(xb->fld); \
+ if (tp##_is_any_nan(xb->fld)) { \
match = extract32(dcmx, 6, 1); \
- } else if (tp##_is_infinity(xb.fld)) { \
+ } else if (tp##_is_infinity(xb->fld)) { \
match = extract32(dcmx, 4 + !sign, 1); \
- } else if (tp##_is_zero(xb.fld)) { \
+ } else if (tp##_is_zero(xb->fld)) { \
match = extract32(dcmx, 2 + !sign, 1); \
- } else if (tp##_is_zero_or_denormal(xb.fld)) { \
+ } else if (tp##_is_zero_or_denormal(xb->fld)) { \
match = extract32(dcmx, 0 + !sign, 1); \
} \
\
@@ -3309,12 +3202,12 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
env->fpscr |= cc << FPSCR_FPRF; \
env->crf[BF(opcode)] = cc; \
} else { \
- xt.tfld = match ? fld_max : 0; \
+ t.tfld = match ? fld_max : 0; \
} \
match = 0; \
} \
if (!scrf) { \
- putVSR(xT(opcode), &xt, env); \
+ *xt = t; \
} \
}
@@ -3323,31 +3216,29 @@ VSX_TEST_DC(xvtstdcsp, 4, xB(opcode), float32, VsrW(i), VsrW(i), UINT32_MAX, 0)
VSX_TEST_DC(xststdcdp, 1, xB(opcode), float64, VsrD(0), VsrD(0), 0, 1)
VSX_TEST_DC(xststdcqp, 1, (rB(opcode) + 32), float128, f128, VsrD(0), 0, 1)
-void helper_xststdcsp(CPUPPCState *env, uint32_t opcode)
+void helper_xststdcsp(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xb)
{
- ppc_vsr_t xb;
uint32_t dcmx, sign, exp;
uint32_t cc, match = 0, not_sp = 0;
- getVSR(xB(opcode), &xb, env);
dcmx = DCMX(opcode);
- exp = (xb.VsrD(0) >> 52) & 0x7FF;
+ exp = (xb->VsrD(0) >> 52) & 0x7FF;
- sign = float64_is_neg(xb.VsrD(0));
- if (float64_is_any_nan(xb.VsrD(0))) {
+ sign = float64_is_neg(xb->VsrD(0));
+ if (float64_is_any_nan(xb->VsrD(0))) {
match = extract32(dcmx, 6, 1);
- } else if (float64_is_infinity(xb.VsrD(0))) {
+ } else if (float64_is_infinity(xb->VsrD(0))) {
match = extract32(dcmx, 4 + !sign, 1);
- } else if (float64_is_zero(xb.VsrD(0))) {
+ } else if (float64_is_zero(xb->VsrD(0))) {
match = extract32(dcmx, 2 + !sign, 1);
- } else if (float64_is_zero_or_denormal(xb.VsrD(0)) ||
+ } else if (float64_is_zero_or_denormal(xb->VsrD(0)) ||
(exp > 0 && exp < 0x381)) {
match = extract32(dcmx, 0 + !sign, 1);
}
- not_sp = !float64_eq(xb.VsrD(0),
+ not_sp = !float64_eq(xb->VsrD(0),
float32_to_float64(
- float64_to_float32(xb.VsrD(0), &env->fp_status),
+ float64_to_float32(xb->VsrD(0), &env->fp_status),
&env->fp_status), &env->fp_status);
cc = sign << CRF_LT_BIT | match << CRF_EQ_BIT | not_sp << CRF_SO_BIT;
@@ -3356,18 +3247,16 @@ void helper_xststdcsp(CPUPPCState *env, uint32_t opcode)
env->crf[BF(opcode)] = cc;
}
-void helper_xsrqpi(CPUPPCState *env, uint32_t opcode)
+void helper_xsrqpi(CPUPPCState *env, uint32_t opcode,
+ ppc_vsr_t *xt, ppc_vsr_t *xb)
{
- ppc_vsr_t xb;
- ppc_vsr_t xt;
+ ppc_vsr_t t = { };
uint8_t r = Rrm(opcode);
uint8_t ex = Rc(opcode);
uint8_t rmc = RMC(opcode);
uint8_t rmode = 0;
float_status tstat;
- getVSR(rB(opcode) + 32, &xb, env);
- memset(&xt, 0, sizeof(xt));
helper_reset_fpstatus(env);
if (r == 0 && rmc == 0) {
@@ -3396,13 +3285,13 @@ void helper_xsrqpi(CPUPPCState *env, uint32_t opcode)
tstat = env->fp_status;
set_float_exception_flags(0, &tstat);
set_float_rounding_mode(rmode, &tstat);
- xt.f128 = float128_round_to_int(xb.f128, &tstat);
+ t.f128 = float128_round_to_int(xb->f128, &tstat);
env->fp_status.float_exception_flags |= tstat.float_exception_flags;
if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
- if (float128_is_signaling_nan(xb.f128, &tstat)) {
+ if (float128_is_signaling_nan(xb->f128, &tstat)) {
float_invalid_op_vxsnan(env, GETPC());
- xt.f128 = float128_snan_to_qnan(xt.f128);
+ t.f128 = float128_snan_to_qnan(t.f128);
}
}
@@ -3410,23 +3299,21 @@ void helper_xsrqpi(CPUPPCState *env, uint32_t opcode)
env->fp_status.float_exception_flags &= ~float_flag_inexact;
}
- helper_compute_fprf_float128(env, xt.f128);
+ helper_compute_fprf_float128(env, t.f128);
do_float_check_status(env, GETPC());
- putVSR(rD(opcode) + 32, &xt, env);
+ *xt = t;
}
-void helper_xsrqpxp(CPUPPCState *env, uint32_t opcode)
+void helper_xsrqpxp(CPUPPCState *env, uint32_t opcode,
+ ppc_vsr_t *xt, ppc_vsr_t *xb)
{
- ppc_vsr_t xb;
- ppc_vsr_t xt;
+ ppc_vsr_t t = { };
uint8_t r = Rrm(opcode);
uint8_t rmc = RMC(opcode);
uint8_t rmode = 0;
floatx80 round_res;
float_status tstat;
- getVSR(rB(opcode) + 32, &xb, env);
- memset(&xt, 0, sizeof(xt));
helper_reset_fpstatus(env);
if (r == 0 && rmc == 0) {
@@ -3455,30 +3342,28 @@ void helper_xsrqpxp(CPUPPCState *env, uint32_t opcode)
tstat = env->fp_status;
set_float_exception_flags(0, &tstat);
set_float_rounding_mode(rmode, &tstat);
- round_res = float128_to_floatx80(xb.f128, &tstat);
- xt.f128 = floatx80_to_float128(round_res, &tstat);
+ round_res = float128_to_floatx80(xb->f128, &tstat);
+ t.f128 = floatx80_to_float128(round_res, &tstat);
env->fp_status.float_exception_flags |= tstat.float_exception_flags;
if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
- if (float128_is_signaling_nan(xb.f128, &tstat)) {
+ if (float128_is_signaling_nan(xb->f128, &tstat)) {
float_invalid_op_vxsnan(env, GETPC());
- xt.f128 = float128_snan_to_qnan(xt.f128);
+ t.f128 = float128_snan_to_qnan(t.f128);
}
}
- helper_compute_fprf_float128(env, xt.f128);
- putVSR(rD(opcode) + 32, &xt, env);
+ helper_compute_fprf_float128(env, t.f128);
+ *xt = t;
do_float_check_status(env, GETPC());
}
-void helper_xssqrtqp(CPUPPCState *env, uint32_t opcode)
+void helper_xssqrtqp(CPUPPCState *env, uint32_t opcode,
+ ppc_vsr_t *xt, ppc_vsr_t *xb)
{
- ppc_vsr_t xb;
- ppc_vsr_t xt;
+ ppc_vsr_t t = { };
float_status tstat;
- getVSR(rB(opcode) + 32, &xb, env);
- memset(&xt, 0, sizeof(xt));
helper_reset_fpstatus(env);
tstat = env->fp_status;
@@ -3487,34 +3372,32 @@ void helper_xssqrtqp(CPUPPCState *env, uint32_t opcode)
}
set_float_exception_flags(0, &tstat);
- xt.f128 = float128_sqrt(xb.f128, &tstat);
+ t.f128 = float128_sqrt(xb->f128, &tstat);
env->fp_status.float_exception_flags |= tstat.float_exception_flags;
if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
- if (float128_is_signaling_nan(xb.f128, &tstat)) {
+ if (float128_is_signaling_nan(xb->f128, &tstat)) {
float_invalid_op_vxsnan(env, GETPC());
- xt.f128 = float128_snan_to_qnan(xb.f128);
- } else if (float128_is_quiet_nan(xb.f128, &tstat)) {
- xt.f128 = xb.f128;
- } else if (float128_is_neg(xb.f128) && !float128_is_zero(xb.f128)) {
+ t.f128 = float128_snan_to_qnan(xb->f128);
+ } else if (float128_is_quiet_nan(xb->f128, &tstat)) {
+ t.f128 = xb->f128;
+ } else if (float128_is_neg(xb->f128) && !float128_is_zero(xb->f128)) {
float_invalid_op_vxsqrt(env, 1, GETPC());
- xt.f128 = float128_default_nan(&env->fp_status);
+ t.f128 = float128_default_nan(&env->fp_status);
}
}
- helper_compute_fprf_float128(env, xt.f128);
- putVSR(rD(opcode) + 32, &xt, env);
+ helper_compute_fprf_float128(env, t.f128);
+ *xt = t;
do_float_check_status(env, GETPC());
}
-void helper_xssubqp(CPUPPCState *env, uint32_t opcode)
+void helper_xssubqp(CPUPPCState *env, uint32_t opcode,
+ ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)
{
- ppc_vsr_t xt, xa, xb;
+ ppc_vsr_t t = *xt;
float_status tstat;
- getVSR(rA(opcode) + 32, &xa, env);
- getVSR(rB(opcode) + 32, &xb, env);
- getVSR(rD(opcode) + 32, &xt, env);
helper_reset_fpstatus(env);
tstat = env->fp_status;
@@ -3523,16 +3406,16 @@ void helper_xssubqp(CPUPPCState *env, uint32_t opcode)
}
set_float_exception_flags(0, &tstat);
- xt.f128 = float128_sub(xa.f128, xb.f128, &tstat);
+ t.f128 = float128_sub(xa->f128, xb->f128, &tstat);
env->fp_status.float_exception_flags |= tstat.float_exception_flags;
if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
float_invalid_op_addsub(env, 1, GETPC(),
- float128_classify(xa.f128) |
- float128_classify(xb.f128));
+ float128_classify(xa->f128) |
+ float128_classify(xb->f128));
}
- helper_compute_fprf_float128(env, xt.f128);
- putVSR(rD(opcode) + 32, &xt, env);
+ helper_compute_fprf_float128(env, t.f128);
+ *xt = t;
do_float_check_status(env, GETPC());
}
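
Every converted helper in the fpu_helper.c hunks above builds its result in a local ppc_vsr_t t and performs a single *xt = t store at the end, mirroring the read-then-write behaviour of the old getVSR()/putVSR() pair. A minimal standalone sketch of why that matters (the simplified demo_vsr_t type and helper name are illustrative, not from the tree): the target register may alias one of the sources, so writing through xt element by element could corrupt values still to be read.

#include <stdint.h>

typedef struct { uint8_t b[16]; } demo_vsr_t;   /* stand-in for ppc_vsr_t */

/*
 * Permute-style helper in the post-patch shape: safe even when xt and xa
 * point at the same register, because *xt is read into t up front and only
 * written back once, after every read of xa/xt is done.
 */
static void demo_perm(demo_vsr_t *xt, const demo_vsr_t *xa,
                      const demo_vsr_t *pcv)
{
    demo_vsr_t t = *xt;
    int i;

    for (i = 0; i < 16; i++) {
        int idx = pcv->b[i] & 0x1F;
        t.b[i] = (idx <= 15) ? xa->b[idx] : xt->b[idx - 16];
    }
    *xt = t;
}
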
diff --git a/target/ppc/helper.h b/target/ppc/helper.h
index 02b67a333e..380c9b1e2a 100644
--- a/target/ppc/helper.h
+++ b/target/ppc/helper.h
@@ -108,6 +108,10 @@ DEF_HELPER_FLAGS_1(ftsqrt, TCG_CALL_NO_RWG_SE, i32, i64)
#define dh_ctype_avr ppc_avr_t *
#define dh_is_signed_avr dh_is_signed_ptr
+#define dh_alias_vsr ptr
+#define dh_ctype_vsr ppc_vsr_t *
+#define dh_is_signed_vsr dh_is_signed_ptr
+
DEF_HELPER_3(vavgub, void, avr, avr, avr)
DEF_HELPER_3(vavguh, void, avr, avr, avr)
DEF_HELPER_3(vavguw, void, avr, avr, avr)
@@ -275,10 +279,10 @@ DEF_HELPER_3(stvebx, void, env, avr, tl)
DEF_HELPER_3(stvehx, void, env, avr, tl)
DEF_HELPER_3(stvewx, void, env, avr, tl)
#if defined(TARGET_PPC64)
-DEF_HELPER_4(lxvl, void, env, tl, tl, tl)
-DEF_HELPER_4(lxvll, void, env, tl, tl, tl)
-DEF_HELPER_4(stxvl, void, env, tl, tl, tl)
-DEF_HELPER_4(stxvll, void, env, tl, tl, tl)
+DEF_HELPER_4(lxvl, void, env, tl, vsr, tl)
+DEF_HELPER_4(lxvll, void, env, tl, vsr, tl)
+DEF_HELPER_4(stxvl, void, env, tl, vsr, tl)
+DEF_HELPER_4(stxvll, void, env, tl, vsr, tl)
#endif
DEF_HELPER_4(vsumsws, void, env, avr, avr, avr)
DEF_HELPER_4(vsum2sws, void, env, avr, avr, avr)
@@ -361,178 +365,162 @@ DEF_HELPER_4(bcdsr, i32, avr, avr, avr, i32)
DEF_HELPER_4(bcdtrunc, i32, avr, avr, avr, i32)
DEF_HELPER_4(bcdutrunc, i32, avr, avr, avr, i32)
-DEF_HELPER_2(xsadddp, void, env, i32)
-DEF_HELPER_2(xsaddqp, void, env, i32)
-DEF_HELPER_2(xssubdp, void, env, i32)
-DEF_HELPER_2(xsmuldp, void, env, i32)
-DEF_HELPER_2(xsmulqp, void, env, i32)
-DEF_HELPER_2(xsdivdp, void, env, i32)
-DEF_HELPER_2(xsdivqp, void, env, i32)
-DEF_HELPER_2(xsredp, void, env, i32)
-DEF_HELPER_2(xssqrtdp, void, env, i32)
-DEF_HELPER_2(xsrsqrtedp, void, env, i32)
-DEF_HELPER_2(xstdivdp, void, env, i32)
-DEF_HELPER_2(xstsqrtdp, void, env, i32)
-DEF_HELPER_2(xsmaddadp, void, env, i32)
-DEF_HELPER_2(xsmaddmdp, void, env, i32)
-DEF_HELPER_2(xsmsubadp, void, env, i32)
-DEF_HELPER_2(xsmsubmdp, void, env, i32)
-DEF_HELPER_2(xsnmaddadp, void, env, i32)
-DEF_HELPER_2(xsnmaddmdp, void, env, i32)
-DEF_HELPER_2(xsnmsubadp, void, env, i32)
-DEF_HELPER_2(xsnmsubmdp, void, env, i32)
-DEF_HELPER_2(xscmpeqdp, void, env, i32)
-DEF_HELPER_2(xscmpgtdp, void, env, i32)
-DEF_HELPER_2(xscmpgedp, void, env, i32)
-DEF_HELPER_2(xscmpnedp, void, env, i32)
-DEF_HELPER_2(xscmpexpdp, void, env, i32)
-DEF_HELPER_2(xscmpexpqp, void, env, i32)
-DEF_HELPER_2(xscmpodp, void, env, i32)
-DEF_HELPER_2(xscmpudp, void, env, i32)
-DEF_HELPER_2(xscmpoqp, void, env, i32)
-DEF_HELPER_2(xscmpuqp, void, env, i32)
-DEF_HELPER_2(xsmaxdp, void, env, i32)
-DEF_HELPER_2(xsmindp, void, env, i32)
-DEF_HELPER_2(xsmaxcdp, void, env, i32)
-DEF_HELPER_2(xsmincdp, void, env, i32)
-DEF_HELPER_2(xsmaxjdp, void, env, i32)
-DEF_HELPER_2(xsminjdp, void, env, i32)
-DEF_HELPER_2(xscvdphp, void, env, i32)
-DEF_HELPER_2(xscvdpqp, void, env, i32)
-DEF_HELPER_2(xscvdpsp, void, env, i32)
+DEF_HELPER_4(xsadddp, void, env, vsr, vsr, vsr)
+DEF_HELPER_5(xsaddqp, void, env, i32, vsr, vsr, vsr)
+DEF_HELPER_4(xssubdp, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(xsmuldp, void, env, vsr, vsr, vsr)
+DEF_HELPER_5(xsmulqp, void, env, i32, vsr, vsr, vsr)
+DEF_HELPER_4(xsdivdp, void, env, vsr, vsr, vsr)
+DEF_HELPER_5(xsdivqp, void, env, i32, vsr, vsr, vsr)
+DEF_HELPER_3(xsredp, void, env, vsr, vsr)
+DEF_HELPER_3(xssqrtdp, void, env, vsr, vsr)
+DEF_HELPER_3(xsrsqrtedp, void, env, vsr, vsr)
+DEF_HELPER_4(xstdivdp, void, env, i32, vsr, vsr)
+DEF_HELPER_3(xstsqrtdp, void, env, i32, vsr)
+DEF_HELPER_5(xsmadddp, void, env, vsr, vsr, vsr, vsr)
+DEF_HELPER_5(xsmsubdp, void, env, vsr, vsr, vsr, vsr)
+DEF_HELPER_5(xsnmadddp, void, env, vsr, vsr, vsr, vsr)
+DEF_HELPER_5(xsnmsubdp, void, env, vsr, vsr, vsr, vsr)
+DEF_HELPER_4(xscmpeqdp, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(xscmpgtdp, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(xscmpgedp, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(xscmpnedp, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(xscmpexpdp, void, env, i32, vsr, vsr)
+DEF_HELPER_4(xscmpexpqp, void, env, i32, vsr, vsr)
+DEF_HELPER_4(xscmpodp, void, env, i32, vsr, vsr)
+DEF_HELPER_4(xscmpudp, void, env, i32, vsr, vsr)
+DEF_HELPER_4(xscmpoqp, void, env, i32, vsr, vsr)
+DEF_HELPER_4(xscmpuqp, void, env, i32, vsr, vsr)
+DEF_HELPER_4(xsmaxdp, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(xsmindp, void, env, vsr, vsr, vsr)
+DEF_HELPER_5(xsmaxcdp, void, env, i32, vsr, vsr, vsr)
+DEF_HELPER_5(xsmincdp, void, env, i32, vsr, vsr, vsr)
+DEF_HELPER_5(xsmaxjdp, void, env, i32, vsr, vsr, vsr)
+DEF_HELPER_5(xsminjdp, void, env, i32, vsr, vsr, vsr)
+DEF_HELPER_3(xscvdphp, void, env, vsr, vsr)
+DEF_HELPER_4(xscvdpqp, void, env, i32, vsr, vsr)
+DEF_HELPER_3(xscvdpsp, void, env, vsr, vsr)
DEF_HELPER_2(xscvdpspn, i64, env, i64)
-DEF_HELPER_2(xscvqpdp, void, env, i32)
-DEF_HELPER_2(xscvqpsdz, void, env, i32)
-DEF_HELPER_2(xscvqpswz, void, env, i32)
-DEF_HELPER_2(xscvqpudz, void, env, i32)
-DEF_HELPER_2(xscvqpuwz, void, env, i32)
-DEF_HELPER_2(xscvhpdp, void, env, i32)
-DEF_HELPER_2(xscvsdqp, void, env, i32)
-DEF_HELPER_2(xscvspdp, void, env, i32)
+DEF_HELPER_4(xscvqpdp, void, env, i32, vsr, vsr)
+DEF_HELPER_4(xscvqpsdz, void, env, i32, vsr, vsr)
+DEF_HELPER_4(xscvqpswz, void, env, i32, vsr, vsr)
+DEF_HELPER_4(xscvqpudz, void, env, i32, vsr, vsr)
+DEF_HELPER_4(xscvqpuwz, void, env, i32, vsr, vsr)
+DEF_HELPER_3(xscvhpdp, void, env, vsr, vsr)
+DEF_HELPER_4(xscvsdqp, void, env, i32, vsr, vsr)
+DEF_HELPER_3(xscvspdp, void, env, vsr, vsr)
DEF_HELPER_2(xscvspdpn, i64, env, i64)
-DEF_HELPER_2(xscvdpsxds, void, env, i32)
-DEF_HELPER_2(xscvdpsxws, void, env, i32)
-DEF_HELPER_2(xscvdpuxds, void, env, i32)
-DEF_HELPER_2(xscvdpuxws, void, env, i32)
-DEF_HELPER_2(xscvsxddp, void, env, i32)
-DEF_HELPER_2(xscvuxdsp, void, env, i32)
-DEF_HELPER_2(xscvsxdsp, void, env, i32)
-DEF_HELPER_2(xscvudqp, void, env, i32)
-DEF_HELPER_2(xscvuxddp, void, env, i32)
-DEF_HELPER_2(xststdcsp, void, env, i32)
+DEF_HELPER_3(xscvdpsxds, void, env, vsr, vsr)
+DEF_HELPER_3(xscvdpsxws, void, env, vsr, vsr)
+DEF_HELPER_3(xscvdpuxds, void, env, vsr, vsr)
+DEF_HELPER_3(xscvdpuxws, void, env, vsr, vsr)
+DEF_HELPER_3(xscvsxddp, void, env, vsr, vsr)
+DEF_HELPER_3(xscvuxdsp, void, env, vsr, vsr)
+DEF_HELPER_3(xscvsxdsp, void, env, vsr, vsr)
+DEF_HELPER_4(xscvudqp, void, env, i32, vsr, vsr)
+DEF_HELPER_3(xscvuxddp, void, env, vsr, vsr)
+DEF_HELPER_3(xststdcsp, void, env, i32, vsr)
DEF_HELPER_2(xststdcdp, void, env, i32)
DEF_HELPER_2(xststdcqp, void, env, i32)
-DEF_HELPER_2(xsrdpi, void, env, i32)
-DEF_HELPER_2(xsrdpic, void, env, i32)
-DEF_HELPER_2(xsrdpim, void, env, i32)
-DEF_HELPER_2(xsrdpip, void, env, i32)
-DEF_HELPER_2(xsrdpiz, void, env, i32)
-DEF_HELPER_2(xsrqpi, void, env, i32)
-DEF_HELPER_2(xsrqpxp, void, env, i32)
-DEF_HELPER_2(xssqrtqp, void, env, i32)
-DEF_HELPER_2(xssubqp, void, env, i32)
+DEF_HELPER_3(xsrdpi, void, env, vsr, vsr)
+DEF_HELPER_3(xsrdpic, void, env, vsr, vsr)
+DEF_HELPER_3(xsrdpim, void, env, vsr, vsr)
+DEF_HELPER_3(xsrdpip, void, env, vsr, vsr)
+DEF_HELPER_3(xsrdpiz, void, env, vsr, vsr)
+DEF_HELPER_4(xsrqpi, void, env, i32, vsr, vsr)
+DEF_HELPER_4(xsrqpxp, void, env, i32, vsr, vsr)
+DEF_HELPER_4(xssqrtqp, void, env, i32, vsr, vsr)
+DEF_HELPER_5(xssubqp, void, env, i32, vsr, vsr, vsr)
-DEF_HELPER_2(xsaddsp, void, env, i32)
-DEF_HELPER_2(xssubsp, void, env, i32)
-DEF_HELPER_2(xsmulsp, void, env, i32)
-DEF_HELPER_2(xsdivsp, void, env, i32)
-DEF_HELPER_2(xsresp, void, env, i32)
+DEF_HELPER_4(xsaddsp, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(xssubsp, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(xsmulsp, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(xsdivsp, void, env, vsr, vsr, vsr)
+DEF_HELPER_3(xsresp, void, env, vsr, vsr)
DEF_HELPER_2(xsrsp, i64, env, i64)
-DEF_HELPER_2(xssqrtsp, void, env, i32)
-DEF_HELPER_2(xsrsqrtesp, void, env, i32)
-DEF_HELPER_2(xsmaddasp, void, env, i32)
-DEF_HELPER_2(xsmaddmsp, void, env, i32)
-DEF_HELPER_2(xsmsubasp, void, env, i32)
-DEF_HELPER_2(xsmsubmsp, void, env, i32)
-DEF_HELPER_2(xsnmaddasp, void, env, i32)
-DEF_HELPER_2(xsnmaddmsp, void, env, i32)
-DEF_HELPER_2(xsnmsubasp, void, env, i32)
-DEF_HELPER_2(xsnmsubmsp, void, env, i32)
+DEF_HELPER_3(xssqrtsp, void, env, vsr, vsr)
+DEF_HELPER_3(xsrsqrtesp, void, env, vsr, vsr)
+DEF_HELPER_5(xsmaddsp, void, env, vsr, vsr, vsr, vsr)
+DEF_HELPER_5(xsmsubsp, void, env, vsr, vsr, vsr, vsr)
+DEF_HELPER_5(xsnmaddsp, void, env, vsr, vsr, vsr, vsr)
+DEF_HELPER_5(xsnmsubsp, void, env, vsr, vsr, vsr, vsr)
-DEF_HELPER_2(xvadddp, void, env, i32)
-DEF_HELPER_2(xvsubdp, void, env, i32)
-DEF_HELPER_2(xvmuldp, void, env, i32)
-DEF_HELPER_2(xvdivdp, void, env, i32)
-DEF_HELPER_2(xvredp, void, env, i32)
-DEF_HELPER_2(xvsqrtdp, void, env, i32)
-DEF_HELPER_2(xvrsqrtedp, void, env, i32)
-DEF_HELPER_2(xvtdivdp, void, env, i32)
-DEF_HELPER_2(xvtsqrtdp, void, env, i32)
-DEF_HELPER_2(xvmaddadp, void, env, i32)
-DEF_HELPER_2(xvmaddmdp, void, env, i32)
-DEF_HELPER_2(xvmsubadp, void, env, i32)
-DEF_HELPER_2(xvmsubmdp, void, env, i32)
-DEF_HELPER_2(xvnmaddadp, void, env, i32)
-DEF_HELPER_2(xvnmaddmdp, void, env, i32)
-DEF_HELPER_2(xvnmsubadp, void, env, i32)
-DEF_HELPER_2(xvnmsubmdp, void, env, i32)
-DEF_HELPER_2(xvmaxdp, void, env, i32)
-DEF_HELPER_2(xvmindp, void, env, i32)
-DEF_HELPER_2(xvcmpeqdp, void, env, i32)
-DEF_HELPER_2(xvcmpgedp, void, env, i32)
-DEF_HELPER_2(xvcmpgtdp, void, env, i32)
-DEF_HELPER_2(xvcmpnedp, void, env, i32)
-DEF_HELPER_2(xvcvdpsp, void, env, i32)
-DEF_HELPER_2(xvcvdpsxds, void, env, i32)
-DEF_HELPER_2(xvcvdpsxws, void, env, i32)
-DEF_HELPER_2(xvcvdpuxds, void, env, i32)
-DEF_HELPER_2(xvcvdpuxws, void, env, i32)
-DEF_HELPER_2(xvcvsxddp, void, env, i32)
-DEF_HELPER_2(xvcvuxddp, void, env, i32)
-DEF_HELPER_2(xvcvsxwdp, void, env, i32)
-DEF_HELPER_2(xvcvuxwdp, void, env, i32)
-DEF_HELPER_2(xvrdpi, void, env, i32)
-DEF_HELPER_2(xvrdpic, void, env, i32)
-DEF_HELPER_2(xvrdpim, void, env, i32)
-DEF_HELPER_2(xvrdpip, void, env, i32)
-DEF_HELPER_2(xvrdpiz, void, env, i32)
+DEF_HELPER_4(xvadddp, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(xvsubdp, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(xvmuldp, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(xvdivdp, void, env, vsr, vsr, vsr)
+DEF_HELPER_3(xvredp, void, env, vsr, vsr)
+DEF_HELPER_3(xvsqrtdp, void, env, vsr, vsr)
+DEF_HELPER_3(xvrsqrtedp, void, env, vsr, vsr)
+DEF_HELPER_4(xvtdivdp, void, env, i32, vsr, vsr)
+DEF_HELPER_3(xvtsqrtdp, void, env, i32, vsr)
+DEF_HELPER_5(xvmadddp, void, env, vsr, vsr, vsr, vsr)
+DEF_HELPER_5(xvmsubdp, void, env, vsr, vsr, vsr, vsr)
+DEF_HELPER_5(xvnmadddp, void, env, vsr, vsr, vsr, vsr)
+DEF_HELPER_5(xvnmsubdp, void, env, vsr, vsr, vsr, vsr)
+DEF_HELPER_4(xvmaxdp, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(xvmindp, void, env, vsr, vsr, vsr)
+DEF_HELPER_FLAGS_4(xvcmpeqdp, TCG_CALL_NO_RWG, i32, env, vsr, vsr, vsr)
+DEF_HELPER_FLAGS_4(xvcmpgedp, TCG_CALL_NO_RWG, i32, env, vsr, vsr, vsr)
+DEF_HELPER_FLAGS_4(xvcmpgtdp, TCG_CALL_NO_RWG, i32, env, vsr, vsr, vsr)
+DEF_HELPER_FLAGS_4(xvcmpnedp, TCG_CALL_NO_RWG, i32, env, vsr, vsr, vsr)
+DEF_HELPER_3(xvcvdpsp, void, env, vsr, vsr)
+DEF_HELPER_3(xvcvdpsxds, void, env, vsr, vsr)
+DEF_HELPER_3(xvcvdpsxws, void, env, vsr, vsr)
+DEF_HELPER_3(xvcvdpuxds, void, env, vsr, vsr)
+DEF_HELPER_3(xvcvdpuxws, void, env, vsr, vsr)
+DEF_HELPER_3(xvcvsxddp, void, env, vsr, vsr)
+DEF_HELPER_3(xvcvuxddp, void, env, vsr, vsr)
+DEF_HELPER_3(xvcvsxwdp, void, env, vsr, vsr)
+DEF_HELPER_3(xvcvuxwdp, void, env, vsr, vsr)
+DEF_HELPER_3(xvrdpi, void, env, vsr, vsr)
+DEF_HELPER_3(xvrdpic, void, env, vsr, vsr)
+DEF_HELPER_3(xvrdpim, void, env, vsr, vsr)
+DEF_HELPER_3(xvrdpip, void, env, vsr, vsr)
+DEF_HELPER_3(xvrdpiz, void, env, vsr, vsr)
-DEF_HELPER_2(xvaddsp, void, env, i32)
-DEF_HELPER_2(xvsubsp, void, env, i32)
-DEF_HELPER_2(xvmulsp, void, env, i32)
-DEF_HELPER_2(xvdivsp, void, env, i32)
-DEF_HELPER_2(xvresp, void, env, i32)
-DEF_HELPER_2(xvsqrtsp, void, env, i32)
-DEF_HELPER_2(xvrsqrtesp, void, env, i32)
-DEF_HELPER_2(xvtdivsp, void, env, i32)
-DEF_HELPER_2(xvtsqrtsp, void, env, i32)
-DEF_HELPER_2(xvmaddasp, void, env, i32)
-DEF_HELPER_2(xvmaddmsp, void, env, i32)
-DEF_HELPER_2(xvmsubasp, void, env, i32)
-DEF_HELPER_2(xvmsubmsp, void, env, i32)
-DEF_HELPER_2(xvnmaddasp, void, env, i32)
-DEF_HELPER_2(xvnmaddmsp, void, env, i32)
-DEF_HELPER_2(xvnmsubasp, void, env, i32)
-DEF_HELPER_2(xvnmsubmsp, void, env, i32)
-DEF_HELPER_2(xvmaxsp, void, env, i32)
-DEF_HELPER_2(xvminsp, void, env, i32)
-DEF_HELPER_2(xvcmpeqsp, void, env, i32)
-DEF_HELPER_2(xvcmpgesp, void, env, i32)
-DEF_HELPER_2(xvcmpgtsp, void, env, i32)
-DEF_HELPER_2(xvcmpnesp, void, env, i32)
-DEF_HELPER_2(xvcvspdp, void, env, i32)
-DEF_HELPER_2(xvcvsphp, void, env, i32)
-DEF_HELPER_2(xvcvhpsp, void, env, i32)
-DEF_HELPER_2(xvcvspsxds, void, env, i32)
-DEF_HELPER_2(xvcvspsxws, void, env, i32)
-DEF_HELPER_2(xvcvspuxds, void, env, i32)
-DEF_HELPER_2(xvcvspuxws, void, env, i32)
-DEF_HELPER_2(xvcvsxdsp, void, env, i32)
-DEF_HELPER_2(xvcvuxdsp, void, env, i32)
-DEF_HELPER_2(xvcvsxwsp, void, env, i32)
-DEF_HELPER_2(xvcvuxwsp, void, env, i32)
+DEF_HELPER_4(xvaddsp, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(xvsubsp, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(xvmulsp, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(xvdivsp, void, env, vsr, vsr, vsr)
+DEF_HELPER_3(xvresp, void, env, vsr, vsr)
+DEF_HELPER_3(xvsqrtsp, void, env, vsr, vsr)
+DEF_HELPER_3(xvrsqrtesp, void, env, vsr, vsr)
+DEF_HELPER_4(xvtdivsp, void, env, i32, vsr, vsr)
+DEF_HELPER_3(xvtsqrtsp, void, env, i32, vsr)
+DEF_HELPER_5(xvmaddsp, void, env, vsr, vsr, vsr, vsr)
+DEF_HELPER_5(xvmsubsp, void, env, vsr, vsr, vsr, vsr)
+DEF_HELPER_5(xvnmaddsp, void, env, vsr, vsr, vsr, vsr)
+DEF_HELPER_5(xvnmsubsp, void, env, vsr, vsr, vsr, vsr)
+DEF_HELPER_4(xvmaxsp, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(xvminsp, void, env, vsr, vsr, vsr)
+DEF_HELPER_FLAGS_4(xvcmpeqsp, TCG_CALL_NO_RWG, i32, env, vsr, vsr, vsr)
+DEF_HELPER_FLAGS_4(xvcmpgesp, TCG_CALL_NO_RWG, i32, env, vsr, vsr, vsr)
+DEF_HELPER_FLAGS_4(xvcmpgtsp, TCG_CALL_NO_RWG, i32, env, vsr, vsr, vsr)
+DEF_HELPER_FLAGS_4(xvcmpnesp, TCG_CALL_NO_RWG, i32, env, vsr, vsr, vsr)
+DEF_HELPER_3(xvcvspdp, void, env, vsr, vsr)
+DEF_HELPER_3(xvcvsphp, void, env, vsr, vsr)
+DEF_HELPER_3(xvcvhpsp, void, env, vsr, vsr)
+DEF_HELPER_3(xvcvspsxds, void, env, vsr, vsr)
+DEF_HELPER_3(xvcvspsxws, void, env, vsr, vsr)
+DEF_HELPER_3(xvcvspuxds, void, env, vsr, vsr)
+DEF_HELPER_3(xvcvspuxws, void, env, vsr, vsr)
+DEF_HELPER_3(xvcvsxdsp, void, env, vsr, vsr)
+DEF_HELPER_3(xvcvuxdsp, void, env, vsr, vsr)
+DEF_HELPER_3(xvcvsxwsp, void, env, vsr, vsr)
+DEF_HELPER_3(xvcvuxwsp, void, env, vsr, vsr)
DEF_HELPER_2(xvtstdcsp, void, env, i32)
DEF_HELPER_2(xvtstdcdp, void, env, i32)
-DEF_HELPER_2(xvrspi, void, env, i32)
-DEF_HELPER_2(xvrspic, void, env, i32)
-DEF_HELPER_2(xvrspim, void, env, i32)
-DEF_HELPER_2(xvrspip, void, env, i32)
-DEF_HELPER_2(xvrspiz, void, env, i32)
-DEF_HELPER_2(xxperm, void, env, i32)
-DEF_HELPER_2(xxpermr, void, env, i32)
-DEF_HELPER_4(xxextractuw, void, env, tl, tl, i32)
-DEF_HELPER_4(xxinsertw, void, env, tl, tl, i32)
-DEF_HELPER_2(xvxsigsp, void, env, i32)
+DEF_HELPER_3(xvrspi, void, env, vsr, vsr)
+DEF_HELPER_3(xvrspic, void, env, vsr, vsr)
+DEF_HELPER_3(xvrspim, void, env, vsr, vsr)
+DEF_HELPER_3(xvrspip, void, env, vsr, vsr)
+DEF_HELPER_3(xvrspiz, void, env, vsr, vsr)
+DEF_HELPER_4(xxperm, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(xxpermr, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(xxextractuw, void, env, vsr, vsr, i32)
+DEF_HELPER_4(xxinsertw, void, env, vsr, vsr, i32)
+DEF_HELPER_3(xvxsigsp, void, env, vsr, vsr)
DEF_HELPER_2(efscfsi, i32, env, i32)
DEF_HELPER_2(efscfui, i32, env, i32)
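
The dh_alias_vsr/dh_ctype_vsr/dh_is_signed_vsr trio added near the top of this helper.h hunk is what lets the rewritten DEF_HELPER lines name a typed "vsr" argument. Roughly, and assuming QEMU's usual helper-head.h macro expansion (not shown in this patch), dh_ctype_vsr supplies the C parameter type while dh_alias_vsr = ptr tells the TCG call machinery to pass the argument as a host pointer:

/* declaration as written in helper.h */
DEF_HELPER_4(xsadddp, void, env, vsr, vsr, vsr)

/* approximate prototype it produces for the C implementation */
void helper_xsadddp(CPUPPCState *env, ppc_vsr_t *xt,
                    ppc_vsr_t *xa, ppc_vsr_t *xb);
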
diff --git a/target/ppc/int_helper.c b/target/ppc/int_helper.c
index 8ce89f2ad9..5c07ef3e4d 100644
--- a/target/ppc/int_helper.c
+++ b/target/ppc/int_helper.c
@@ -1899,41 +1899,35 @@ VEXTRACT(uw, u32)
VEXTRACT(d, u64)
#undef VEXTRACT
-void helper_xxextractuw(CPUPPCState *env, target_ulong xtn,
- target_ulong xbn, uint32_t index)
+void helper_xxextractuw(CPUPPCState *env, ppc_vsr_t *xt,
+ ppc_vsr_t *xb, uint32_t index)
{
- ppc_vsr_t xt, xb;
+ ppc_vsr_t t = { };
size_t es = sizeof(uint32_t);
uint32_t ext_index;
int i;
- getVSR(xbn, &xb, env);
- memset(&xt, 0, sizeof(xt));
-
ext_index = index;
for (i = 0; i < es; i++, ext_index++) {
- xt.VsrB(8 - es + i) = xb.VsrB(ext_index % 16);
+ t.VsrB(8 - es + i) = xb->VsrB(ext_index % 16);
}
- putVSR(xtn, &xt, env);
+ *xt = t;
}
-void helper_xxinsertw(CPUPPCState *env, target_ulong xtn,
- target_ulong xbn, uint32_t index)
+void helper_xxinsertw(CPUPPCState *env, ppc_vsr_t *xt,
+ ppc_vsr_t *xb, uint32_t index)
{
- ppc_vsr_t xt, xb;
+ ppc_vsr_t t = *xt;
size_t es = sizeof(uint32_t);
int ins_index, i = 0;
- getVSR(xbn, &xb, env);
- getVSR(xtn, &xt, env);
-
ins_index = index;
for (i = 0; i < es && ins_index < 16; i++, ins_index++) {
- xt.VsrB(ins_index) = xb.VsrB(8 - es + i);
+ t.VsrB(ins_index) = xb->VsrB(8 - es + i);
}
- putVSR(xtn, &xt, env);
+ *xt = t;
}
#define VEXT_SIGNED(name, element, cast) \
diff --git a/target/ppc/internal.h b/target/ppc/internal.h
index fb6f64ed1e..d3d327e548 100644
--- a/target/ppc/internal.h
+++ b/target/ppc/internal.h
@@ -204,18 +204,6 @@ EXTRACT_HELPER(IMM8, 11, 8);
EXTRACT_HELPER(DCMX, 16, 7);
EXTRACT_HELPER_SPLIT_3(DCMX_XV, 5, 16, 0, 1, 2, 5, 1, 6, 6);
-static inline void getVSR(int n, ppc_vsr_t *vsr, CPUPPCState *env)
-{
- vsr->VsrD(0) = env->vsr[n].VsrD(0);
- vsr->VsrD(1) = env->vsr[n].VsrD(1);
-}
-
-static inline void putVSR(int n, ppc_vsr_t *vsr, CPUPPCState *env)
-{
- env->vsr[n].VsrD(0) = vsr->VsrD(0);
- env->vsr[n].VsrD(1) = vsr->VsrD(1);
-}
-
void helper_compute_fprf_float16(CPUPPCState *env, float16 arg);
void helper_compute_fprf_float32(CPUPPCState *env, float32 arg);
void helper_compute_fprf_float128(CPUPPCState *env, float128 arg);
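
With getVSR()/putVSR() removed, any helper that still decodes register numbers itself accesses env->vsr[] directly, as the VSX_TEST_DC hunk earlier in this patch does. A sketch of that replacement pattern (the helper name is illustrative, and the usual CPUPPCState/ppc_vsr_t definitions are assumed):

void helper_example(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t *xb = &env->vsr[xB(opcode)];   /* read the source in place */
    ppc_vsr_t t = { };                       /* build the result locally */

    /* ... compute t from *xb ... */

    env->vsr[xT(opcode)] = t;                /* one store replaces putVSR() */
}
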
diff --git a/target/ppc/kvm.c b/target/ppc/kvm.c
index 4b4989c0af..8a06d3171e 100644
--- a/target/ppc/kvm.c
+++ b/target/ppc/kvm.c
@@ -2650,7 +2650,7 @@ int kvmppc_define_rtas_kernel_token(uint32_t token, const char *function)
return -ENOENT;
}
- strncpy(args.name, function, sizeof(args.name));
+ strncpy(args.name, function, sizeof(args.name) - 1);
return kvm_vm_ioctl(kvm_state, KVM_PPC_RTAS_DEFINE_TOKEN, &args);
}
@@ -2944,3 +2944,12 @@ void kvmppc_set_reg_ppc_online(PowerPCCPU *cpu, unsigned int online)
kvm_set_one_reg(cs, KVM_REG_PPC_ONLINE, &online);
}
}
+
+void kvmppc_set_reg_tb_offset(PowerPCCPU *cpu, int64_t tb_offset)
+{
+ CPUState *cs = CPU(cpu);
+
+ if (kvm_enabled()) {
+ kvm_set_one_reg(cs, KVM_REG_PPC_TB_OFFSET, &tb_offset);
+ }
+}
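
The strncpy bound in kvmppc_define_rtas_kernel_token() now stops one byte short of the buffer, so the final byte of args.name is never overwritten; assuming the struct starts zero-initialized, the copied name stays NUL-terminated even when the RTAS function name is longer than the field. A standalone sketch of that idiom (illustrative function and buffer, not the KVM ABI struct):

#include <string.h>

static void copy_bounded(char *dst, size_t dst_size, const char *src)
{
    memset(dst, 0, dst_size);          /* buffer starts zeroed */
    strncpy(dst, src, dst_size - 1);   /* last byte is left as '\0' */
}

The other addition, kvmppc_set_reg_tb_offset(), forwards a timebase offset to KVM through the KVM_REG_PPC_TB_OFFSET one-reg interface when KVM is enabled.
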
diff --git a/target/ppc/kvm_ppc.h b/target/ppc/kvm_ppc.h
index 45776cad79..98bd7d5da6 100644
--- a/target/ppc/kvm_ppc.h
+++ b/target/ppc/kvm_ppc.h
@@ -80,6 +80,7 @@ bool kvmppc_pvr_workaround_required(PowerPCCPU *cpu);
bool kvmppc_hpt_needs_host_contiguous_pages(void);
void kvm_check_mmu(PowerPCCPU *cpu, Error **errp);
void kvmppc_set_reg_ppc_online(PowerPCCPU *cpu, unsigned int online);
+void kvmppc_set_reg_tb_offset(PowerPCCPU *cpu, int64_t tb_offset);
#else
@@ -206,6 +207,10 @@ static inline void kvmppc_set_reg_ppc_online(PowerPCCPU *cpu,
return;
}
+static inline void kvmppc_set_reg_tb_offset(PowerPCCPU *cpu, int64_t tb_offset)
+{
+}
+
#ifndef CONFIG_USER_ONLY
static inline bool kvmppc_spapr_use_multitce(void)
{
@@ -394,6 +399,11 @@ static inline int kvmppc_resize_hpt_commit(PowerPCCPU *cpu,
return -ENOSYS;
}
+static inline bool kvmppc_pvr_workaround_required(PowerPCCPU *cpu)
+{
+ return false;
+}
+
#endif
#ifndef CONFIG_KVM
diff --git a/target/ppc/machine.c b/target/ppc/machine.c
index 5ad7b40f45..e82f5de9db 100644
--- a/target/ppc/machine.c
+++ b/target/ppc/machine.c
@@ -378,11 +378,9 @@ static int cpu_post_load(void *opaque, int version_id)
* receive the PVR it expects as a workaround.
*
*/
-#if defined(CONFIG_KVM)
if (kvmppc_pvr_workaround_required(cpu)) {
env->spr[SPR_PVR] = env->spr_cb[SPR_PVR].default_value;
}
-#endif
env->lr = env->spr[SPR_LR];
env->ctr = env->spr[SPR_CTR];
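
machine.c can drop the #if defined(CONFIG_KVM) guard around the PVR workaround because kvm_ppc.h (previous hunk) now supplies a !CONFIG_KVM stub of kvmppc_pvr_workaround_required() that returns false, so the call compiles in every configuration and the dead branch is optimized away. Condensed, the header idiom this relies on looks like:

#ifdef CONFIG_KVM
bool kvmppc_pvr_workaround_required(PowerPCCPU *cpu);
#else
static inline bool kvmppc_pvr_workaround_required(PowerPCCPU *cpu)
{
    return false;   /* callers need no #ifdef of their own */
}
#endif
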
diff --git a/target/ppc/mem_helper.c b/target/ppc/mem_helper.c
index 5b0f9ee50d..6f4ffa3661 100644
--- a/target/ppc/mem_helper.c
+++ b/target/ppc/mem_helper.c
@@ -415,28 +415,28 @@ STVE(stvewx, cpu_stl_data_ra, bswap32, u32)
#define VSX_LXVL(name, lj) \
void helper_##name(CPUPPCState *env, target_ulong addr, \
- target_ulong xt_num, target_ulong rb) \
+ ppc_vsr_t *xt, target_ulong rb) \
{ \
- int i; \
- ppc_vsr_t xt; \
+ ppc_vsr_t t; \
uint64_t nb = GET_NB(rb); \
+ int i; \
\
- xt.s128 = int128_zero(); \
+ t.s128 = int128_zero(); \
if (nb) { \
nb = (nb >= 16) ? 16 : nb; \
if (msr_le && !lj) { \
for (i = 16; i > 16 - nb; i--) { \
- xt.VsrB(i - 1) = cpu_ldub_data_ra(env, addr, GETPC()); \
+ t.VsrB(i - 1) = cpu_ldub_data_ra(env, addr, GETPC()); \
addr = addr_add(env, addr, 1); \
} \
} else { \
for (i = 0; i < nb; i++) { \
- xt.VsrB(i) = cpu_ldub_data_ra(env, addr, GETPC()); \
+ t.VsrB(i) = cpu_ldub_data_ra(env, addr, GETPC()); \
addr = addr_add(env, addr, 1); \
} \
} \
} \
- putVSR(xt_num, &xt, env); \
+ *xt = t; \
}
VSX_LXVL(lxvl, 0)
@@ -445,25 +445,24 @@ VSX_LXVL(lxvll, 1)
#define VSX_STXVL(name, lj) \
void helper_##name(CPUPPCState *env, target_ulong addr, \
- target_ulong xt_num, target_ulong rb) \
+ ppc_vsr_t *xt, target_ulong rb) \
{ \
- int i; \
- ppc_vsr_t xt; \
target_ulong nb = GET_NB(rb); \
+ int i; \
\
if (!nb) { \
return; \
} \
- getVSR(xt_num, &xt, env); \
+ \
nb = (nb >= 16) ? 16 : nb; \
if (msr_le && !lj) { \
for (i = 16; i > 16 - nb; i--) { \
- cpu_stb_data_ra(env, addr, xt.VsrB(i - 1), GETPC()); \
+ cpu_stb_data_ra(env, addr, xt->VsrB(i - 1), GETPC()); \
addr = addr_add(env, addr, 1); \
} \
} else { \
for (i = 0; i < nb; i++) { \
- cpu_stb_data_ra(env, addr, xt.VsrB(i), GETPC()); \
+ cpu_stb_data_ra(env, addr, xt->VsrB(i), GETPC()); \
addr = addr_add(env, addr, 1); \
} \
} \
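
Because lxvl/lxvll/stxvl/stxvll now receive the VSR by pointer rather than by register number, the translator has to hand them an address into env; the VSX_VECTOR_LOAD_STORE_LENGTH rewrite in the vsx-impl.inc.c hunk below does exactly that. Condensed from that hunk, the code emitted for lxvl becomes:

EA = tcg_temp_new();
xt = gen_vsr_ptr(xT(ctx->opcode));   /* host pointer into env's VSR file */
gen_set_access_type(ctx, ACCESS_INT);
gen_addr_register(ctx, EA);
gen_helper_lxvl(cpu_env, EA, xt, cpu_gpr[rB(ctx->opcode)]);
tcg_temp_free(EA);
tcg_temp_free_ptr(xt);
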
diff --git a/target/ppc/translate/vsx-impl.inc.c b/target/ppc/translate/vsx-impl.inc.c
index e9b7562f84..3922686ad6 100644
--- a/target/ppc/translate/vsx-impl.inc.c
+++ b/target/ppc/translate/vsx-impl.inc.c
@@ -20,6 +20,13 @@ static inline void set_cpu_vsrl(int n, TCGv_i64 src)
tcg_gen_st_i64(src, cpu_env, vsr64_offset(n, false));
}
+static inline TCGv_ptr gen_vsr_ptr(int reg)
+{
+ TCGv_ptr r = tcg_temp_new_ptr();
+ tcg_gen_addi_ptr(r, cpu_env, vsr_full_offset(reg));
+ return r;
+}
+
#define VSX_LOAD_SCALAR(name, operation) \
static void gen_##name(DisasContext *ctx) \
{ \
@@ -337,29 +344,30 @@ VSX_VECTOR_STORE(stxv, st_i64, 0)
VSX_VECTOR_STORE(stxvx, st_i64, 1)
#ifdef TARGET_PPC64
-#define VSX_VECTOR_LOAD_STORE_LENGTH(name) \
-static void gen_##name(DisasContext *ctx) \
-{ \
- TCGv EA, xt; \
- \
- if (xT(ctx->opcode) < 32) { \
- if (unlikely(!ctx->vsx_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_VSXU); \
- return; \
- } \
- } else { \
- if (unlikely(!ctx->altivec_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_VPU); \
- return; \
- } \
- } \
- EA = tcg_temp_new(); \
- xt = tcg_const_tl(xT(ctx->opcode)); \
- gen_set_access_type(ctx, ACCESS_INT); \
- gen_addr_register(ctx, EA); \
- gen_helper_##name(cpu_env, EA, xt, cpu_gpr[rB(ctx->opcode)]); \
- tcg_temp_free(EA); \
- tcg_temp_free(xt); \
+#define VSX_VECTOR_LOAD_STORE_LENGTH(name) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv EA; \
+ TCGv_ptr xt; \
+ \
+ if (xT(ctx->opcode) < 32) { \
+ if (unlikely(!ctx->vsx_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VSXU); \
+ return; \
+ } \
+ } else { \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ } \
+ EA = tcg_temp_new(); \
+ xt = gen_vsr_ptr(xT(ctx->opcode)); \
+ gen_set_access_type(ctx, ACCESS_INT); \
+ gen_addr_register(ctx, EA); \
+ gen_helper_##name(cpu_env, EA, xt, cpu_gpr[rB(ctx->opcode)]); \
+ tcg_temp_free(EA); \
+ tcg_temp_free_ptr(xt); \
}
VSX_VECTOR_LOAD_STORE_LENGTH(lxvl)
@@ -958,6 +966,57 @@ VSX_VECTOR_MOVE(xvnabssp, OP_NABS, SGN_MASK_SP)
VSX_VECTOR_MOVE(xvnegsp, OP_NEG, SGN_MASK_SP)
VSX_VECTOR_MOVE(xvcpsgnsp, OP_CPSGN, SGN_MASK_SP)
+#define VSX_CMP(name, op1, op2, inval, type) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i32 ignored; \
+ TCGv_ptr xt, xa, xb; \
+ if (unlikely(!ctx->vsx_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VSXU); \
+ return; \
+ } \
+ xt = gen_vsr_ptr(xT(ctx->opcode)); \
+ xa = gen_vsr_ptr(xA(ctx->opcode)); \
+ xb = gen_vsr_ptr(xB(ctx->opcode)); \
+ if ((ctx->opcode >> (31 - 21)) & 1) { \
+ gen_helper_##name(cpu_crf[6], cpu_env, xt, xa, xb); \
+ } else { \
+ ignored = tcg_temp_new_i32(); \
+ gen_helper_##name(ignored, cpu_env, xt, xa, xb); \
+ tcg_temp_free_i32(ignored); \
+ } \
+ gen_helper_float_check_status(cpu_env); \
+ tcg_temp_free_ptr(xt); \
+ tcg_temp_free_ptr(xa); \
+ tcg_temp_free_ptr(xb); \
+}
+
+VSX_CMP(xvcmpeqdp, 0x0C, 0x0C, 0, PPC2_VSX)
+VSX_CMP(xvcmpgedp, 0x0C, 0x0E, 0, PPC2_VSX)
+VSX_CMP(xvcmpgtdp, 0x0C, 0x0D, 0, PPC2_VSX)
+VSX_CMP(xvcmpnedp, 0x0C, 0x0F, 0, PPC2_ISA300)
+VSX_CMP(xvcmpeqsp, 0x0C, 0x08, 0, PPC2_VSX)
+VSX_CMP(xvcmpgesp, 0x0C, 0x0A, 0, PPC2_VSX)
+VSX_CMP(xvcmpgtsp, 0x0C, 0x09, 0, PPC2_VSX)
+VSX_CMP(xvcmpnesp, 0x0C, 0x0B, 0, PPC2_VSX)
+
+static void gen_xscvqpdp(DisasContext *ctx)
+{
+ TCGv_i32 opc;
+ TCGv_ptr xt, xb;
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ opc = tcg_const_i32(ctx->opcode);
+ xt = gen_vsr_ptr(xT(ctx->opcode));
+ xb = gen_vsr_ptr(xB(ctx->opcode));
+ gen_helper_xscvqpdp(cpu_env, opc, xt, xb);
+ tcg_temp_free_i32(opc);
+ tcg_temp_free_ptr(xt);
+ tcg_temp_free_ptr(xb);
+}
+
#define GEN_VSX_HELPER_2(name, op1, op2, inval, type) \
static void gen_##name(DisasContext *ctx) \
{ \
@@ -971,6 +1030,128 @@ static void gen_##name(DisasContext *ctx) \
tcg_temp_free_i32(opc); \
}
+#define GEN_VSX_HELPER_X3(name, op1, op2, inval, type) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_ptr xt, xa, xb; \
+ if (unlikely(!ctx->vsx_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VSXU); \
+ return; \
+ } \
+ xt = gen_vsr_ptr(xT(ctx->opcode)); \
+ xa = gen_vsr_ptr(xA(ctx->opcode)); \
+ xb = gen_vsr_ptr(xB(ctx->opcode)); \
+ gen_helper_##name(cpu_env, xt, xa, xb); \
+ tcg_temp_free_ptr(xt); \
+ tcg_temp_free_ptr(xa); \
+ tcg_temp_free_ptr(xb); \
+}
+
+#define GEN_VSX_HELPER_X2(name, op1, op2, inval, type) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_ptr xt, xb; \
+ if (unlikely(!ctx->vsx_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VSXU); \
+ return; \
+ } \
+ xt = gen_vsr_ptr(xT(ctx->opcode)); \
+ xb = gen_vsr_ptr(xB(ctx->opcode)); \
+ gen_helper_##name(cpu_env, xt, xb); \
+ tcg_temp_free_ptr(xt); \
+ tcg_temp_free_ptr(xb); \
+}
+
+#define GEN_VSX_HELPER_X2_AB(name, op1, op2, inval, type) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i32 opc; \
+ TCGv_ptr xa, xb; \
+ if (unlikely(!ctx->vsx_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VSXU); \
+ return; \
+ } \
+ opc = tcg_const_i32(ctx->opcode); \
+ xa = gen_vsr_ptr(xA(ctx->opcode)); \
+ xb = gen_vsr_ptr(xB(ctx->opcode)); \
+ gen_helper_##name(cpu_env, opc, xa, xb); \
+ tcg_temp_free_i32(opc); \
+ tcg_temp_free_ptr(xa); \
+ tcg_temp_free_ptr(xb); \
+}
+
+#define GEN_VSX_HELPER_X1(name, op1, op2, inval, type) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i32 opc; \
+ TCGv_ptr xb; \
+ if (unlikely(!ctx->vsx_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VSXU); \
+ return; \
+ } \
+ opc = tcg_const_i32(ctx->opcode); \
+ xb = gen_vsr_ptr(xB(ctx->opcode)); \
+ gen_helper_##name(cpu_env, opc, xb); \
+ tcg_temp_free_i32(opc); \
+ tcg_temp_free_ptr(xb); \
+}
+
+#define GEN_VSX_HELPER_R3(name, op1, op2, inval, type) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i32 opc; \
+ TCGv_ptr xt, xa, xb; \
+ if (unlikely(!ctx->vsx_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VSXU); \
+ return; \
+ } \
+ opc = tcg_const_i32(ctx->opcode); \
+ xt = gen_vsr_ptr(rD(ctx->opcode) + 32); \
+ xa = gen_vsr_ptr(rA(ctx->opcode) + 32); \
+ xb = gen_vsr_ptr(rB(ctx->opcode) + 32); \
+ gen_helper_##name(cpu_env, opc, xt, xa, xb); \
+ tcg_temp_free_i32(opc); \
+ tcg_temp_free_ptr(xt); \
+ tcg_temp_free_ptr(xa); \
+ tcg_temp_free_ptr(xb); \
+}
+
+#define GEN_VSX_HELPER_R2(name, op1, op2, inval, type) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i32 opc; \
+ TCGv_ptr xt, xb; \
+ if (unlikely(!ctx->vsx_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VSXU); \
+ return; \
+ } \
+ opc = tcg_const_i32(ctx->opcode); \
+ xt = gen_vsr_ptr(rD(ctx->opcode) + 32); \
+ xb = gen_vsr_ptr(rB(ctx->opcode) + 32); \
+ gen_helper_##name(cpu_env, opc, xt, xb); \
+ tcg_temp_free_i32(opc); \
+ tcg_temp_free_ptr(xt); \
+ tcg_temp_free_ptr(xb); \
+}
+
+#define GEN_VSX_HELPER_R2_AB(name, op1, op2, inval, type) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i32 opc; \
+ TCGv_ptr xa, xb; \
+ if (unlikely(!ctx->vsx_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VSXU); \
+ return; \
+ } \
+ opc = tcg_const_i32(ctx->opcode); \
+ xa = gen_vsr_ptr(rA(ctx->opcode) + 32); \
+ xb = gen_vsr_ptr(rB(ctx->opcode) + 32); \
+ gen_helper_##name(cpu_env, opc, xa, xb); \
+ tcg_temp_free_i32(opc); \
+ tcg_temp_free_ptr(xa); \
+ tcg_temp_free_ptr(xb); \
+}
+
#define GEN_VSX_HELPER_XT_XB_ENV(name, op1, op2, inval, type) \
static void gen_##name(DisasContext *ctx) \
{ \
@@ -989,176 +1170,180 @@ static void gen_##name(DisasContext *ctx) \
tcg_temp_free_i64(t1); \
}
-GEN_VSX_HELPER_2(xsadddp, 0x00, 0x04, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xsaddqp, 0x04, 0x00, 0, PPC2_ISA300)
-GEN_VSX_HELPER_2(xssubdp, 0x00, 0x05, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xsmuldp, 0x00, 0x06, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xsmulqp, 0x04, 0x01, 0, PPC2_ISA300)
-GEN_VSX_HELPER_2(xsdivdp, 0x00, 0x07, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xsdivqp, 0x04, 0x11, 0, PPC2_ISA300)
-GEN_VSX_HELPER_2(xsredp, 0x14, 0x05, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xssqrtdp, 0x16, 0x04, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xsrsqrtedp, 0x14, 0x04, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xstdivdp, 0x14, 0x07, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xstsqrtdp, 0x14, 0x06, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xsmaddadp, 0x04, 0x04, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xsmaddmdp, 0x04, 0x05, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xsmsubadp, 0x04, 0x06, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xsmsubmdp, 0x04, 0x07, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xsnmaddadp, 0x04, 0x14, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xsnmaddmdp, 0x04, 0x15, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xsnmsubadp, 0x04, 0x16, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xsnmsubmdp, 0x04, 0x17, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xscmpeqdp, 0x0C, 0x00, 0, PPC2_ISA300)
-GEN_VSX_HELPER_2(xscmpgtdp, 0x0C, 0x01, 0, PPC2_ISA300)
-GEN_VSX_HELPER_2(xscmpgedp, 0x0C, 0x02, 0, PPC2_ISA300)
-GEN_VSX_HELPER_2(xscmpnedp, 0x0C, 0x03, 0, PPC2_ISA300)
-GEN_VSX_HELPER_2(xscmpexpdp, 0x0C, 0x07, 0, PPC2_ISA300)
-GEN_VSX_HELPER_2(xscmpexpqp, 0x04, 0x05, 0, PPC2_ISA300)
-GEN_VSX_HELPER_2(xscmpodp, 0x0C, 0x05, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xscmpudp, 0x0C, 0x04, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xscmpoqp, 0x04, 0x04, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xscmpuqp, 0x04, 0x14, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xsmaxdp, 0x00, 0x14, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xsmindp, 0x00, 0x15, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xsmaxcdp, 0x00, 0x10, 0, PPC2_ISA300)
-GEN_VSX_HELPER_2(xsmincdp, 0x00, 0x11, 0, PPC2_ISA300)
-GEN_VSX_HELPER_2(xsmaxjdp, 0x00, 0x12, 0, PPC2_ISA300)
-GEN_VSX_HELPER_2(xsminjdp, 0x00, 0x12, 0, PPC2_ISA300)
-GEN_VSX_HELPER_2(xscvdphp, 0x16, 0x15, 0x11, PPC2_ISA300)
-GEN_VSX_HELPER_2(xscvdpsp, 0x12, 0x10, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xscvdpqp, 0x04, 0x1A, 0x16, PPC2_ISA300)
+GEN_VSX_HELPER_X3(xsadddp, 0x00, 0x04, 0, PPC2_VSX)
+GEN_VSX_HELPER_R3(xsaddqp, 0x04, 0x00, 0, PPC2_ISA300)
+GEN_VSX_HELPER_X3(xssubdp, 0x00, 0x05, 0, PPC2_VSX)
+GEN_VSX_HELPER_X3(xsmuldp, 0x00, 0x06, 0, PPC2_VSX)
+GEN_VSX_HELPER_R3(xsmulqp, 0x04, 0x01, 0, PPC2_ISA300)
+GEN_VSX_HELPER_X3(xsdivdp, 0x00, 0x07, 0, PPC2_VSX)
+GEN_VSX_HELPER_R3(xsdivqp, 0x04, 0x11, 0, PPC2_ISA300)
+GEN_VSX_HELPER_X2(xsredp, 0x14, 0x05, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xssqrtdp, 0x16, 0x04, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xsrsqrtedp, 0x14, 0x04, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2_AB(xstdivdp, 0x14, 0x07, 0, PPC2_VSX)
+GEN_VSX_HELPER_X1(xstsqrtdp, 0x14, 0x06, 0, PPC2_VSX)
+GEN_VSX_HELPER_X3(xscmpeqdp, 0x0C, 0x00, 0, PPC2_ISA300)
+GEN_VSX_HELPER_X3(xscmpgtdp, 0x0C, 0x01, 0, PPC2_ISA300)
+GEN_VSX_HELPER_X3(xscmpgedp, 0x0C, 0x02, 0, PPC2_ISA300)
+GEN_VSX_HELPER_X3(xscmpnedp, 0x0C, 0x03, 0, PPC2_ISA300)
+GEN_VSX_HELPER_X2_AB(xscmpexpdp, 0x0C, 0x07, 0, PPC2_ISA300)
+GEN_VSX_HELPER_R2_AB(xscmpexpqp, 0x04, 0x05, 0, PPC2_ISA300)
+GEN_VSX_HELPER_X2_AB(xscmpodp, 0x0C, 0x05, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2_AB(xscmpudp, 0x0C, 0x04, 0, PPC2_VSX)
+GEN_VSX_HELPER_R2_AB(xscmpoqp, 0x04, 0x04, 0, PPC2_VSX)
+GEN_VSX_HELPER_R2_AB(xscmpuqp, 0x04, 0x14, 0, PPC2_VSX)
+GEN_VSX_HELPER_X3(xsmaxdp, 0x00, 0x14, 0, PPC2_VSX)
+GEN_VSX_HELPER_X3(xsmindp, 0x00, 0x15, 0, PPC2_VSX)
+GEN_VSX_HELPER_R3(xsmaxcdp, 0x00, 0x10, 0, PPC2_ISA300)
+GEN_VSX_HELPER_R3(xsmincdp, 0x00, 0x11, 0, PPC2_ISA300)
+GEN_VSX_HELPER_R3(xsmaxjdp, 0x00, 0x12, 0, PPC2_ISA300)
+GEN_VSX_HELPER_R3(xsminjdp, 0x00, 0x12, 0, PPC2_ISA300)
+GEN_VSX_HELPER_X2(xscvdphp, 0x16, 0x15, 0x11, PPC2_ISA300)
+GEN_VSX_HELPER_X2(xscvdpsp, 0x12, 0x10, 0, PPC2_VSX)
+GEN_VSX_HELPER_R2(xscvdpqp, 0x04, 0x1A, 0x16, PPC2_ISA300)
GEN_VSX_HELPER_XT_XB_ENV(xscvdpspn, 0x16, 0x10, 0, PPC2_VSX207)
-GEN_VSX_HELPER_2(xscvqpdp, 0x04, 0x1A, 0x14, PPC2_ISA300)
-GEN_VSX_HELPER_2(xscvqpsdz, 0x04, 0x1A, 0x19, PPC2_ISA300)
-GEN_VSX_HELPER_2(xscvqpswz, 0x04, 0x1A, 0x09, PPC2_ISA300)
-GEN_VSX_HELPER_2(xscvqpudz, 0x04, 0x1A, 0x11, PPC2_ISA300)
-GEN_VSX_HELPER_2(xscvqpuwz, 0x04, 0x1A, 0x01, PPC2_ISA300)
-GEN_VSX_HELPER_2(xscvhpdp, 0x16, 0x15, 0x10, PPC2_ISA300)
-GEN_VSX_HELPER_2(xscvsdqp, 0x04, 0x1A, 0x0A, PPC2_ISA300)
-GEN_VSX_HELPER_2(xscvspdp, 0x12, 0x14, 0, PPC2_VSX)
+GEN_VSX_HELPER_R2(xscvqpsdz, 0x04, 0x1A, 0x19, PPC2_ISA300)
+GEN_VSX_HELPER_R2(xscvqpswz, 0x04, 0x1A, 0x09, PPC2_ISA300)
+GEN_VSX_HELPER_R2(xscvqpudz, 0x04, 0x1A, 0x11, PPC2_ISA300)
+GEN_VSX_HELPER_R2(xscvqpuwz, 0x04, 0x1A, 0x01, PPC2_ISA300)
+GEN_VSX_HELPER_X2(xscvhpdp, 0x16, 0x15, 0x10, PPC2_ISA300)
+GEN_VSX_HELPER_R2(xscvsdqp, 0x04, 0x1A, 0x0A, PPC2_ISA300)
+GEN_VSX_HELPER_X2(xscvspdp, 0x12, 0x14, 0, PPC2_VSX)
GEN_VSX_HELPER_XT_XB_ENV(xscvspdpn, 0x16, 0x14, 0, PPC2_VSX207)
-GEN_VSX_HELPER_2(xscvdpsxds, 0x10, 0x15, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xscvdpsxws, 0x10, 0x05, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xscvdpuxds, 0x10, 0x14, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xscvdpuxws, 0x10, 0x04, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xscvsxddp, 0x10, 0x17, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xscvudqp, 0x04, 0x1A, 0x02, PPC2_ISA300)
-GEN_VSX_HELPER_2(xscvuxddp, 0x10, 0x16, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xsrdpi, 0x12, 0x04, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xsrdpic, 0x16, 0x06, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xsrdpim, 0x12, 0x07, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xsrdpip, 0x12, 0x06, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xsrdpiz, 0x12, 0x05, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xscvdpsxds, 0x10, 0x15, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xscvdpsxws, 0x10, 0x05, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xscvdpuxds, 0x10, 0x14, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xscvdpuxws, 0x10, 0x04, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xscvsxddp, 0x10, 0x17, 0, PPC2_VSX)
+GEN_VSX_HELPER_R2(xscvudqp, 0x04, 0x1A, 0x02, PPC2_ISA300)
+GEN_VSX_HELPER_X2(xscvuxddp, 0x10, 0x16, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xsrdpi, 0x12, 0x04, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xsrdpic, 0x16, 0x06, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xsrdpim, 0x12, 0x07, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xsrdpip, 0x12, 0x06, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xsrdpiz, 0x12, 0x05, 0, PPC2_VSX)
GEN_VSX_HELPER_XT_XB_ENV(xsrsp, 0x12, 0x11, 0, PPC2_VSX207)
-
-GEN_VSX_HELPER_2(xsrqpi, 0x05, 0x00, 0, PPC2_ISA300)
-GEN_VSX_HELPER_2(xsrqpxp, 0x05, 0x01, 0, PPC2_ISA300)
-GEN_VSX_HELPER_2(xssqrtqp, 0x04, 0x19, 0x1B, PPC2_ISA300)
-GEN_VSX_HELPER_2(xssubqp, 0x04, 0x10, 0, PPC2_ISA300)
-
-GEN_VSX_HELPER_2(xsaddsp, 0x00, 0x00, 0, PPC2_VSX207)
-GEN_VSX_HELPER_2(xssubsp, 0x00, 0x01, 0, PPC2_VSX207)
-GEN_VSX_HELPER_2(xsmulsp, 0x00, 0x02, 0, PPC2_VSX207)
-GEN_VSX_HELPER_2(xsdivsp, 0x00, 0x03, 0, PPC2_VSX207)
-GEN_VSX_HELPER_2(xsresp, 0x14, 0x01, 0, PPC2_VSX207)
-GEN_VSX_HELPER_2(xssqrtsp, 0x16, 0x00, 0, PPC2_VSX207)
-GEN_VSX_HELPER_2(xsrsqrtesp, 0x14, 0x00, 0, PPC2_VSX207)
-GEN_VSX_HELPER_2(xsmaddasp, 0x04, 0x00, 0, PPC2_VSX207)
-GEN_VSX_HELPER_2(xsmaddmsp, 0x04, 0x01, 0, PPC2_VSX207)
-GEN_VSX_HELPER_2(xsmsubasp, 0x04, 0x02, 0, PPC2_VSX207)
-GEN_VSX_HELPER_2(xsmsubmsp, 0x04, 0x03, 0, PPC2_VSX207)
-GEN_VSX_HELPER_2(xsnmaddasp, 0x04, 0x10, 0, PPC2_VSX207)
-GEN_VSX_HELPER_2(xsnmaddmsp, 0x04, 0x11, 0, PPC2_VSX207)
-GEN_VSX_HELPER_2(xsnmsubasp, 0x04, 0x12, 0, PPC2_VSX207)
-GEN_VSX_HELPER_2(xsnmsubmsp, 0x04, 0x13, 0, PPC2_VSX207)
-GEN_VSX_HELPER_2(xscvsxdsp, 0x10, 0x13, 0, PPC2_VSX207)
-GEN_VSX_HELPER_2(xscvuxdsp, 0x10, 0x12, 0, PPC2_VSX207)
-GEN_VSX_HELPER_2(xststdcsp, 0x14, 0x12, 0, PPC2_ISA300)
+GEN_VSX_HELPER_R2(xsrqpi, 0x05, 0x00, 0, PPC2_ISA300)
+GEN_VSX_HELPER_R2(xsrqpxp, 0x05, 0x01, 0, PPC2_ISA300)
+GEN_VSX_HELPER_R2(xssqrtqp, 0x04, 0x19, 0x1B, PPC2_ISA300)
+GEN_VSX_HELPER_R3(xssubqp, 0x04, 0x10, 0, PPC2_ISA300)
+GEN_VSX_HELPER_X3(xsaddsp, 0x00, 0x00, 0, PPC2_VSX207)
+GEN_VSX_HELPER_X3(xssubsp, 0x00, 0x01, 0, PPC2_VSX207)
+GEN_VSX_HELPER_X3(xsmulsp, 0x00, 0x02, 0, PPC2_VSX207)
+GEN_VSX_HELPER_X3(xsdivsp, 0x00, 0x03, 0, PPC2_VSX207)
+GEN_VSX_HELPER_X2(xsresp, 0x14, 0x01, 0, PPC2_VSX207)
+GEN_VSX_HELPER_X2(xssqrtsp, 0x16, 0x00, 0, PPC2_VSX207)
+GEN_VSX_HELPER_X2(xsrsqrtesp, 0x14, 0x00, 0, PPC2_VSX207)
+GEN_VSX_HELPER_X2(xscvsxdsp, 0x10, 0x13, 0, PPC2_VSX207)
+GEN_VSX_HELPER_X2(xscvuxdsp, 0x10, 0x12, 0, PPC2_VSX207)
+GEN_VSX_HELPER_X1(xststdcsp, 0x14, 0x12, 0, PPC2_ISA300)
GEN_VSX_HELPER_2(xststdcdp, 0x14, 0x16, 0, PPC2_ISA300)
GEN_VSX_HELPER_2(xststdcqp, 0x04, 0x16, 0, PPC2_ISA300)
-GEN_VSX_HELPER_2(xvadddp, 0x00, 0x0C, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvsubdp, 0x00, 0x0D, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvmuldp, 0x00, 0x0E, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvdivdp, 0x00, 0x0F, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvredp, 0x14, 0x0D, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvsqrtdp, 0x16, 0x0C, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvrsqrtedp, 0x14, 0x0C, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvtdivdp, 0x14, 0x0F, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvtsqrtdp, 0x14, 0x0E, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvmaddadp, 0x04, 0x0C, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvmaddmdp, 0x04, 0x0D, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvmsubadp, 0x04, 0x0E, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvmsubmdp, 0x04, 0x0F, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvnmaddadp, 0x04, 0x1C, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvnmaddmdp, 0x04, 0x1D, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvnmsubadp, 0x04, 0x1E, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvnmsubmdp, 0x04, 0x1F, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvmaxdp, 0x00, 0x1C, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvmindp, 0x00, 0x1D, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcmpeqdp, 0x0C, 0x0C, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcmpgtdp, 0x0C, 0x0D, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcmpgedp, 0x0C, 0x0E, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcmpnedp, 0x0C, 0x0F, 0, PPC2_ISA300)
-GEN_VSX_HELPER_2(xvcvdpsp, 0x12, 0x18, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcvdpsxds, 0x10, 0x1D, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcvdpsxws, 0x10, 0x0D, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcvdpuxds, 0x10, 0x1C, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcvdpuxws, 0x10, 0x0C, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcvsxddp, 0x10, 0x1F, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcvuxddp, 0x10, 0x1E, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcvsxwdp, 0x10, 0x0F, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcvuxwdp, 0x10, 0x0E, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvrdpi, 0x12, 0x0C, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvrdpic, 0x16, 0x0E, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvrdpim, 0x12, 0x0F, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvrdpip, 0x12, 0x0E, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvrdpiz, 0x12, 0x0D, 0, PPC2_VSX)
-
-GEN_VSX_HELPER_2(xvaddsp, 0x00, 0x08, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvsubsp, 0x00, 0x09, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvmulsp, 0x00, 0x0A, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvdivsp, 0x00, 0x0B, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvresp, 0x14, 0x09, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvsqrtsp, 0x16, 0x08, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvrsqrtesp, 0x14, 0x08, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvtdivsp, 0x14, 0x0B, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvtsqrtsp, 0x14, 0x0A, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvmaddasp, 0x04, 0x08, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvmaddmsp, 0x04, 0x09, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvmsubasp, 0x04, 0x0A, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvmsubmsp, 0x04, 0x0B, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvnmaddasp, 0x04, 0x18, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvnmaddmsp, 0x04, 0x19, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvnmsubasp, 0x04, 0x1A, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvnmsubmsp, 0x04, 0x1B, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvmaxsp, 0x00, 0x18, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvminsp, 0x00, 0x19, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcmpeqsp, 0x0C, 0x08, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcmpgtsp, 0x0C, 0x09, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcmpgesp, 0x0C, 0x0A, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcmpnesp, 0x0C, 0x0B, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcvspdp, 0x12, 0x1C, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcvhpsp, 0x16, 0x1D, 0x18, PPC2_ISA300)
-GEN_VSX_HELPER_2(xvcvsphp, 0x16, 0x1D, 0x19, PPC2_ISA300)
-GEN_VSX_HELPER_2(xvcvspsxds, 0x10, 0x19, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcvspsxws, 0x10, 0x09, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcvspuxds, 0x10, 0x18, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcvspuxws, 0x10, 0x08, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcvsxdsp, 0x10, 0x1B, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcvuxdsp, 0x10, 0x1A, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcvsxwsp, 0x10, 0x0B, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcvuxwsp, 0x10, 0x0A, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvrspi, 0x12, 0x08, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvrspic, 0x16, 0x0A, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvrspim, 0x12, 0x0B, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvrspip, 0x12, 0x0A, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvrspiz, 0x12, 0x09, 0, PPC2_VSX)
+GEN_VSX_HELPER_X3(xvadddp, 0x00, 0x0C, 0, PPC2_VSX)
+GEN_VSX_HELPER_X3(xvsubdp, 0x00, 0x0D, 0, PPC2_VSX)
+GEN_VSX_HELPER_X3(xvmuldp, 0x00, 0x0E, 0, PPC2_VSX)
+GEN_VSX_HELPER_X3(xvdivdp, 0x00, 0x0F, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvredp, 0x14, 0x0D, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvsqrtdp, 0x16, 0x0C, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvrsqrtedp, 0x14, 0x0C, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2_AB(xvtdivdp, 0x14, 0x0F, 0, PPC2_VSX)
+GEN_VSX_HELPER_X1(xvtsqrtdp, 0x14, 0x0E, 0, PPC2_VSX)
+GEN_VSX_HELPER_X3(xvmaxdp, 0x00, 0x1C, 0, PPC2_VSX)
+GEN_VSX_HELPER_X3(xvmindp, 0x00, 0x1D, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvcvdpsp, 0x12, 0x18, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvcvdpsxds, 0x10, 0x1D, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvcvdpsxws, 0x10, 0x0D, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvcvdpuxds, 0x10, 0x1C, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvcvdpuxws, 0x10, 0x0C, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvcvsxddp, 0x10, 0x1F, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvcvuxddp, 0x10, 0x1E, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvcvsxwdp, 0x10, 0x0F, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvcvuxwdp, 0x10, 0x0E, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvrdpi, 0x12, 0x0C, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvrdpic, 0x16, 0x0E, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvrdpim, 0x12, 0x0F, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvrdpip, 0x12, 0x0E, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvrdpiz, 0x12, 0x0D, 0, PPC2_VSX)
+
+GEN_VSX_HELPER_X3(xvaddsp, 0x00, 0x08, 0, PPC2_VSX)
+GEN_VSX_HELPER_X3(xvsubsp, 0x00, 0x09, 0, PPC2_VSX)
+GEN_VSX_HELPER_X3(xvmulsp, 0x00, 0x0A, 0, PPC2_VSX)
+GEN_VSX_HELPER_X3(xvdivsp, 0x00, 0x0B, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvresp, 0x14, 0x09, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvsqrtsp, 0x16, 0x08, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvrsqrtesp, 0x14, 0x08, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2_AB(xvtdivsp, 0x14, 0x0B, 0, PPC2_VSX)
+GEN_VSX_HELPER_X1(xvtsqrtsp, 0x14, 0x0A, 0, PPC2_VSX)
+GEN_VSX_HELPER_X3(xvmaxsp, 0x00, 0x18, 0, PPC2_VSX)
+GEN_VSX_HELPER_X3(xvminsp, 0x00, 0x19, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvcvspdp, 0x12, 0x1C, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvcvhpsp, 0x16, 0x1D, 0x18, PPC2_ISA300)
+GEN_VSX_HELPER_X2(xvcvsphp, 0x16, 0x1D, 0x19, PPC2_ISA300)
+GEN_VSX_HELPER_X2(xvcvspsxds, 0x10, 0x19, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvcvspsxws, 0x10, 0x09, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvcvspuxds, 0x10, 0x18, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvcvspuxws, 0x10, 0x08, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvcvsxdsp, 0x10, 0x1B, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvcvuxdsp, 0x10, 0x1A, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvcvsxwsp, 0x10, 0x0B, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvcvuxwsp, 0x10, 0x0A, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvrspi, 0x12, 0x08, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvrspic, 0x16, 0x0A, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvrspim, 0x12, 0x0B, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvrspip, 0x12, 0x0A, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvrspiz, 0x12, 0x09, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvtstdcsp, 0x14, 0x1A, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvtstdcdp, 0x14, 0x1E, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xxperm, 0x08, 0x03, 0, PPC2_ISA300)
-GEN_VSX_HELPER_2(xxpermr, 0x08, 0x07, 0, PPC2_ISA300)
+GEN_VSX_HELPER_X3(xxperm, 0x08, 0x03, 0, PPC2_ISA300)
+GEN_VSX_HELPER_X3(xxpermr, 0x08, 0x07, 0, PPC2_ISA300)
+
+#define GEN_VSX_HELPER_VSX_MADD(name, op1, aop, mop, inval, type) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_ptr xt, xa, b, c; \
+ if (unlikely(!ctx->vsx_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VSXU); \
+ return; \
+ } \
+ xt = gen_vsr_ptr(xT(ctx->opcode)); \
+ xa = gen_vsr_ptr(xA(ctx->opcode)); \
+ if (ctx->opcode & PPC_BIT(25)) { \
+ /* \
+ * AxT + B \
+ */ \
+ b = gen_vsr_ptr(xT(ctx->opcode)); \
+ c = gen_vsr_ptr(xB(ctx->opcode)); \
+ } else { \
+ /* \
+ * AxB + T \
+ */ \
+ b = gen_vsr_ptr(xB(ctx->opcode)); \
+ c = gen_vsr_ptr(xT(ctx->opcode)); \
+ } \
+ gen_helper_##name(cpu_env, xt, xa, b, c); \
+ tcg_temp_free_ptr(xt); \
+ tcg_temp_free_ptr(xa); \
+ tcg_temp_free_ptr(b); \
+ tcg_temp_free_ptr(c); \
+}
+
+GEN_VSX_HELPER_VSX_MADD(xsmadddp, 0x04, 0x04, 0x05, 0, PPC2_VSX)
+GEN_VSX_HELPER_VSX_MADD(xsmsubdp, 0x04, 0x06, 0x07, 0, PPC2_VSX)
+GEN_VSX_HELPER_VSX_MADD(xsnmadddp, 0x04, 0x14, 0x15, 0, PPC2_VSX)
+GEN_VSX_HELPER_VSX_MADD(xsnmsubdp, 0x04, 0x16, 0x17, 0, PPC2_VSX)
+GEN_VSX_HELPER_VSX_MADD(xsmaddsp, 0x04, 0x00, 0x01, 0, PPC2_VSX207)
+GEN_VSX_HELPER_VSX_MADD(xsmsubsp, 0x04, 0x02, 0x03, 0, PPC2_VSX207)
+GEN_VSX_HELPER_VSX_MADD(xsnmaddsp, 0x04, 0x10, 0x11, 0, PPC2_VSX207)
+GEN_VSX_HELPER_VSX_MADD(xsnmsubsp, 0x04, 0x12, 0x13, 0, PPC2_VSX207)
+GEN_VSX_HELPER_VSX_MADD(xvmadddp, 0x04, 0x0C, 0x0D, 0, PPC2_VSX)
+GEN_VSX_HELPER_VSX_MADD(xvmsubdp, 0x04, 0x0E, 0x0F, 0, PPC2_VSX)
+GEN_VSX_HELPER_VSX_MADD(xvnmadddp, 0x04, 0x1C, 0x1D, 0, PPC2_VSX)
+GEN_VSX_HELPER_VSX_MADD(xvnmsubdp, 0x04, 0x1E, 0x1F, 0, PPC2_VSX)
+GEN_VSX_HELPER_VSX_MADD(xvmaddsp, 0x04, 0x08, 0x09, 0, PPC2_VSX)
+GEN_VSX_HELPER_VSX_MADD(xvmsubsp, 0x04, 0x0A, 0x0B, 0, PPC2_VSX)
+GEN_VSX_HELPER_VSX_MADD(xvnmaddsp, 0x04, 0x18, 0x19, 0, PPC2_VSX)
+GEN_VSX_HELPER_VSX_MADD(xvnmsubsp, 0x04, 0x1A, 0x1B, 0, PPC2_VSX)
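The merged madd generators above fold each A-form/M-form pair into a single gen function, with opcode bit 25 selecting whether the target register is wired to the multiplicand or to the addend before the helper call. Below is a minimal, self-contained sketch of that operand wiring; the type and function names are illustrative stand-ins, and plain double arithmetic replaces QEMU's float64_muladd() with its FPSCR handling.

#include <stdio.h>

/* Illustrative stand-in for QEMU's ppc_vsr_t; only one double lane is used. */
typedef struct { double f64[2]; } vsr_t;

/*
 * The merged generator always passes (xt, xa, b, c) and the helper computes
 * a*b + c, so the A-form (xT = xA*xB + xT) and the M-form (xT = xA*xT + xB)
 * differ only in which registers were wired to b and c by the generator.
 * The real helpers use float64_muladd() with rounding and exception state.
 */
static void madd_helper_sketch(vsr_t *xt, const vsr_t *xa,
                               const vsr_t *b, const vsr_t *c)
{
    xt->f64[0] = xa->f64[0] * b->f64[0] + c->f64[0];
}

int main(void)
{
    vsr_t t = { { 1.0, 0.0 } }, a = { { 2.0, 0.0 } }, x = { { 3.0, 0.0 } };

    madd_helper_sketch(&t, &a, &x, &t);   /* A-form wiring: 2*3 + 1 */
    printf("A-form: %g\n", t.f64[0]);     /* prints 7 */

    t.f64[0] = 1.0;
    madd_helper_sketch(&t, &a, &t, &x);   /* M-form wiring: 2*1 + 3 */
    printf("M-form: %g\n", t.f64[0]);     /* prints 5 */
    return 0;
}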
static void gen_xxbrd(DisasContext *ctx)
{
@@ -1460,7 +1645,7 @@ static void gen_xxsldwi(DisasContext *ctx)
#define VSX_EXTRACT_INSERT(name) \
static void gen_##name(DisasContext *ctx) \
{ \
- TCGv xt, xb; \
+ TCGv_ptr xt, xb; \
TCGv_i32 t0; \
TCGv_i64 t1; \
uint8_t uimm = UIMM4(ctx->opcode); \
@@ -1469,8 +1654,8 @@ static void gen_##name(DisasContext *ctx) \
gen_exception(ctx, POWERPC_EXCP_VSXU); \
return; \
} \
- xt = tcg_const_tl(xT(ctx->opcode)); \
- xb = tcg_const_tl(xB(ctx->opcode)); \
+ xt = gen_vsr_ptr(xT(ctx->opcode)); \
+ xb = gen_vsr_ptr(xB(ctx->opcode)); \
t0 = tcg_temp_new_i32(); \
t1 = tcg_temp_new_i64(); \
/* \
@@ -1485,8 +1670,8 @@ static void gen_##name(DisasContext *ctx) \
} \
tcg_gen_movi_i32(t0, uimm); \
gen_helper_##name(cpu_env, xt, xb, t0); \
- tcg_temp_free(xb); \
- tcg_temp_free(xt); \
+ tcg_temp_free_ptr(xb); \
+ tcg_temp_free_ptr(xt); \
tcg_temp_free_i32(t0); \
tcg_temp_free_i64(t1); \
}
@@ -1813,7 +1998,7 @@ static void gen_xvxexpdp(DisasContext *ctx)
tcg_temp_free_i64(xbl);
}
-GEN_VSX_HELPER_2(xvxsigsp, 0x00, 0x04, 0, PPC2_ISA300)
+GEN_VSX_HELPER_X2(xvxsigsp, 0x00, 0x04, 0, PPC2_ISA300)
static void gen_xvxsigdp(DisasContext *ctx)
{
diff --git a/target/ppc/translate/vsx-ops.inc.c b/target/ppc/translate/vsx-ops.inc.c
index 5030c4aceb..7fd3942b84 100644
--- a/target/ppc/translate/vsx-ops.inc.c
+++ b/target/ppc/translate/vsx-ops.inc.c
@@ -63,6 +63,12 @@ GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 1, opc3, 0, PPC_NONE, fl2), \
GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 2, opc3, 0, PPC_NONE, fl2), \
GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 3, opc3, 0, PPC_NONE, fl2)
+#define GEN_XX3FORM_NAME(name, opcname, opc2, opc3, fl2) \
+GEN_HANDLER2_E(name, opcname, 0x3C, opc2 | 0, opc3, 0, PPC_NONE, fl2), \
+GEN_HANDLER2_E(name, opcname, 0x3C, opc2 | 1, opc3, 0, PPC_NONE, fl2), \
+GEN_HANDLER2_E(name, opcname, 0x3C, opc2 | 2, opc3, 0, PPC_NONE, fl2), \
+GEN_HANDLER2_E(name, opcname, 0x3C, opc2 | 3, opc3, 0, PPC_NONE, fl2)
+
#define GEN_XX2IFORM(name, opc2, opc3, fl2) \
GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0, opc3, 1, PPC_NONE, fl2), \
GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 1, opc3, 1, PPC_NONE, fl2), \
@@ -182,14 +188,14 @@ GEN_XX2FORM(xssqrtdp, 0x16, 0x04, PPC2_VSX),
GEN_XX2FORM(xsrsqrtedp, 0x14, 0x04, PPC2_VSX),
GEN_XX3FORM(xstdivdp, 0x14, 0x07, PPC2_VSX),
GEN_XX2FORM(xstsqrtdp, 0x14, 0x06, PPC2_VSX),
-GEN_XX3FORM(xsmaddadp, 0x04, 0x04, PPC2_VSX),
-GEN_XX3FORM(xsmaddmdp, 0x04, 0x05, PPC2_VSX),
-GEN_XX3FORM(xsmsubadp, 0x04, 0x06, PPC2_VSX),
-GEN_XX3FORM(xsmsubmdp, 0x04, 0x07, PPC2_VSX),
-GEN_XX3FORM(xsnmaddadp, 0x04, 0x14, PPC2_VSX),
-GEN_XX3FORM(xsnmaddmdp, 0x04, 0x15, PPC2_VSX),
-GEN_XX3FORM(xsnmsubadp, 0x04, 0x16, PPC2_VSX),
-GEN_XX3FORM(xsnmsubmdp, 0x04, 0x17, PPC2_VSX),
+GEN_XX3FORM_NAME(xsmadddp, "xsmaddadp", 0x04, 0x04, PPC2_VSX),
+GEN_XX3FORM_NAME(xsmadddp, "xsmaddmdp", 0x04, 0x05, PPC2_VSX),
+GEN_XX3FORM_NAME(xsmsubdp, "xsmsubadp", 0x04, 0x06, PPC2_VSX),
+GEN_XX3FORM_NAME(xsmsubdp, "xsmsubmdp", 0x04, 0x07, PPC2_VSX),
+GEN_XX3FORM_NAME(xsnmadddp, "xsnmaddadp", 0x04, 0x14, PPC2_VSX),
+GEN_XX3FORM_NAME(xsnmadddp, "xsnmaddmdp", 0x04, 0x15, PPC2_VSX),
+GEN_XX3FORM_NAME(xsnmsubdp, "xsnmsubadp", 0x04, 0x16, PPC2_VSX),
+GEN_XX3FORM_NAME(xsnmsubdp, "xsnmsubmdp", 0x04, 0x17, PPC2_VSX),
GEN_XX3FORM(xscmpeqdp, 0x0C, 0x00, PPC2_ISA300),
GEN_XX3FORM(xscmpgtdp, 0x0C, 0x01, PPC2_ISA300),
GEN_XX3FORM(xscmpgedp, 0x0C, 0x02, PPC2_ISA300),
@@ -235,14 +241,14 @@ GEN_XX2FORM(xsresp, 0x14, 0x01, PPC2_VSX207),
GEN_XX2FORM(xsrsp, 0x12, 0x11, PPC2_VSX207),
GEN_XX2FORM(xssqrtsp, 0x16, 0x00, PPC2_VSX207),
GEN_XX2FORM(xsrsqrtesp, 0x14, 0x00, PPC2_VSX207),
-GEN_XX3FORM(xsmaddasp, 0x04, 0x00, PPC2_VSX207),
-GEN_XX3FORM(xsmaddmsp, 0x04, 0x01, PPC2_VSX207),
-GEN_XX3FORM(xsmsubasp, 0x04, 0x02, PPC2_VSX207),
-GEN_XX3FORM(xsmsubmsp, 0x04, 0x03, PPC2_VSX207),
-GEN_XX3FORM(xsnmaddasp, 0x04, 0x10, PPC2_VSX207),
-GEN_XX3FORM(xsnmaddmsp, 0x04, 0x11, PPC2_VSX207),
-GEN_XX3FORM(xsnmsubasp, 0x04, 0x12, PPC2_VSX207),
-GEN_XX3FORM(xsnmsubmsp, 0x04, 0x13, PPC2_VSX207),
+GEN_XX3FORM_NAME(xsmaddsp, "xsmaddasp", 0x04, 0x00, PPC2_VSX207),
+GEN_XX3FORM_NAME(xsmaddsp, "xsmaddmsp", 0x04, 0x01, PPC2_VSX207),
+GEN_XX3FORM_NAME(xsmsubsp, "xsmsubasp", 0x04, 0x02, PPC2_VSX207),
+GEN_XX3FORM_NAME(xsmsubsp, "xsmsubmsp", 0x04, 0x03, PPC2_VSX207),
+GEN_XX3FORM_NAME(xsnmaddsp, "xsnmaddasp", 0x04, 0x10, PPC2_VSX207),
+GEN_XX3FORM_NAME(xsnmaddsp, "xsnmaddmsp", 0x04, 0x11, PPC2_VSX207),
+GEN_XX3FORM_NAME(xsnmsubsp, "xsnmsubasp", 0x04, 0x12, PPC2_VSX207),
+GEN_XX3FORM_NAME(xsnmsubsp, "xsnmsubmsp", 0x04, 0x13, PPC2_VSX207),
GEN_XX2FORM(xscvsxdsp, 0x10, 0x13, PPC2_VSX207),
GEN_XX2FORM(xscvuxdsp, 0x10, 0x12, PPC2_VSX207),
@@ -255,14 +261,14 @@ GEN_XX2FORM(xvsqrtdp, 0x16, 0x0C, PPC2_VSX),
GEN_XX2FORM(xvrsqrtedp, 0x14, 0x0C, PPC2_VSX),
GEN_XX3FORM(xvtdivdp, 0x14, 0x0F, PPC2_VSX),
GEN_XX2FORM(xvtsqrtdp, 0x14, 0x0E, PPC2_VSX),
-GEN_XX3FORM(xvmaddadp, 0x04, 0x0C, PPC2_VSX),
-GEN_XX3FORM(xvmaddmdp, 0x04, 0x0D, PPC2_VSX),
-GEN_XX3FORM(xvmsubadp, 0x04, 0x0E, PPC2_VSX),
-GEN_XX3FORM(xvmsubmdp, 0x04, 0x0F, PPC2_VSX),
-GEN_XX3FORM(xvnmaddadp, 0x04, 0x1C, PPC2_VSX),
-GEN_XX3FORM(xvnmaddmdp, 0x04, 0x1D, PPC2_VSX),
-GEN_XX3FORM(xvnmsubadp, 0x04, 0x1E, PPC2_VSX),
-GEN_XX3FORM(xvnmsubmdp, 0x04, 0x1F, PPC2_VSX),
+GEN_XX3FORM_NAME(xvmadddp, "xvmaddadp", 0x04, 0x0C, PPC2_VSX),
+GEN_XX3FORM_NAME(xvmadddp, "xvmaddmdp", 0x04, 0x0D, PPC2_VSX),
+GEN_XX3FORM_NAME(xvmsubdp, "xvmsubadp", 0x04, 0x0E, PPC2_VSX),
+GEN_XX3FORM_NAME(xvmsubdp, "xvmsubmdp", 0x04, 0x0F, PPC2_VSX),
+GEN_XX3FORM_NAME(xvnmadddp, "xvnmaddadp", 0x04, 0x1C, PPC2_VSX),
+GEN_XX3FORM_NAME(xvnmadddp, "xvnmaddmdp", 0x04, 0x1D, PPC2_VSX),
+GEN_XX3FORM_NAME(xvnmsubdp, "xvnmsubadp", 0x04, 0x1E, PPC2_VSX),
+GEN_XX3FORM_NAME(xvnmsubdp, "xvnmsubmdp", 0x04, 0x1F, PPC2_VSX),
GEN_XX3FORM(xvmaxdp, 0x00, 0x1C, PPC2_VSX),
GEN_XX3FORM(xvmindp, 0x00, 0x1D, PPC2_VSX),
GEN_XX3_RC_FORM(xvcmpeqdp, 0x0C, 0x0C, PPC2_VSX),
@@ -293,14 +299,14 @@ GEN_XX2FORM(xvsqrtsp, 0x16, 0x08, PPC2_VSX),
GEN_XX2FORM(xvrsqrtesp, 0x14, 0x08, PPC2_VSX),
GEN_XX3FORM(xvtdivsp, 0x14, 0x0B, PPC2_VSX),
GEN_XX2FORM(xvtsqrtsp, 0x14, 0x0A, PPC2_VSX),
-GEN_XX3FORM(xvmaddasp, 0x04, 0x08, PPC2_VSX),
-GEN_XX3FORM(xvmaddmsp, 0x04, 0x09, PPC2_VSX),
-GEN_XX3FORM(xvmsubasp, 0x04, 0x0A, PPC2_VSX),
-GEN_XX3FORM(xvmsubmsp, 0x04, 0x0B, PPC2_VSX),
-GEN_XX3FORM(xvnmaddasp, 0x04, 0x18, PPC2_VSX),
-GEN_XX3FORM(xvnmaddmsp, 0x04, 0x19, PPC2_VSX),
-GEN_XX3FORM(xvnmsubasp, 0x04, 0x1A, PPC2_VSX),
-GEN_XX3FORM(xvnmsubmsp, 0x04, 0x1B, PPC2_VSX),
+GEN_XX3FORM_NAME(xvmaddsp, "xvmaddasp", 0x04, 0x08, PPC2_VSX),
+GEN_XX3FORM_NAME(xvmaddsp, "xvmaddmsp", 0x04, 0x09, PPC2_VSX),
+GEN_XX3FORM_NAME(xvmsubsp, "xvmsubasp", 0x04, 0x0A, PPC2_VSX),
+GEN_XX3FORM_NAME(xvmsubsp, "xvmsubmsp", 0x04, 0x0B, PPC2_VSX),
+GEN_XX3FORM_NAME(xvnmaddsp, "xvnmaddasp", 0x04, 0x18, PPC2_VSX),
+GEN_XX3FORM_NAME(xvnmaddsp, "xvnmaddmsp", 0x04, 0x19, PPC2_VSX),
+GEN_XX3FORM_NAME(xvnmsubsp, "xvnmsubasp", 0x04, 0x1A, PPC2_VSX),
+GEN_XX3FORM_NAME(xvnmsubsp, "xvnmsubmsp", 0x04, 0x1B, PPC2_VSX),
GEN_XX3FORM(xvmaxsp, 0x00, 0x18, PPC2_VSX),
GEN_XX3FORM(xvminsp, 0x00, 0x19, PPC2_VSX),
GEN_XX3_RC_FORM(xvcmpeqsp, 0x0C, 0x08, PPC2_VSX),
diff --git a/target/s390x/cpu_features.c b/target/s390x/cpu_features.c
index f64f581c86..9f817e3cfa 100644
--- a/target/s390x/cpu_features.c
+++ b/target/s390x/cpu_features.c
@@ -2,8 +2,9 @@
* CPU features/facilities for s390x
*
* Copyright IBM Corp. 2016, 2018
+ * Copyright Red Hat, Inc. 2019
*
- * Author(s): David Hildenbrand <dahi@linux.vnet.ibm.com>
+ * Author(s): David Hildenbrand <david@redhat.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or (at
* your option) any later version. See the COPYING file in the top-level
@@ -14,346 +15,17 @@
#include "qemu/module.h"
#include "cpu_features.h"
-#define FEAT_INIT(_name, _type, _bit, _desc) \
- { \
- .name = _name, \
- .type = _type, \
- .bit = _bit, \
- .desc = _desc, \
- }
-
-/* S390FeatDef.bit is not applicable as there is no feature block. */
-#define FEAT_INIT_MISC(_name, _desc) \
- FEAT_INIT(_name, S390_FEAT_TYPE_MISC, 0, _desc)
-
-/* indexed by feature number for easy lookup */
-static const S390FeatDef s390_features[] = {
- FEAT_INIT("esan3", S390_FEAT_TYPE_STFL, 0, "Instructions marked as n3"),
- FEAT_INIT("zarch", S390_FEAT_TYPE_STFL, 1, "z/Architecture architectural mode"),
- FEAT_INIT("dateh", S390_FEAT_TYPE_STFL, 3, "DAT-enhancement facility"),
- FEAT_INIT("idtes", S390_FEAT_TYPE_STFL, 4, "IDTE selective TLB segment-table clearing"),
- FEAT_INIT("idter", S390_FEAT_TYPE_STFL, 5, "IDTE selective TLB region-table clearing"),
- FEAT_INIT("asnlxr", S390_FEAT_TYPE_STFL, 6, "ASN-and-LX reuse facility"),
- FEAT_INIT("stfle", S390_FEAT_TYPE_STFL, 7, "Store-facility-list-extended facility"),
- FEAT_INIT("edat", S390_FEAT_TYPE_STFL, 8, "Enhanced-DAT facility"),
- FEAT_INIT("srs", S390_FEAT_TYPE_STFL, 9, "Sense-running-status facility"),
- FEAT_INIT("csske", S390_FEAT_TYPE_STFL, 10, "Conditional-SSKE facility"),
- FEAT_INIT("ctop", S390_FEAT_TYPE_STFL, 11, "Configuration-topology facility"),
- FEAT_INIT("apqci", S390_FEAT_TYPE_STFL, 12, "Query AP Configuration Information facility"),
- FEAT_INIT("ipter", S390_FEAT_TYPE_STFL, 13, "IPTE-range facility"),
- FEAT_INIT("nonqks", S390_FEAT_TYPE_STFL, 14, "Nonquiescing key-setting facility"),
- FEAT_INIT("apft", S390_FEAT_TYPE_STFL, 15, "AP Facilities Test facility"),
- FEAT_INIT("etf2", S390_FEAT_TYPE_STFL, 16, "Extended-translation facility 2"),
- FEAT_INIT("msa-base", S390_FEAT_TYPE_STFL, 17, "Message-security-assist facility (excluding subfunctions)"),
- FEAT_INIT("ldisp", S390_FEAT_TYPE_STFL, 18, "Long-displacement facility"),
- FEAT_INIT("ldisphp", S390_FEAT_TYPE_STFL, 19, "Long-displacement facility has high performance"),
- FEAT_INIT("hfpm", S390_FEAT_TYPE_STFL, 20, "HFP-multiply-add/subtract facility"),
- FEAT_INIT("eimm", S390_FEAT_TYPE_STFL, 21, "Extended-immediate facility"),
- FEAT_INIT("etf3", S390_FEAT_TYPE_STFL, 22, "Extended-translation facility 3"),
- FEAT_INIT("hfpue", S390_FEAT_TYPE_STFL, 23, "HFP-unnormalized-extension facility"),
- FEAT_INIT("etf2eh", S390_FEAT_TYPE_STFL, 24, "ETF2-enhancement facility"),
- FEAT_INIT("stckf", S390_FEAT_TYPE_STFL, 25, "Store-clock-fast facility"),
- FEAT_INIT("parseh", S390_FEAT_TYPE_STFL, 26, "Parsing-enhancement facility"),
- FEAT_INIT("mvcos", S390_FEAT_TYPE_STFL, 27, "Move-with-optional-specification facility"),
- FEAT_INIT("tods-base", S390_FEAT_TYPE_STFL, 28, "TOD-clock-steering facility (excluding subfunctions)"),
- FEAT_INIT("etf3eh", S390_FEAT_TYPE_STFL, 30, "ETF3-enhancement facility"),
- FEAT_INIT("ectg", S390_FEAT_TYPE_STFL, 31, "Extract-CPU-time facility"),
- FEAT_INIT("csst", S390_FEAT_TYPE_STFL, 32, "Compare-and-swap-and-store facility"),
- FEAT_INIT("csst2", S390_FEAT_TYPE_STFL, 33, "Compare-and-swap-and-store facility 2"),
- FEAT_INIT("ginste", S390_FEAT_TYPE_STFL, 34, "General-instructions-extension facility"),
- FEAT_INIT("exrl", S390_FEAT_TYPE_STFL, 35, "Execute-extensions facility"),
- FEAT_INIT("emon", S390_FEAT_TYPE_STFL, 36, "Enhanced-monitor facility"),
- FEAT_INIT("fpe", S390_FEAT_TYPE_STFL, 37, "Floating-point extension facility"),
- FEAT_INIT("opc", S390_FEAT_TYPE_STFL, 38, "Order Preserving Compression facility"),
- FEAT_INIT("sprogp", S390_FEAT_TYPE_STFL, 40, "Set-program-parameters facility"),
- FEAT_INIT("fpseh", S390_FEAT_TYPE_STFL, 41, "Floating-point-support-enhancement facilities"),
- FEAT_INIT("dfp", S390_FEAT_TYPE_STFL, 42, "DFP (decimal-floating-point) facility"),
- FEAT_INIT("dfphp", S390_FEAT_TYPE_STFL, 43, "DFP (decimal-floating-point) facility has high performance"),
- FEAT_INIT("pfpo", S390_FEAT_TYPE_STFL, 44, "PFPO instruction"),
- FEAT_INIT("stfle45", S390_FEAT_TYPE_STFL, 45, "Various facilities introduced with z196"),
- FEAT_INIT("cmpsceh", S390_FEAT_TYPE_STFL, 47, "CMPSC-enhancement facility"),
- FEAT_INIT("dfpzc", S390_FEAT_TYPE_STFL, 48, "Decimal-floating-point zoned-conversion facility"),
- FEAT_INIT("stfle49", S390_FEAT_TYPE_STFL, 49, "Various facilities introduced with zEC12"),
- FEAT_INIT("cte", S390_FEAT_TYPE_STFL, 50, "Constrained transactional-execution facility"),
- FEAT_INIT("ltlbc", S390_FEAT_TYPE_STFL, 51, "Local-TLB-clearing facility"),
- FEAT_INIT("iacc2", S390_FEAT_TYPE_STFL, 52, "Interlocked-access facility 2"),
- FEAT_INIT("stfle53", S390_FEAT_TYPE_STFL, 53, "Various facilities introduced with z13"),
- FEAT_INIT("eec", S390_FEAT_TYPE_STFL, 54, "Entropy encoding compression facility"),
- FEAT_INIT("msa5-base", S390_FEAT_TYPE_STFL, 57, "Message-security-assist-extension-5 facility (excluding subfunctions)"),
- FEAT_INIT("minste2", S390_FEAT_TYPE_STFL, 58, "Miscellaneous-instruction-extensions facility 2"),
- FEAT_INIT("sema", S390_FEAT_TYPE_STFL, 59, "Semaphore-assist facility"),
- FEAT_INIT("tsi", S390_FEAT_TYPE_STFL, 60, "Time-slice Instrumentation facility"),
- FEAT_INIT("minste3", S390_FEAT_TYPE_STFL, 61, "Miscellaneous-Instruction-Extensions Facility 3"),
- FEAT_INIT("ri", S390_FEAT_TYPE_STFL, 64, "CPU runtime-instrumentation facility"),
- FEAT_INIT("zpci", S390_FEAT_TYPE_STFL, 69, "z/PCI facility"),
- FEAT_INIT("aen", S390_FEAT_TYPE_STFL, 71, "General-purpose-adapter-event-notification facility"),
- FEAT_INIT("ais", S390_FEAT_TYPE_STFL, 72, "General-purpose-adapter-interruption-suppression facility"),
- FEAT_INIT("te", S390_FEAT_TYPE_STFL, 73, "Transactional-execution facility"),
- FEAT_INIT("sthyi", S390_FEAT_TYPE_STFL, 74, "Store-hypervisor-information facility"),
- FEAT_INIT("aefsi", S390_FEAT_TYPE_STFL, 75, "Access-exception-fetch/store-indication facility"),
- FEAT_INIT("msa3-base", S390_FEAT_TYPE_STFL, 76, "Message-security-assist-extension-3 facility (excluding subfunctions)"),
- FEAT_INIT("msa4-base", S390_FEAT_TYPE_STFL, 77, "Message-security-assist-extension-4 facility (excluding subfunctions)"),
- FEAT_INIT("edat2", S390_FEAT_TYPE_STFL, 78, "Enhanced-DAT facility 2"),
- FEAT_INIT("dfppc", S390_FEAT_TYPE_STFL, 80, "Decimal-floating-point packed-conversion facility"),
- FEAT_INIT("ppa15", S390_FEAT_TYPE_STFL, 81, "PPA15 is installed"),
- FEAT_INIT("bpb", S390_FEAT_TYPE_STFL, 82, "Branch prediction blocking"),
- FEAT_INIT("vx", S390_FEAT_TYPE_STFL, 129, "Vector facility"),
- FEAT_INIT("iep", S390_FEAT_TYPE_STFL, 130, "Instruction-execution-protection facility"),
- FEAT_INIT("sea_esop2", S390_FEAT_TYPE_STFL, 131, "Side-effect-access facility and Enhanced-suppression-on-protection facility 2"),
- FEAT_INIT("gs", S390_FEAT_TYPE_STFL, 133, "Guarded-storage facility"),
- FEAT_INIT("vxpd", S390_FEAT_TYPE_STFL, 134, "Vector packed decimal facility"),
- FEAT_INIT("vxeh", S390_FEAT_TYPE_STFL, 135, "Vector enhancements facility"),
- FEAT_INIT("mepoch", S390_FEAT_TYPE_STFL, 139, "Multiple-epoch facility"),
- FEAT_INIT("tpei", S390_FEAT_TYPE_STFL, 144, "Test-pending-external-interruption facility"),
- FEAT_INIT("irbm", S390_FEAT_TYPE_STFL, 145, "Insert-reference-bits-multiple facility"),
- FEAT_INIT("msa8-base", S390_FEAT_TYPE_STFL, 146, "Message-security-assist-extension-8 facility (excluding subfunctions)"),
- FEAT_INIT("cmmnt", S390_FEAT_TYPE_STFL, 147, "CMM: ESSA-enhancement (no translate) facility"),
- FEAT_INIT("vxeh2", S390_FEAT_TYPE_STFL, 148, "Vector Enhancements facility 2"),
- FEAT_INIT("esort-base", S390_FEAT_TYPE_STFL, 150, "Enhanced-sort facility (excluding subfunctions)"),
- FEAT_INIT("deflate-base", S390_FEAT_TYPE_STFL, 151, "Deflate-conversion facility (excluding subfunctions)"),
- FEAT_INIT("vxbeh", S390_FEAT_TYPE_STFL, 152, "Vector BCD enhancements facility 1"),
- FEAT_INIT("msa9-base", S390_FEAT_TYPE_STFL, 155, "Message-security-assist-extension-9 facility (excluding subfunctions)"),
- FEAT_INIT("etoken", S390_FEAT_TYPE_STFL, 156, "Etoken facility"),
-
- /* SCLP SCCB Byte 80 - 98 (bit numbers relative to byte-80) */
- FEAT_INIT("gsls", S390_FEAT_TYPE_SCLP_CONF_CHAR, 40, "SIE: Guest-storage-limit-suppression facility"),
- FEAT_INIT("esop", S390_FEAT_TYPE_SCLP_CONF_CHAR, 46, "Enhanced-suppression-on-protection facility"),
- FEAT_INIT("hpma2", S390_FEAT_TYPE_SCLP_CONF_CHAR, 90, "Host page management assist 2 Facility"), /* 91-2 */
- FEAT_INIT("kss", S390_FEAT_TYPE_SCLP_CONF_CHAR, 151, "SIE: Keyless-subset facility"), /* 98-7 */
-
- /* SCLP SCCB Byte 116 - 119 (bit numbers relative to byte-116) */
- FEAT_INIT("64bscao", S390_FEAT_TYPE_SCLP_CONF_CHAR_EXT, 0, "SIE: 64-bit-SCAO facility"),
- FEAT_INIT("cmma", S390_FEAT_TYPE_SCLP_CONF_CHAR_EXT, 1, "SIE: Collaborative-memory-management assist"),
- FEAT_INIT("pfmfi", S390_FEAT_TYPE_SCLP_CONF_CHAR_EXT, 9, "SIE: PFMF interpretation facility"),
- FEAT_INIT("ibs", S390_FEAT_TYPE_SCLP_CONF_CHAR_EXT, 10, "SIE: Interlock-and-broadcast-suppression facility"),
-
- FEAT_INIT("sief2", S390_FEAT_TYPE_SCLP_CPU, 4, "SIE: interception format 2 (Virtual SIE)"),
- FEAT_INIT("skey", S390_FEAT_TYPE_SCLP_CPU, 5, "SIE: Storage-key facility"),
- FEAT_INIT("gpereh", S390_FEAT_TYPE_SCLP_CPU, 10, "SIE: Guest-PER enhancement facility"),
- FEAT_INIT("siif", S390_FEAT_TYPE_SCLP_CPU, 11, "SIE: Shared IPTE-interlock facility"),
- FEAT_INIT("sigpif", S390_FEAT_TYPE_SCLP_CPU, 12, "SIE: SIGP interpretation facility"),
- FEAT_INIT("ib", S390_FEAT_TYPE_SCLP_CPU, 42, "SIE: Intervention bypass facility"),
- FEAT_INIT("cei", S390_FEAT_TYPE_SCLP_CPU, 43, "SIE: Conditional-external-interception facility"),
-
- FEAT_INIT_MISC("dateh2", "DAT-enhancement facility 2"),
- FEAT_INIT_MISC("cmm", "Collaborative-memory-management facility"),
- FEAT_INIT_MISC("ap", "AP instructions installed"),
-
- FEAT_INIT("plo-cl", S390_FEAT_TYPE_PLO, 0, "PLO Compare and load (32 bit in general registers)"),
- FEAT_INIT("plo-clg", S390_FEAT_TYPE_PLO, 1, "PLO Compare and load (64 bit in parameter list)"),
- FEAT_INIT("plo-clgr", S390_FEAT_TYPE_PLO, 2, "PLO Compare and load (32 bit in general registers)"),
- FEAT_INIT("plo-clx", S390_FEAT_TYPE_PLO, 3, "PLO Compare and load (128 bit in parameter list)"),
- FEAT_INIT("plo-cs", S390_FEAT_TYPE_PLO, 4, "PLO Compare and swap (32 bit in general registers)"),
- FEAT_INIT("plo-csg", S390_FEAT_TYPE_PLO, 5, "PLO Compare and swap (64 bit in parameter list)"),
- FEAT_INIT("plo-csgr", S390_FEAT_TYPE_PLO, 6, "PLO Compare and swap (32 bit in general registers)"),
- FEAT_INIT("plo-csx", S390_FEAT_TYPE_PLO, 7, "PLO Compare and swap (128 bit in parameter list)"),
- FEAT_INIT("plo-dcs", S390_FEAT_TYPE_PLO, 8, "PLO Double compare and swap (32 bit in general registers)"),
- FEAT_INIT("plo-dcsg", S390_FEAT_TYPE_PLO, 9, "PLO Double compare and swap (64 bit in parameter list)"),
- FEAT_INIT("plo-dcsgr", S390_FEAT_TYPE_PLO, 10, "PLO Double compare and swap (32 bit in general registers)"),
- FEAT_INIT("plo-dcsx", S390_FEAT_TYPE_PLO, 11, "PLO Double compare and swap (128 bit in parameter list)"),
- FEAT_INIT("plo-csst", S390_FEAT_TYPE_PLO, 12, "PLO Compare and swap and store (32 bit in general registers)"),
- FEAT_INIT("plo-csstg", S390_FEAT_TYPE_PLO, 13, "PLO Compare and swap and store (64 bit in parameter list)"),
- FEAT_INIT("plo-csstgr", S390_FEAT_TYPE_PLO, 14, "PLO Compare and swap and store (32 bit in general registers)"),
- FEAT_INIT("plo-csstx", S390_FEAT_TYPE_PLO, 15, "PLO Compare and swap and store (128 bit in parameter list)"),
- FEAT_INIT("plo-csdst", S390_FEAT_TYPE_PLO, 16, "PLO Compare and swap and double store (32 bit in general registers)"),
- FEAT_INIT("plo-csdstg", S390_FEAT_TYPE_PLO, 17, "PLO Compare and swap and double store (64 bit in parameter list)"),
- FEAT_INIT("plo-csdstgr", S390_FEAT_TYPE_PLO, 18, "PLO Compare and swap and double store (32 bit in general registers)"),
- FEAT_INIT("plo-csdstx", S390_FEAT_TYPE_PLO, 19, "PLO Compare and swap and double store (128 bit in parameter list)"),
- FEAT_INIT("plo-cstst", S390_FEAT_TYPE_PLO, 20, "PLO Compare and swap and triple store (32 bit in general registers)"),
- FEAT_INIT("plo-cststg", S390_FEAT_TYPE_PLO, 21, "PLO Compare and swap and triple store (64 bit in parameter list)"),
- FEAT_INIT("plo-cststgr", S390_FEAT_TYPE_PLO, 22, "PLO Compare and swap and triple store (32 bit in general registers)"),
- FEAT_INIT("plo-cststx", S390_FEAT_TYPE_PLO, 23, "PLO Compare and swap and triple store (128 bit in parameter list)"),
-
- FEAT_INIT("ptff-qto", S390_FEAT_TYPE_PTFF, 1, "PTFF Query TOD Offset"),
- FEAT_INIT("ptff-qsi", S390_FEAT_TYPE_PTFF, 2, "PTFF Query Steering Information"),
- FEAT_INIT("ptff-qpc", S390_FEAT_TYPE_PTFF, 3, "PTFF Query Physical Clock"),
- FEAT_INIT("ptff-qui", S390_FEAT_TYPE_PTFF, 4, "PTFF Query UTC Information"),
- FEAT_INIT("ptff-qtou", S390_FEAT_TYPE_PTFF, 5, "PTFF Query TOD Offset User"),
- FEAT_INIT("ptff-qsie", S390_FEAT_TYPE_PTFF, 10, "PTFF Query Steering Information Extended"),
- FEAT_INIT("ptff-qtoue", S390_FEAT_TYPE_PTFF, 13, "PTFF Query TOD Offset User Extended"),
- FEAT_INIT("ptff-sto", S390_FEAT_TYPE_PTFF, 65, "PTFF Set TOD Offset"),
- FEAT_INIT("ptff-stou", S390_FEAT_TYPE_PTFF, 69, "PTFF Set TOD Offset User"),
- FEAT_INIT("ptff-stoe", S390_FEAT_TYPE_PTFF, 73, "PTFF Set TOD Offset Extended"),
- FEAT_INIT("ptff-stoue", S390_FEAT_TYPE_PTFF, 77, "PTFF Set TOD Offset User Extended"),
-
- FEAT_INIT("kmac-dea", S390_FEAT_TYPE_KMAC, 1, "KMAC DEA"),
- FEAT_INIT("kmac-tdea-128", S390_FEAT_TYPE_KMAC, 2, "KMAC TDEA-128"),
- FEAT_INIT("kmac-tdea-192", S390_FEAT_TYPE_KMAC, 3, "KMAC TDEA-192"),
- FEAT_INIT("kmac-edea", S390_FEAT_TYPE_KMAC, 9, "KMAC Encrypted-DEA"),
- FEAT_INIT("kmac-etdea-128", S390_FEAT_TYPE_KMAC, 10, "KMAC Encrypted-TDEA-128"),
- FEAT_INIT("kmac-etdea-192", S390_FEAT_TYPE_KMAC, 11, "KMAC Encrypted-TDEA-192"),
- FEAT_INIT("kmac-aes-128", S390_FEAT_TYPE_KMAC, 18, "KMAC AES-128"),
- FEAT_INIT("kmac-aes-192", S390_FEAT_TYPE_KMAC, 19, "KMAC AES-192"),
- FEAT_INIT("kmac-aes-256", S390_FEAT_TYPE_KMAC, 20, "KMAC AES-256"),
- FEAT_INIT("kmac-eaes-128", S390_FEAT_TYPE_KMAC, 26, "KMAC Encrypted-AES-128"),
- FEAT_INIT("kmac-eaes-192", S390_FEAT_TYPE_KMAC, 27, "KMAC Encrypted-AES-192"),
- FEAT_INIT("kmac-eaes-256", S390_FEAT_TYPE_KMAC, 28, "KMAC Encrypted-AES-256"),
-
- FEAT_INIT("kmc-dea", S390_FEAT_TYPE_KMC, 1, "KMC DEA"),
- FEAT_INIT("kmc-tdea-128", S390_FEAT_TYPE_KMC, 2, "KMC TDEA-128"),
- FEAT_INIT("kmc-tdea-192", S390_FEAT_TYPE_KMC, 3, "KMC TDEA-192"),
- FEAT_INIT("kmc-edea", S390_FEAT_TYPE_KMC, 9, "KMC Encrypted-DEA"),
- FEAT_INIT("kmc-etdea-128", S390_FEAT_TYPE_KMC, 10, "KMC Encrypted-TDEA-128"),
- FEAT_INIT("kmc-etdea-192", S390_FEAT_TYPE_KMC, 11, "KMC Encrypted-TDEA-192"),
- FEAT_INIT("kmc-aes-128", S390_FEAT_TYPE_KMC, 18, "KMC AES-128"),
- FEAT_INIT("kmc-aes-192", S390_FEAT_TYPE_KMC, 19, "KMC AES-192"),
- FEAT_INIT("kmc-aes-256", S390_FEAT_TYPE_KMC, 20, "KMC AES-256"),
- FEAT_INIT("kmc-eaes-128", S390_FEAT_TYPE_KMC, 26, "KMC Encrypted-AES-128"),
- FEAT_INIT("kmc-eaes-192", S390_FEAT_TYPE_KMC, 27, "KMC Encrypted-AES-192"),
- FEAT_INIT("kmc-eaes-256", S390_FEAT_TYPE_KMC, 28, "KMC Encrypted-AES-256"),
- FEAT_INIT("kmc-prng", S390_FEAT_TYPE_KMC, 67, "KMC PRNG"),
-
- FEAT_INIT("km-dea", S390_FEAT_TYPE_KM, 1, "KM DEA"),
- FEAT_INIT("km-tdea-128", S390_FEAT_TYPE_KM, 2, "KM TDEA-128"),
- FEAT_INIT("km-tdea-192", S390_FEAT_TYPE_KM, 3, "KM TDEA-192"),
- FEAT_INIT("km-edea", S390_FEAT_TYPE_KM, 9, "KM Encrypted-DEA"),
- FEAT_INIT("km-etdea-128", S390_FEAT_TYPE_KM, 10, "KM Encrypted-TDEA-128"),
- FEAT_INIT("km-etdea-192", S390_FEAT_TYPE_KM, 11, "KM Encrypted-TDEA-192"),
- FEAT_INIT("km-aes-128", S390_FEAT_TYPE_KM, 18, "KM AES-128"),
- FEAT_INIT("km-aes-192", S390_FEAT_TYPE_KM, 19, "KM AES-192"),
- FEAT_INIT("km-aes-256", S390_FEAT_TYPE_KM, 20, "KM AES-256"),
- FEAT_INIT("km-eaes-128", S390_FEAT_TYPE_KM, 26, "KM Encrypted-AES-128"),
- FEAT_INIT("km-eaes-192", S390_FEAT_TYPE_KM, 27, "KM Encrypted-AES-192"),
- FEAT_INIT("km-eaes-256", S390_FEAT_TYPE_KM, 28, "KM Encrypted-AES-256"),
- FEAT_INIT("km-xts-aes-128", S390_FEAT_TYPE_KM, 50, "KM XTS-AES-128"),
- FEAT_INIT("km-xts-aes-256", S390_FEAT_TYPE_KM, 52, "KM XTS-AES-256"),
- FEAT_INIT("km-xts-eaes-128", S390_FEAT_TYPE_KM, 58, "KM XTS-Encrypted-AES-128"),
- FEAT_INIT("km-xts-eaes-256", S390_FEAT_TYPE_KM, 60, "KM XTS-Encrypted-AES-256"),
-
- FEAT_INIT("kimd-sha-1", S390_FEAT_TYPE_KIMD, 1, "KIMD SHA-1"),
- FEAT_INIT("kimd-sha-256", S390_FEAT_TYPE_KIMD, 2, "KIMD SHA-256"),
- FEAT_INIT("kimd-sha-512", S390_FEAT_TYPE_KIMD, 3, "KIMD SHA-512"),
- FEAT_INIT("kimd-sha3-224", S390_FEAT_TYPE_KIMD, 32, "KIMD SHA3-224"),
- FEAT_INIT("kimd-sha3-256", S390_FEAT_TYPE_KIMD, 33, "KIMD SHA3-256"),
- FEAT_INIT("kimd-sha3-384", S390_FEAT_TYPE_KIMD, 34, "KIMD SHA3-384"),
- FEAT_INIT("kimd-sha3-512", S390_FEAT_TYPE_KIMD, 35, "KIMD SHA3-512"),
- FEAT_INIT("kimd-shake-128", S390_FEAT_TYPE_KIMD, 36, "KIMD SHAKE-128"),
- FEAT_INIT("kimd-shake-256", S390_FEAT_TYPE_KIMD, 37, "KIMD SHAKE-256"),
- FEAT_INIT("kimd-ghash", S390_FEAT_TYPE_KIMD, 65, "KIMD GHASH"),
-
- FEAT_INIT("klmd-sha-1", S390_FEAT_TYPE_KLMD, 1, "KLMD SHA-1"),
- FEAT_INIT("klmd-sha-256", S390_FEAT_TYPE_KLMD, 2, "KLMD SHA-256"),
- FEAT_INIT("klmd-sha-512", S390_FEAT_TYPE_KLMD, 3, "KLMD SHA-512"),
- FEAT_INIT("klmd-sha3-224", S390_FEAT_TYPE_KLMD, 32, "KLMD SHA3-224"),
- FEAT_INIT("klmd-sha3-256", S390_FEAT_TYPE_KLMD, 33, "KLMD SHA3-256"),
- FEAT_INIT("klmd-sha3-384", S390_FEAT_TYPE_KLMD, 34, "KLMD SHA3-384"),
- FEAT_INIT("klmd-sha3-512", S390_FEAT_TYPE_KLMD, 35, "KLMD SHA3-512"),
- FEAT_INIT("klmd-shake-128", S390_FEAT_TYPE_KLMD, 36, "KLMD SHAKE-128"),
- FEAT_INIT("klmd-shake-256", S390_FEAT_TYPE_KLMD, 37, "KLMD SHAKE-256"),
-
- FEAT_INIT("pckmo-edea", S390_FEAT_TYPE_PCKMO, 1, "PCKMO Encrypted-DEA-Key"),
- FEAT_INIT("pckmo-etdea-128", S390_FEAT_TYPE_PCKMO, 2, "PCKMO Encrypted-TDEA-128-Key"),
- FEAT_INIT("pckmo-etdea-192", S390_FEAT_TYPE_PCKMO, 3, "PCKMO Encrypted-TDEA-192-Key"),
- FEAT_INIT("pckmo-aes-128", S390_FEAT_TYPE_PCKMO, 18, "PCKMO Encrypted-AES-128-Key"),
- FEAT_INIT("pckmo-aes-192", S390_FEAT_TYPE_PCKMO, 19, "PCKMO Encrypted-AES-192-Key"),
- FEAT_INIT("pckmo-aes-256", S390_FEAT_TYPE_PCKMO, 20, "PCKMO Encrypted-AES-256-Key"),
- FEAT_INIT("pckmo-ecc-p256", S390_FEAT_TYPE_PCKMO, 32, "PCKMO Encrypt-ECC-P256-Key"),
- FEAT_INIT("pckmo-ecc-p384", S390_FEAT_TYPE_PCKMO, 33, "PCKMO Encrypt-ECC-P384-Key"),
- FEAT_INIT("pckmo-ecc-p521", S390_FEAT_TYPE_PCKMO, 34, "PCKMO Encrypt-ECC-P521-Key"),
- FEAT_INIT("pckmo-ecc-ed25519", S390_FEAT_TYPE_PCKMO, 40 , "PCKMO Encrypt-ECC-Ed25519-Key"),
- FEAT_INIT("pckmo-ecc-ed448", S390_FEAT_TYPE_PCKMO, 41 , "PCKMO Encrypt-ECC-Ed448-Key"),
-
- FEAT_INIT("kmctr-dea", S390_FEAT_TYPE_KMCTR, 1, "KMCTR DEA"),
- FEAT_INIT("kmctr-tdea-128", S390_FEAT_TYPE_KMCTR, 2, "KMCTR TDEA-128"),
- FEAT_INIT("kmctr-tdea-192", S390_FEAT_TYPE_KMCTR, 3, "KMCTR TDEA-192"),
- FEAT_INIT("kmctr-edea", S390_FEAT_TYPE_KMCTR, 9, "KMCTR Encrypted-DEA"),
- FEAT_INIT("kmctr-etdea-128", S390_FEAT_TYPE_KMCTR, 10, "KMCTR Encrypted-TDEA-128"),
- FEAT_INIT("kmctr-etdea-192", S390_FEAT_TYPE_KMCTR, 11, "KMCTR Encrypted-TDEA-192"),
- FEAT_INIT("kmctr-aes-128", S390_FEAT_TYPE_KMCTR, 18, "KMCTR AES-128"),
- FEAT_INIT("kmctr-aes-192", S390_FEAT_TYPE_KMCTR, 19, "KMCTR AES-192"),
- FEAT_INIT("kmctr-aes-256", S390_FEAT_TYPE_KMCTR, 20, "KMCTR AES-256"),
- FEAT_INIT("kmctr-eaes-128", S390_FEAT_TYPE_KMCTR, 26, "KMCTR Encrypted-AES-128"),
- FEAT_INIT("kmctr-eaes-192", S390_FEAT_TYPE_KMCTR, 27, "KMCTR Encrypted-AES-192"),
- FEAT_INIT("kmctr-eaes-256", S390_FEAT_TYPE_KMCTR, 28, "KMCTR Encrypted-AES-256"),
-
- FEAT_INIT("kmf-dea", S390_FEAT_TYPE_KMF, 1, "KMF DEA"),
- FEAT_INIT("kmf-tdea-128", S390_FEAT_TYPE_KMF, 2, "KMF TDEA-128"),
- FEAT_INIT("kmf-tdea-192", S390_FEAT_TYPE_KMF, 3, "KMF TDEA-192"),
- FEAT_INIT("kmf-edea", S390_FEAT_TYPE_KMF, 9, "KMF Encrypted-DEA"),
- FEAT_INIT("kmf-etdea-128", S390_FEAT_TYPE_KMF, 10, "KMF Encrypted-TDEA-128"),
- FEAT_INIT("kmf-etdea-192", S390_FEAT_TYPE_KMF, 11, "KMF Encrypted-TDEA-192"),
- FEAT_INIT("kmf-aes-128", S390_FEAT_TYPE_KMF, 18, "KMF AES-128"),
- FEAT_INIT("kmf-aes-192", S390_FEAT_TYPE_KMF, 19, "KMF AES-192"),
- FEAT_INIT("kmf-aes-256", S390_FEAT_TYPE_KMF, 20, "KMF AES-256"),
- FEAT_INIT("kmf-eaes-128", S390_FEAT_TYPE_KMF, 26, "KMF Encrypted-AES-128"),
- FEAT_INIT("kmf-eaes-192", S390_FEAT_TYPE_KMF, 27, "KMF Encrypted-AES-192"),
- FEAT_INIT("kmf-eaes-256", S390_FEAT_TYPE_KMF, 28, "KMF Encrypted-AES-256"),
-
- FEAT_INIT("kmo-dea", S390_FEAT_TYPE_KMO, 1, "KMO DEA"),
- FEAT_INIT("kmo-tdea-128", S390_FEAT_TYPE_KMO, 2, "KMO TDEA-128"),
- FEAT_INIT("kmo-tdea-192", S390_FEAT_TYPE_KMO, 3, "KMO TDEA-192"),
- FEAT_INIT("kmo-edea", S390_FEAT_TYPE_KMO, 9, "KMO Encrypted-DEA"),
- FEAT_INIT("kmo-etdea-128", S390_FEAT_TYPE_KMO, 10, "KMO Encrypted-TDEA-128"),
- FEAT_INIT("kmo-etdea-192", S390_FEAT_TYPE_KMO, 11, "KMO Encrypted-TDEA-192"),
- FEAT_INIT("kmo-aes-128", S390_FEAT_TYPE_KMO, 18, "KMO AES-128"),
- FEAT_INIT("kmo-aes-192", S390_FEAT_TYPE_KMO, 19, "KMO AES-192"),
- FEAT_INIT("kmo-aes-256", S390_FEAT_TYPE_KMO, 20, "KMO AES-256"),
- FEAT_INIT("kmo-eaes-128", S390_FEAT_TYPE_KMO, 26, "KMO Encrypted-AES-128"),
- FEAT_INIT("kmo-eaes-192", S390_FEAT_TYPE_KMO, 27, "KMO Encrypted-AES-192"),
- FEAT_INIT("kmo-eaes-256", S390_FEAT_TYPE_KMO, 28, "KMO Encrypted-AES-256"),
-
- FEAT_INIT("pcc-cmac-dea", S390_FEAT_TYPE_PCC, 1, "PCC Compute-Last-Block-CMAC-Using-DEA"),
- FEAT_INIT("pcc-cmac-tdea-128", S390_FEAT_TYPE_PCC, 2, "PCC Compute-Last-Block-CMAC-Using-TDEA-128"),
- FEAT_INIT("pcc-cmac-tdea-192", S390_FEAT_TYPE_PCC, 3, "PCC Compute-Last-Block-CMAC-Using-TDEA-192"),
- FEAT_INIT("pcc-cmac-edea", S390_FEAT_TYPE_PCC, 9, "PCC Compute-Last-Block-CMAC-Using-Encrypted-DEA"),
- FEAT_INIT("pcc-cmac-etdea-128", S390_FEAT_TYPE_PCC, 10, "PCC Compute-Last-Block-CMAC-Using-Encrypted-TDEA-128"),
- FEAT_INIT("pcc-cmac-etdea-192", S390_FEAT_TYPE_PCC, 11, "PCC Compute-Last-Block-CMAC-Using-EncryptedTDEA-192"),
- FEAT_INIT("pcc-cmac-aes-128", S390_FEAT_TYPE_PCC, 18, "PCC Compute-Last-Block-CMAC-Using-AES-128"),
- FEAT_INIT("pcc-cmac-aes-192", S390_FEAT_TYPE_PCC, 19, "PCC Compute-Last-Block-CMAC-Using-AES-192"),
- FEAT_INIT("pcc-cmac-eaes-256", S390_FEAT_TYPE_PCC, 20, "PCC Compute-Last-Block-CMAC-Using-AES-256"),
- FEAT_INIT("pcc-cmac-eaes-128", S390_FEAT_TYPE_PCC, 26, "PCC Compute-Last-Block-CMAC-Using-Encrypted-AES-128"),
- FEAT_INIT("pcc-cmac-eaes-192", S390_FEAT_TYPE_PCC, 27, "PCC Compute-Last-Block-CMAC-Using-Encrypted-AES-192"),
- FEAT_INIT("pcc-cmac-eaes-256", S390_FEAT_TYPE_PCC, 28, "PCC Compute-Last-Block-CMAC-Using-Encrypted-AES-256"),
- FEAT_INIT("pcc-xts-aes-128", S390_FEAT_TYPE_PCC, 50, "PCC Compute-XTS-Parameter-Using-AES-128"),
- FEAT_INIT("pcc-xts-aes-256", S390_FEAT_TYPE_PCC, 52, "PCC Compute-XTS-Parameter-Using-AES-256"),
- FEAT_INIT("pcc-xts-eaes-128", S390_FEAT_TYPE_PCC, 58, "PCC Compute-XTS-Parameter-Using-Encrypted-AES-128"),
- FEAT_INIT("pcc-xts-eaes-256", S390_FEAT_TYPE_PCC, 60, "PCC Compute-XTS-Parameter-Using-Encrypted-AES-256"),
- FEAT_INIT("pcc-scalar-mult-p256", S390_FEAT_TYPE_PCC, 64, "PCC Scalar-Multiply-P256"),
- FEAT_INIT("pcc-scalar-mult-p384", S390_FEAT_TYPE_PCC, 65, "PCC Scalar-Multiply-P384"),
- FEAT_INIT("pcc-scalar-mult-p521", S390_FEAT_TYPE_PCC, 66, "PCC Scalar-Multiply-P521"),
- FEAT_INIT("pcc-scalar-mult-ed25519", S390_FEAT_TYPE_PCC, 72, "PCC Scalar-Multiply-Ed25519"),
- FEAT_INIT("pcc-scalar-mult-ed448", S390_FEAT_TYPE_PCC, 73, "PCC Scalar-Multiply-Ed448"),
- FEAT_INIT("pcc-scalar-mult-x25519", S390_FEAT_TYPE_PCC, 80, "PCC Scalar-Multiply-X25519"),
- FEAT_INIT("pcc-scalar-mult-x448", S390_FEAT_TYPE_PCC, 81, "PCC Scalar-Multiply-X448"),
-
- FEAT_INIT("ppno-sha-512-drng", S390_FEAT_TYPE_PPNO, 3, "PPNO SHA-512-DRNG"),
- FEAT_INIT("prno-trng-qrtcr", S390_FEAT_TYPE_PPNO, 112, "PRNO TRNG-Query-Raw-to-Conditioned-Ratio"),
- FEAT_INIT("prno-trng", S390_FEAT_TYPE_PPNO, 114, "PRNO TRNG"),
-
- FEAT_INIT("kma-gcm-aes-128", S390_FEAT_TYPE_KMA, 18, "KMA GCM-AES-128"),
- FEAT_INIT("kma-gcm-aes-192", S390_FEAT_TYPE_KMA, 19, "KMA GCM-AES-192"),
- FEAT_INIT("kma-gcm-aes-256", S390_FEAT_TYPE_KMA, 20, "KMA GCM-AES-256"),
- FEAT_INIT("kma-gcm-eaes-128", S390_FEAT_TYPE_KMA, 26, "KMA GCM-Encrypted-AES-128"),
- FEAT_INIT("kma-gcm-eaes-192", S390_FEAT_TYPE_KMA, 27, "KMA GCM-Encrypted-AES-192"),
- FEAT_INIT("kma-gcm-eaes-256", S390_FEAT_TYPE_KMA, 28, "KMA GCM-Encrypted-AES-256"),
-
- FEAT_INIT("kdsa-ecdsa-verify-p256", S390_FEAT_TYPE_KDSA, 1, "KDSA ECDSA-Verify-P256"),
- FEAT_INIT("kdsa-ecdsa-verify-p384", S390_FEAT_TYPE_KDSA, 2, "KDSA ECDSA-Verify-P384"),
- FEAT_INIT("kdsa-ecdsa-verify-p521", S390_FEAT_TYPE_KDSA, 3, "KDSA ECDSA-Verify-P521"),
- FEAT_INIT("kdsa-ecdsa-sign-p256", S390_FEAT_TYPE_KDSA, 9, "KDSA ECDSA-Sign-P256"),
- FEAT_INIT("kdsa-ecdsa-sign-p384", S390_FEAT_TYPE_KDSA, 10, "KDSA ECDSA-Sign-P384"),
- FEAT_INIT("kdsa-ecdsa-sign-p521", S390_FEAT_TYPE_KDSA, 11, "KDSA ECDSA-Sign-P521"),
- FEAT_INIT("kdsa-eecdsa-sign-p256", S390_FEAT_TYPE_KDSA, 17, "KDSA Encrypted-ECDSA-Sign-P256"),
- FEAT_INIT("kdsa-eecdsa-sign-p384", S390_FEAT_TYPE_KDSA, 18, "KDSA Encrypted-ECDSA-Sign-P384"),
- FEAT_INIT("kdsa-eecdsa-sign-p521", S390_FEAT_TYPE_KDSA, 19, "KDSA Encrypted-ECDSA-Sign-P521"),
- FEAT_INIT("kdsa-eddsa-verify-ed25519", S390_FEAT_TYPE_KDSA, 32, "KDSA EdDSA-Verify-Ed25519"),
- FEAT_INIT("kdsa-eddsa-verify-ed448", S390_FEAT_TYPE_KDSA, 36, "KDSA EdDSA-Verify-Ed448"),
- FEAT_INIT("kdsa-eddsa-sign-ed25519", S390_FEAT_TYPE_KDSA, 40, "KDSA EdDSA-Sign-Ed25519"),
- FEAT_INIT("kdsa-eddsa-sign-ed448", S390_FEAT_TYPE_KDSA, 44, "KDSA EdDSA-Sign-Ed448"),
- FEAT_INIT("kdsa-eeddsa-sign-ed25519", S390_FEAT_TYPE_KDSA, 48, "KDSA Encrypted-EdDSA-Sign-Ed25519"),
- FEAT_INIT("kdsa-eeddsa-sign-ed448", S390_FEAT_TYPE_KDSA, 52, "KDSA Encrypted-EdDSA-Sign-Ed448"),
-
- FEAT_INIT("sortl-sflr", S390_FEAT_TYPE_SORTL, 1, "SORTL SFLR"),
- FEAT_INIT("sortl-svlr", S390_FEAT_TYPE_SORTL, 2, "SORTL SVLR"),
- FEAT_INIT("sortl-32", S390_FEAT_TYPE_SORTL, 130, "SORTL 32 input lists"),
- FEAT_INIT("sortl-128", S390_FEAT_TYPE_SORTL, 132, "SORTL 128 input lists"),
- FEAT_INIT("sortl-f0", S390_FEAT_TYPE_SORTL, 192, "SORTL format 0 parameter-block"),
-
- FEAT_INIT("dfltcc-gdht", S390_FEAT_TYPE_DFLTCC, 1, "DFLTCC GDHT"),
- FEAT_INIT("dfltcc-cmpr", S390_FEAT_TYPE_DFLTCC, 2, "DFLTCC CMPR"),
- FEAT_INIT("dfltcc-xpnd", S390_FEAT_TYPE_DFLTCC, 4, "DFLTCC XPND"),
- FEAT_INIT("dfltcc-f0", S390_FEAT_TYPE_DFLTCC, 192, "DFLTCC format 0 parameter-block"),
+#define DEF_FEAT(_FEAT, _NAME, _TYPE, _BIT, _DESC) \
+ [S390_FEAT_##_FEAT] = { \
+ .name = _NAME, \
+ .type = S390_FEAT_TYPE_##_TYPE, \
+ .bit = _BIT, \
+ .desc = _DESC, \
+ },
+static const S390FeatDef s390_features[S390_FEAT_MAX] = {
+ #include "cpu_features_def.inc.h"
};
+#undef DEF_FEAT
const S390FeatDef *s390_feat_def(S390Feat feat)
{
diff --git a/target/s390x/cpu_features_def.h b/target/s390x/cpu_features_def.h
index 292b17b35d..412d356feb 100644
--- a/target/s390x/cpu_features_def.h
+++ b/target/s390x/cpu_features_def.h
@@ -2,9 +2,10 @@
* CPU features/facilities for s390
*
* Copyright IBM Corp. 2016, 2018
+ * Copyright Red Hat, Inc. 2019
*
* Author(s): Michael Mueller <mimu@linux.vnet.ibm.com>
- * David Hildenbrand <dahi@linux.vnet.ibm.com>
+ * David Hildenbrand <david@redhat.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or (at
* your option) any later version. See the COPYING file in the top-level
@@ -14,354 +15,11 @@
#ifndef TARGET_S390X_CPU_FEATURES_DEF_H
#define TARGET_S390X_CPU_FEATURES_DEF_H
+#define DEF_FEAT(_FEAT, ...) S390_FEAT_##_FEAT,
typedef enum {
- /* Stfle */
- S390_FEAT_ESAN3 = 0,
- S390_FEAT_ZARCH,
- S390_FEAT_DAT_ENH,
- S390_FEAT_IDTE_SEGMENT,
- S390_FEAT_IDTE_REGION,
- S390_FEAT_ASN_LX_REUSE,
- S390_FEAT_STFLE,
- S390_FEAT_EDAT,
- S390_FEAT_SENSE_RUNNING_STATUS,
- S390_FEAT_CONDITIONAL_SSKE,
- S390_FEAT_CONFIGURATION_TOPOLOGY,
- S390_FEAT_AP_QUERY_CONFIG_INFO,
- S390_FEAT_IPTE_RANGE,
- S390_FEAT_NONQ_KEY_SETTING,
- S390_FEAT_AP_FACILITIES_TEST,
- S390_FEAT_EXTENDED_TRANSLATION_2,
- S390_FEAT_MSA,
- S390_FEAT_LONG_DISPLACEMENT,
- S390_FEAT_LONG_DISPLACEMENT_FAST,
- S390_FEAT_HFP_MADDSUB,
- S390_FEAT_EXTENDED_IMMEDIATE,
- S390_FEAT_EXTENDED_TRANSLATION_3,
- S390_FEAT_HFP_UNNORMALIZED_EXT,
- S390_FEAT_ETF2_ENH,
- S390_FEAT_STORE_CLOCK_FAST,
- S390_FEAT_PARSING_ENH,
- S390_FEAT_MOVE_WITH_OPTIONAL_SPEC,
- S390_FEAT_TOD_CLOCK_STEERING,
- S390_FEAT_ETF3_ENH,
- S390_FEAT_EXTRACT_CPU_TIME,
- S390_FEAT_COMPARE_AND_SWAP_AND_STORE,
- S390_FEAT_COMPARE_AND_SWAP_AND_STORE_2,
- S390_FEAT_GENERAL_INSTRUCTIONS_EXT,
- S390_FEAT_EXECUTE_EXT,
- S390_FEAT_ENHANCED_MONITOR,
- S390_FEAT_FLOATING_POINT_EXT,
- S390_FEAT_ORDER_PRESERVING_COMPRESSION,
- S390_FEAT_SET_PROGRAM_PARAMETERS,
- S390_FEAT_FLOATING_POINT_SUPPPORT_ENH,
- S390_FEAT_DFP,
- S390_FEAT_DFP_FAST,
- S390_FEAT_PFPO,
- S390_FEAT_STFLE_45,
- S390_FEAT_CMPSC_ENH,
- S390_FEAT_DFP_ZONED_CONVERSION,
- S390_FEAT_STFLE_49,
- S390_FEAT_CONSTRAINT_TRANSACTIONAL_EXE,
- S390_FEAT_LOCAL_TLB_CLEARING,
- S390_FEAT_INTERLOCKED_ACCESS_2,
- S390_FEAT_STFLE_53,
- S390_FEAT_ENTROPY_ENC_COMP,
- S390_FEAT_MSA_EXT_5,
- S390_FEAT_MISC_INSTRUCTION_EXT,
- S390_FEAT_SEMAPHORE_ASSIST,
- S390_FEAT_TIME_SLICE_INSTRUMENTATION,
- S390_FEAT_MISC_INSTRUCTION_EXT3,
- S390_FEAT_RUNTIME_INSTRUMENTATION,
- S390_FEAT_ZPCI,
- S390_FEAT_ADAPTER_EVENT_NOTIFICATION,
- S390_FEAT_ADAPTER_INT_SUPPRESSION,
- S390_FEAT_TRANSACTIONAL_EXE,
- S390_FEAT_STORE_HYPERVISOR_INFO,
- S390_FEAT_ACCESS_EXCEPTION_FS_INDICATION,
- S390_FEAT_MSA_EXT_3,
- S390_FEAT_MSA_EXT_4,
- S390_FEAT_EDAT_2,
- S390_FEAT_DFP_PACKED_CONVERSION,
- S390_FEAT_PPA15,
- S390_FEAT_BPB,
- S390_FEAT_VECTOR,
- S390_FEAT_INSTRUCTION_EXEC_PROT,
- S390_FEAT_SIDE_EFFECT_ACCESS_ESOP2,
- S390_FEAT_GUARDED_STORAGE,
- S390_FEAT_VECTOR_PACKED_DECIMAL,
- S390_FEAT_VECTOR_ENH,
- S390_FEAT_MULTIPLE_EPOCH,
- S390_FEAT_TEST_PENDING_EXT_INTERRUPTION,
- S390_FEAT_INSERT_REFERENCE_BITS_MULT,
- S390_FEAT_MSA_EXT_8,
- S390_FEAT_CMM_NT,
- S390_FEAT_VECTOR_ENH2,
- S390_FEAT_ESORT_BASE,
- S390_FEAT_DEFLATE_BASE,
- S390_FEAT_VECTOR_BCD_ENH,
- S390_FEAT_MSA_EXT_9,
- S390_FEAT_ETOKEN,
-
- /* Sclp Conf Char */
- S390_FEAT_SIE_GSLS,
- S390_FEAT_ESOP,
- S390_FEAT_HPMA2,
- S390_FEAT_SIE_KSS,
-
- /* Sclp Conf Char Ext */
- S390_FEAT_SIE_64BSCAO,
- S390_FEAT_SIE_CMMA,
- S390_FEAT_SIE_PFMFI,
- S390_FEAT_SIE_IBS,
-
- /* Sclp Cpu */
- S390_FEAT_SIE_F2,
- S390_FEAT_SIE_SKEY,
- S390_FEAT_SIE_GPERE,
- S390_FEAT_SIE_SIIF,
- S390_FEAT_SIE_SIGPIF,
- S390_FEAT_SIE_IB,
- S390_FEAT_SIE_CEI,
-
- /* Misc */
- S390_FEAT_DAT_ENH_2,
- S390_FEAT_CMM,
- S390_FEAT_AP,
-
- /* PLO */
- S390_FEAT_PLO_CL,
- S390_FEAT_PLO_CLG,
- S390_FEAT_PLO_CLGR,
- S390_FEAT_PLO_CLX,
- S390_FEAT_PLO_CS,
- S390_FEAT_PLO_CSG,
- S390_FEAT_PLO_CSGR,
- S390_FEAT_PLO_CSX,
- S390_FEAT_PLO_DCS,
- S390_FEAT_PLO_DCSG,
- S390_FEAT_PLO_DCSGR,
- S390_FEAT_PLO_DCSX,
- S390_FEAT_PLO_CSST,
- S390_FEAT_PLO_CSSTG,
- S390_FEAT_PLO_CSSTGR,
- S390_FEAT_PLO_CSSTX,
- S390_FEAT_PLO_CSDST,
- S390_FEAT_PLO_CSDSTG,
- S390_FEAT_PLO_CSDSTGR,
- S390_FEAT_PLO_CSDSTX,
- S390_FEAT_PLO_CSTST,
- S390_FEAT_PLO_CSTSTG,
- S390_FEAT_PLO_CSTSTGR,
- S390_FEAT_PLO_CSTSTX,
-
- /* PTFF */
- S390_FEAT_PTFF_QTO,
- S390_FEAT_PTFF_QSI,
- S390_FEAT_PTFF_QPT,
- S390_FEAT_PTFF_QUI,
- S390_FEAT_PTFF_QTOU,
- S390_FEAT_PTFF_QSIE,
- S390_FEAT_PTFF_QTOUE,
- S390_FEAT_PTFF_STO,
- S390_FEAT_PTFF_STOU,
- S390_FEAT_PTFF_STOE,
- S390_FEAT_PTFF_STOUE,
-
- /* KMAC */
- S390_FEAT_KMAC_DEA,
- S390_FEAT_KMAC_TDEA_128,
- S390_FEAT_KMAC_TDEA_192,
- S390_FEAT_KMAC_EDEA,
- S390_FEAT_KMAC_ETDEA_128,
- S390_FEAT_KMAC_ETDEA_192,
- S390_FEAT_KMAC_AES_128,
- S390_FEAT_KMAC_AES_192,
- S390_FEAT_KMAC_AES_256,
- S390_FEAT_KMAC_EAES_128,
- S390_FEAT_KMAC_EAES_192,
- S390_FEAT_KMAC_EAES_256,
-
- /* KMC */
- S390_FEAT_KMC_DEA,
- S390_FEAT_KMC_TDEA_128,
- S390_FEAT_KMC_TDEA_192,
- S390_FEAT_KMC_EDEA,
- S390_FEAT_KMC_ETDEA_128,
- S390_FEAT_KMC_ETDEA_192,
- S390_FEAT_KMC_AES_128,
- S390_FEAT_KMC_AES_192,
- S390_FEAT_KMC_AES_256,
- S390_FEAT_KMC_EAES_128,
- S390_FEAT_KMC_EAES_192,
- S390_FEAT_KMC_EAES_256,
- S390_FEAT_KMC_PRNG,
-
- /* KM */
- S390_FEAT_KM_DEA,
- S390_FEAT_KM_TDEA_128,
- S390_FEAT_KM_TDEA_192,
- S390_FEAT_KM_EDEA,
- S390_FEAT_KM_ETDEA_128,
- S390_FEAT_KM_ETDEA_192,
- S390_FEAT_KM_AES_128,
- S390_FEAT_KM_AES_192,
- S390_FEAT_KM_AES_256,
- S390_FEAT_KM_EAES_128,
- S390_FEAT_KM_EAES_192,
- S390_FEAT_KM_EAES_256,
- S390_FEAT_KM_XTS_AES_128,
- S390_FEAT_KM_XTS_AES_256,
- S390_FEAT_KM_XTS_EAES_128,
- S390_FEAT_KM_XTS_EAES_256,
-
- /* KIMD */
- S390_FEAT_KIMD_SHA_1,
- S390_FEAT_KIMD_SHA_256,
- S390_FEAT_KIMD_SHA_512,
- S390_FEAT_KIMD_SHA3_224,
- S390_FEAT_KIMD_SHA3_256,
- S390_FEAT_KIMD_SHA3_384,
- S390_FEAT_KIMD_SHA3_512,
- S390_FEAT_KIMD_SHAKE_128,
- S390_FEAT_KIMD_SHAKE_256,
- S390_FEAT_KIMD_GHASH,
-
- /* KLMD */
- S390_FEAT_KLMD_SHA_1,
- S390_FEAT_KLMD_SHA_256,
- S390_FEAT_KLMD_SHA_512,
- S390_FEAT_KLMD_SHA3_224,
- S390_FEAT_KLMD_SHA3_256,
- S390_FEAT_KLMD_SHA3_384,
- S390_FEAT_KLMD_SHA3_512,
- S390_FEAT_KLMD_SHAKE_128,
- S390_FEAT_KLMD_SHAKE_256,
-
- /* PCKMO */
- S390_FEAT_PCKMO_EDEA,
- S390_FEAT_PCKMO_ETDEA_128,
- S390_FEAT_PCKMO_ETDEA_256,
- S390_FEAT_PCKMO_AES_128,
- S390_FEAT_PCKMO_AES_192,
- S390_FEAT_PCKMO_AES_256,
- S390_FEAT_PCKMO_ECC_P256,
- S390_FEAT_PCKMO_ECC_P384,
- S390_FEAT_PCKMO_ECC_P521,
- S390_FEAT_PCKMO_ECC_ED25519,
- S390_FEAT_PCKMO_ECC_ED448,
-
- /* KMCTR */
- S390_FEAT_KMCTR_DEA,
- S390_FEAT_KMCTR_TDEA_128,
- S390_FEAT_KMCTR_TDEA_192,
- S390_FEAT_KMCTR_EDEA,
- S390_FEAT_KMCTR_ETDEA_128,
- S390_FEAT_KMCTR_ETDEA_192,
- S390_FEAT_KMCTR_AES_128,
- S390_FEAT_KMCTR_AES_192,
- S390_FEAT_KMCTR_AES_256,
- S390_FEAT_KMCTR_EAES_128,
- S390_FEAT_KMCTR_EAES_192,
- S390_FEAT_KMCTR_EAES_256,
-
- /* KMF */
- S390_FEAT_KMF_DEA,
- S390_FEAT_KMF_TDEA_128,
- S390_FEAT_KMF_TDEA_192,
- S390_FEAT_KMF_EDEA,
- S390_FEAT_KMF_ETDEA_128,
- S390_FEAT_KMF_ETDEA_192,
- S390_FEAT_KMF_AES_128,
- S390_FEAT_KMF_AES_192,
- S390_FEAT_KMF_AES_256,
- S390_FEAT_KMF_EAES_128,
- S390_FEAT_KMF_EAES_192,
- S390_FEAT_KMF_EAES_256,
-
- /* KMO */
- S390_FEAT_KMO_DEA,
- S390_FEAT_KMO_TDEA_128,
- S390_FEAT_KMO_TDEA_192,
- S390_FEAT_KMO_EDEA,
- S390_FEAT_KMO_ETDEA_128,
- S390_FEAT_KMO_ETDEA_192,
- S390_FEAT_KMO_AES_128,
- S390_FEAT_KMO_AES_192,
- S390_FEAT_KMO_AES_256,
- S390_FEAT_KMO_EAES_128,
- S390_FEAT_KMO_EAES_192,
- S390_FEAT_KMO_EAES_256,
-
- /* PCC */
- S390_FEAT_PCC_CMAC_DEA,
- S390_FEAT_PCC_CMAC_TDEA_128,
- S390_FEAT_PCC_CMAC_TDEA_192,
- S390_FEAT_PCC_CMAC_ETDEA_128,
- S390_FEAT_PCC_CMAC_ETDEA_192,
- S390_FEAT_PCC_CMAC_TDEA,
- S390_FEAT_PCC_CMAC_AES_128,
- S390_FEAT_PCC_CMAC_AES_192,
- S390_FEAT_PCC_CMAC_AES_256,
- S390_FEAT_PCC_CMAC_EAES_128,
- S390_FEAT_PCC_CMAC_EAES_192,
- S390_FEAT_PCC_CMAC_EAES_256,
- S390_FEAT_PCC_XTS_AES_128,
- S390_FEAT_PCC_XTS_AES_256,
- S390_FEAT_PCC_XTS_EAES_128,
- S390_FEAT_PCC_XTS_EAES_256,
- S390_FEAT_PCC_SCALAR_MULT_P256,
- S390_FEAT_PCC_SCALAR_MULT_P384,
- S390_FEAT_PCC_SCALAR_MULT_P512,
- S390_FEAT_PCC_SCALAR_MULT_ED25519,
- S390_FEAT_PCC_SCALAR_MULT_ED448,
- S390_FEAT_PCC_SCALAR_MULT_X25519,
- S390_FEAT_PCC_SCALAR_MULT_X448,
-
- /* PPNO/PRNO */
- S390_FEAT_PPNO_SHA_512_DRNG,
- S390_FEAT_PRNO_TRNG_QRTCR,
- S390_FEAT_PRNO_TRNG,
-
- /* KMA */
- S390_FEAT_KMA_GCM_AES_128,
- S390_FEAT_KMA_GCM_AES_192,
- S390_FEAT_KMA_GCM_AES_256 ,
- S390_FEAT_KMA_GCM_EAES_128,
- S390_FEAT_KMA_GCM_EAES_192,
- S390_FEAT_KMA_GCM_EAES_256,
-
- /* KDSA */
- S390_FEAT_ECDSA_VERIFY_P256,
- S390_FEAT_ECDSA_VERIFY_P384,
- S390_FEAT_ECDSA_VERIFY_P512,
- S390_FEAT_ECDSA_SIGN_P256,
- S390_FEAT_ECDSA_SIGN_P384,
- S390_FEAT_ECDSA_SIGN_P512,
- S390_FEAT_EECDSA_SIGN_P256,
- S390_FEAT_EECDSA_SIGN_P384,
- S390_FEAT_EECDSA_SIGN_P512,
- S390_FEAT_EDDSA_VERIFY_ED25519,
- S390_FEAT_EDDSA_VERIFY_ED448,
- S390_FEAT_EDDSA_SIGN_ED25519,
- S390_FEAT_EDDSA_SIGN_ED448,
- S390_FEAT_EEDDSA_SIGN_ED25519,
- S390_FEAT_EEDDSA_SIGN_ED448,
-
- /* SORTL */
- S390_FEAT_SORTL_SFLR,
- S390_FEAT_SORTL_SVLR,
- S390_FEAT_SORTL_32,
- S390_FEAT_SORTL_128,
- S390_FEAT_SORTL_F0,
-
- /* DEFLATE */
- S390_FEAT_DEFLATE_GHDT,
- S390_FEAT_DEFLATE_CMPR,
- S390_FEAT_DEFLATE_XPND,
- S390_FEAT_DEFLATE_F0,
-
+ #include "cpu_features_def.inc.h"
S390_FEAT_MAX,
} S390Feat;
+#undef DEF_FEAT
#endif /* TARGET_S390X_CPU_FEATURES_DEF_H */
diff --git a/target/s390x/cpu_features_def.inc.h b/target/s390x/cpu_features_def.inc.h
new file mode 100644
index 0000000000..c20c780f2e
--- /dev/null
+++ b/target/s390x/cpu_features_def.inc.h
@@ -0,0 +1,369 @@
+/*
+ * RAW s390x CPU feature definitions:
+ *
+ * DEF_FEAT(_FEAT, _NAME, _TYPE, _BIT, _DESC):
+ * - _FEAT: Feature (enum) name used internally (S390_FEAT_##_FEAT)
+ * - _NAME: Feature name exposed to the user.
+ * - _TYPE: Feature type (S390_FEAT_TYPE_##_TYPE).
+ * - _BIT: Feature bit number within feature type block (unused for MISC).
+ * - _DESC: Feature description, exposed to the user.
+ *
+ * Copyright IBM Corp. 2016, 2018
+ * Copyright Red Hat, Inc. 2019
+ *
+ * Author(s): Michael Mueller <mimu@linux.vnet.ibm.com>
+ * David Hildenbrand <david@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+
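+/*
+ * Illustrative sketch only (not part of this patch): any file including
+ * this header is expected to define DEF_FEAT() itself beforehand.
+ * cpu_features_def.h, for instance, turns each entry into an S390Feat
+ * enum value roughly like this:
+ *
+ *   #define DEF_FEAT(_FEAT, _NAME, _TYPE, _BIT, _DESC) S390_FEAT_##_FEAT,
+ *   typedef enum {
+ *       #include "cpu_features_def.inc.h"
+ *       S390_FEAT_MAX,
+ *   } S390Feat;
+ *   #undef DEF_FEAT
+ */
+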
+/* Features exposed via the STFL(E) instruction. */
+DEF_FEAT(ESAN3, "esan3", STFL, 0, "Instructions marked as n3")
+DEF_FEAT(ZARCH, "zarch", STFL, 1, "z/Architecture architectural mode")
+DEF_FEAT(DAT_ENH, "dateh", STFL, 3, "DAT-enhancement facility")
+DEF_FEAT(IDTE_SEGMENT, "idtes", STFL, 4, "IDTE selective TLB segment-table clearing")
+DEF_FEAT(IDTE_REGION, "idter", STFL, 5, "IDTE selective TLB region-table clearing")
+DEF_FEAT(ASN_LX_REUSE, "asnlxr", STFL, 6, "ASN-and-LX reuse facility")
+DEF_FEAT(STFLE, "stfle", STFL, 7, "Store-facility-list-extended facility")
+DEF_FEAT(EDAT, "edat", STFL, 8, "Enhanced-DAT facility")
+DEF_FEAT(SENSE_RUNNING_STATUS, "srs", STFL, 9, "Sense-running-status facility")
+DEF_FEAT(CONDITIONAL_SSKE, "csske", STFL, 10, "Conditional-SSKE facility")
+DEF_FEAT(CONFIGURATION_TOPOLOGY, "ctop", STFL, 11, "Configuration-topology facility")
+DEF_FEAT(AP_QUERY_CONFIG_INFO, "apqci", STFL, 12, "Query AP Configuration Information facility")
+DEF_FEAT(IPTE_RANGE, "ipter", STFL, 13, "IPTE-range facility")
+DEF_FEAT(NONQ_KEY_SETTING, "nonqks", STFL, 14, "Nonquiescing key-setting facility")
+DEF_FEAT(AP_FACILITIES_TEST, "apft", STFL, 15, "AP Facilities Test facility")
+DEF_FEAT(EXTENDED_TRANSLATION_2, "etf2", STFL, 16, "Extended-translation facility 2")
+DEF_FEAT(MSA, "msa-base", STFL, 17, "Message-security-assist facility (excluding subfunctions)")
+DEF_FEAT(LONG_DISPLACEMENT, "ldisp", STFL, 18, "Long-displacement facility")
+DEF_FEAT(LONG_DISPLACEMENT_FAST, "ldisphp", STFL, 19, "Long-displacement facility has high performance")
+DEF_FEAT(HFP_MADDSUB, "hfpm", STFL, 20, "HFP-multiply-add/subtract facility")
+DEF_FEAT(EXTENDED_IMMEDIATE, "eimm", STFL, 21, "Extended-immediate facility")
+DEF_FEAT(EXTENDED_TRANSLATION_3, "etf3", STFL, 22, "Extended-translation facility 3")
+DEF_FEAT(HFP_UNNORMALIZED_EXT, "hfpue", STFL, 23, "HFP-unnormalized-extension facility")
+DEF_FEAT(ETF2_ENH, "etf2eh", STFL, 24, "ETF2-enhancement facility")
+DEF_FEAT(STORE_CLOCK_FAST, "stckf", STFL, 25, "Store-clock-fast facility")
+DEF_FEAT(PARSING_ENH, "parseh", STFL, 26, "Parsing-enhancement facility")
+DEF_FEAT(MOVE_WITH_OPTIONAL_SPEC, "mvcos", STFL, 27, "Move-with-optional-specification facility")
+DEF_FEAT(TOD_CLOCK_STEERING, "tods-base", STFL, 28, "TOD-clock-steering facility (excluding subfunctions)")
+DEF_FEAT(ETF3_ENH, "etf3eh", STFL, 30, "ETF3-enhancement facility")
+DEF_FEAT(EXTRACT_CPU_TIME, "ectg", STFL, 31, "Extract-CPU-time facility")
+DEF_FEAT(COMPARE_AND_SWAP_AND_STORE, "csst", STFL, 32, "Compare-and-swap-and-store facility")
+DEF_FEAT(COMPARE_AND_SWAP_AND_STORE_2, "csst2", STFL, 33, "Compare-and-swap-and-store facility 2")
+DEF_FEAT(GENERAL_INSTRUCTIONS_EXT, "ginste", STFL, 34, "General-instructions-extension facility")
+DEF_FEAT(EXECUTE_EXT, "exrl", STFL, 35, "Execute-extensions facility")
+DEF_FEAT(ENHANCED_MONITOR, "emon", STFL, 36, "Enhanced-monitor facility")
+DEF_FEAT(FLOATING_POINT_EXT, "fpe", STFL, 37, "Floating-point extension facility")
+DEF_FEAT(ORDER_PRESERVING_COMPRESSION, "opc", STFL, 38, "Order Preserving Compression facility")
+DEF_FEAT(SET_PROGRAM_PARAMETERS, "sprogp", STFL, 40, "Set-program-parameters facility")
+DEF_FEAT(FLOATING_POINT_SUPPPORT_ENH, "fpseh", STFL, 41, "Floating-point-support-enhancement facilities")
+DEF_FEAT(DFP, "dfp", STFL, 42, "DFP (decimal-floating-point) facility")
+DEF_FEAT(DFP_FAST, "dfphp", STFL, 43, "DFP (decimal-floating-point) facility has high performance")
+DEF_FEAT(PFPO, "pfpo", STFL, 44, "PFPO instruction")
+DEF_FEAT(STFLE_45, "stfle45", STFL, 45, "Various facilities introduced with z196")
+DEF_FEAT(CMPSC_ENH, "cmpsceh", STFL, 47, "CMPSC-enhancement facility")
+DEF_FEAT(DFP_ZONED_CONVERSION, "dfpzc", STFL, 48, "Decimal-floating-point zoned-conversion facility")
+DEF_FEAT(STFLE_49, "stfle49", STFL, 49, "Various facilities introduced with zEC12")
+DEF_FEAT(CONSTRAINT_TRANSACTIONAL_EXE, "cte", STFL, 50, "Constrained transactional-execution facility")
+DEF_FEAT(LOCAL_TLB_CLEARING, "ltlbc", STFL, 51, "Local-TLB-clearing facility")
+DEF_FEAT(INTERLOCKED_ACCESS_2, "iacc2", STFL, 52, "Interlocked-access facility 2")
+DEF_FEAT(STFLE_53, "stfle53", STFL, 53, "Various facilities introduced with z13")
+DEF_FEAT(ENTROPY_ENC_COMP, "eec", STFL, 54, "Entropy encoding compression facility")
+DEF_FEAT(MSA_EXT_5, "msa5-base", STFL, 57, "Message-security-assist-extension-5 facility (excluding subfunctions)")
+DEF_FEAT(MISC_INSTRUCTION_EXT, "minste2", STFL, 58, "Miscellaneous-instruction-extensions facility 2")
+DEF_FEAT(SEMAPHORE_ASSIST, "sema", STFL, 59, "Semaphore-assist facility")
+DEF_FEAT(TIME_SLICE_INSTRUMENTATION, "tsi", STFL, 60, "Time-slice Instrumentation facility")
+DEF_FEAT(MISC_INSTRUCTION_EXT3, "minste3", STFL, 61, "Miscellaneous-Instruction-Extensions Facility 3")
+DEF_FEAT(RUNTIME_INSTRUMENTATION, "ri", STFL, 64, "CPU runtime-instrumentation facility")
+DEF_FEAT(ZPCI, "zpci", STFL, 69, "z/PCI facility")
+DEF_FEAT(ADAPTER_EVENT_NOTIFICATION, "aen", STFL, 71, "General-purpose-adapter-event-notification facility")
+DEF_FEAT(ADAPTER_INT_SUPPRESSION, "ais", STFL, 72, "General-purpose-adapter-interruption-suppression facility")
+DEF_FEAT(TRANSACTIONAL_EXE, "te", STFL, 73, "Transactional-execution facility")
+DEF_FEAT(STORE_HYPERVISOR_INFO, "sthyi", STFL, 74, "Store-hypervisor-information facility")
+DEF_FEAT(ACCESS_EXCEPTION_FS_INDICATION, "aefsi", STFL, 75, "Access-exception-fetch/store-indication facility")
+DEF_FEAT(MSA_EXT_3, "msa3-base", STFL, 76, "Message-security-assist-extension-3 facility (excluding subfunctions)")
+DEF_FEAT(MSA_EXT_4, "msa4-base", STFL, 77, "Message-security-assist-extension-4 facility (excluding subfunctions)")
+DEF_FEAT(EDAT_2, "edat2", STFL, 78, "Enhanced-DAT facility 2")
+DEF_FEAT(DFP_PACKED_CONVERSION, "dfppc", STFL, 80, "Decimal-floating-point packed-conversion facility")
+DEF_FEAT(PPA15, "ppa15", STFL, 81, "PPA15 is installed")
+DEF_FEAT(BPB, "bpb", STFL, 82, "Branch prediction blocking")
+DEF_FEAT(VECTOR, "vx", STFL, 129, "Vector facility")
+DEF_FEAT(INSTRUCTION_EXEC_PROT, "iep", STFL, 130, "Instruction-execution-protection facility")
+DEF_FEAT(SIDE_EFFECT_ACCESS_ESOP2, "sea_esop2", STFL, 131, "Side-effect-access facility and Enhanced-suppression-on-protection facility 2")
+DEF_FEAT(GUARDED_STORAGE, "gs", STFL, 133, "Guarded-storage facility")
+DEF_FEAT(VECTOR_PACKED_DECIMAL, "vxpd", STFL, 134, "Vector packed decimal facility")
+DEF_FEAT(VECTOR_ENH, "vxeh", STFL, 135, "Vector enhancements facility")
+DEF_FEAT(MULTIPLE_EPOCH, "mepoch", STFL, 139, "Multiple-epoch facility")
+DEF_FEAT(TEST_PENDING_EXT_INTERRUPTION, "tpei", STFL, 144, "Test-pending-external-interruption facility")
+DEF_FEAT(INSERT_REFERENCE_BITS_MULT, "irbm", STFL, 145, "Insert-reference-bits-multiple facility")
+DEF_FEAT(MSA_EXT_8, "msa8-base", STFL, 146, "Message-security-assist-extension-8 facility (excluding subfunctions)")
+DEF_FEAT(CMM_NT, "cmmnt", STFL, 147, "CMM: ESSA-enhancement (no translate) facility")
+DEF_FEAT(VECTOR_ENH2, "vxeh2", STFL, 148, "Vector Enhancements facility 2")
+DEF_FEAT(ESORT_BASE, "esort-base", STFL, 150, "Enhanced-sort facility (excluding subfunctions)")
+DEF_FEAT(DEFLATE_BASE, "deflate-base", STFL, 151, "Deflate-conversion facility (excluding subfunctions)")
+DEF_FEAT(VECTOR_BCD_ENH, "vxbeh", STFL, 152, "Vector BCD enhancements facility 1")
+DEF_FEAT(MSA_EXT_9, "msa9-base", STFL, 155, "Message-security-assist-extension-9 facility (excluding subfunctions)")
+DEF_FEAT(ETOKEN, "etoken", STFL, 156, "Etoken facility")
+
+/* Features exposed via SCLP SCCB Byte 80 - 98 (bit numbers relative to byte-80) */
+DEF_FEAT(SIE_GSLS, "gsls", SCLP_CONF_CHAR, 40, "SIE: Guest-storage-limit-suppression facility")
+DEF_FEAT(ESOP, "esop", SCLP_CONF_CHAR, 46, "Enhanced-suppression-on-protection facility")
+DEF_FEAT(HPMA2, "hpma2", SCLP_CONF_CHAR, 90, "Host page management assist 2 Facility") /* 91-2 */
+DEF_FEAT(SIE_KSS, "kss", SCLP_CONF_CHAR, 151, "SIE: Keyless-subset facility") /* 98-7 */
+
+/* Features exposed via SCLP SCCB Byte 116 - 119 (bit numbers relative to byte-116) */
+DEF_FEAT(SIE_64BSCAO, "64bscao", SCLP_CONF_CHAR_EXT, 0, "SIE: 64-bit-SCAO facility")
+DEF_FEAT(SIE_CMMA, "cmma", SCLP_CONF_CHAR_EXT, 1, "SIE: Collaborative-memory-management assist")
+DEF_FEAT(SIE_PFMFI, "pfmfi", SCLP_CONF_CHAR_EXT, 9, "SIE: PFMF interpretation facility")
+DEF_FEAT(SIE_IBS, "ibs", SCLP_CONF_CHAR_EXT, 10, "SIE: Interlock-and-broadcast-suppression facility")
+
+/* Features exposed via SCLP CPU info. */
+DEF_FEAT(SIE_F2, "sief2", SCLP_CPU, 4, "SIE: interception format 2 (Virtual SIE)")
+DEF_FEAT(SIE_SKEY, "skey", SCLP_CPU, 5, "SIE: Storage-key facility")
+DEF_FEAT(SIE_GPERE, "gpereh", SCLP_CPU, 10, "SIE: Guest-PER enhancement facility")
+DEF_FEAT(SIE_SIIF, "siif", SCLP_CPU, 11, "SIE: Shared IPTE-interlock facility")
+DEF_FEAT(SIE_SIGPIF, "sigpif", SCLP_CPU, 12, "SIE: SIGP interpretation facility")
+DEF_FEAT(SIE_IB, "ib", SCLP_CPU, 42, "SIE: Intervention bypass facility")
+DEF_FEAT(SIE_CEI, "cei", SCLP_CPU, 43, "SIE: Conditional-external-interception facility")
+
+/*
+ * Features exposed via no feature bit (detected by other means, e.g.
+ * instruction sensing) -> the feature bit number is irrelevant
+ */
+DEF_FEAT(DAT_ENH_2, "dateh2", MISC, 0, "DAT-enhancement facility 2")
+DEF_FEAT(CMM, "cmm", MISC, 0, "Collaborative-memory-management facility")
+DEF_FEAT(AP, "ap", MISC, 0, "AP instructions installed")
+
+/* Features exposed via the PLO instruction. */
+DEF_FEAT(PLO_CL, "plo-cl", PLO, 0, "PLO Compare and load (32 bit in general registers)")
+DEF_FEAT(PLO_CLG, "plo-clg", PLO, 1, "PLO Compare and load (64 bit in parameter list)")
+DEF_FEAT(PLO_CLGR, "plo-clgr", PLO, 2, "PLO Compare and load (32 bit in general registers)")
+DEF_FEAT(PLO_CLX, "plo-clx", PLO, 3, "PLO Compare and load (128 bit in parameter list)")
+DEF_FEAT(PLO_CS, "plo-cs", PLO, 4, "PLO Compare and swap (32 bit in general registers)")
+DEF_FEAT(PLO_CSG, "plo-csg", PLO, 5, "PLO Compare and swap (64 bit in parameter list)")
+DEF_FEAT(PLO_CSGR, "plo-csgr", PLO, 6, "PLO Compare and swap (32 bit in general registers)")
+DEF_FEAT(PLO_CSX, "plo-csx", PLO, 7, "PLO Compare and swap (128 bit in parameter list)")
+DEF_FEAT(PLO_DCS, "plo-dcs", PLO, 8, "PLO Double compare and swap (32 bit in general registers)")
+DEF_FEAT(PLO_DCSG, "plo-dcsg", PLO, 9, "PLO Double compare and swap (64 bit in parameter list)")
+DEF_FEAT(PLO_DCSGR, "plo-dcsgr", PLO, 10, "PLO Double compare and swap (32 bit in general registers)")
+DEF_FEAT(PLO_DCSX, "plo-dcsx", PLO, 11, "PLO Double compare and swap (128 bit in parameter list)")
+DEF_FEAT(PLO_CSST, "plo-csst", PLO, 12, "PLO Compare and swap and store (32 bit in general registers)")
+DEF_FEAT(PLO_CSSTG, "plo-csstg", PLO, 13, "PLO Compare and swap and store (64 bit in parameter list)")
+DEF_FEAT(PLO_CSSTGR, "plo-csstgr", PLO, 14, "PLO Compare and swap and store (32 bit in general registers)")
+DEF_FEAT(PLO_CSSTX, "plo-csstx", PLO, 15, "PLO Compare and swap and store (128 bit in parameter list)")
+DEF_FEAT(PLO_CSDST, "plo-csdst", PLO, 16, "PLO Compare and swap and double store (32 bit in general registers)")
+DEF_FEAT(PLO_CSDSTG, "plo-csdstg", PLO, 17, "PLO Compare and swap and double store (64 bit in parameter list)")
+DEF_FEAT(PLO_CSDSTGR, "plo-csdstgr", PLO, 18, "PLO Compare and swap and double store (32 bit in general registers)")
+DEF_FEAT(PLO_CSDSTX, "plo-csdstx", PLO, 19, "PLO Compare and swap and double store (128 bit in parameter list)")
+DEF_FEAT(PLO_CSTST, "plo-cstst", PLO, 20, "PLO Compare and swap and triple store (32 bit in general registers)")
+DEF_FEAT(PLO_CSTSTG, "plo-cststg", PLO, 21, "PLO Compare and swap and triple store (64 bit in parameter list)")
+DEF_FEAT(PLO_CSTSTGR, "plo-cststgr", PLO, 22, "PLO Compare and swap and triple store (32 bit in general registers)")
+DEF_FEAT(PLO_CSTSTX, "plo-cststx", PLO, 23, "PLO Compare and swap and triple store (128 bit in parameter list)")
+
+/* Features exposed via the PTFF instruction. */
+DEF_FEAT(PTFF_QTO, "ptff-qto", PTFF, 1, "PTFF Query TOD Offset")
+DEF_FEAT(PTFF_QSI, "ptff-qsi", PTFF, 2, "PTFF Query Steering Information")
+DEF_FEAT(PTFF_QPT, "ptff-qpc", PTFF, 3, "PTFF Query Physical Clock")
+DEF_FEAT(PTFF_QUI, "ptff-qui", PTFF, 4, "PTFF Query UTC Information")
+DEF_FEAT(PTFF_QTOU, "ptff-qtou", PTFF, 5, "PTFF Query TOD Offset User")
+DEF_FEAT(PTFF_QSIE, "ptff-qsie", PTFF, 10, "PTFF Query Steering Information Extended")
+DEF_FEAT(PTFF_QTOUE, "ptff-qtoue", PTFF, 13, "PTFF Query TOD Offset User Extended")
+DEF_FEAT(PTFF_STO, "ptff-sto", PTFF, 65, "PTFF Set TOD Offset")
+DEF_FEAT(PTFF_STOU, "ptff-stou", PTFF, 69, "PTFF Set TOD Offset User")
+DEF_FEAT(PTFF_STOE, "ptff-stoe", PTFF, 73, "PTFF Set TOD Offset Extended")
+DEF_FEAT(PTFF_STOUE, "ptff-stoue", PTFF, 77, "PTFF Set TOD Offset User Extended")
+
+/* Features exposed via the KMAC instruction. */
+DEF_FEAT(KMAC_DEA, "kmac-dea", KMAC, 1, "KMAC DEA")
+DEF_FEAT(KMAC_TDEA_128, "kmac-tdea-128", KMAC, 2, "KMAC TDEA-128")
+DEF_FEAT(KMAC_TDEA_192, "kmac-tdea-192", KMAC, 3, "KMAC TDEA-192")
+DEF_FEAT(KMAC_EDEA, "kmac-edea", KMAC, 9, "KMAC Encrypted-DEA")
+DEF_FEAT(KMAC_ETDEA_128, "kmac-etdea-128", KMAC, 10, "KMAC Encrypted-TDEA-128")
+DEF_FEAT(KMAC_ETDEA_192, "kmac-etdea-192", KMAC, 11, "KMAC Encrypted-TDEA-192")
+DEF_FEAT(KMAC_AES_128, "kmac-aes-128", KMAC, 18, "KMAC AES-128")
+DEF_FEAT(KMAC_AES_192, "kmac-aes-192", KMAC, 19, "KMAC AES-192")
+DEF_FEAT(KMAC_AES_256, "kmac-aes-256", KMAC, 20, "KMAC AES-256")
+DEF_FEAT(KMAC_EAES_128, "kmac-eaes-128", KMAC, 26, "KMAC Encrypted-AES-128")
+DEF_FEAT(KMAC_EAES_192, "kmac-eaes-192", KMAC, 27, "KMAC Encrypted-AES-192")
+DEF_FEAT(KMAC_EAES_256, "kmac-eaes-256", KMAC, 28, "KMAC Encrypted-AES-256")
+
+/* Features exposed via the KMC instruction. */
+DEF_FEAT(KMC_DEA, "kmc-dea", KMC, 1, "KMC DEA")
+DEF_FEAT(KMC_TDEA_128, "kmc-tdea-128", KMC, 2, "KMC TDEA-128")
+DEF_FEAT(KMC_TDEA_192, "kmc-tdea-192", KMC, 3, "KMC TDEA-192")
+DEF_FEAT(KMC_EDEA, "kmc-edea", KMC, 9, "KMC Encrypted-DEA")
+DEF_FEAT(KMC_ETDEA_128, "kmc-etdea-128", KMC, 10, "KMC Encrypted-TDEA-128")
+DEF_FEAT(KMC_ETDEA_192, "kmc-etdea-192", KMC, 11, "KMC Encrypted-TDEA-192")
+DEF_FEAT(KMC_AES_128, "kmc-aes-128", KMC, 18, "KMC AES-128")
+DEF_FEAT(KMC_AES_192, "kmc-aes-192", KMC, 19, "KMC AES-192")
+DEF_FEAT(KMC_AES_256, "kmc-aes-256", KMC, 20, "KMC AES-256")
+DEF_FEAT(KMC_EAES_128, "kmc-eaes-128", KMC, 26, "KMC Encrypted-AES-128")
+DEF_FEAT(KMC_EAES_192, "kmc-eaes-192", KMC, 27, "KMC Encrypted-AES-192")
+DEF_FEAT(KMC_EAES_256, "kmc-eaes-256", KMC, 28, "KMC Encrypted-AES-256")
+DEF_FEAT(KMC_PRNG, "kmc-prng", KMC, 67, "KMC PRNG")
+
+/* Features exposed via the KM instruction. */
+DEF_FEAT(KM_DEA, "km-dea", KM, 1, "KM DEA")
+DEF_FEAT(KM_TDEA_128, "km-tdea-128", KM, 2, "KM TDEA-128")
+DEF_FEAT(KM_TDEA_192, "km-tdea-192", KM, 3, "KM TDEA-192")
+DEF_FEAT(KM_EDEA, "km-edea", KM, 9, "KM Encrypted-DEA")
+DEF_FEAT(KM_ETDEA_128, "km-etdea-128", KM, 10, "KM Encrypted-TDEA-128")
+DEF_FEAT(KM_ETDEA_192, "km-etdea-192", KM, 11, "KM Encrypted-TDEA-192")
+DEF_FEAT(KM_AES_128, "km-aes-128", KM, 18, "KM AES-128")
+DEF_FEAT(KM_AES_192, "km-aes-192", KM, 19, "KM AES-192")
+DEF_FEAT(KM_AES_256, "km-aes-256", KM, 20, "KM AES-256")
+DEF_FEAT(KM_EAES_128, "km-eaes-128", KM, 26, "KM Encrypted-AES-128")
+DEF_FEAT(KM_EAES_192, "km-eaes-192", KM, 27, "KM Encrypted-AES-192")
+DEF_FEAT(KM_EAES_256, "km-eaes-256", KM, 28, "KM Encrypted-AES-256")
+DEF_FEAT(KM_XTS_AES_128, "km-xts-aes-128", KM, 50, "KM XTS-AES-128")
+DEF_FEAT(KM_XTS_AES_256, "km-xts-aes-256", KM, 52, "KM XTS-AES-256")
+DEF_FEAT(KM_XTS_EAES_128, "km-xts-eaes-128", KM, 58, "KM XTS-Encrypted-AES-128")
+DEF_FEAT(KM_XTS_EAES_256, "km-xts-eaes-256", KM, 60, "KM XTS-Encrypted-AES-256")
+
+/* Features exposed via the KIMD instruction. */
+DEF_FEAT(KIMD_SHA_1, "kimd-sha-1", KIMD, 1, "KIMD SHA-1")
+DEF_FEAT(KIMD_SHA_256, "kimd-sha-256", KIMD, 2, "KIMD SHA-256")
+DEF_FEAT(KIMD_SHA_512, "kimd-sha-512", KIMD, 3, "KIMD SHA-512")
+DEF_FEAT(KIMD_SHA3_224, "kimd-sha3-224", KIMD, 32, "KIMD SHA3-224")
+DEF_FEAT(KIMD_SHA3_256, "kimd-sha3-256", KIMD, 33, "KIMD SHA3-256")
+DEF_FEAT(KIMD_SHA3_384, "kimd-sha3-384", KIMD, 34, "KIMD SHA3-384")
+DEF_FEAT(KIMD_SHA3_512, "kimd-sha3-512", KIMD, 35, "KIMD SHA3-512")
+DEF_FEAT(KIMD_SHAKE_128, "kimd-shake-128", KIMD, 36, "KIMD SHAKE-128")
+DEF_FEAT(KIMD_SHAKE_256, "kimd-shake-256", KIMD, 37, "KIMD SHAKE-256")
+DEF_FEAT(KIMD_GHASH, "kimd-ghash", KIMD, 65, "KIMD GHASH")
+
+/* Features exposed via the KLMD instruction. */
+DEF_FEAT(KLMD_SHA_1, "klmd-sha-1", KLMD, 1, "KLMD SHA-1")
+DEF_FEAT(KLMD_SHA_256, "klmd-sha-256", KLMD, 2, "KLMD SHA-256")
+DEF_FEAT(KLMD_SHA_512, "klmd-sha-512", KLMD, 3, "KLMD SHA-512")
+DEF_FEAT(KLMD_SHA3_224, "klmd-sha3-224", KLMD, 32, "KLMD SHA3-224")
+DEF_FEAT(KLMD_SHA3_256, "klmd-sha3-256", KLMD, 33, "KLMD SHA3-256")
+DEF_FEAT(KLMD_SHA3_384, "klmd-sha3-384", KLMD, 34, "KLMD SHA3-384")
+DEF_FEAT(KLMD_SHA3_512, "klmd-sha3-512", KLMD, 35, "KLMD SHA3-512")
+DEF_FEAT(KLMD_SHAKE_128, "klmd-shake-128", KLMD, 36, "KLMD SHAKE-128")
+DEF_FEAT(KLMD_SHAKE_256, "klmd-shake-256", KLMD, 37, "KLMD SHAKE-256")
+
+/* Features exposed via the PCKMO instruction. */
+DEF_FEAT(PCKMO_EDEA, "pckmo-edea", PCKMO, 1, "PCKMO Encrypted-DEA-Key")
+DEF_FEAT(PCKMO_ETDEA_128, "pckmo-etdea-128", PCKMO, 2, "PCKMO Encrypted-TDEA-128-Key")
+DEF_FEAT(PCKMO_ETDEA_256, "pckmo-etdea-192", PCKMO, 3, "PCKMO Encrypted-TDEA-192-Key")
+DEF_FEAT(PCKMO_AES_128, "pckmo-aes-128", PCKMO, 18, "PCKMO Encrypted-AES-128-Key")
+DEF_FEAT(PCKMO_AES_192, "pckmo-aes-192", PCKMO, 19, "PCKMO Encrypted-AES-192-Key")
+DEF_FEAT(PCKMO_AES_256, "pckmo-aes-256", PCKMO, 20, "PCKMO Encrypted-AES-256-Key")
+DEF_FEAT(PCKMO_ECC_P256, "pckmo-ecc-p256", PCKMO, 32, "PCKMO Encrypt-ECC-P256-Key")
+DEF_FEAT(PCKMO_ECC_P384, "pckmo-ecc-p384", PCKMO, 33, "PCKMO Encrypt-ECC-P384-Key")
+DEF_FEAT(PCKMO_ECC_P521, "pckmo-ecc-p521", PCKMO, 34, "PCKMO Encrypt-ECC-P521-Key")
+DEF_FEAT(PCKMO_ECC_ED25519, "pckmo-ecc-ed25519", PCKMO, 40, "PCKMO Encrypt-ECC-Ed25519-Key")
+DEF_FEAT(PCKMO_ECC_ED448, "pckmo-ecc-ed448", PCKMO, 41, "PCKMO Encrypt-ECC-Ed448-Key")
+
+/* Features exposed via the KMCTR instruction. */
+DEF_FEAT(KMCTR_DEA, "kmctr-dea", KMCTR, 1, "KMCTR DEA")
+DEF_FEAT(KMCTR_TDEA_128, "kmctr-tdea-128", KMCTR, 2, "KMCTR TDEA-128")
+DEF_FEAT(KMCTR_TDEA_192, "kmctr-tdea-192", KMCTR, 3, "KMCTR TDEA-192")
+DEF_FEAT(KMCTR_EDEA, "kmctr-edea", KMCTR, 9, "KMCTR Encrypted-DEA")
+DEF_FEAT(KMCTR_ETDEA_128, "kmctr-etdea-128", KMCTR, 10, "KMCTR Encrypted-TDEA-128")
+DEF_FEAT(KMCTR_ETDEA_192, "kmctr-etdea-192", KMCTR, 11, "KMCTR Encrypted-TDEA-192")
+DEF_FEAT(KMCTR_AES_128, "kmctr-aes-128", KMCTR, 18, "KMCTR AES-128")
+DEF_FEAT(KMCTR_AES_192, "kmctr-aes-192", KMCTR, 19, "KMCTR AES-192")
+DEF_FEAT(KMCTR_AES_256, "kmctr-aes-256", KMCTR, 20, "KMCTR AES-256")
+DEF_FEAT(KMCTR_EAES_128, "kmctr-eaes-128", KMCTR, 26, "KMCTR Encrypted-AES-128")
+DEF_FEAT(KMCTR_EAES_192, "kmctr-eaes-192", KMCTR, 27, "KMCTR Encrypted-AES-192")
+DEF_FEAT(KMCTR_EAES_256, "kmctr-eaes-256", KMCTR, 28, "KMCTR Encrypted-AES-256")
+
+/* Features exposed via the KMF instruction. */
+DEF_FEAT(KMF_DEA, "kmf-dea", KMF, 1, "KMF DEA")
+DEF_FEAT(KMF_TDEA_128, "kmf-tdea-128", KMF, 2, "KMF TDEA-128")
+DEF_FEAT(KMF_TDEA_192, "kmf-tdea-192", KMF, 3, "KMF TDEA-192")
+DEF_FEAT(KMF_EDEA, "kmf-edea", KMF, 9, "KMF Encrypted-DEA")
+DEF_FEAT(KMF_ETDEA_128, "kmf-etdea-128", KMF, 10, "KMF Encrypted-TDEA-128")
+DEF_FEAT(KMF_ETDEA_192, "kmf-etdea-192", KMF, 11, "KMF Encrypted-TDEA-192")
+DEF_FEAT(KMF_AES_128, "kmf-aes-128", KMF, 18, "KMF AES-128")
+DEF_FEAT(KMF_AES_192, "kmf-aes-192", KMF, 19, "KMF AES-192")
+DEF_FEAT(KMF_AES_256, "kmf-aes-256", KMF, 20, "KMF AES-256")
+DEF_FEAT(KMF_EAES_128, "kmf-eaes-128", KMF, 26, "KMF Encrypted-AES-128")
+DEF_FEAT(KMF_EAES_192, "kmf-eaes-192", KMF, 27, "KMF Encrypted-AES-192")
+DEF_FEAT(KMF_EAES_256, "kmf-eaes-256", KMF, 28, "KMF Encrypted-AES-256")
+
+/* Features exposed via the KMO instruction. */
+DEF_FEAT(KMO_DEA, "kmo-dea", KMO, 1, "KMO DEA")
+DEF_FEAT(KMO_TDEA_128, "kmo-tdea-128", KMO, 2, "KMO TDEA-128")
+DEF_FEAT(KMO_TDEA_192, "kmo-tdea-192", KMO, 3, "KMO TDEA-192")
+DEF_FEAT(KMO_EDEA, "kmo-edea", KMO, 9, "KMO Encrypted-DEA")
+DEF_FEAT(KMO_ETDEA_128, "kmo-etdea-128", KMO, 10, "KMO Encrypted-TDEA-128")
+DEF_FEAT(KMO_ETDEA_192, "kmo-etdea-192", KMO, 11, "KMO Encrypted-TDEA-192")
+DEF_FEAT(KMO_AES_128, "kmo-aes-128", KMO, 18, "KMO AES-128")
+DEF_FEAT(KMO_AES_192, "kmo-aes-192", KMO, 19, "KMO AES-192")
+DEF_FEAT(KMO_AES_256, "kmo-aes-256", KMO, 20, "KMO AES-256")
+DEF_FEAT(KMO_EAES_128, "kmo-eaes-128", KMO, 26, "KMO Encrypted-AES-128")
+DEF_FEAT(KMO_EAES_192, "kmo-eaes-192", KMO, 27, "KMO Encrypted-AES-192")
+DEF_FEAT(KMO_EAES_256, "kmo-eaes-256", KMO, 28, "KMO Encrypted-AES-256")
+
+/* Features exposed via the PCC instruction. */
+DEF_FEAT(PCC_CMAC_DEA, "pcc-cmac-dea", PCC, 1, "PCC Compute-Last-Block-CMAC-Using-DEA")
+DEF_FEAT(PCC_CMAC_TDEA_128, "pcc-cmac-tdea-128", PCC, 2, "PCC Compute-Last-Block-CMAC-Using-TDEA-128")
+DEF_FEAT(PCC_CMAC_TDEA_192, "pcc-cmac-tdea-192", PCC, 3, "PCC Compute-Last-Block-CMAC-Using-TDEA-192")
+DEF_FEAT(PCC_CMAC_ETDEA_128, "pcc-cmac-edea", PCC, 9, "PCC Compute-Last-Block-CMAC-Using-Encrypted-DEA")
+DEF_FEAT(PCC_CMAC_ETDEA_192, "pcc-cmac-etdea-128", PCC, 10, "PCC Compute-Last-Block-CMAC-Using-Encrypted-TDEA-128")
+DEF_FEAT(PCC_CMAC_TDEA, "pcc-cmac-etdea-192", PCC, 11, "PCC Compute-Last-Block-CMAC-Using-Encrypted-TDEA-192")
+DEF_FEAT(PCC_CMAC_AES_128, "pcc-cmac-aes-128", PCC, 18, "PCC Compute-Last-Block-CMAC-Using-AES-128")
+DEF_FEAT(PCC_CMAC_AES_192, "pcc-cmac-aes-192", PCC, 19, "PCC Compute-Last-Block-CMAC-Using-AES-192")
+DEF_FEAT(PCC_CMAC_AES_256, "pcc-cmac-eaes-256", PCC, 20, "PCC Compute-Last-Block-CMAC-Using-AES-256")
+DEF_FEAT(PCC_CMAC_EAES_128, "pcc-cmac-eaes-128", PCC, 26, "PCC Compute-Last-Block-CMAC-Using-Encrypted-AES-128")
+DEF_FEAT(PCC_CMAC_EAES_192, "pcc-cmac-eaes-192", PCC, 27, "PCC Compute-Last-Block-CMAC-Using-Encrypted-AES-192")
+DEF_FEAT(PCC_CMAC_EAES_256, "pcc-cmac-eaes-256", PCC, 28, "PCC Compute-Last-Block-CMAC-Using-Encrypted-AES-256")
+DEF_FEAT(PCC_XTS_AES_128, "pcc-xts-aes-128", PCC, 50, "PCC Compute-XTS-Parameter-Using-AES-128")
+DEF_FEAT(PCC_XTS_AES_256, "pcc-xts-aes-256", PCC, 52, "PCC Compute-XTS-Parameter-Using-AES-256")
+DEF_FEAT(PCC_XTS_EAES_128, "pcc-xts-eaes-128", PCC, 58, "PCC Compute-XTS-Parameter-Using-Encrypted-AES-128")
+DEF_FEAT(PCC_XTS_EAES_256, "pcc-xts-eaes-256", PCC, 60, "PCC Compute-XTS-Parameter-Using-Encrypted-AES-256")
+DEF_FEAT(PCC_SCALAR_MULT_P256, "pcc-scalar-mult-p256", PCC, 64, "PCC Scalar-Multiply-P256")
+DEF_FEAT(PCC_SCALAR_MULT_P384, "pcc-scalar-mult-p384", PCC, 65, "PCC Scalar-Multiply-P384")
+DEF_FEAT(PCC_SCALAR_MULT_P512, "pcc-scalar-mult-p521", PCC, 66, "PCC Scalar-Multiply-P521")
+DEF_FEAT(PCC_SCALAR_MULT_ED25519, "pcc-scalar-mult-ed25519", PCC, 72, "PCC Scalar-Multiply-Ed25519")
+DEF_FEAT(PCC_SCALAR_MULT_ED448, "pcc-scalar-mult-ed448", PCC, 73, "PCC Scalar-Multiply-Ed448")
+DEF_FEAT(PCC_SCALAR_MULT_X25519, "pcc-scalar-mult-x25519", PCC, 80, "PCC Scalar-Multiply-X25519")
+DEF_FEAT(PCC_SCALAR_MULT_X448, "pcc-scalar-mult-x448", PCC, 81, "PCC Scalar-Multiply-X448")
+
+/* Features exposed via the PPNO/PRNO instruction. */
+DEF_FEAT(PPNO_SHA_512_DRNG, "ppno-sha-512-drng", PPNO, 3, "PPNO SHA-512-DRNG")
+DEF_FEAT(PRNO_TRNG_QRTCR, "prno-trng-qrtcr", PPNO, 112, "PRNO TRNG-Query-Raw-to-Conditioned-Ratio")
+DEF_FEAT(PRNO_TRNG, "prno-trng", PPNO, 114, "PRNO TRNG")
+
+/* Features exposed via the KMA instruction. */
+DEF_FEAT(KMA_GCM_AES_128, "kma-gcm-aes-128", KMA, 18, "KMA GCM-AES-128")
+DEF_FEAT(KMA_GCM_AES_192, "kma-gcm-aes-192", KMA, 19, "KMA GCM-AES-192")
+DEF_FEAT(KMA_GCM_AES_256, "kma-gcm-aes-256", KMA, 20, "KMA GCM-AES-256")
+DEF_FEAT(KMA_GCM_EAES_128, "kma-gcm-eaes-128", KMA, 26, "KMA GCM-Encrypted-AES-128")
+DEF_FEAT(KMA_GCM_EAES_192, "kma-gcm-eaes-192", KMA, 27, "KMA GCM-Encrypted-AES-192")
+DEF_FEAT(KMA_GCM_EAES_256, "kma-gcm-eaes-256", KMA, 28, "KMA GCM-Encrypted-AES-256")
+
+/* Features exposed via the KDSA instruction. */
+DEF_FEAT(KDSA_ECDSA_VERIFY_P256, "kdsa-ecdsa-verify-p256", KDSA, 1, "KDSA ECDSA-Verify-P256")
+DEF_FEAT(KDSA_ECDSA_VERIFY_P384, "kdsa-ecdsa-verify-p384", KDSA, 2, "KDSA ECDSA-Verify-P384")
+DEF_FEAT(KDSA_ECDSA_VERIFY_P512, "kdsa-ecdsa-verify-p521", KDSA, 3, "KDSA ECDSA-Verify-P521")
+DEF_FEAT(KDSA_ECDSA_SIGN_P256, "kdsa-ecdsa-sign-p256", KDSA, 9, "KDSA ECDSA-Sign-P256")
+DEF_FEAT(KDSA_ECDSA_SIGN_P384, "kdsa-ecdsa-sign-p384", KDSA, 10, "KDSA ECDSA-Sign-P384")
+DEF_FEAT(KDSA_ECDSA_SIGN_P512, "kdsa-ecdsa-sign-p521", KDSA, 11, "KDSA ECDSA-Sign-P521")
+DEF_FEAT(KDSA_EECDSA_SIGN_P256, "kdsa-eecdsa-sign-p256", KDSA, 17, "KDSA Encrypted-ECDSA-Sign-P256")
+DEF_FEAT(KDSA_EECDSA_SIGN_P384, "kdsa-eecdsa-sign-p384", KDSA, 18, "KDSA Encrypted-ECDSA-Sign-P384")
+DEF_FEAT(KDSA_EECDSA_SIGN_P512, "kdsa-eecdsa-sign-p521", KDSA, 19, "KDSA Encrypted-ECDSA-Sign-P521")
+DEF_FEAT(KDSA_EDDSA_VERIFY_ED25519, "kdsa-eddsa-verify-ed25519", KDSA, 32, "KDSA EdDSA-Verify-Ed25519")
+DEF_FEAT(KDSA_EDDSA_VERIFY_ED448, "kdsa-eddsa-verify-ed448", KDSA, 36, "KDSA EdDSA-Verify-Ed448")
+DEF_FEAT(KDSA_EDDSA_SIGN_ED25519, "kdsa-eddsa-sign-ed25519", KDSA, 40, "KDSA EdDSA-Sign-Ed25519")
+DEF_FEAT(KDSA_EDDSA_SIGN_ED448, "kdsa-eddsa-sign-ed448", KDSA, 44, "KDSA EdDSA-Sign-Ed448")
+DEF_FEAT(KDSA_EEDDSA_SIGN_ED25519, "kdsa-eeddsa-sign-ed25519", KDSA, 48, "KDSA Encrypted-EdDSA-Sign-Ed25519")
+DEF_FEAT(KDSA_EEDDSA_SIGN_ED448, "kdsa-eeddsa-sign-ed448", KDSA, 52, "KDSA Encrypted-EdDSA-Sign-Ed448")
+
+/* Features exposed via the SORTL instruction. */
+DEF_FEAT(SORTL_SFLR, "sortl-sflr", SORTL, 1, "SORTL SFLR")
+DEF_FEAT(SORTL_SVLR, "sortl-svlr", SORTL, 2, "SORTL SVLR")
+DEF_FEAT(SORTL_32, "sortl-32", SORTL, 130, "SORTL 32 input lists")
+DEF_FEAT(SORTL_128, "sortl-128", SORTL, 132, "SORTL 128 input lists")
+DEF_FEAT(SORTL_F0, "sortl-f0", SORTL, 192, "SORTL format 0 parameter-block")
+
+/* Features exposed via the DEFLATE instruction. */
+DEF_FEAT(DEFLATE_GHDT, "dfltcc-gdht", DFLTCC, 1, "DFLTCC GDHT")
+DEF_FEAT(DEFLATE_CMPR, "dfltcc-cmpr", DFLTCC, 2, "DFLTCC CMPR")
+DEF_FEAT(DEFLATE_XPND, "dfltcc-xpnd", DFLTCC, 4, "DFLTCC XPND")
+DEF_FEAT(DEFLATE_F0, "dfltcc-f0", DFLTCC, 192, "DFLTCC format 0 parameter-block")
diff --git a/target/s390x/gen-features.c b/target/s390x/gen-features.c
index dc320a06c2..af06be3e3b 100644
--- a/target/s390x/gen-features.c
+++ b/target/s390x/gen-features.c
@@ -216,21 +216,21 @@
#define S390_FEAT_GROUP_MSA_EXT_9 \
S390_FEAT_MSA_EXT_9, \
- S390_FEAT_ECDSA_VERIFY_P256, \
- S390_FEAT_ECDSA_VERIFY_P384, \
- S390_FEAT_ECDSA_VERIFY_P512, \
- S390_FEAT_ECDSA_SIGN_P256, \
- S390_FEAT_ECDSA_SIGN_P384, \
- S390_FEAT_ECDSA_SIGN_P512, \
- S390_FEAT_EECDSA_SIGN_P256, \
- S390_FEAT_EECDSA_SIGN_P384, \
- S390_FEAT_EECDSA_SIGN_P512, \
- S390_FEAT_EDDSA_VERIFY_ED25519, \
- S390_FEAT_EDDSA_VERIFY_ED448, \
- S390_FEAT_EDDSA_SIGN_ED25519, \
- S390_FEAT_EDDSA_SIGN_ED448, \
- S390_FEAT_EEDDSA_SIGN_ED25519, \
- S390_FEAT_EEDDSA_SIGN_ED448, \
+ S390_FEAT_KDSA_ECDSA_VERIFY_P256, \
+ S390_FEAT_KDSA_ECDSA_VERIFY_P384, \
+ S390_FEAT_KDSA_ECDSA_VERIFY_P512, \
+ S390_FEAT_KDSA_ECDSA_SIGN_P256, \
+ S390_FEAT_KDSA_ECDSA_SIGN_P384, \
+ S390_FEAT_KDSA_ECDSA_SIGN_P512, \
+ S390_FEAT_KDSA_EECDSA_SIGN_P256, \
+ S390_FEAT_KDSA_EECDSA_SIGN_P384, \
+ S390_FEAT_KDSA_EECDSA_SIGN_P512, \
+ S390_FEAT_KDSA_EDDSA_VERIFY_ED25519, \
+ S390_FEAT_KDSA_EDDSA_VERIFY_ED448, \
+ S390_FEAT_KDSA_EDDSA_SIGN_ED25519, \
+ S390_FEAT_KDSA_EDDSA_SIGN_ED448, \
+ S390_FEAT_KDSA_EEDDSA_SIGN_ED25519, \
+ S390_FEAT_KDSA_EEDDSA_SIGN_ED448, \
S390_FEAT_PCC_SCALAR_MULT_P256, \
S390_FEAT_PCC_SCALAR_MULT_P384, \
S390_FEAT_PCC_SCALAR_MULT_P512, \