Diffstat (limited to 'target')
-rw-r--r--  target/arm/cortex-regs.c        |   11
-rw-r--r--  target/arm/cpu.c                |    9
-rw-r--r--  target/arm/cpu.h                |    4
-rw-r--r--  target/arm/debug_helper.c       |   11
-rw-r--r--  target/arm/kvm.c                |   35
-rw-r--r--  target/arm/kvm64.c              |    5
-rw-r--r--  target/arm/kvm_arm.h            |   19
-rw-r--r--  target/arm/tcg/a64.decode       |  152
-rw-r--r--  target/arm/tcg/meson.build      |    1
-rw-r--r--  target/arm/tcg/sve_helper.c     |    6
-rw-r--r--  target/arm/tcg/translate-a64.c  | 1285
-rw-r--r--  target/arm/tcg/translate.h      |    5
12 files changed, 802 insertions, 741 deletions
diff --git a/target/arm/cortex-regs.c b/target/arm/cortex-regs.c
index 17708480e7..ae817b08dd 100644
--- a/target/arm/cortex-regs.c
+++ b/target/arm/cortex-regs.c
@@ -15,8 +15,15 @@ static uint64_t l2ctlr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
ARMCPU *cpu = env_archcpu(env);
- /* Number of cores is in [25:24]; otherwise we RAZ */
- return (cpu->core_count - 1) << 24;
+ /*
+ * Number of cores is in [25:24]; otherwise we RAZ.
+ * If the board didn't configure the CPUs into clusters,
+ * we default to "all CPUs in one cluster", which might be
+ * more than the 4 that the hardware permits and which is
+ * all you can report in this two-bit field. Saturate to
+ * 0b11 (== 4 CPUs) rather than overflowing the field.
+ */
+ return MIN(cpu->core_count - 1, 3) << 24;
}
static const ARMCPRegInfo cortex_a72_a57_a53_cp_reginfo[] = {
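The saturation above matters once a board models more than four cores in a
cluster: (core_count - 1) << 24 would otherwise spill past bits [25:24] into
the RAZ region. A standalone sketch of the arithmetic (plain C, helper name
hypothetical):

    #include <stdio.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    /* Mirrors l2ctlr_read: core counts above 4 saturate to 0b11. */
    static unsigned l2ctlr_cores_field(unsigned core_count)
    {
        return MIN(core_count - 1, 3u) << 24;
    }

    int main(void)
    {
        /* Unsaturated, 6 cores would give 5 << 24 = 0x05000000 and set
         * bit 26; saturated it stays within the field as 0x03000000. */
        printf("%#x %#x\n", l2ctlr_cores_field(6), l2ctlr_cores_field(2));
        return 0;
    }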
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index 5182ed0c91..f6a88e52ac 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -1480,6 +1480,7 @@ void arm_cpu_post_init(Object *obj)
qdev_prop_allow_set_link_before_realize,
OBJ_PROP_LINK_STRONG);
}
+ cpu->has_mte = true;
}
#endif
}
@@ -1616,7 +1617,7 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
}
if (cpu->tag_memory) {
error_setg(errp,
- "Cannot enable %s when guest CPUs has MTE enabled",
+ "Cannot enable %s when guest CPUs has tag memory enabled",
current_accel_name());
return;
}
@@ -1996,10 +1997,10 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
}
#ifndef CONFIG_USER_ONLY
- if (cpu->tag_memory == NULL && cpu_isar_feature(aa64_mte, cpu)) {
+ if (!cpu->has_mte && cpu_isar_feature(aa64_mte, cpu)) {
/*
- * Disable the MTE feature bits if we do not have tag-memory
- * provided by the machine.
+ * Disable the MTE feature bits if we do not have the feature
+ * set up by the machine.
*/
cpu->isar.id_aa64pfr1 =
FIELD_DP64(cpu->isar.id_aa64pfr1, ID_AA64PFR1, MTE, 0);
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index d469a2637b..c3463e39bc 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -935,6 +935,9 @@ struct ArchCPU {
*/
uint32_t psci_conduit;
+ /* CPU has Memory Tag Extension */
+ bool has_mte;
+
/* For v8M, initial value of the Secure VTOR */
uint32_t init_svtor;
/* For v8M, initial value of the Non-secure VTOR */
@@ -1053,6 +1056,7 @@ struct ArchCPU {
bool prop_pauth;
bool prop_pauth_impdef;
bool prop_lpa2;
+ OnOffAuto prop_mte;
/* DCZ blocksize, in log_2(words), ie low 4 bits of DCZID_EL0 */
uint32_t dcz_blocksize;
diff --git a/target/arm/debug_helper.c b/target/arm/debug_helper.c
index dfc8b2a1a5..d41cc643b1 100644
--- a/target/arm/debug_helper.c
+++ b/target/arm/debug_helper.c
@@ -949,8 +949,10 @@ static const ARMCPRegInfo debug_cp_reginfo[] = {
.access = PL0_R, .accessfn = access_tdcc,
.type = ARM_CP_CONST, .resetvalue = 0 },
/*
- * OSDTRRX_EL1/OSDTRTX_EL1 are used for save and restore of DBGDTRRX_EL0.
- * It is a component of the Debug Communications Channel, which is not implemented.
+ * These registers belong to the Debug Communications Channel,
+ * which is not implemented. However we implement RAZ/WI behaviour
+ * with trapping to prevent spurious SIGILLs if the guest OS does
+ * access them, as the support cannot be probed for.
*/
{ .name = "OSDTRRX_EL1", .state = ARM_CP_STATE_BOTH, .cp = 14,
.opc0 = 2, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 2,
@@ -960,6 +962,11 @@ static const ARMCPRegInfo debug_cp_reginfo[] = {
.opc0 = 2, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
.access = PL1_RW, .accessfn = access_tdcc,
.type = ARM_CP_CONST, .resetvalue = 0 },
+ /* DBGDTRTX_EL0/DBGDTRRX_EL0 depend on direction */
+ { .name = "DBGDTR_EL0", .state = ARM_CP_STATE_BOTH, .cp = 14,
+ .opc0 = 2, .opc1 = 3, .crn = 0, .crm = 5, .opc2 = 0,
+ .access = PL0_RW, .accessfn = access_tdcc,
+ .type = ARM_CP_CONST, .resetvalue = 0 },
/*
* OSECCR_EL1 provides a mechanism for an operating system
* to access the contents of EDECCR. EDECCR is not implemented though,
diff --git a/target/arm/kvm.c b/target/arm/kvm.c
index 84da49332c..9553488ecd 100644
--- a/target/arm/kvm.c
+++ b/target/arm/kvm.c
@@ -31,6 +31,7 @@
#include "hw/boards.h"
#include "hw/irq.h"
#include "qemu/log.h"
+#include "migration/blocker.h"
const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
KVM_CAP_LAST_INFO
@@ -1064,3 +1065,37 @@ bool kvm_arch_cpu_check_are_resettable(void)
void kvm_arch_accel_class_init(ObjectClass *oc)
{
}
+
+void kvm_arm_enable_mte(Object *cpuobj, Error **errp)
+{
+ static bool tried_to_enable;
+ static bool succeeded_to_enable;
+ Error *mte_migration_blocker = NULL;
+ int ret;
+
+ if (!tried_to_enable) {
+ /*
+ * MTE on KVM is enabled on a per-VM basis (and retrying doesn't make
+ * sense), and we only want a single migration blocker as well.
+ */
+ tried_to_enable = true;
+
+ ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_ARM_MTE, 0);
+ if (ret) {
+ error_setg_errno(errp, -ret, "Failed to enable KVM_CAP_ARM_MTE");
+ return;
+ }
+
+ /* TODO: add proper migration support with MTE enabled */
+ error_setg(&mte_migration_blocker,
+ "Live migration disabled due to MTE enabled");
+ if (migrate_add_blocker(mte_migration_blocker, errp)) {
+ error_free(mte_migration_blocker);
+ return;
+ }
+ succeeded_to_enable = true;
+ }
+ if (succeeded_to_enable) {
+ object_property_set_bool(cpuobj, "has_mte", true, NULL);
+ }
+}
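For context, kvm_arm_enable_mte() is written so it can be called once per CPU
object while enabling the VM-wide capability only on the first call. A minimal
sketch of a plausible call site (the real one lives in board code outside this
target/-only diff, so the surrounding names are illustrative):

    Error *err = NULL;

    /* Sketch: enable MTE for each CPU when the host supports it. */
    if (kvm_enabled() && kvm_arm_mte_supported()) {
        kvm_arm_enable_mte(cpuobj, &err);
        if (err) {
            error_report_err(err);
            exit(1);
        }
    }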
diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c
index 810db33ccb..1893f38793 100644
--- a/target/arm/kvm64.c
+++ b/target/arm/kvm64.c
@@ -756,6 +756,11 @@ bool kvm_arm_steal_time_supported(void)
return kvm_check_extension(kvm_state, KVM_CAP_STEAL_TIME);
}
+bool kvm_arm_mte_supported(void)
+{
+ return kvm_check_extension(kvm_state, KVM_CAP_ARM_MTE);
+}
+
QEMU_BUILD_BUG_ON(KVM_ARM64_SVE_VQ_MIN != 1);
uint32_t kvm_arm_sve_get_vls(CPUState *cs)
diff --git a/target/arm/kvm_arm.h b/target/arm/kvm_arm.h
index 330fbe5c72..2083547bf6 100644
--- a/target/arm/kvm_arm.h
+++ b/target/arm/kvm_arm.h
@@ -314,6 +314,13 @@ bool kvm_arm_pmu_supported(void);
bool kvm_arm_sve_supported(void);
/**
+ * kvm_arm_mte_supported:
+ *
+ * Returns: true if KVM can enable MTE, and false otherwise.
+ */
+bool kvm_arm_mte_supported(void);
+
+/**
* kvm_arm_get_max_vm_ipa_size:
* @ms: Machine state handle
* @fixed_ipa: True when the IPA limit is fixed at 40. This is the case
@@ -377,6 +384,8 @@ void kvm_arm_pvtime_init(CPUState *cs, uint64_t ipa);
int kvm_arm_set_irq(int cpu, int irqtype, int irq, int level);
+void kvm_arm_enable_mte(Object *cpuobj, Error **errp);
+
#else
/*
@@ -403,6 +412,11 @@ static inline bool kvm_arm_steal_time_supported(void)
return false;
}
+static inline bool kvm_arm_mte_supported(void)
+{
+ return false;
+}
+
/*
* These functions should never actually be called without KVM support.
*/
@@ -451,6 +465,11 @@ static inline uint32_t kvm_arm_sve_get_vls(CPUState *cs)
g_assert_not_reached();
}
+static inline void kvm_arm_enable_mte(Object *cpuobj, Error **errp)
+{
+ g_assert_not_reached();
+}
+
#endif
static inline const char *gic_class_name(void)
diff --git a/target/arm/tcg/a64.decode b/target/arm/tcg/a64.decode
new file mode 100644
index 0000000000..12a310d0a3
--- /dev/null
+++ b/target/arm/tcg/a64.decode
@@ -0,0 +1,152 @@
+# AArch64 A64 allowed instruction decoding
+#
+# Copyright (c) 2023 Linaro, Ltd
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, see <http://www.gnu.org/licenses/>.
+
+#
+# This file is processed by scripts/decodetree.py
+#
+
+&r rn
+&ri rd imm
+&rri_sf rd rn imm sf
+&i imm
+
+
+### Data Processing - Immediate
+
+# PC-rel addressing
+
+%imm_pcrel 5:s19 29:2
+@pcrel . .. ..... ................... rd:5 &ri imm=%imm_pcrel
+
+ADR 0 .. 10000 ................... ..... @pcrel
+ADRP 1 .. 10000 ................... ..... @pcrel
+
+# Add/subtract (immediate)
+
+%imm12_sh12 10:12 !function=shl_12
+@addsub_imm sf:1 .. ...... . imm:12 rn:5 rd:5
+@addsub_imm12 sf:1 .. ...... . ............ rn:5 rd:5 imm=%imm12_sh12
+
+ADD_i . 00 100010 0 ............ ..... ..... @addsub_imm
+ADD_i . 00 100010 1 ............ ..... ..... @addsub_imm12
+ADDS_i . 01 100010 0 ............ ..... ..... @addsub_imm
+ADDS_i . 01 100010 1 ............ ..... ..... @addsub_imm12
+
+SUB_i . 10 100010 0 ............ ..... ..... @addsub_imm
+SUB_i . 10 100010 1 ............ ..... ..... @addsub_imm12
+SUBS_i . 11 100010 0 ............ ..... ..... @addsub_imm
+SUBS_i . 11 100010 1 ............ ..... ..... @addsub_imm12
+
+# Add/subtract (immediate with tags)
+
+&rri_tag rd rn uimm6 uimm4
+@addsub_imm_tag . .. ...... . uimm6:6 .. uimm4:4 rn:5 rd:5 &rri_tag
+
+ADDG_i 1 00 100011 0 ...... 00 .... ..... ..... @addsub_imm_tag
+SUBG_i 1 10 100011 0 ...... 00 .... ..... ..... @addsub_imm_tag
+
+# Logical (immediate)
+
+&rri_log rd rn sf dbm
+@logic_imm_64 1 .. ...... dbm:13 rn:5 rd:5 &rri_log sf=1
+@logic_imm_32 0 .. ...... 0 dbm:12 rn:5 rd:5 &rri_log sf=0
+
+AND_i . 00 100100 . ...... ...... ..... ..... @logic_imm_64
+AND_i . 00 100100 . ...... ...... ..... ..... @logic_imm_32
+ORR_i . 01 100100 . ...... ...... ..... ..... @logic_imm_64
+ORR_i . 01 100100 . ...... ...... ..... ..... @logic_imm_32
+EOR_i . 10 100100 . ...... ...... ..... ..... @logic_imm_64
+EOR_i . 10 100100 . ...... ...... ..... ..... @logic_imm_32
+ANDS_i . 11 100100 . ...... ...... ..... ..... @logic_imm_64
+ANDS_i . 11 100100 . ...... ...... ..... ..... @logic_imm_32
+
+# Move wide (immediate)
+
+&movw rd sf imm hw
+@movw_64 1 .. ...... hw:2 imm:16 rd:5 &movw sf=1
+@movw_32 0 .. ...... 0 hw:1 imm:16 rd:5 &movw sf=0
+
+MOVN . 00 100101 .. ................ ..... @movw_64
+MOVN . 00 100101 .. ................ ..... @movw_32
+MOVZ . 10 100101 .. ................ ..... @movw_64
+MOVZ . 10 100101 .. ................ ..... @movw_32
+MOVK . 11 100101 .. ................ ..... @movw_64
+MOVK . 11 100101 .. ................ ..... @movw_32
+
+# Bitfield
+
+&bitfield rd rn sf immr imms
+@bitfield_64 1 .. ...... 1 immr:6 imms:6 rn:5 rd:5 &bitfield sf=1
+@bitfield_32 0 .. ...... 0 0 immr:5 0 imms:5 rn:5 rd:5 &bitfield sf=0
+
+SBFM . 00 100110 . ...... ...... ..... ..... @bitfield_64
+SBFM . 00 100110 . ...... ...... ..... ..... @bitfield_32
+BFM . 01 100110 . ...... ...... ..... ..... @bitfield_64
+BFM . 01 100110 . ...... ...... ..... ..... @bitfield_32
+UBFM . 10 100110 . ...... ...... ..... ..... @bitfield_64
+UBFM . 10 100110 . ...... ...... ..... ..... @bitfield_32
+
+# Extract
+
+&extract rd rn rm imm sf
+
+EXTR 1 00 100111 1 0 rm:5 imm:6 rn:5 rd:5 &extract sf=1
+EXTR 0 00 100111 0 0 rm:5 0 imm:5 rn:5 rd:5 &extract sf=0
+
+# Branches
+
+%imm26 0:s26 !function=times_4
+@branch . ..... .......................... &i imm=%imm26
+
+B 0 00101 .......................... @branch
+BL 1 00101 .......................... @branch
+
+%imm19 5:s19 !function=times_4
+&cbz rt imm sf nz
+
+CBZ sf:1 011010 nz:1 ................... rt:5 &cbz imm=%imm19
+
+%imm14 5:s14 !function=times_4
+%imm31_19 31:1 19:5
+&tbz rt imm nz bitpos
+
+TBZ . 011011 nz:1 ..... .............. rt:5 &tbz imm=%imm14 bitpos=%imm31_19
+
+B_cond 0101010 0 ................... 0 cond:4 imm=%imm19
+
+BR 1101011 0000 11111 000000 rn:5 00000 &r
+BLR 1101011 0001 11111 000000 rn:5 00000 &r
+RET 1101011 0010 11111 000000 rn:5 00000 &r
+
+&braz rn m
+BRAZ 1101011 0000 11111 00001 m:1 rn:5 11111 &braz # BRAAZ, BRABZ
+BLRAZ 1101011 0001 11111 00001 m:1 rn:5 11111 &braz # BLRAAZ, BLRABZ
+
+&reta m
+RETA 1101011 0010 11111 00001 m:1 11111 11111 &reta # RETAA, RETAB
+
+&bra rn rm m
+BRA 1101011 1000 11111 00001 m:1 rn:5 rm:5 &bra # BRAA, BRAB
+BLRA 1101011 1001 11111 00001 m:1 rn:5 rm:5 &bra # BLRAA, BLRAB
+
+ERET 1101011 0100 11111 000000 11111 00000
+ERETA 1101011 0100 11111 00001 m:1 11111 11111 &reta # ERETAA, ERETAB
+
+# We don't need to decode DRPS because it always UNDEFs except when
+# the processor is in halting debug state (which we don't implement).
+# The pattern is listed here as documentation.
+# DRPS 1101011 0101 11111 000000 11111 00000
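Each pattern line above is exactly 32 bits; %formats stitch together fields
scattered across the encoding. As a cross-check against the hand decoder being
deleted below, the %imm_pcrel format (5:s19 29:2) is equivalent to this
extraction, using QEMU's extract32/sextract32 helpers from qemu/bitops.h:

    /* %imm_pcrel 5:s19 29:2: signed immhi at bit 5 forms the high 19
     * bits, immlo at bit 29 the low 2 bits of the ADR offset. */
    static int64_t imm_pcrel(uint32_t insn)
    {
        return ((int64_t)sextract32(insn, 5, 19) << 2)
             | extract32(insn, 29, 2);
    }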
diff --git a/target/arm/tcg/meson.build b/target/arm/tcg/meson.build
index 4d99f6dacb..130ed62fcd 100644
--- a/target/arm/tcg/meson.build
+++ b/target/arm/tcg/meson.build
@@ -13,6 +13,7 @@ gen = [
decodetree.process('a32-uncond.decode', extra_args: '--static-decode=disas_a32_uncond'),
decodetree.process('t32.decode', extra_args: '--static-decode=disas_t32'),
decodetree.process('t16.decode', extra_args: ['-w', '16', '--static-decode=disas_t16']),
+ decodetree.process('a64.decode', extra_args: ['--static-decode=disas_a64']),
]
arm_ss.add(gen)
diff --git a/target/arm/tcg/sve_helper.c b/target/arm/tcg/sve_helper.c
index ccf5e5beca..0097522470 100644
--- a/target/arm/tcg/sve_helper.c
+++ b/target/arm/tcg/sve_helper.c
@@ -6727,6 +6727,7 @@ void sve_ldff1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
intptr_t reg_off;
SVEHostPage info;
target_ulong addr, in_page;
+ ARMVectorReg scratch;
/* Skip to the first true predicate. */
reg_off = find_next_active(vg, 0, reg_max, esz);
@@ -6736,6 +6737,11 @@ void sve_ldff1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
return;
}
+ /* Protect against overlap between vd and vm. */
+ if (unlikely(vd == vm)) {
+ vm = memcpy(&scratch, vm, reg_max);
+ }
+
/*
* Probe the first element, allowing faults.
*/
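The scratch copy is needed because a first-fault gather writes the destination
vector element by element while still reading addresses out of the index
vector; with vd == vm the not-yet-consumed addresses would be overwritten
mid-walk. The hazard in miniature (illustrative C, not the real SVE types):

    #include <stdint.h>
    #include <string.h>

    static uint64_t load_elem(uint64_t addr) { return addr ^ 1; } /* stand-in */

    static void gather(uint64_t *dst, const uint64_t *src,
                       uint64_t *scratch, int n)
    {
        if (dst == src) {                       /* same trick as above */
            src = memcpy(scratch, src, n * sizeof(*src));
        }
        for (int i = 0; i < n; i++) {
            dst[i] = load_elem(src[i]);
        }
    }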
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
index dff391bfe2..741a608739 100644
--- a/target/arm/tcg/translate-a64.c
+++ b/target/arm/tcg/translate-a64.c
@@ -56,6 +56,13 @@ enum a64_shift_type {
A64_SHIFT_TYPE_ROR = 3
};
+/*
+ * Include the generated decoders.
+ */
+
+#include "decode-sme-fa64.c.inc"
+#include "decode-a64.c.inc"
+
/* Table based decoder typedefs - used when the relevant bits for decode
* are too awkwardly scattered across the instruction (eg SIMD).
*/
@@ -675,83 +682,102 @@ static inline void gen_logic_CC(int sf, TCGv_i64 result)
}
/* dest = T0 + T1; compute C, N, V and Z flags */
-static void gen_add_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
+static void gen_add64_CC(TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
- if (sf) {
- TCGv_i64 result, flag, tmp;
- result = tcg_temp_new_i64();
- flag = tcg_temp_new_i64();
- tmp = tcg_temp_new_i64();
+ TCGv_i64 result, flag, tmp;
+ result = tcg_temp_new_i64();
+ flag = tcg_temp_new_i64();
+ tmp = tcg_temp_new_i64();
- tcg_gen_movi_i64(tmp, 0);
- tcg_gen_add2_i64(result, flag, t0, tmp, t1, tmp);
+ tcg_gen_movi_i64(tmp, 0);
+ tcg_gen_add2_i64(result, flag, t0, tmp, t1, tmp);
- tcg_gen_extrl_i64_i32(cpu_CF, flag);
+ tcg_gen_extrl_i64_i32(cpu_CF, flag);
- gen_set_NZ64(result);
+ gen_set_NZ64(result);
- tcg_gen_xor_i64(flag, result, t0);
- tcg_gen_xor_i64(tmp, t0, t1);
- tcg_gen_andc_i64(flag, flag, tmp);
- tcg_gen_extrh_i64_i32(cpu_VF, flag);
+ tcg_gen_xor_i64(flag, result, t0);
+ tcg_gen_xor_i64(tmp, t0, t1);
+ tcg_gen_andc_i64(flag, flag, tmp);
+ tcg_gen_extrh_i64_i32(cpu_VF, flag);
- tcg_gen_mov_i64(dest, result);
- } else {
- /* 32 bit arithmetic */
- TCGv_i32 t0_32 = tcg_temp_new_i32();
- TCGv_i32 t1_32 = tcg_temp_new_i32();
- TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_mov_i64(dest, result);
+}
- tcg_gen_movi_i32(tmp, 0);
- tcg_gen_extrl_i64_i32(t0_32, t0);
- tcg_gen_extrl_i64_i32(t1_32, t1);
- tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, t1_32, tmp);
- tcg_gen_mov_i32(cpu_ZF, cpu_NF);
- tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
- tcg_gen_xor_i32(tmp, t0_32, t1_32);
- tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
- tcg_gen_extu_i32_i64(dest, cpu_NF);
+static void gen_add32_CC(TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
+{
+ TCGv_i32 t0_32 = tcg_temp_new_i32();
+ TCGv_i32 t1_32 = tcg_temp_new_i32();
+ TCGv_i32 tmp = tcg_temp_new_i32();
+
+ tcg_gen_movi_i32(tmp, 0);
+ tcg_gen_extrl_i64_i32(t0_32, t0);
+ tcg_gen_extrl_i64_i32(t1_32, t1);
+ tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, t1_32, tmp);
+ tcg_gen_mov_i32(cpu_ZF, cpu_NF);
+ tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
+ tcg_gen_xor_i32(tmp, t0_32, t1_32);
+ tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
+ tcg_gen_extu_i32_i64(dest, cpu_NF);
+}
+
+static void gen_add_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
+{
+ if (sf) {
+ gen_add64_CC(dest, t0, t1);
+ } else {
+ gen_add32_CC(dest, t0, t1);
}
}
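The flag derivation is easier to see in scalar form: add2 with zero high parts
yields the carry-out, and V is set when the operands share a sign but the
result does not (hence the andc). A plain-C restatement of gen_add64_CC:

    /* N/Z/C/V for a 64-bit add, matching the TCG ops above. */
    static void add64_flags(uint64_t t0, uint64_t t1,
                            int *n, int *z, int *c, int *v)
    {
        uint64_t result = t0 + t1;
        *n = result >> 63;
        *z = (result == 0);
        *c = result < t0;                               /* carry-out */
        *v = (((result ^ t0) & ~(t0 ^ t1)) >> 63) & 1;  /* same-sign inputs,
                                                           flipped result */
    }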
/* dest = T0 - T1; compute C, N, V and Z flags */
-static void gen_sub_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
+static void gen_sub64_CC(TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
- if (sf) {
- /* 64 bit arithmetic */
- TCGv_i64 result, flag, tmp;
+ /* 64 bit arithmetic */
+ TCGv_i64 result, flag, tmp;
- result = tcg_temp_new_i64();
- flag = tcg_temp_new_i64();
- tcg_gen_sub_i64(result, t0, t1);
+ result = tcg_temp_new_i64();
+ flag = tcg_temp_new_i64();
+ tcg_gen_sub_i64(result, t0, t1);
- gen_set_NZ64(result);
+ gen_set_NZ64(result);
- tcg_gen_setcond_i64(TCG_COND_GEU, flag, t0, t1);
- tcg_gen_extrl_i64_i32(cpu_CF, flag);
+ tcg_gen_setcond_i64(TCG_COND_GEU, flag, t0, t1);
+ tcg_gen_extrl_i64_i32(cpu_CF, flag);
- tcg_gen_xor_i64(flag, result, t0);
- tmp = tcg_temp_new_i64();
- tcg_gen_xor_i64(tmp, t0, t1);
- tcg_gen_and_i64(flag, flag, tmp);
- tcg_gen_extrh_i64_i32(cpu_VF, flag);
- tcg_gen_mov_i64(dest, result);
- } else {
- /* 32 bit arithmetic */
- TCGv_i32 t0_32 = tcg_temp_new_i32();
- TCGv_i32 t1_32 = tcg_temp_new_i32();
- TCGv_i32 tmp;
+ tcg_gen_xor_i64(flag, result, t0);
+ tmp = tcg_temp_new_i64();
+ tcg_gen_xor_i64(tmp, t0, t1);
+ tcg_gen_and_i64(flag, flag, tmp);
+ tcg_gen_extrh_i64_i32(cpu_VF, flag);
+ tcg_gen_mov_i64(dest, result);
+}
- tcg_gen_extrl_i64_i32(t0_32, t0);
- tcg_gen_extrl_i64_i32(t1_32, t1);
- tcg_gen_sub_i32(cpu_NF, t0_32, t1_32);
- tcg_gen_mov_i32(cpu_ZF, cpu_NF);
- tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0_32, t1_32);
- tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
- tmp = tcg_temp_new_i32();
- tcg_gen_xor_i32(tmp, t0_32, t1_32);
- tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
- tcg_gen_extu_i32_i64(dest, cpu_NF);
+static void gen_sub32_CC(TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
+{
+ /* 32 bit arithmetic */
+ TCGv_i32 t0_32 = tcg_temp_new_i32();
+ TCGv_i32 t1_32 = tcg_temp_new_i32();
+ TCGv_i32 tmp;
+
+ tcg_gen_extrl_i64_i32(t0_32, t0);
+ tcg_gen_extrl_i64_i32(t1_32, t1);
+ tcg_gen_sub_i32(cpu_NF, t0_32, t1_32);
+ tcg_gen_mov_i32(cpu_ZF, cpu_NF);
+ tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0_32, t1_32);
+ tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
+ tmp = tcg_temp_new_i32();
+ tcg_gen_xor_i32(tmp, t0_32, t1_32);
+ tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
+ tcg_gen_extu_i32_i64(dest, cpu_NF);
+}
+
+static void gen_sub_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
+{
+ if (sf) {
+ gen_sub64_CC(dest, t0, t1);
+ } else {
+ gen_sub32_CC(dest, t0, t1);
}
}
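Subtraction computes C differently: it is the inverted borrow, which is why the
code uses a TCG_COND_GEU setcond rather than a widening subtract, and V uses
and (not andc) because signed overflow here needs the operand signs to differ.
In scalar form:

    /* N/Z/C/V for a 64-bit subtract, matching gen_sub64_CC. */
    static void sub64_flags(uint64_t t0, uint64_t t1,
                            int *n, int *z, int *c, int *v)
    {
        uint64_t result = t0 - t1;
        *n = result >> 63;
        *z = (result == 0);
        *c = (t0 >= t1);                               /* no borrow */
        *v = (((result ^ t0) & (t0 ^ t1)) >> 63) & 1;  /* differing-sign
                                                          inputs, flipped
                                                          result */
    }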
@@ -1293,116 +1319,279 @@ static inline AArch64DecodeFn *lookup_disas_fn(const AArch64DecodeTable *table,
* match up with those in the manual.
*/
-/* Unconditional branch (immediate)
- * 31 30 26 25 0
- * +----+-----------+-------------------------------------+
- * | op | 0 0 1 0 1 | imm26 |
- * +----+-----------+-------------------------------------+
- */
-static void disas_uncond_b_imm(DisasContext *s, uint32_t insn)
+static bool trans_B(DisasContext *s, arg_i *a)
{
- int64_t diff = sextract32(insn, 0, 26) * 4;
-
- if (insn & (1U << 31)) {
- /* BL Branch with link */
- gen_pc_plus_diff(s, cpu_reg(s, 30), curr_insn_len(s));
- }
+ reset_btype(s);
+ gen_goto_tb(s, 0, a->imm);
+ return true;
+}
- /* B Branch / BL Branch with link */
+static bool trans_BL(DisasContext *s, arg_i *a)
+{
+ gen_pc_plus_diff(s, cpu_reg(s, 30), curr_insn_len(s));
reset_btype(s);
- gen_goto_tb(s, 0, diff);
+ gen_goto_tb(s, 0, a->imm);
+ return true;
}
-/* Compare and branch (immediate)
- * 31 30 25 24 23 5 4 0
- * +----+-------------+----+---------------------+--------+
- * | sf | 0 1 1 0 1 0 | op | imm19 | Rt |
- * +----+-------------+----+---------------------+--------+
- */
-static void disas_comp_b_imm(DisasContext *s, uint32_t insn)
+
+static bool trans_CBZ(DisasContext *s, arg_cbz *a)
{
- unsigned int sf, op, rt;
- int64_t diff;
DisasLabel match;
TCGv_i64 tcg_cmp;
- sf = extract32(insn, 31, 1);
- op = extract32(insn, 24, 1); /* 0: CBZ; 1: CBNZ */
- rt = extract32(insn, 0, 5);
- diff = sextract32(insn, 5, 19) * 4;
-
- tcg_cmp = read_cpu_reg(s, rt, sf);
+ tcg_cmp = read_cpu_reg(s, a->rt, a->sf);
reset_btype(s);
match = gen_disas_label(s);
- tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
+ tcg_gen_brcondi_i64(a->nz ? TCG_COND_NE : TCG_COND_EQ,
tcg_cmp, 0, match.label);
gen_goto_tb(s, 0, 4);
set_disas_label(s, match);
- gen_goto_tb(s, 1, diff);
+ gen_goto_tb(s, 1, a->imm);
+ return true;
}
-/* Test and branch (immediate)
- * 31 30 25 24 23 19 18 5 4 0
- * +----+-------------+----+-------+-------------+------+
- * | b5 | 0 1 1 0 1 1 | op | b40 | imm14 | Rt |
- * +----+-------------+----+-------+-------------+------+
- */
-static void disas_test_b_imm(DisasContext *s, uint32_t insn)
+static bool trans_TBZ(DisasContext *s, arg_tbz *a)
{
- unsigned int bit_pos, op, rt;
- int64_t diff;
DisasLabel match;
TCGv_i64 tcg_cmp;
- bit_pos = (extract32(insn, 31, 1) << 5) | extract32(insn, 19, 5);
- op = extract32(insn, 24, 1); /* 0: TBZ; 1: TBNZ */
- diff = sextract32(insn, 5, 14) * 4;
- rt = extract32(insn, 0, 5);
-
tcg_cmp = tcg_temp_new_i64();
- tcg_gen_andi_i64(tcg_cmp, cpu_reg(s, rt), (1ULL << bit_pos));
+ tcg_gen_andi_i64(tcg_cmp, cpu_reg(s, a->rt), 1ULL << a->bitpos);
reset_btype(s);
match = gen_disas_label(s);
- tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
+ tcg_gen_brcondi_i64(a->nz ? TCG_COND_NE : TCG_COND_EQ,
tcg_cmp, 0, match.label);
gen_goto_tb(s, 0, 4);
set_disas_label(s, match);
- gen_goto_tb(s, 1, diff);
+ gen_goto_tb(s, 1, a->imm);
+ return true;
}
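The bitpos field is rebuilt by %imm31_19 (31:1 19:5), which is exactly the
b5:b40 composition the deleted disas_test_b_imm performed by hand:

    /* Equivalent of %imm31_19: bit 31 supplies b5 (the high bit of the
     * 6-bit position), bits [23:19] supply b40. */
    static unsigned tbz_bitpos(uint32_t insn)
    {
        return (extract32(insn, 31, 1) << 5) | extract32(insn, 19, 5);
    }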
-/* Conditional branch (immediate)
- * 31 25 24 23 5 4 3 0
- * +---------------+----+---------------------+----+------+
- * | 0 1 0 1 0 1 0 | o1 | imm19 | o0 | cond |
- * +---------------+----+---------------------+----+------+
- */
-static void disas_cond_b_imm(DisasContext *s, uint32_t insn)
+static bool trans_B_cond(DisasContext *s, arg_B_cond *a)
{
- unsigned int cond;
- int64_t diff;
-
- if ((insn & (1 << 4)) || (insn & (1 << 24))) {
- unallocated_encoding(s);
- return;
- }
- diff = sextract32(insn, 5, 19) * 4;
- cond = extract32(insn, 0, 4);
-
reset_btype(s);
- if (cond < 0x0e) {
+ if (a->cond < 0x0e) {
/* genuinely conditional branches */
DisasLabel match = gen_disas_label(s);
- arm_gen_test_cc(cond, match.label);
+ arm_gen_test_cc(a->cond, match.label);
gen_goto_tb(s, 0, 4);
set_disas_label(s, match);
- gen_goto_tb(s, 1, diff);
+ gen_goto_tb(s, 1, a->imm);
} else {
/* 0xe and 0xf are both "always" conditions */
- gen_goto_tb(s, 0, diff);
+ gen_goto_tb(s, 0, a->imm);
+ }
+ return true;
+}
+
+static void set_btype_for_br(DisasContext *s, int rn)
+{
+ if (dc_isar_feature(aa64_bti, s)) {
+ /* BR to {x16,x17} or !guard -> 1, else 3. */
+ set_btype(s, rn == 16 || rn == 17 || !s->guarded_page ? 1 : 3);
+ }
+}
+
+static void set_btype_for_blr(DisasContext *s)
+{
+ if (dc_isar_feature(aa64_bti, s)) {
+ /* BLR sets BTYPE to 2, regardless of source guarded page. */
+ set_btype(s, 2);
+ }
+}
+
+static bool trans_BR(DisasContext *s, arg_r *a)
+{
+ gen_a64_set_pc(s, cpu_reg(s, a->rn));
+ set_btype_for_br(s, a->rn);
+ s->base.is_jmp = DISAS_JUMP;
+ return true;
+}
+
+static bool trans_BLR(DisasContext *s, arg_r *a)
+{
+ TCGv_i64 dst = cpu_reg(s, a->rn);
+ TCGv_i64 lr = cpu_reg(s, 30);
+ if (dst == lr) {
+ TCGv_i64 tmp = tcg_temp_new_i64();
+ tcg_gen_mov_i64(tmp, dst);
+ dst = tmp;
+ }
+ gen_pc_plus_diff(s, lr, curr_insn_len(s));
+ gen_a64_set_pc(s, dst);
+ set_btype_for_blr(s);
+ s->base.is_jmp = DISAS_JUMP;
+ return true;
+}
+
+static bool trans_RET(DisasContext *s, arg_r *a)
+{
+ gen_a64_set_pc(s, cpu_reg(s, a->rn));
+ s->base.is_jmp = DISAS_JUMP;
+ return true;
+}
+
+static TCGv_i64 auth_branch_target(DisasContext *s, TCGv_i64 dst,
+ TCGv_i64 modifier, bool use_key_a)
+{
+ TCGv_i64 truedst;
+ /*
+ * Return the branch target for a BRAA/RETA/etc, which is either
+ * just the destination dst, or that value with the pauth check
+ * done and the code removed from the high bits.
+ */
+ if (!s->pauth_active) {
+ return dst;
+ }
+
+ truedst = tcg_temp_new_i64();
+ if (use_key_a) {
+ gen_helper_autia(truedst, cpu_env, dst, modifier);
+ } else {
+ gen_helper_autib(truedst, cpu_env, dst, modifier);
+ }
+ return truedst;
+}
+
+static bool trans_BRAZ(DisasContext *s, arg_braz *a)
+{
+ TCGv_i64 dst;
+
+ if (!dc_isar_feature(aa64_pauth, s)) {
+ return false;
+ }
+
+ dst = auth_branch_target(s, cpu_reg(s, a->rn), tcg_constant_i64(0), !a->m);
+ gen_a64_set_pc(s, dst);
+ set_btype_for_br(s, a->rn);
+ s->base.is_jmp = DISAS_JUMP;
+ return true;
+}
+
+static bool trans_BLRAZ(DisasContext *s, arg_braz *a)
+{
+ TCGv_i64 dst, lr;
+
+ if (!dc_isar_feature(aa64_pauth, s)) {
+ return false;
+ }
+
+ dst = auth_branch_target(s, cpu_reg(s, a->rn), tcg_constant_i64(0), !a->m);
+ lr = cpu_reg(s, 30);
+ if (dst == lr) {
+ TCGv_i64 tmp = tcg_temp_new_i64();
+ tcg_gen_mov_i64(tmp, dst);
+ dst = tmp;
+ }
+ gen_pc_plus_diff(s, lr, curr_insn_len(s));
+ gen_a64_set_pc(s, dst);
+ set_btype_for_blr(s);
+ s->base.is_jmp = DISAS_JUMP;
+ return true;
+}
+
+static bool trans_RETA(DisasContext *s, arg_reta *a)
+{
+ TCGv_i64 dst;
+
+ dst = auth_branch_target(s, cpu_reg(s, 30), cpu_X[31], !a->m);
+ gen_a64_set_pc(s, dst);
+ s->base.is_jmp = DISAS_JUMP;
+ return true;
+}
+
+static bool trans_BRA(DisasContext *s, arg_bra *a)
+{
+ TCGv_i64 dst;
+
+ if (!dc_isar_feature(aa64_pauth, s)) {
+ return false;
+ }
+ dst = auth_branch_target(s, cpu_reg(s, a->rn), cpu_reg_sp(s, a->rm), !a->m);
+ gen_a64_set_pc(s, dst);
+ set_btype_for_br(s, a->rn);
+ s->base.is_jmp = DISAS_JUMP;
+ return true;
+}
+
+static bool trans_BLRA(DisasContext *s, arg_bra *a)
+{
+ TCGv_i64 dst, lr;
+
+ if (!dc_isar_feature(aa64_pauth, s)) {
+ return false;
+ }
+ dst = auth_branch_target(s, cpu_reg(s, a->rn), cpu_reg_sp(s, a->rm), !a->m);
+ lr = cpu_reg(s, 30);
+ if (dst == lr) {
+ TCGv_i64 tmp = tcg_temp_new_i64();
+ tcg_gen_mov_i64(tmp, dst);
+ dst = tmp;
+ }
+ gen_pc_plus_diff(s, lr, curr_insn_len(s));
+ gen_a64_set_pc(s, dst);
+ set_btype_for_blr(s);
+ s->base.is_jmp = DISAS_JUMP;
+ return true;
+}
+
+static bool trans_ERET(DisasContext *s, arg_ERET *a)
+{
+ TCGv_i64 dst;
+
+ if (s->current_el == 0) {
+ return false;
+ }
+ if (s->fgt_eret) {
+ gen_exception_insn_el(s, 0, EXCP_UDEF, syn_erettrap(0), 2);
+ return true;
+ }
+ dst = tcg_temp_new_i64();
+ tcg_gen_ld_i64(dst, cpu_env,
+ offsetof(CPUARMState, elr_el[s->current_el]));
+
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
+ gen_io_start();
}
+
+ gen_helper_exception_return(cpu_env, dst);
+ /* Must exit loop to check un-masked IRQs */
+ s->base.is_jmp = DISAS_EXIT;
+ return true;
+}
+
+static bool trans_ERETA(DisasContext *s, arg_reta *a)
+{
+ TCGv_i64 dst;
+
+ if (!dc_isar_feature(aa64_pauth, s)) {
+ return false;
+ }
+ if (s->current_el == 0) {
+ return false;
+ }
+ /* The FGT trap takes precedence over an auth trap. */
+ if (s->fgt_eret) {
+ gen_exception_insn_el(s, 0, EXCP_UDEF, syn_erettrap(a->m ? 3 : 2), 2);
+ return true;
+ }
+ dst = tcg_temp_new_i64();
+ tcg_gen_ld_i64(dst, cpu_env,
+ offsetof(CPUARMState, elr_el[s->current_el]));
+
+ dst = auth_branch_target(s, dst, cpu_X[31], !a->m);
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+
+ gen_helper_exception_return(cpu_env, dst);
+ /* Must exit loop to check un-masked IRQs */
+ s->base.is_jmp = DISAS_EXIT;
+ return true;
}
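Both FGT trap paths above use syn_erettrap(), preserving the syndrome the
deleted code computed from op3 (0 for ERET, 2/3 for ERETAA/ERETAB). For
reference, the helper in target/arm/syndrome.h is:

    static inline uint32_t syn_erettrap(int op)
    {
        return (EC_ERETTRAP << ARM_EL_EC_SHIFT) | ARM_EL_IL | op;
    }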
/* HINT instruction group, including various allocated HINTs */
@@ -2173,233 +2362,10 @@ static void disas_exc(DisasContext *s, uint32_t insn)
}
}
-/* Unconditional branch (register)
- * 31 25 24 21 20 16 15 10 9 5 4 0
- * +---------------+-------+-------+-------+------+-------+
- * | 1 1 0 1 0 1 1 | opc | op2 | op3 | Rn | op4 |
- * +---------------+-------+-------+-------+------+-------+
- */
-static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
-{
- unsigned int opc, op2, op3, rn, op4;
- unsigned btype_mod = 2; /* 0: BR, 1: BLR, 2: other */
- TCGv_i64 dst;
- TCGv_i64 modifier;
-
- opc = extract32(insn, 21, 4);
- op2 = extract32(insn, 16, 5);
- op3 = extract32(insn, 10, 6);
- rn = extract32(insn, 5, 5);
- op4 = extract32(insn, 0, 5);
-
- if (op2 != 0x1f) {
- goto do_unallocated;
- }
-
- switch (opc) {
- case 0: /* BR */
- case 1: /* BLR */
- case 2: /* RET */
- btype_mod = opc;
- switch (op3) {
- case 0:
- /* BR, BLR, RET */
- if (op4 != 0) {
- goto do_unallocated;
- }
- dst = cpu_reg(s, rn);
- break;
-
- case 2:
- case 3:
- if (!dc_isar_feature(aa64_pauth, s)) {
- goto do_unallocated;
- }
- if (opc == 2) {
- /* RETAA, RETAB */
- if (rn != 0x1f || op4 != 0x1f) {
- goto do_unallocated;
- }
- rn = 30;
- modifier = cpu_X[31];
- } else {
- /* BRAAZ, BRABZ, BLRAAZ, BLRABZ */
- if (op4 != 0x1f) {
- goto do_unallocated;
- }
- modifier = tcg_constant_i64(0);
- }
- if (s->pauth_active) {
- dst = tcg_temp_new_i64();
- if (op3 == 2) {
- gen_helper_autia(dst, cpu_env, cpu_reg(s, rn), modifier);
- } else {
- gen_helper_autib(dst, cpu_env, cpu_reg(s, rn), modifier);
- }
- } else {
- dst = cpu_reg(s, rn);
- }
- break;
-
- default:
- goto do_unallocated;
- }
- /* BLR also needs to load return address */
- if (opc == 1) {
- TCGv_i64 lr = cpu_reg(s, 30);
- if (dst == lr) {
- TCGv_i64 tmp = tcg_temp_new_i64();
- tcg_gen_mov_i64(tmp, dst);
- dst = tmp;
- }
- gen_pc_plus_diff(s, lr, curr_insn_len(s));
- }
- gen_a64_set_pc(s, dst);
- break;
-
- case 8: /* BRAA */
- case 9: /* BLRAA */
- if (!dc_isar_feature(aa64_pauth, s)) {
- goto do_unallocated;
- }
- if ((op3 & ~1) != 2) {
- goto do_unallocated;
- }
- btype_mod = opc & 1;
- if (s->pauth_active) {
- dst = tcg_temp_new_i64();
- modifier = cpu_reg_sp(s, op4);
- if (op3 == 2) {
- gen_helper_autia(dst, cpu_env, cpu_reg(s, rn), modifier);
- } else {
- gen_helper_autib(dst, cpu_env, cpu_reg(s, rn), modifier);
- }
- } else {
- dst = cpu_reg(s, rn);
- }
- /* BLRAA also needs to load return address */
- if (opc == 9) {
- TCGv_i64 lr = cpu_reg(s, 30);
- if (dst == lr) {
- TCGv_i64 tmp = tcg_temp_new_i64();
- tcg_gen_mov_i64(tmp, dst);
- dst = tmp;
- }
- gen_pc_plus_diff(s, lr, curr_insn_len(s));
- }
- gen_a64_set_pc(s, dst);
- break;
-
- case 4: /* ERET */
- if (s->current_el == 0) {
- goto do_unallocated;
- }
- switch (op3) {
- case 0: /* ERET */
- if (op4 != 0) {
- goto do_unallocated;
- }
- if (s->fgt_eret) {
- gen_exception_insn_el(s, 0, EXCP_UDEF, syn_erettrap(op3), 2);
- return;
- }
- dst = tcg_temp_new_i64();
- tcg_gen_ld_i64(dst, cpu_env,
- offsetof(CPUARMState, elr_el[s->current_el]));
- break;
-
- case 2: /* ERETAA */
- case 3: /* ERETAB */
- if (!dc_isar_feature(aa64_pauth, s)) {
- goto do_unallocated;
- }
- if (rn != 0x1f || op4 != 0x1f) {
- goto do_unallocated;
- }
- /* The FGT trap takes precedence over an auth trap. */
- if (s->fgt_eret) {
- gen_exception_insn_el(s, 0, EXCP_UDEF, syn_erettrap(op3), 2);
- return;
- }
- dst = tcg_temp_new_i64();
- tcg_gen_ld_i64(dst, cpu_env,
- offsetof(CPUARMState, elr_el[s->current_el]));
- if (s->pauth_active) {
- modifier = cpu_X[31];
- if (op3 == 2) {
- gen_helper_autia(dst, cpu_env, dst, modifier);
- } else {
- gen_helper_autib(dst, cpu_env, dst, modifier);
- }
- }
- break;
-
- default:
- goto do_unallocated;
- }
- if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
- gen_io_start();
- }
-
- gen_helper_exception_return(cpu_env, dst);
- /* Must exit loop to check un-masked IRQs */
- s->base.is_jmp = DISAS_EXIT;
- return;
-
- case 5: /* DRPS */
- if (op3 != 0 || op4 != 0 || rn != 0x1f) {
- goto do_unallocated;
- } else {
- unallocated_encoding(s);
- }
- return;
-
- default:
- do_unallocated:
- unallocated_encoding(s);
- return;
- }
-
- switch (btype_mod) {
- case 0: /* BR */
- if (dc_isar_feature(aa64_bti, s)) {
- /* BR to {x16,x17} or !guard -> 1, else 3. */
- set_btype(s, rn == 16 || rn == 17 || !s->guarded_page ? 1 : 3);
- }
- break;
-
- case 1: /* BLR */
- if (dc_isar_feature(aa64_bti, s)) {
- /* BLR sets BTYPE to 2, regardless of source guarded page. */
- set_btype(s, 2);
- }
- break;
-
- default: /* RET or none of the above. */
- /* BTYPE will be set to 0 by normal end-of-insn processing. */
- break;
- }
-
- s->base.is_jmp = DISAS_JUMP;
-}
-
/* Branches, exception generating and system instructions */
static void disas_b_exc_sys(DisasContext *s, uint32_t insn)
{
switch (extract32(insn, 25, 7)) {
- case 0x0a: case 0x0b:
- case 0x4a: case 0x4b: /* Unconditional branch (immediate) */
- disas_uncond_b_imm(s, insn);
- break;
- case 0x1a: case 0x5a: /* Compare & branch (immediate) */
- disas_comp_b_imm(s, insn);
- break;
- case 0x1b: case 0x5b: /* Test & branch (immediate) */
- disas_test_b_imm(s, insn);
- break;
- case 0x2a: /* Conditional branch (immediate) */
- disas_cond_b_imm(s, insn);
- break;
case 0x6a: /* Exception generation / System */
if (insn & (1 << 24)) {
if (extract32(insn, 22, 2) == 0) {
@@ -2411,9 +2377,6 @@ static void disas_b_exc_sys(DisasContext *s, uint32_t insn)
disas_exc(s, insn);
}
break;
- case 0x6b: /* Unconditional branch (register) */
- disas_uncond_b_reg(s, insn);
- break;
default:
unallocated_encoding(s);
break;
@@ -4172,132 +4135,82 @@ static void disas_ldst(DisasContext *s, uint32_t insn)
}
}
-/* PC-rel. addressing
- * 31 30 29 28 24 23 5 4 0
- * +----+-------+-----------+-------------------+------+
- * | op | immlo | 1 0 0 0 0 | immhi | Rd |
- * +----+-------+-----------+-------------------+------+
- */
-static void disas_pc_rel_adr(DisasContext *s, uint32_t insn)
-{
- unsigned int page, rd;
- int64_t offset;
+typedef void ArithTwoOp(TCGv_i64, TCGv_i64, TCGv_i64);
- page = extract32(insn, 31, 1);
- /* SignExtend(immhi:immlo) -> offset */
- offset = sextract64(insn, 5, 19);
- offset = offset << 2 | extract32(insn, 29, 2);
- rd = extract32(insn, 0, 5);
+static bool gen_rri(DisasContext *s, arg_rri_sf *a,
+ bool rd_sp, bool rn_sp, ArithTwoOp *fn)
+{
+ TCGv_i64 tcg_rn = rn_sp ? cpu_reg_sp(s, a->rn) : cpu_reg(s, a->rn);
+ TCGv_i64 tcg_rd = rd_sp ? cpu_reg_sp(s, a->rd) : cpu_reg(s, a->rd);
+ TCGv_i64 tcg_imm = tcg_constant_i64(a->imm);
- if (page) {
- /* ADRP (page based) */
- offset <<= 12;
- /* The page offset is ok for CF_PCREL. */
- offset -= s->pc_curr & 0xfff;
+ fn(tcg_rd, tcg_rn, tcg_imm);
+ if (!a->sf) {
+ tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
}
-
- gen_pc_plus_diff(s, cpu_reg(s, rd), offset);
+ return true;
}
/*
- * Add/subtract (immediate)
- *
- * 31 30 29 28 23 22 21 10 9 5 4 0
- * +--+--+--+-------------+--+-------------+-----+-----+
- * |sf|op| S| 1 0 0 0 1 0 |sh| imm12 | Rn | Rd |
- * +--+--+--+-------------+--+-------------+-----+-----+
- *
- * sf: 0 -> 32bit, 1 -> 64bit
- * op: 0 -> add , 1 -> sub
- * S: 1 -> set flags
- * sh: 1 -> LSL imm by 12
+ * PC-rel. addressing
*/
-static void disas_add_sub_imm(DisasContext *s, uint32_t insn)
-{
- int rd = extract32(insn, 0, 5);
- int rn = extract32(insn, 5, 5);
- uint64_t imm = extract32(insn, 10, 12);
- bool shift = extract32(insn, 22, 1);
- bool setflags = extract32(insn, 29, 1);
- bool sub_op = extract32(insn, 30, 1);
- bool is_64bit = extract32(insn, 31, 1);
-
- TCGv_i64 tcg_rn = cpu_reg_sp(s, rn);
- TCGv_i64 tcg_rd = setflags ? cpu_reg(s, rd) : cpu_reg_sp(s, rd);
- TCGv_i64 tcg_result;
- if (shift) {
- imm <<= 12;
- }
+static bool trans_ADR(DisasContext *s, arg_ri *a)
+{
+ gen_pc_plus_diff(s, cpu_reg(s, a->rd), a->imm);
+ return true;
+}
- tcg_result = tcg_temp_new_i64();
- if (!setflags) {
- if (sub_op) {
- tcg_gen_subi_i64(tcg_result, tcg_rn, imm);
- } else {
- tcg_gen_addi_i64(tcg_result, tcg_rn, imm);
- }
- } else {
- TCGv_i64 tcg_imm = tcg_constant_i64(imm);
- if (sub_op) {
- gen_sub_CC(is_64bit, tcg_result, tcg_rn, tcg_imm);
- } else {
- gen_add_CC(is_64bit, tcg_result, tcg_rn, tcg_imm);
- }
- }
+static bool trans_ADRP(DisasContext *s, arg_ri *a)
+{
+ int64_t offset = (int64_t)a->imm << 12;
- if (is_64bit) {
- tcg_gen_mov_i64(tcg_rd, tcg_result);
- } else {
- tcg_gen_ext32u_i64(tcg_rd, tcg_result);
- }
+ /* The page offset is ok for CF_PCREL. */
+ offset -= s->pc_curr & 0xfff;
+ gen_pc_plus_diff(s, cpu_reg(s, a->rd), offset);
+ return true;
}
/*
+ * Add/subtract (immediate)
+ */
+TRANS(ADD_i, gen_rri, a, 1, 1, tcg_gen_add_i64)
+TRANS(SUB_i, gen_rri, a, 1, 1, tcg_gen_sub_i64)
+TRANS(ADDS_i, gen_rri, a, 0, 1, a->sf ? gen_add64_CC : gen_add32_CC)
+TRANS(SUBS_i, gen_rri, a, 0, 1, a->sf ? gen_sub64_CC : gen_sub32_CC)
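The TRANS/TRANS_FEAT macros used here expand to one-line trans_* wrappers
around a shared helper; from target/arm/tcg/translate.h:

    #define TRANS(NAME, FUNC, ...) \
        static bool trans_##NAME(DisasContext *s, arg_##NAME *a) \
        { return FUNC(s, __VA_ARGS__); }
    #define TRANS_FEAT(NAME, FEAT, FUNC, ...) \
        static bool trans_##NAME(DisasContext *s, arg_##NAME *a) \
        { return dc_isar_feature(FEAT, s) && FUNC(s, __VA_ARGS__); }

so ADDS_i, for example, becomes a call to gen_rri() with the sf-selected
flag-setting helper.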
+
+/*
* Add/subtract (immediate, with tags)
- *
- * 31 30 29 28 23 22 21 16 14 10 9 5 4 0
- * +--+--+--+-------------+--+---------+--+-------+-----+-----+
- * |sf|op| S| 1 0 0 0 1 1 |o2| uimm6 |o3| uimm4 | Rn | Rd |
- * +--+--+--+-------------+--+---------+--+-------+-----+-----+
- *
- * op: 0 -> add, 1 -> sub
*/
-static void disas_add_sub_imm_with_tags(DisasContext *s, uint32_t insn)
+
+static bool gen_add_sub_imm_with_tags(DisasContext *s, arg_rri_tag *a,
+ bool sub_op)
{
- int rd = extract32(insn, 0, 5);
- int rn = extract32(insn, 5, 5);
- int uimm4 = extract32(insn, 10, 4);
- int uimm6 = extract32(insn, 16, 6);
- bool sub_op = extract32(insn, 30, 1);
TCGv_i64 tcg_rn, tcg_rd;
int imm;
- /* Test all of sf=1, S=0, o2=0, o3=0. */
- if ((insn & 0xa040c000u) != 0x80000000u ||
- !dc_isar_feature(aa64_mte_insn_reg, s)) {
- unallocated_encoding(s);
- return;
- }
-
- imm = uimm6 << LOG2_TAG_GRANULE;
+ imm = a->uimm6 << LOG2_TAG_GRANULE;
if (sub_op) {
imm = -imm;
}
- tcg_rn = cpu_reg_sp(s, rn);
- tcg_rd = cpu_reg_sp(s, rd);
+ tcg_rn = cpu_reg_sp(s, a->rn);
+ tcg_rd = cpu_reg_sp(s, a->rd);
if (s->ata) {
gen_helper_addsubg(tcg_rd, cpu_env, tcg_rn,
tcg_constant_i32(imm),
- tcg_constant_i32(uimm4));
+ tcg_constant_i32(a->uimm4));
} else {
tcg_gen_addi_i64(tcg_rd, tcg_rn, imm);
gen_address_with_allocation_tag0(tcg_rd, tcg_rd);
}
+ return true;
}
+TRANS_FEAT(ADDG_i, aa64_mte_insn_reg, gen_add_sub_imm_with_tags, a, false)
+TRANS_FEAT(SUBG_i, aa64_mte_insn_reg, gen_add_sub_imm_with_tags, a, true)
+
/* The input should be a value in the bottom e bits (with higher
* bits zero); returns that value replicated into every element
* of size e in a 64 bit integer.
@@ -4312,14 +4225,12 @@ static uint64_t bitfield_replicate(uint64_t mask, unsigned int e)
return mask;
}
-/* Return a value with the bottom len bits set (where 0 < len <= 64) */
-static inline uint64_t bitmask64(unsigned int length)
-{
- assert(length > 0 && length <= 64);
- return ~0ULL >> (64 - length);
-}
+/*
+ * Logical (immediate)
+ */
-/* Simplified variant of pseudocode DecodeBitMasks() for the case where we
+/*
+ * Simplified variant of pseudocode DecodeBitMasks() for the case where we
* only require the wmask. Returns false if the imms/immr/immn are a reserved
* value (ie should cause a guest UNDEF exception), and true if they are
* valid, in which case the decoded bit pattern is written to result.
@@ -4374,10 +4285,10 @@ bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn,
/* Create the value of one element: s+1 set bits rotated
* by r within the element (which is e bits wide)...
*/
- mask = bitmask64(s + 1);
+ mask = MAKE_64BIT_MASK(0, s + 1);
if (r) {
mask = (mask >> r) | (mask << (e - r));
- mask &= bitmask64(e);
+ mask &= MAKE_64BIT_MASK(0, e);
}
/* ...then replicate the element over the whole 64 bit value */
mask = bitfield_replicate(mask, e);
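MAKE_64BIT_MASK(0, len) is the generic replacement for the local bitmask64()
helper being deleted; include/qemu/bitops.h defines it as

    #define MAKE_64BIT_MASK(shift, length) \
        (((~0ULL) >> (64 - (length))) << (shift))

Worked example of the element construction: with e = 8, s = 3, r = 2 the mask
starts as 0b00001111, rotates within the byte to 0b11000011 (0xc3), and
replicates to 0xc3c3c3c3c3c3c3c3.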
@@ -4385,295 +4296,215 @@ bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn,
return true;
}
-/* Logical (immediate)
- * 31 30 29 28 23 22 21 16 15 10 9 5 4 0
- * +----+-----+-------------+---+------+------+------+------+
- * | sf | opc | 1 0 0 1 0 0 | N | immr | imms | Rn | Rd |
- * +----+-----+-------------+---+------+------+------+------+
- */
-static void disas_logic_imm(DisasContext *s, uint32_t insn)
+static bool gen_rri_log(DisasContext *s, arg_rri_log *a, bool set_cc,
+ void (*fn)(TCGv_i64, TCGv_i64, int64_t))
{
- unsigned int sf, opc, is_n, immr, imms, rn, rd;
TCGv_i64 tcg_rd, tcg_rn;
- uint64_t wmask;
- bool is_and = false;
-
- sf = extract32(insn, 31, 1);
- opc = extract32(insn, 29, 2);
- is_n = extract32(insn, 22, 1);
- immr = extract32(insn, 16, 6);
- imms = extract32(insn, 10, 6);
- rn = extract32(insn, 5, 5);
- rd = extract32(insn, 0, 5);
-
- if (!sf && is_n) {
- unallocated_encoding(s);
- return;
- }
+ uint64_t imm;
- if (opc == 0x3) { /* ANDS */
- tcg_rd = cpu_reg(s, rd);
- } else {
- tcg_rd = cpu_reg_sp(s, rd);
+ /* Some immediate field values are reserved. */
+ if (!logic_imm_decode_wmask(&imm, extract32(a->dbm, 12, 1),
+ extract32(a->dbm, 0, 6),
+ extract32(a->dbm, 6, 6))) {
+ return false;
}
- tcg_rn = cpu_reg(s, rn);
-
- if (!logic_imm_decode_wmask(&wmask, is_n, imms, immr)) {
- /* some immediate field values are reserved */
- unallocated_encoding(s);
- return;
+ if (!a->sf) {
+ imm &= 0xffffffffull;
}
- if (!sf) {
- wmask &= 0xffffffff;
- }
+ tcg_rd = set_cc ? cpu_reg(s, a->rd) : cpu_reg_sp(s, a->rd);
+ tcg_rn = cpu_reg(s, a->rn);
- switch (opc) {
- case 0x3: /* ANDS */
- case 0x0: /* AND */
- tcg_gen_andi_i64(tcg_rd, tcg_rn, wmask);
- is_and = true;
- break;
- case 0x1: /* ORR */
- tcg_gen_ori_i64(tcg_rd, tcg_rn, wmask);
- break;
- case 0x2: /* EOR */
- tcg_gen_xori_i64(tcg_rd, tcg_rn, wmask);
- break;
- default:
- assert(FALSE); /* must handle all above */
- break;
+ fn(tcg_rd, tcg_rn, imm);
+ if (set_cc) {
+ gen_logic_CC(a->sf, tcg_rd);
}
-
- if (!sf && !is_and) {
- /* zero extend final result; we know we can skip this for AND
- * since the immediate had the high 32 bits clear.
- */
+ if (!a->sf) {
tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
}
-
- if (opc == 3) { /* ANDS */
- gen_logic_CC(sf, tcg_rd);
- }
+ return true;
}
+TRANS(AND_i, gen_rri_log, a, false, tcg_gen_andi_i64)
+TRANS(ORR_i, gen_rri_log, a, false, tcg_gen_ori_i64)
+TRANS(EOR_i, gen_rri_log, a, false, tcg_gen_xori_i64)
+TRANS(ANDS_i, gen_rri_log, a, true, tcg_gen_andi_i64)
+
/*
* Move wide (immediate)
- *
- * 31 30 29 28 23 22 21 20 5 4 0
- * +--+-----+-------------+-----+----------------+------+
- * |sf| opc | 1 0 0 1 0 1 | hw | imm16 | Rd |
- * +--+-----+-------------+-----+----------------+------+
- *
- * sf: 0 -> 32 bit, 1 -> 64 bit
- * opc: 00 -> N, 10 -> Z, 11 -> K
- * hw: shift/16 (0,16, and sf only 32, 48)
*/
-static void disas_movw_imm(DisasContext *s, uint32_t insn)
+
+static bool trans_MOVZ(DisasContext *s, arg_movw *a)
{
- int rd = extract32(insn, 0, 5);
- uint64_t imm = extract32(insn, 5, 16);
- int sf = extract32(insn, 31, 1);
- int opc = extract32(insn, 29, 2);
- int pos = extract32(insn, 21, 2) << 4;
- TCGv_i64 tcg_rd = cpu_reg(s, rd);
+ int pos = a->hw << 4;
+ tcg_gen_movi_i64(cpu_reg(s, a->rd), (uint64_t)a->imm << pos);
+ return true;
+}
- if (!sf && (pos >= 32)) {
- unallocated_encoding(s);
- return;
+static bool trans_MOVN(DisasContext *s, arg_movw *a)
+{
+ int pos = a->hw << 4;
+ uint64_t imm = a->imm;
+
+ imm = ~(imm << pos);
+ if (!a->sf) {
+ imm = (uint32_t)imm;
}
+ tcg_gen_movi_i64(cpu_reg(s, a->rd), imm);
+ return true;
+}
- switch (opc) {
- case 0: /* MOVN */
- case 2: /* MOVZ */
- imm <<= pos;
- if (opc == 0) {
- imm = ~imm;
- }
- if (!sf) {
- imm &= 0xffffffffu;
- }
- tcg_gen_movi_i64(tcg_rd, imm);
- break;
- case 3: /* MOVK */
- tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_constant_i64(imm), pos, 16);
- if (!sf) {
- tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
- }
- break;
- default:
- unallocated_encoding(s);
- break;
+static bool trans_MOVK(DisasContext *s, arg_movw *a)
+{
+ int pos = a->hw << 4;
+ TCGv_i64 tcg_rd, tcg_im;
+
+ tcg_rd = cpu_reg(s, a->rd);
+ tcg_im = tcg_constant_i64(a->imm);
+ tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_im, pos, 16);
+ if (!a->sf) {
+ tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
}
+ return true;
}
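Together the three patterns implement the standard idiom for synthesizing wide
constants: MOVZ plants one 16-bit chunk and clears the rest, MOVK deposits
further chunks without disturbing the others. Traced in C:

    /* MOVZ x0, #0x1234, LSL #16  ->  rd = imm << (hw * 16) */
    uint64_t x0 = (uint64_t)0x1234 << 16;      /* 0x0000000012340000 */
    /* MOVK x0, #0x5678 (hw = 0)  ->  deposit at bits [15:0] */
    x0 = (x0 & ~(uint64_t)0xffff) | 0x5678;    /* 0x0000000012345678 */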
-/* Bitfield
- * 31 30 29 28 23 22 21 16 15 10 9 5 4 0
- * +----+-----+-------------+---+------+------+------+------+
- * | sf | opc | 1 0 0 1 1 0 | N | immr | imms | Rn | Rd |
- * +----+-----+-------------+---+------+------+------+------+
+/*
+ * Bitfield
*/
-static void disas_bitfield(DisasContext *s, uint32_t insn)
+
+static bool trans_SBFM(DisasContext *s, arg_SBFM *a)
{
- unsigned int sf, n, opc, ri, si, rn, rd, bitsize, pos, len;
- TCGv_i64 tcg_rd, tcg_tmp;
+ TCGv_i64 tcg_rd = cpu_reg(s, a->rd);
+ TCGv_i64 tcg_tmp = read_cpu_reg(s, a->rn, 1);
+ unsigned int bitsize = a->sf ? 64 : 32;
+ unsigned int ri = a->immr;
+ unsigned int si = a->imms;
+ unsigned int pos, len;
- sf = extract32(insn, 31, 1);
- opc = extract32(insn, 29, 2);
- n = extract32(insn, 22, 1);
- ri = extract32(insn, 16, 6);
- si = extract32(insn, 10, 6);
- rn = extract32(insn, 5, 5);
- rd = extract32(insn, 0, 5);
- bitsize = sf ? 64 : 32;
+ if (si >= ri) {
+ /* Wd<s-r:0> = Wn<s:r> */
+ len = (si - ri) + 1;
+ tcg_gen_sextract_i64(tcg_rd, tcg_tmp, ri, len);
+ if (!a->sf) {
+ tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
+ }
+ } else {
+ /* Wd<32+s-r,32-r> = Wn<s:0> */
+ len = si + 1;
+ pos = (bitsize - ri) & (bitsize - 1);
- if (sf != n || ri >= bitsize || si >= bitsize || opc > 2) {
- unallocated_encoding(s);
- return;
+ if (len < ri) {
+ /*
+ * Sign extend the destination field from len to fill the
+ * balance of the word. Let the deposit below insert all
+ * of those sign bits.
+ */
+ tcg_gen_sextract_i64(tcg_tmp, tcg_tmp, 0, len);
+ len = ri;
+ }
+
+ /*
+ * We start with zero, and we haven't modified any bits outside
+ * bitsize, therefore no final zero-extension is needed for !sf.
+ */
+ tcg_gen_deposit_z_i64(tcg_rd, tcg_tmp, pos, len);
}
+ return true;
+}
- tcg_rd = cpu_reg(s, rd);
+static bool trans_UBFM(DisasContext *s, arg_UBFM *a)
+{
+ TCGv_i64 tcg_rd = cpu_reg(s, a->rd);
+ TCGv_i64 tcg_tmp = read_cpu_reg(s, a->rn, 1);
+ unsigned int bitsize = a->sf ? 64 : 32;
+ unsigned int ri = a->immr;
+ unsigned int si = a->imms;
+ unsigned int pos, len;
- /* Suppress the zero-extend for !sf. Since RI and SI are constrained
- to be smaller than bitsize, we'll never reference data outside the
- low 32-bits anyway. */
- tcg_tmp = read_cpu_reg(s, rn, 1);
+
- /* Recognize simple(r) extractions. */
if (si >= ri) {
/* Wd<s-r:0> = Wn<s:r> */
len = (si - ri) + 1;
- if (opc == 0) { /* SBFM: ASR, SBFX, SXTB, SXTH, SXTW */
- tcg_gen_sextract_i64(tcg_rd, tcg_tmp, ri, len);
- goto done;
- } else if (opc == 2) { /* UBFM: UBFX, LSR, UXTB, UXTH */
- tcg_gen_extract_i64(tcg_rd, tcg_tmp, ri, len);
- return;
- }
- /* opc == 1, BFXIL fall through to deposit */
- tcg_gen_shri_i64(tcg_tmp, tcg_tmp, ri);
- pos = 0;
+ tcg_gen_extract_i64(tcg_rd, tcg_tmp, ri, len);
} else {
- /* Handle the ri > si case with a deposit
- * Wd<32+s-r,32-r> = Wn<s:0>
- */
+ /* Wd<32+s-r,32-r> = Wn<s:0> */
len = si + 1;
pos = (bitsize - ri) & (bitsize - 1);
+ tcg_gen_deposit_z_i64(tcg_rd, tcg_tmp, pos, len);
}
+ return true;
+}
- if (opc == 0 && len < ri) {
- /* SBFM: sign extend the destination field from len to fill
- the balance of the word. Let the deposit below insert all
- of those sign bits. */
- tcg_gen_sextract_i64(tcg_tmp, tcg_tmp, 0, len);
- len = ri;
- }
+static bool trans_BFM(DisasContext *s, arg_BFM *a)
+{
+ TCGv_i64 tcg_rd = cpu_reg(s, a->rd);
+ TCGv_i64 tcg_tmp = read_cpu_reg(s, a->rn, 1);
+ unsigned int bitsize = a->sf ? 64 : 32;
+ unsigned int ri = a->immr;
+ unsigned int si = a->imms;
+ unsigned int pos, len;
+
- if (opc == 1) { /* BFM, BFXIL */
- tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, pos, len);
+ if (si >= ri) {
+ /* Wd<s-r:0> = Wn<s:r> */
+ tcg_gen_shri_i64(tcg_tmp, tcg_tmp, ri);
+ len = (si - ri) + 1;
+ pos = 0;
} else {
- /* SBFM or UBFM: We start with zero, and we haven't modified
- any bits outside bitsize, therefore the zero-extension
- below is unneeded. */
- tcg_gen_deposit_z_i64(tcg_rd, tcg_tmp, pos, len);
- return;
+ /* Wd<32+s-r,32-r> = Wn<s:0> */
+ len = si + 1;
+ pos = (bitsize - ri) & (bitsize - 1);
}
- done:
- if (!sf) { /* zero extend final result */
+ tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, pos, len);
+ if (!a->sf) {
tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
}
+ return true;
}
-/* Extract
- * 31 30 29 28 23 22 21 20 16 15 10 9 5 4 0
- * +----+------+-------------+---+----+------+--------+------+------+
- * | sf | op21 | 1 0 0 1 1 1 | N | o0 | Rm | imms | Rn | Rd |
- * +----+------+-------------+---+----+------+--------+------+------+
- */
-static void disas_extract(DisasContext *s, uint32_t insn)
+static bool trans_EXTR(DisasContext *s, arg_extract *a)
{
- unsigned int sf, n, rm, imm, rn, rd, bitsize, op21, op0;
+ TCGv_i64 tcg_rd, tcg_rm, tcg_rn;
- sf = extract32(insn, 31, 1);
- n = extract32(insn, 22, 1);
- rm = extract32(insn, 16, 5);
- imm = extract32(insn, 10, 6);
- rn = extract32(insn, 5, 5);
- rd = extract32(insn, 0, 5);
- op21 = extract32(insn, 29, 2);
- op0 = extract32(insn, 21, 1);
- bitsize = sf ? 64 : 32;
+ tcg_rd = cpu_reg(s, a->rd);
- if (sf != n || op21 || op0 || imm >= bitsize) {
- unallocated_encoding(s);
+ if (unlikely(a->imm == 0)) {
+ /*
+ * tcg shl_i32/shl_i64 is undefined for 32/64 bit shifts,
+ * so an extract from bit 0 is a special case.
+ */
+ if (a->sf) {
+ tcg_gen_mov_i64(tcg_rd, cpu_reg(s, a->rm));
+ } else {
+ tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, a->rm));
+ }
} else {
- TCGv_i64 tcg_rd, tcg_rm, tcg_rn;
+ tcg_rm = cpu_reg(s, a->rm);
+ tcg_rn = cpu_reg(s, a->rn);
- tcg_rd = cpu_reg(s, rd);
-
- if (unlikely(imm == 0)) {
- /* tcg shl_i32/shl_i64 is undefined for 32/64 bit shifts,
- * so an extract from bit 0 is a special case.
- */
- if (sf) {
- tcg_gen_mov_i64(tcg_rd, cpu_reg(s, rm));
- } else {
- tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, rm));
- }
+ if (a->sf) {
+ /* Specialization to ROR happens in EXTRACT2. */
+ tcg_gen_extract2_i64(tcg_rd, tcg_rm, tcg_rn, a->imm);
} else {
- tcg_rm = cpu_reg(s, rm);
- tcg_rn = cpu_reg(s, rn);
+ TCGv_i32 t0 = tcg_temp_new_i32();
- if (sf) {
- /* Specialization to ROR happens in EXTRACT2. */
- tcg_gen_extract2_i64(tcg_rd, tcg_rm, tcg_rn, imm);
+ tcg_gen_extrl_i64_i32(t0, tcg_rm);
+ if (a->rm == a->rn) {
+ tcg_gen_rotri_i32(t0, t0, a->imm);
} else {
- TCGv_i32 t0 = tcg_temp_new_i32();
-
- tcg_gen_extrl_i64_i32(t0, tcg_rm);
- if (rm == rn) {
- tcg_gen_rotri_i32(t0, t0, imm);
- } else {
- TCGv_i32 t1 = tcg_temp_new_i32();
- tcg_gen_extrl_i64_i32(t1, tcg_rn);
- tcg_gen_extract2_i32(t0, t0, t1, imm);
- }
- tcg_gen_extu_i32_i64(tcg_rd, t0);
+ TCGv_i32 t1 = tcg_temp_new_i32();
+ tcg_gen_extrl_i64_i32(t1, tcg_rn);
+ tcg_gen_extract2_i32(t0, t0, t1, a->imm);
}
+ tcg_gen_extu_i32_i64(tcg_rd, t0);
}
}
-}
-
-/* Data processing - immediate */
-static void disas_data_proc_imm(DisasContext *s, uint32_t insn)
-{
- switch (extract32(insn, 23, 6)) {
- case 0x20: case 0x21: /* PC-rel. addressing */
- disas_pc_rel_adr(s, insn);
- break;
- case 0x22: /* Add/subtract (immediate) */
- disas_add_sub_imm(s, insn);
- break;
- case 0x23: /* Add/subtract (immediate, with tags) */
- disas_add_sub_imm_with_tags(s, insn);
- break;
- case 0x24: /* Logical (immediate) */
- disas_logic_imm(s, insn);
- break;
- case 0x25: /* Move wide (immediate) */
- disas_movw_imm(s, insn);
- break;
- case 0x26: /* Bitfield */
- disas_bitfield(s, insn);
- break;
- case 0x27: /* Extract */
- disas_extract(s, insn);
- break;
- default:
- unallocated_encoding(s);
- break;
- }
+ return true;
}
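When rm == rn, EXTR degenerates into a rotate right, which is why the 32-bit
path special-cases it with rotri (extract2 with identical inputs would work
too, but rotri maps to a native rotate on most hosts). Concretely:

    /* EXTR w0, w1, w1, #8 == ROR w0, w1, #8:
     * 0x11223344 rotated right by 8 is 0x44112233. */
    static uint32_t ror32(uint32_t x, unsigned r)  /* r in [1,31] */
    {
        return (x >> r) | (x << (32 - r));
    }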
/* Shift a TCGv src by TCGv shift_amount, put result in dst.
@@ -14100,12 +13931,6 @@ static void disas_data_proc_simd_fp(DisasContext *s, uint32_t insn)
}
}
-/*
- * Include the generated SME FA64 decoder.
- */
-
-#include "decode-sme-fa64.c.inc"
-
static bool trans_OK(DisasContext *s, arg_OK *a)
{
return true;
@@ -14200,6 +14025,33 @@ static bool btype_destination_ok(uint32_t insn, bool bt, int btype)
return false;
}
+/* C3.1 A64 instruction index by encoding */
+static void disas_a64_legacy(DisasContext *s, uint32_t insn)
+{
+ switch (extract32(insn, 25, 4)) {
+ case 0xa: case 0xb: /* Branch, exception generation and system insns */
+ disas_b_exc_sys(s, insn);
+ break;
+ case 0x4:
+ case 0x6:
+ case 0xc:
+ case 0xe: /* Loads and stores */
+ disas_ldst(s, insn);
+ break;
+ case 0x5:
+ case 0xd: /* Data processing - register */
+ disas_data_proc_reg(s, insn);
+ break;
+ case 0x7:
+ case 0xf: /* Data processing - SIMD and floating point */
+ disas_data_proc_simd_fp(s, insn);
+ break;
+ default:
+ unallocated_encoding(s);
+ break;
+ }
+}
+
static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
CPUState *cpu)
{
@@ -14401,43 +14253,10 @@ static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
disas_sme_fa64(s, insn);
}
- switch (extract32(insn, 25, 4)) {
- case 0x0:
- if (!extract32(insn, 31, 1) || !disas_sme(s, insn)) {
- unallocated_encoding(s);
- }
- break;
- case 0x1: case 0x3: /* UNALLOCATED */
- unallocated_encoding(s);
- break;
- case 0x2:
- if (!disas_sve(s, insn)) {
- unallocated_encoding(s);
- }
- break;
- case 0x8: case 0x9: /* Data processing - immediate */
- disas_data_proc_imm(s, insn);
- break;
- case 0xa: case 0xb: /* Branch, exception generation and system insns */
- disas_b_exc_sys(s, insn);
- break;
- case 0x4:
- case 0x6:
- case 0xc:
- case 0xe: /* Loads and stores */
- disas_ldst(s, insn);
- break;
- case 0x5:
- case 0xd: /* Data processing - register */
- disas_data_proc_reg(s, insn);
- break;
- case 0x7:
- case 0xf: /* Data processing - SIMD and floating point */
- disas_data_proc_simd_fp(s, insn);
- break;
- default:
- assert(FALSE); /* all 15 cases should be handled above */
- break;
+ if (!disas_a64(s, insn) &&
+ !disas_sme(s, insn) &&
+ !disas_sve(s, insn)) {
+ disas_a64_legacy(s, insn);
}
/*
diff --git a/target/arm/tcg/translate.h b/target/arm/tcg/translate.h
index f02d4685b4..a9d1f4adc2 100644
--- a/target/arm/tcg/translate.h
+++ b/target/arm/tcg/translate.h
@@ -220,6 +220,11 @@ static inline int rsub_8(DisasContext *s, int x)
return 8 - x;
}
+static inline int shl_12(DisasContext *s, int x)
+{
+ return x << 12;
+}
+
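shl_12 is the !function referenced by %imm12_sh12 in a64.decode, so the
shifted add/subtract immediate is materialized at decode time: a raw field of
0x001 reaches trans_ADD_i already as 0x1000, and the translator never needs to
re-examine the sh bit.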
static inline int neon_3same_fp_size(DisasContext *s, int x)
{
/* Convert 0==fp32, 1==fp16 into a MO_* value */