Diffstat (limited to 'target/arm/cpu.h')
-rw-r--r--   target/arm/cpu.h   120
1 file changed, 98 insertions, 22 deletions
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index 8d41f783dc..521444a5a1 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -153,6 +153,49 @@ typedef struct {
uint32_t base_mask;
} TCR;
+/* Define a maximum sized vector register.
+ * For 32-bit, this is a 128-bit NEON/AdvSIMD register.
+ * For 64-bit, this is a 2048-bit SVE register.
+ *
+ * Note that the mapping between S, D, and Q views of the register bank
+ * differs between AArch64 and AArch32.
+ * In AArch32:
+ * Qn = regs[n].d[1]:regs[n].d[0]
+ * Dn = regs[n / 2].d[n & 1]
+ * Sn = regs[n / 4].d[n % 4 / 2],
+ * bits 31..0 for even n, and bits 63..32 for odd n
+ * (and regs[16] to regs[31] are inaccessible)
+ * In AArch64:
+ * Zn = regs[n].d[*]
+ * Qn = regs[n].d[1]:regs[n].d[0]
+ * Dn = regs[n].d[0]
+ * Sn = regs[n].d[0] bits 31..0
+ *
+ * This corresponds to the architecturally defined mapping between
+ * the two execution states, and means we do not need to explicitly
+ * map these registers when changing states.
+ *
+ * Align the data for use with TCG host vector operations.
+ */
+
+#ifdef TARGET_AARCH64
+# define ARM_MAX_VQ 16
+#else
+# define ARM_MAX_VQ 1
+#endif
+
+typedef struct ARMVectorReg {
+ uint64_t d[2 * ARM_MAX_VQ] QEMU_ALIGNED(16);
+} ARMVectorReg;
+
+/* In AArch32 mode, predicate registers do not exist at all. */
+#ifdef TARGET_AARCH64
+typedef struct ARMPredicateReg {
+ uint64_t p[2 * ARM_MAX_VQ / 8] QEMU_ALIGNED(16);
+} ARMPredicateReg;
+#endif
+
+
typedef struct CPUARMState {
/* Regs for current mode. */
uint32_t regs[16];
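As a quick check of the sizing arithmetic above, a standalone sketch (not part of the patch; QEMU_ALIGNED is dropped since only sizeof matters here): with ARM_MAX_VQ = 16 a Z register spans 2048 bits and a predicate register holds one bit per Z-register byte, i.e. 256 bits.

    #include <assert.h>
    #include <stdint.h>

    #define ARM_MAX_VQ 16                        /* AArch64 build */

    typedef struct { uint64_t d[2 * ARM_MAX_VQ]; } ARMVectorReg;
    typedef struct { uint64_t p[2 * ARM_MAX_VQ / 8]; } ARMPredicateReg;

    int main(void)
    {
        assert(sizeof(ARMVectorReg) * 8 == 2048);    /* max SVE Z register */
        assert(sizeof(ARMPredicateReg) * 8 == 256);  /* one bit per Z byte */
        return 0;
    }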
@@ -477,22 +520,12 @@ typedef struct CPUARMState {
/* VFP coprocessor state. */
struct {
- /* VFP/Neon register state. Note that the mapping between S, D and Q
- * views of the register bank differs between AArch64 and AArch32:
- * In AArch32:
- * Qn = regs[2n+1]:regs[2n]
- * Dn = regs[n]
- * Sn = regs[n/2] bits 31..0 for even n, and bits 63..32 for odd n
- * (and regs[32] to regs[63] are inaccessible)
- * In AArch64:
- * Qn = regs[2n+1]:regs[2n]
- * Dn = regs[2n]
- * Sn = regs[2n] bits 31..0
- * This corresponds to the architecturally defined mapping between
- * the two execution states, and means we do not need to explicitly
- * map these registers when changing states.
- */
- uint64_t regs[64] QEMU_ALIGNED(16);
+ ARMVectorReg zregs[32];
+
+#ifdef TARGET_AARCH64
+ /* Store FFR as pregs[16] to make it easier to treat as any other. */
+ ARMPredicateReg pregs[17];
+#endif
uint32_t xregs[16];
/* We store these fpcsr fields separately for convenience. */
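Storing FFR as pregs[16] means predicate accesses need no special case for it; a minimal standalone sketch of that idea (FFR_INDEX, pred_data and the flat pregs array are illustrative names, not from the patch):

    #include <stdint.h>

    #define ARM_MAX_VQ 16
    typedef struct { uint64_t p[2 * ARM_MAX_VQ / 8]; } ARMPredicateReg;

    enum { FFR_INDEX = 16 };                  /* P0..P15, then FFR */
    static ARMPredicateReg pregs[17];

    static uint64_t *pred_data(int regno)
    {
        /* regno 0..15 selects Pn, regno 16 selects FFR -- same path. */
        return pregs[regno].p;
    }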
@@ -516,6 +549,9 @@ typedef struct CPUARMState {
*/
float_status fp_status;
float_status standard_fp_status;
+
+ /* ZCR_EL[1-3] */
+ uint64_t zcr_el[4];
} vfp;
uint64_t exclusive_addr;
uint64_t exclusive_val;
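For context on the new zcr_el[] fields (architectural background, not code from this patch): ZCR_ELx.LEN requests a vector length of (LEN + 1) quadwords, which the implementation then clamps to what it supports. A hypothetical helper showing that calculation:

    #include <stdint.h>

    #define ARM_MAX_VQ 16

    /* Illustrative only: ignores the interaction between ZCR_EL1/2/3. */
    static unsigned zcr_requested_vq(uint64_t zcr_elx)
    {
        unsigned vq = (unsigned)(zcr_elx & 0xf) + 1;  /* LEN field */
        return vq > ARM_MAX_VQ ? ARM_MAX_VQ : vq;
    }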
@@ -890,6 +926,8 @@ void pmccntr_sync(CPUARMState *env);
#define CPTR_TCPAC (1U << 31)
#define CPTR_TTA (1U << 20)
#define CPTR_TFP (1U << 10)
+#define CPTR_TZ (1U << 8) /* CPTR_EL2 */
+#define CPTR_EZ (1U << 8) /* CPTR_EL3 */
#define MDCR_EPMAD (1U << 21)
#define MDCR_EDAD (1U << 20)
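Note the two new bits share bit position 8 but have opposite polarity: CPTR_EL2.TZ set means SVE traps to EL2, while CPTR_EL3.EZ must be set for SVE to be usable below EL3. A hedged sketch of that check (ignoring E2H and which EL is executing):

    #include <stdbool.h>
    #include <stdint.h>

    #define CPTR_TZ (1U << 8)    /* CPTR_EL2: trap SVE when set */
    #define CPTR_EZ (1U << 8)    /* CPTR_EL3: enable SVE when set */

    static bool sve_access_trapped(uint32_t cptr_el2, uint32_t cptr_el3)
    {
        return (cptr_el2 & CPTR_TZ) || !(cptr_el3 & CPTR_EZ);
    }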
@@ -1341,6 +1379,10 @@ enum arm_features {
ARM_FEATURE_M_SECURITY, /* M profile Security Extension */
ARM_FEATURE_JAZELLE, /* has (trivial) Jazelle implementation */
ARM_FEATURE_SVE, /* has Scalable Vector Extension */
+ ARM_FEATURE_V8_SHA512, /* implements SHA512 part of v8 Crypto Extensions */
+ ARM_FEATURE_V8_SHA3, /* implements SHA3 part of v8 Crypto Extensions */
+ ARM_FEATURE_V8_SM3, /* implements SM3 part of v8 Crypto Extensions */
+ ARM_FEATURE_V8_SM4, /* implements SM4 part of v8 Crypto Extensions */
};
static inline int arm_feature(CPUARMState *env, int feature)
@@ -1506,16 +1548,42 @@ static inline bool armv7m_nvic_can_take_pending_exception(void *opaque)
*/
void armv7m_nvic_set_pending(void *opaque, int irq, bool secure);
/**
+ * armv7m_nvic_set_pending_derived: mark this derived exception as pending
+ * @opaque: the NVIC
+ * @irq: the exception number to mark pending
+ * @secure: false for non-banked exceptions or for the nonsecure
+ * version of a banked exception, true for the secure version of a banked
+ * exception.
+ *
+ * Similar to armv7m_nvic_set_pending(), but specifically for derived
+ * exceptions (exceptions generated in the course of trying to take
+ * a different exception).
+ */
+void armv7m_nvic_set_pending_derived(void *opaque, int irq, bool secure);
+/**
+ * armv7m_nvic_get_pending_irq_info: return highest priority pending
+ * exception, and whether it targets Secure state
+ * @opaque: the NVIC
+ * @pirq: set to pending exception number
+ * @ptargets_secure: set to whether pending exception targets Secure
+ *
+ * This function writes the number of the highest priority pending
+ * exception (the one which would be made active by
+ * armv7m_nvic_acknowledge_irq()) to @pirq, and sets @ptargets_secure
+ * to true if the current highest priority pending exception should
+ * be taken to Secure state, false for NS.
+ */
+void armv7m_nvic_get_pending_irq_info(void *opaque, int *pirq,
+ bool *ptargets_secure);
+/**
* armv7m_nvic_acknowledge_irq: make highest priority pending exception active
* @opaque: the NVIC
*
* Move the current highest priority pending exception from the pending
* state to the active state, and update v7m.exception to indicate that
* it is the exception currently being handled.
- *
- * Returns: true if exception should be taken to Secure state, false for NS
*/
-bool armv7m_nvic_acknowledge_irq(void *opaque);
+void armv7m_nvic_acknowledge_irq(void *opaque);
/**
* armv7m_nvic_complete_irq: complete specified interrupt or exception
* @opaque: the NVIC
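The intended calling sequence after this interface change, sketched as a hypothetical caller (the function name and body are illustrative, not QEMU source): the security-state information formerly returned by armv7m_nvic_acknowledge_irq() is now obtained up front from armv7m_nvic_get_pending_irq_info().

    #include <stdbool.h>

    void armv7m_nvic_get_pending_irq_info(void *opaque, int *pirq,
                                          bool *ptargets_secure);
    void armv7m_nvic_acknowledge_irq(void *opaque);

    static void take_pending_exception(void *nvic)
    {
        int irq;
        bool targets_secure;

        /* Find out which exception will be taken and where it targets. */
        armv7m_nvic_get_pending_irq_info(nvic, &irq, &targets_secure);

        /* ... push the exception frame for 'irq' in the state chosen by
         * 'targets_secure' ... */

        armv7m_nvic_acknowledge_irq(nvic);   /* no longer returns a bool */
    }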
@@ -2610,6 +2678,10 @@ static inline bool arm_cpu_data_is_big_endian(CPUARMState *env)
#define ARM_TBFLAG_TBI0_MASK (0x1ull << ARM_TBFLAG_TBI0_SHIFT)
#define ARM_TBFLAG_TBI1_SHIFT 1 /* TBI1 for EL0/1 */
#define ARM_TBFLAG_TBI1_MASK (0x1ull << ARM_TBFLAG_TBI1_SHIFT)
+#define ARM_TBFLAG_SVEEXC_EL_SHIFT 2
+#define ARM_TBFLAG_SVEEXC_EL_MASK (0x3 << ARM_TBFLAG_SVEEXC_EL_SHIFT)
+#define ARM_TBFLAG_ZCR_LEN_SHIFT 4
+#define ARM_TBFLAG_ZCR_LEN_MASK (0xf << ARM_TBFLAG_ZCR_LEN_SHIFT)
/* some convenience accessor macros */
#define ARM_TBFLAG_AARCH64_STATE(F) \
@@ -2646,6 +2718,10 @@ static inline bool arm_cpu_data_is_big_endian(CPUARMState *env)
(((F) & ARM_TBFLAG_TBI0_MASK) >> ARM_TBFLAG_TBI0_SHIFT)
#define ARM_TBFLAG_TBI1(F) \
(((F) & ARM_TBFLAG_TBI1_MASK) >> ARM_TBFLAG_TBI1_SHIFT)
+#define ARM_TBFLAG_SVEEXC_EL(F) \
+ (((F) & ARM_TBFLAG_SVEEXC_EL_MASK) >> ARM_TBFLAG_SVEEXC_EL_SHIFT)
+#define ARM_TBFLAG_ZCR_LEN(F) \
+ (((F) & ARM_TBFLAG_ZCR_LEN_MASK) >> ARM_TBFLAG_ZCR_LEN_SHIFT)
static inline bool bswap_code(bool sctlr_b)
{
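A standalone sketch of how the two new fields pack into and unpack from the TB flags word (the shift/mask/accessor definitions mirror the patch; the packing code itself is illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define ARM_TBFLAG_SVEEXC_EL_SHIFT 2
    #define ARM_TBFLAG_SVEEXC_EL_MASK  (0x3 << ARM_TBFLAG_SVEEXC_EL_SHIFT)
    #define ARM_TBFLAG_ZCR_LEN_SHIFT   4
    #define ARM_TBFLAG_ZCR_LEN_MASK    (0xf << ARM_TBFLAG_ZCR_LEN_SHIFT)

    #define ARM_TBFLAG_SVEEXC_EL(F) \
        (((F) & ARM_TBFLAG_SVEEXC_EL_MASK) >> ARM_TBFLAG_SVEEXC_EL_SHIFT)
    #define ARM_TBFLAG_ZCR_LEN(F) \
        (((F) & ARM_TBFLAG_ZCR_LEN_MASK) >> ARM_TBFLAG_ZCR_LEN_SHIFT)

    int main(void)
    {
        uint32_t flags = 0;
        flags |= 1u << ARM_TBFLAG_SVEEXC_EL_SHIFT;  /* SVE exceptions to EL1 */
        flags |= 3u << ARM_TBFLAG_ZCR_LEN_SHIFT;    /* vq - 1 = 3: 512-bit VL */
        printf("el=%u len=%u\n",
               (unsigned)ARM_TBFLAG_SVEEXC_EL(flags),
               (unsigned)ARM_TBFLAG_ZCR_LEN(flags));
        return 0;
    }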
@@ -2769,7 +2845,7 @@ static inline void *arm_get_el_change_hook_opaque(ARMCPU *cpu)
*/
static inline uint64_t *aa32_vfp_dreg(CPUARMState *env, unsigned regno)
{
- return &env->vfp.regs[regno];
+ return &env->vfp.zregs[regno >> 1].d[regno & 1];
}
/**
@@ -2778,7 +2854,7 @@ static inline uint64_t *aa32_vfp_dreg(CPUARMState *env, unsigned regno)
*/
static inline uint64_t *aa32_vfp_qreg(CPUARMState *env, unsigned regno)
{
- return &env->vfp.regs[2 * regno];
+ return &env->vfp.zregs[regno].d[0];
}
/**
@@ -2787,7 +2863,7 @@ static inline uint64_t *aa32_vfp_qreg(CPUARMState *env, unsigned regno)
*/
static inline uint64_t *aa64_vfp_qreg(CPUARMState *env, unsigned regno)
{
- return &env->vfp.regs[2 * regno];
+ return &env->vfp.zregs[regno].d[0];
}
#endif
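A standalone demonstration of the index mapping these accessors encode (it mirrors aa32_vfp_dreg()/aa32_vfp_qreg() above, but outside QEMU): AArch32 Dn lives in zregs[n >> 1].d[n & 1], so for example D6 aliases the low doubleword of Q3.

    #include <stdint.h>
    #include <stdio.h>

    #define ARM_MAX_VQ 1                           /* AArch32-style layout */
    typedef struct { uint64_t d[2 * ARM_MAX_VQ]; } ARMVectorReg;

    static ARMVectorReg zregs[32];

    static uint64_t *aa32_dreg(unsigned regno)     /* Dn, n in [0, 31] */
    {
        return &zregs[regno >> 1].d[regno & 1];
    }

    static uint64_t *aa32_qreg_lo(unsigned regno)  /* low half of Qn */
    {
        return &zregs[regno].d[0];
    }

    int main(void)
    {
        *aa32_dreg(6) = 0x1234;
        printf("%d\n", aa32_qreg_lo(3) == aa32_dreg(6));  /* prints 1 */
        return 0;
    }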