Diffstat (limited to 'target/arm/cpu64.c')
 target/arm/cpu64.c | 117
 1 file changed, 56 insertions(+), 61 deletions(-)
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
index cce68dd82a..15665c962b 100644
--- a/target/arm/cpu64.c
+++ b/target/arm/cpu64.c
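The change below converts the SVE vector-length sets from DECLARE_BITMAP arrays to plain uint32_t masks: bit (vq - 1) set means vector length vq (i.e. vq * 128 bits) is in the set, and since ARM_MAX_VQ is 16 the whole set fits in 32 bits. A minimal standalone sketch of that encoding, with a hypothetical vq_in_mask() helper standing in for the old test_bit() calls:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical helper, illustrating the encoding used by the patch:
     * bit (vq - 1) of a uint32_t mask set means vector length vq
     * (i.e. vq * 128 bits) is a member of the set. */
    static int vq_in_mask(uint32_t mask, unsigned vq)
    {
        return (mask >> (vq - 1)) & 1;
    }

    int main(void)
    {
        uint32_t vq_map = (1u << 0) | (1u << 1) | (1u << 3); /* 128, 256, 512 bit */
        printf("sve512 (vq 4) enabled: %d\n", vq_in_mask(vq_map, 4));
        printf("sve384 (vq 3) enabled: %d\n", vq_in_mask(vq_map, 3));
        return 0;
    }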
@@ -355,8 +355,11 @@ void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp)
* any of the above. Finally, if SVE is not disabled, then at least one
* vector length must be enabled.
*/
- DECLARE_BITMAP(tmp, ARM_MAX_VQ);
- uint32_t vq, max_vq = 0;
+ uint32_t vq_map = cpu->sve_vq_map;
+ uint32_t vq_init = cpu->sve_vq_init;
+ uint32_t vq_supported;
+ uint32_t vq_mask = 0;
+ uint32_t tmp, vq, max_vq = 0;
/*
* CPU models specify a set of supported vector lengths which are
@@ -364,10 +367,16 @@ void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp)
* in the supported bitmap results in an error. When KVM is enabled we
* fetch the supported bitmap from the host.
*/
- if (kvm_enabled() && kvm_arm_sve_supported()) {
- kvm_arm_sve_get_vls(CPU(cpu), cpu->sve_vq_supported);
- } else if (kvm_enabled()) {
- assert(!cpu_isar_feature(aa64_sve, cpu));
+ if (kvm_enabled()) {
+ if (kvm_arm_sve_supported()) {
+ cpu->sve_vq_supported = kvm_arm_sve_get_vls(CPU(cpu));
+ vq_supported = cpu->sve_vq_supported;
+ } else {
+ assert(!cpu_isar_feature(aa64_sve, cpu));
+ vq_supported = 0;
+ }
+ } else {
+ vq_supported = cpu->sve_vq_supported;
}
/*
@@ -375,8 +384,9 @@ void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp)
* From the properties, sve_vq_map<N> implies sve_vq_init<N>.
* Check first for any sve<N> enabled.
*/
- if (!bitmap_empty(cpu->sve_vq_map, ARM_MAX_VQ)) {
- max_vq = find_last_bit(cpu->sve_vq_map, ARM_MAX_VQ) + 1;
+ if (vq_map != 0) {
+ max_vq = 32 - clz32(vq_map);
+ vq_mask = MAKE_64BIT_MASK(0, max_vq);
if (cpu->sve_max_vq && max_vq > cpu->sve_max_vq) {
error_setg(errp, "cannot enable sve%d", max_vq * 128);
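In the hunk above, the old find_last_bit() on a bitmap becomes 32 - clz32(vq_map), and the new vq_mask covers every length up to that maximum. Assuming clz32() counts the leading zero bits of a 32-bit value and MAKE_64BIT_MASK(shift, length) yields a mask of length consecutive set bits starting at shift (the usual semantics of the QEMU helpers of those names), the idiom can be reproduced with portable stand-ins:

    #include <stdint.h>
    #include <stdio.h>

    /* Portable stand-ins with the assumed semantics of the QEMU helpers. */
    static unsigned my_clz32(uint32_t x)
    {
        return x ? (unsigned)__builtin_clz(x) : 32;   /* GCC/Clang builtin */
    }
    static uint64_t my_make_mask(unsigned shift, unsigned length)
    {
        return (~0ULL >> (64 - length)) << shift;     /* length >= 1 assumed */
    }

    int main(void)
    {
        uint32_t vq_map = (1u << 0) | (1u << 3);      /* vq 1 and vq 4 enabled */
        uint32_t max_vq = 32 - my_clz32(vq_map);      /* highest set bit + 1 => 4 */
        uint64_t vq_mask = my_make_mask(0, max_vq);   /* 0b1111: all vq <= max_vq */
        printf("max_vq=%u vq_mask=0x%llx\n", max_vq, (unsigned long long)vq_mask);
        return 0;
    }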
@@ -392,15 +402,10 @@ void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp)
* For KVM we have to automatically enable all supported uninitialized
* lengths, even when the smaller lengths are not all powers-of-two.
*/
- bitmap_andnot(tmp, cpu->sve_vq_supported, cpu->sve_vq_init, max_vq);
- bitmap_or(cpu->sve_vq_map, cpu->sve_vq_map, tmp, max_vq);
+ vq_map |= vq_supported & ~vq_init & vq_mask;
} else {
/* Propagate enabled bits down through required powers-of-two. */
- for (vq = pow2floor(max_vq); vq >= 1; vq >>= 1) {
- if (!test_bit(vq - 1, cpu->sve_vq_init)) {
- set_bit(vq - 1, cpu->sve_vq_map);
- }
- }
+ vq_map |= SVE_VQ_POW2_MAP & ~vq_init & vq_mask;
}
} else if (cpu->sve_max_vq == 0) {
/*
@@ -413,25 +418,18 @@ void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp)
if (kvm_enabled()) {
/* Disabling a supported length disables all larger lengths. */
- for (vq = 1; vq <= ARM_MAX_VQ; ++vq) {
- if (test_bit(vq - 1, cpu->sve_vq_init) &&
- test_bit(vq - 1, cpu->sve_vq_supported)) {
- break;
- }
- }
+ tmp = vq_init & vq_supported;
} else {
/* Disabling a power-of-two disables all larger lengths. */
- for (vq = 1; vq <= ARM_MAX_VQ; vq <<= 1) {
- if (test_bit(vq - 1, cpu->sve_vq_init)) {
- break;
- }
- }
+ tmp = vq_init & SVE_VQ_POW2_MAP;
}
+ vq = ctz32(tmp) + 1;
max_vq = vq <= ARM_MAX_VQ ? vq - 1 : ARM_MAX_VQ;
- bitmap_andnot(cpu->sve_vq_map, cpu->sve_vq_supported,
- cpu->sve_vq_init, max_vq);
- if (max_vq == 0 || bitmap_empty(cpu->sve_vq_map, max_vq)) {
+ vq_mask = MAKE_64BIT_MASK(0, max_vq);
+ vq_map = vq_supported & ~vq_init & vq_mask;
+
+ if (max_vq == 0 || vq_map == 0) {
error_setg(errp, "cannot disable sve%d", vq * 128);
error_append_hint(errp, "Disabling sve%d results in all "
"vector lengths being disabled.\n",
@@ -441,7 +439,8 @@ void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp)
return;
}
- max_vq = find_last_bit(cpu->sve_vq_map, max_vq) + 1;
+ max_vq = 32 - clz32(vq_map);
+ vq_mask = MAKE_64BIT_MASK(0, max_vq);
}
/*
@@ -451,9 +450,9 @@ void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp)
*/
if (cpu->sve_max_vq != 0) {
max_vq = cpu->sve_max_vq;
+ vq_mask = MAKE_64BIT_MASK(0, max_vq);
- if (!test_bit(max_vq - 1, cpu->sve_vq_map) &&
- test_bit(max_vq - 1, cpu->sve_vq_init)) {
+ if (vq_init & ~vq_map & (1 << (max_vq - 1))) {
error_setg(errp, "cannot disable sve%d", max_vq * 128);
error_append_hint(errp, "The maximum vector length must be "
"enabled, sve-max-vq=%d (%d bits)\n",
@@ -462,8 +461,7 @@ void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp)
}
/* Set all bits not explicitly set within sve-max-vq. */
- bitmap_complement(tmp, cpu->sve_vq_init, max_vq);
- bitmap_or(cpu->sve_vq_map, cpu->sve_vq_map, tmp, max_vq);
+ vq_map |= ~vq_init & vq_mask;
}
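In the sve-max-vq hunk just above, the bitmap_complement()/bitmap_or() pair that implicitly enabled every length the user did not touch collapses to a single expression: each bit inside vq_mask that is not in vq_init gets set in vq_map. A small sketch of that equivalence, with vq_mask built as in the earlier hunks (covering vq 1..max_vq):

    #include <stdint.h>
    #include <assert.h>

    int main(void)
    {
        uint32_t max_vq = 4;                       /* e.g. sve-max-vq=4 (512 bit) */
        uint32_t vq_mask = (1u << max_vq) - 1;     /* bits for vq 1..4 */
        uint32_t vq_init = 1u << 1;                /* user touched only sve256... */
        uint32_t vq_map  = 0;                      /* ...and disabled it */

        /* New form: enable every untouched length up to sve-max-vq. */
        vq_map |= ~vq_init & vq_mask;

        assert(vq_map == ((1u << 0) | (1u << 2) | (1u << 3)));  /* 128/384/512 */
        return 0;
    }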
/*
@@ -472,13 +470,14 @@ void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp)
* are clear, just in case anybody looks.
*/
assert(max_vq != 0);
- bitmap_clear(cpu->sve_vq_map, max_vq, ARM_MAX_VQ - max_vq);
+ assert(vq_mask != 0);
+ vq_map &= vq_mask;
/* Ensure the set of lengths matches what is supported. */
- bitmap_xor(tmp, cpu->sve_vq_map, cpu->sve_vq_supported, max_vq);
- if (!bitmap_empty(tmp, max_vq)) {
- vq = find_last_bit(tmp, max_vq) + 1;
- if (test_bit(vq - 1, cpu->sve_vq_map)) {
+ tmp = vq_map ^ (vq_supported & vq_mask);
+ if (tmp) {
+ vq = 32 - clz32(tmp);
+ if (vq_map & (1 << (vq - 1))) {
if (cpu->sve_max_vq) {
error_setg(errp, "cannot set sve-max-vq=%d", cpu->sve_max_vq);
error_append_hint(errp, "This CPU does not support "
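The consistency check above XORs the requested set with the supported set (restricted to vq_mask); any set bit in tmp marks a disagreement, 32 - clz32(tmp) picks the largest disagreeing length, and that bit's presence in vq_map distinguishes enabled-but-unsupported from supported-but-disabled. A minimal sketch of the logic, with my_clz32() as the assumed clz32() stand-in:

    #include <stdint.h>
    #include <stdio.h>

    static unsigned my_clz32(uint32_t x)
    {
        return x ? (unsigned)__builtin_clz(x) : 32;
    }

    int main(void)
    {
        uint32_t vq_supported = (1u << 0) | (1u << 1) | (1u << 3); /* 128/256/512 */
        uint32_t vq_map  = (1u << 0) | (1u << 2);                  /* 128 and 384 */
        uint32_t vq_mask = (1u << 3) - 1;                          /* max_vq = 3 */

        uint32_t tmp = vq_map ^ (vq_supported & vq_mask);          /* disagreements */
        if (tmp) {
            unsigned vq = 32 - my_clz32(tmp);                      /* largest mismatch */
            if (vq_map & (1u << (vq - 1))) {
                printf("sve%u enabled but not supported\n", vq * 128);
            } else {
                printf("sve%u supported but disabled\n", vq * 128);
            }
        }
        return 0;
    }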
@@ -502,15 +501,15 @@ void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp)
return;
} else {
/* Ensure all required powers-of-two are enabled. */
- for (vq = pow2floor(max_vq); vq >= 1; vq >>= 1) {
- if (!test_bit(vq - 1, cpu->sve_vq_map)) {
- error_setg(errp, "cannot disable sve%d", vq * 128);
- error_append_hint(errp, "sve%d is required as it "
- "is a power-of-two length smaller "
- "than the maximum, sve%d\n",
- vq * 128, max_vq * 128);
- return;
- }
+ tmp = SVE_VQ_POW2_MAP & vq_mask & ~vq_map;
+ if (tmp) {
+ vq = 32 - clz32(tmp);
+ error_setg(errp, "cannot disable sve%d", vq * 128);
+ error_append_hint(errp, "sve%d is required as it "
+ "is a power-of-two length smaller "
+ "than the maximum, sve%d\n",
+ vq * 128, max_vq * 128);
+ return;
}
}
}
@@ -530,6 +529,7 @@ void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp)
/* From now on sve_max_vq is the actual maximum supported length. */
cpu->sve_max_vq = max_vq;
+ cpu->sve_vq_map = vq_map;
}
static void cpu_max_get_sve_max_vq(Object *obj, Visitor *v, const char *name,
@@ -590,7 +590,7 @@ static void cpu_arm_get_sve_vq(Object *obj, Visitor *v, const char *name,
if (!cpu_isar_feature(aa64_sve, cpu)) {
value = false;
} else {
- value = test_bit(vq - 1, cpu->sve_vq_map);
+ value = extract32(cpu->sve_vq_map, vq - 1, 1);
}
visit_type_bool(v, name, &value, errp);
}
@@ -612,12 +612,8 @@ static void cpu_arm_set_sve_vq(Object *obj, Visitor *v, const char *name,
return;
}
- if (value) {
- set_bit(vq - 1, cpu->sve_vq_map);
- } else {
- clear_bit(vq - 1, cpu->sve_vq_map);
- }
- set_bit(vq - 1, cpu->sve_vq_init);
+ cpu->sve_vq_map = deposit32(cpu->sve_vq_map, vq - 1, 1, value);
+ cpu->sve_vq_init |= 1 << (vq - 1);
}
static bool cpu_arm_get_sve(Object *obj, Error **errp)
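The property accessors now treat the mask as a one-bit field per vector length: extract32(map, vq - 1, 1) reads the bit and deposit32(map, vq - 1, 1, value) writes it without disturbing its neighbours, while sve_vq_init records that the property was set at all. Assuming the usual semantics of those QEMU bit-field helpers, the stand-ins below show the behaviour:

    #include <stdint.h>
    #include <assert.h>

    /* Stand-ins with the assumed semantics of QEMU's extract32()/deposit32()
     * (field lengths below 32 bits only, which is all this use needs). */
    static uint32_t my_extract32(uint32_t value, int start, int length)
    {
        return (value >> start) & ((1u << length) - 1);
    }
    static uint32_t my_deposit32(uint32_t value, int start, int length, uint32_t field)
    {
        uint32_t mask = ((1u << length) - 1) << start;
        return (value & ~mask) | ((field << start) & mask);
    }

    int main(void)
    {
        uint32_t vq_map = 0, vq_init = 0;
        unsigned vq = 4;                               /* property sve512 */

        vq_map  = my_deposit32(vq_map, vq - 1, 1, 1);  /* sve512=on  */
        vq_init |= 1u << (vq - 1);                     /* remember it was set */
        assert(my_extract32(vq_map, vq - 1, 1) == 1);

        vq_map = my_deposit32(vq_map, vq - 1, 1, 0);   /* sve512=off */
        assert(vq_map == 0 && vq_init == (1u << 3));
        return 0;
    }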
@@ -979,7 +975,7 @@ static void aarch64_max_initfn(Object *obj)
cpu->dcz_blocksize = 7; /* 512 bytes */
#endif
- bitmap_fill(cpu->sve_vq_supported, ARM_MAX_VQ);
+ cpu->sve_vq_supported = MAKE_64BIT_MASK(0, ARM_MAX_VQ);
aarch64_add_pauth_properties(obj);
aarch64_add_sve_properties(obj);
@@ -1026,12 +1022,11 @@ static void aarch64_a64fx_initfn(Object *obj)
cpu->gic_vprebits = 5;
cpu->gic_pribits = 5;
- /* Suppport of A64FX's vector length are 128,256 and 512bit only */
+ /* The A64FX supports only 128, 256 and 512 bit vector lengths */
aarch64_add_sve_properties(obj);
- bitmap_zero(cpu->sve_vq_supported, ARM_MAX_VQ);
- set_bit(0, cpu->sve_vq_supported); /* 128bit */
- set_bit(1, cpu->sve_vq_supported); /* 256bit */
- set_bit(3, cpu->sve_vq_supported); /* 512bit */
+ cpu->sve_vq_supported = (1 << 0) /* 128bit */
+ | (1 << 1) /* 256bit */
+ | (1 << 3); /* 512bit */
cpu->isar.reset_pmcr_el0 = 0x46014040;
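For the A64FX the supported-length mask spells out the same three lengths the removed set_bit() calls did. A tiny sketch of how that mask maps back onto the per-length sve<N> properties:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* The A64FX mask built above: bits 0, 1 and 3. */
        uint32_t sve_vq_supported = (1u << 0) | (1u << 1) | (1u << 3);

        for (unsigned vq = 1; vq <= 16; vq++) {
            if (sve_vq_supported & (1u << (vq - 1))) {
                printf("sve%u supported\n", vq * 128);  /* sve128, sve256, sve512 */
            }
        }
        return 0;
    }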