Diffstat (limited to 'target/arm/kvm64.c')
-rw-r--r-- | target/arm/kvm64.c | 100
1 file changed, 99 insertions, 1 deletion
diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c
index c7ecefbed7..c93bbee425 100644
--- a/target/arm/kvm64.c
+++ b/target/arm/kvm64.c
@@ -613,6 +613,100 @@ bool kvm_arm_sve_supported(CPUState *cpu)
     return kvm_check_extension(s, KVM_CAP_ARM_SVE);
 }
 
+QEMU_BUILD_BUG_ON(KVM_ARM64_SVE_VQ_MIN != 1);
+
+void kvm_arm_sve_get_vls(CPUState *cs, unsigned long *map)
+{
+    /* Only call this function if kvm_arm_sve_supported() returns true. */
+    static uint64_t vls[KVM_ARM64_SVE_VLS_WORDS];
+    static bool probed;
+    uint32_t vq = 0;
+    int i, j;
+
+    bitmap_clear(map, 0, ARM_MAX_VQ);
+
+    /*
+     * KVM ensures all host CPUs support the same set of vector lengths.
+     * So we only need to create the scratch VCPUs once and then cache
+     * the results.
+     */
+    if (!probed) {
+        struct kvm_vcpu_init init = {
+            .target = -1,
+            .features[0] = (1 << KVM_ARM_VCPU_SVE),
+        };
+        struct kvm_one_reg reg = {
+            .id = KVM_REG_ARM64_SVE_VLS,
+            .addr = (uint64_t)&vls[0],
+        };
+        int fdarray[3], ret;
+
+        probed = true;
+
+        if (!kvm_arm_create_scratch_host_vcpu(NULL, fdarray, &init)) {
+            error_report("failed to create scratch VCPU with SVE enabled");
+            abort();
+        }
+        ret = ioctl(fdarray[2], KVM_GET_ONE_REG, &reg);
+        kvm_arm_destroy_scratch_host_vcpu(fdarray);
+        if (ret) {
+            error_report("failed to get KVM_REG_ARM64_SVE_VLS: %s",
+                         strerror(errno));
+            abort();
+        }
+
+        for (i = KVM_ARM64_SVE_VLS_WORDS - 1; i >= 0; --i) {
+            if (vls[i]) {
+                vq = 64 - clz64(vls[i]) + i * 64;
+                break;
+            }
+        }
+        if (vq > ARM_MAX_VQ) {
+            warn_report("KVM supports vector lengths larger than "
+                        "QEMU can enable");
+        }
+    }
+
+    for (i = 0; i < KVM_ARM64_SVE_VLS_WORDS; ++i) {
+        if (!vls[i]) {
+            continue;
+        }
+        for (j = 1; j <= 64; ++j) {
+            vq = j + i * 64;
+            if (vq > ARM_MAX_VQ) {
+                return;
+            }
+            if (vls[i] & (1UL << (j - 1))) {
+                set_bit(vq - 1, map);
+            }
+        }
+    }
+}
+
+static int kvm_arm_sve_set_vls(CPUState *cs)
+{
+    uint64_t vls[KVM_ARM64_SVE_VLS_WORDS] = {0};
+    struct kvm_one_reg reg = {
+        .id = KVM_REG_ARM64_SVE_VLS,
+        .addr = (uint64_t)&vls[0],
+    };
+    ARMCPU *cpu = ARM_CPU(cs);
+    uint32_t vq;
+    int i, j;
+
+    assert(cpu->sve_max_vq <= KVM_ARM64_SVE_VQ_MAX);
+
+    for (vq = 1; vq <= cpu->sve_max_vq; ++vq) {
+        if (test_bit(vq - 1, cpu->sve_vq_map)) {
+            i = (vq - 1) / 64;
+            j = (vq - 1) % 64;
+            vls[i] |= 1UL << j;
+        }
+    }
+
+    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+}
+
 #define ARM_CPU_ID_MPIDR 3, 0, 0, 0, 5
 
 int kvm_arch_init_vcpu(CPUState *cs)
@@ -624,7 +718,7 @@ int kvm_arch_init_vcpu(CPUState *cs)
 
     if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE ||
         !object_dynamic_cast(OBJECT(cpu), TYPE_AARCH64_CPU)) {
-        fprintf(stderr, "KVM is not supported for this guest CPU type\n");
+        error_report("KVM is not supported for this guest CPU type");
         return -EINVAL;
     }
 
@@ -660,6 +754,10 @@ int kvm_arch_init_vcpu(CPUState *cs)
     }
 
     if (cpu_isar_feature(aa64_sve, cpu)) {
+        ret = kvm_arm_sve_set_vls(cs);
+        if (ret) {
+            return ret;
+        }
         ret = kvm_arm_vcpu_finalize(cs, KVM_ARM_VCPU_SVE);
         if (ret) {
             return ret;
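Side note on the encoding the patch works with: KVM_REG_ARM64_SVE_VLS is a bitmap spread over 64-bit words in which bit j of word i, when set, means the host supports SVE vector length quantum vq = i * 64 + j + 1 (i.e. a vq * 128-bit vector). Both kvm_arm_sve_set_vls() and kvm_arm_sve_get_vls() above are converting between that bitmap and per-vq bits. The following standalone C sketch only illustrates that mapping outside QEMU; it is not part of the patch, and VLS_WORDS is a hypothetical stand-in for KVM_ARM64_SVE_VLS_WORDS.

/*
 * Illustrative sketch of the KVM_REG_ARM64_SVE_VLS bit layout:
 * bit j of 64-bit word i set  <=>  vector length vq = i * 64 + j + 1
 * is supported (vector width vq * 128 bits).
 */
#include <stdint.h>
#include <stdio.h>

#define VLS_WORDS 8    /* hypothetical stand-in for KVM_ARM64_SVE_VLS_WORDS */

/* Mark vector length 'vq' (in quadwords) as supported, as set_vls does. */
static void vls_set(uint64_t *vls, uint32_t vq)
{
    vls[(vq - 1) / 64] |= UINT64_C(1) << ((vq - 1) % 64);
}

/* Test whether 'vq' is present in the bitmap, mirroring get_vls decoding. */
static int vls_test(const uint64_t *vls, uint32_t vq)
{
    return (vls[(vq - 1) / 64] >> ((vq - 1) % 64)) & 1;
}

int main(void)
{
    uint64_t vls[VLS_WORDS] = { 0 };
    uint32_t vq;

    /* Pretend the host supports 128-, 256- and 512-bit vectors. */
    vls_set(vls, 1);
    vls_set(vls, 2);
    vls_set(vls, 4);

    for (vq = 1; vq <= 8; ++vq) {
        printf("vq %u (%u-bit): %s\n", vq, vq * 128,
               vls_test(vls, vq) ? "supported" : "absent");
    }
    return 0;
}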