author    | Zhao Liu <zhao1.liu@intel.com>      | 2024-04-24 23:49:28 +0800
committer | Paolo Bonzini <pbonzini@redhat.com> | 2024-05-22 19:56:27 +0200
commit    | f602eb925ac5d51d09de6c4b32ba8a5142055492 (patch)
tree      | df78330e7c9e73693112a7b3eb5860bd24e2fd47 /target/i386/cpu.c
parent    | 9fcba76ab9c264d06394696c304f2462d9296918 (diff)
i386/cpu: Use CPUCacheInfo.share_level to encode CPUID[4]
CPUID[4].EAX[bits 25:14] is used to represent the cache topology for
Intel CPUs.
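As background on how the field is consumed: a guest reads CPUID leaf 4 sub-leaves and adds one to bits 25:14 to obtain the number of addressable APIC IDs sharing that cache (and likewise for bits 31:26 and core IDs per package). A minimal guest-side sketch, illustrative only and not part of this patch, assuming GCC/Clang's <cpuid.h> __cpuid_count() and that sub-leaf 2 describes the L2 cache:

/* Illustrative guest-side decode of CPUID.04H:EAX; not QEMU code. */
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
    unsigned int eax, ebx, ecx, edx;
    unsigned int subleaf = 2;   /* assumption: sub-leaf 2 is the L2 cache */

    __cpuid_count(4, subleaf, eax, ebx, ecx, edx);
    if ((eax & 0x1f) == 0) {
        return 0;               /* cache type 0: no such cache level */
    }

    /* Bits 25:14: maximum addressable IDs sharing this cache, minus one. */
    unsigned int share_ids = ((eax >> 14) & 0xfff) + 1;
    /* Bits 31:26: maximum addressable core IDs in the package, minus one. */
    unsigned int core_ids = ((eax >> 26) & 0x3f) + 1;

    printf("IDs sharing this cache: %u, core IDs per package: %u\n",
           share_ids, core_ids);
    return 0;
}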
Now that the cache models carry topology information, we can use
CPUCacheInfo.share_level to decide which topology level is encoded
into CPUID[4].EAX[bits 25:14].
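Concretely, the value written to bits 25:14 is (1 << APIC-ID offset of the sharing level) - 1. A standalone sketch with a hypothetical 2-thread/4-core/2-die topology (the widths and helper names below are assumptions for illustration, not QEMU code):

/* Hypothetical illustration of the EAX[25:14] encoding per share level. */
#include <stdio.h>

/* Example topology: 2 threads/core, 4 cores/die, 2 dies/package. */
#define SMT_WIDTH  1    /* ceil(log2(2 threads)) */
#define CORE_WIDTH 2    /* ceil(log2(4 cores))   */
#define DIE_WIDTH  1    /* ceil(log2(2 dies))    */

/* APIC-ID bit offsets, stacking the widths from SMT upwards. */
static unsigned core_offset(void) { return SMT_WIDTH; }
static unsigned die_offset(void)  { return SMT_WIDTH + CORE_WIDTH; }
static unsigned pkg_offset(void)  { return SMT_WIDTH + CORE_WIDTH + DIE_WIDTH; }

int main(void)
{
    /* Cache shared per core: 2 addressable IDs, so the field is 1. */
    printf("CORE:    EAX[25:14] = %u\n", (1u << core_offset()) - 1);
    /* Cache shared per die: 8 addressable IDs, so the field is 7. */
    printf("DIE:     EAX[25:14] = %u\n", (1u << die_offset()) - 1);
    /* Cache shared per package: 16 addressable IDs, so the field is 15. */
    printf("PACKAGE: EAX[25:14] = %u\n", (1u << pkg_offset()) - 1);
    return 0;
}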
And since the helper max_thread_ids_for_cache() derives the field
CPUID[4].EAX[bits 25:14] (the original variable "num_apic_ids") from the
CPU topology levels, which are already verified when parsing -smp, there
is no need to check this value with "assert(num_apic_ids > 0)" again, so
remove this assert().
Additionally, wrap the encoding of CPUID[4].EAX[bits 31:26] into a
helper to make the code cleaner.
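Bits 31:26 follow the same pattern, counting addressable core IDs within the package; a short sketch reusing the hypothetical topology from the example above:

/* Hypothetical illustration of the EAX[31:26] encoding; not QEMU code. */
#include <stdio.h>

#define SMT_WIDTH  1
#define CORE_WIDTH 2
#define DIE_WIDTH  1

int main(void)
{
    unsigned core_off = SMT_WIDTH;                          /* 1 */
    unsigned pkg_off  = SMT_WIDTH + CORE_WIDTH + DIE_WIDTH; /* 4 */

    /* 1 << (pkg_offset - core_offset) addressable core IDs, minus one. */
    printf("EAX[31:26] = %u\n", (1u << (pkg_off - core_off)) - 1);  /* prints 7 */
    return 0;
}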
Tested-by: Yongwei Ma <yongwei.ma@intel.com>
Signed-off-by: Zhao Liu <zhao1.liu@intel.com>
Tested-by: Babu Moger <babu.moger@amd.com>
Message-ID: <20240424154929.1487382-21-zhao1.liu@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'target/i386/cpu.c')
-rw-r--r-- | target/i386/cpu.c | 94
1 file changed, 51 insertions, 43 deletions
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index 656b65ad33..f91e150026 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -235,22 +235,53 @@ static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache)
                        ((t) == UNIFIED_CACHE) ? CACHE_TYPE_UNIFIED : \
                        0 /* Invalid value */)
 
+static uint32_t max_thread_ids_for_cache(X86CPUTopoInfo *topo_info,
+                                         enum CPUTopoLevel share_level)
+{
+    uint32_t num_ids = 0;
+
+    switch (share_level) {
+    case CPU_TOPO_LEVEL_CORE:
+        num_ids = 1 << apicid_core_offset(topo_info);
+        break;
+    case CPU_TOPO_LEVEL_DIE:
+        num_ids = 1 << apicid_die_offset(topo_info);
+        break;
+    case CPU_TOPO_LEVEL_PACKAGE:
+        num_ids = 1 << apicid_pkg_offset(topo_info);
+        break;
+    default:
+        /*
+         * Currently there is no use case for SMT and MODULE, so use
+         * assert directly to facilitate debugging.
+         */
+        g_assert_not_reached();
+    }
+
+    return num_ids - 1;
+}
+
+static uint32_t max_core_ids_in_package(X86CPUTopoInfo *topo_info)
+{
+    uint32_t num_cores = 1 << (apicid_pkg_offset(topo_info) -
+                               apicid_core_offset(topo_info));
+    return num_cores - 1;
+}
 
 /* Encode cache info for CPUID[4] */
 static void encode_cache_cpuid4(CPUCacheInfo *cache,
-                                int num_apic_ids, int num_cores,
+                                X86CPUTopoInfo *topo_info,
                                 uint32_t *eax, uint32_t *ebx,
                                 uint32_t *ecx, uint32_t *edx)
 {
     assert(cache->size == cache->line_size * cache->associativity *
                           cache->partitions * cache->sets);
 
-    assert(num_apic_ids > 0);
     *eax = CACHE_TYPE(cache->type) |
            CACHE_LEVEL(cache->level) |
            (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0) |
-           ((num_cores - 1) << 26) |
-           ((num_apic_ids - 1) << 14);
+           (max_core_ids_in_package(topo_info) << 26) |
+           (max_thread_ids_for_cache(topo_info, cache->share_level) << 14);
 
     assert(cache->line_size > 0);
     assert(cache->partitions > 0);
@@ -6392,18 +6423,7 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1i_cache) << 8) |
                (cpuid2_cache_descriptor(env->cache_info_cpuid2.l2_cache));
         break;
-    case 4: {
-        /*
-         * CPUID.04H:EAX[bits 25:14]: Maximum number of addressable IDs for
-         * logical processors sharing this cache.
-         */
-        int addressable_threads_width;
-        /*
-         * CPUID.04H:EAX[bits 31:26]: Maximum number of addressable IDs for
-         * processor cores in the physical package.
-         */
-        int addressable_cores_width;
-
+    case 4:
         /* cache info: needed for Core compatibility */
         if (cpu->cache_info_passthrough) {
             x86_cpu_get_cache_cpuid(index, count, eax, ebx, ecx, edx);
@@ -6415,59 +6435,48 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                 int host_vcpus_per_cache = 1 + ((*eax & 0x3FFC000) >> 14);
 
                 if (cores_per_pkg > 1) {
-                    addressable_cores_width = apicid_pkg_offset(&topo_info) -
-                                              apicid_core_offset(&topo_info);
-
                     *eax &= ~0xFC000000;
-                    *eax |= ((1 << addressable_cores_width) - 1) << 26;
+                    *eax |= max_core_ids_in_package(&topo_info) << 26;
                 }
                 if (host_vcpus_per_cache > threads_per_pkg) {
-                    /* Share the cache at package level. */
-                    addressable_threads_width = apicid_pkg_offset(&topo_info);
-
                     *eax &= ~0x3FFC000;
-                    *eax |= ((1 << addressable_threads_width) - 1) << 14;
+
+                    /* Share the cache at package level. */
+                    *eax |= max_thread_ids_for_cache(&topo_info,
+                                CPU_TOPO_LEVEL_PACKAGE) << 14;
                 }
             }
         } else if (cpu->vendor_cpuid_only && IS_AMD_CPU(env)) {
             *eax = *ebx = *ecx = *edx = 0;
         } else {
             *eax = 0;
-            addressable_cores_width = apicid_pkg_offset(&topo_info) -
-                                      apicid_core_offset(&topo_info);
 
             switch (count) {
             case 0: /* L1 dcache info */
-                addressable_threads_width = cpu->l1_cache_per_core
-                                            ? apicid_core_offset(&topo_info)
-                                            : 0;
                 encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache,
-                                    (1 << addressable_threads_width),
-                                    (1 << addressable_cores_width),
+                                    &topo_info,
                                     eax, ebx, ecx, edx);
+                if (!cpu->l1_cache_per_core) {
+                    *eax &= ~MAKE_64BIT_MASK(14, 12);
+                }
                 break;
             case 1: /* L1 icache info */
-                addressable_threads_width = cpu->l1_cache_per_core
-                                            ? apicid_core_offset(&topo_info)
-                                            : 0;
                 encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache,
-                                    (1 << addressable_threads_width),
-                                    (1 << addressable_cores_width),
+                                    &topo_info,
                                     eax, ebx, ecx, edx);
+                if (!cpu->l1_cache_per_core) {
+                    *eax &= ~MAKE_64BIT_MASK(14, 12);
+                }
                 break;
             case 2: /* L2 cache info */
-                addressable_threads_width = apicid_core_offset(&topo_info);
                 encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache,
-                                    (1 << addressable_threads_width),
-                                    (1 << addressable_cores_width),
+                                    &topo_info,
                                     eax, ebx, ecx, edx);
                 break;
             case 3: /* L3 cache info */
                 if (cpu->enable_l3_cache) {
-                    addressable_threads_width = apicid_die_offset(&topo_info);
                     encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache,
-                                        (1 << addressable_threads_width),
-                                        (1 << addressable_cores_width),
+                                        &topo_info,
                                         eax, ebx, ecx, edx);
                     break;
                 }
@@ -6478,7 +6487,6 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
             }
         }
         break;
-    }
     case 5:
         /* MONITOR/MWAIT Leaf */
         *eax = cpu->mwait.eax; /* Smallest monitor-line size in bytes */