[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20250905121515.192792-28-darwi@linutronix.de>
Date: Fri, 5 Sep 2025 14:15:07 +0200
From: "Ahmed S. Darwish" <darwi@...utronix.de>
To: Borislav Petkov <bp@...en8.de>,
Ingo Molnar <mingo@...hat.com>,
Dave Hansen <dave.hansen@...ux.intel.com>
Cc: Thomas Gleixner <tglx@...utronix.de>,
Andrew Cooper <andrew.cooper3@...rix.com>,
Sean Christopherson <seanjc@...gle.com>,
David Woodhouse <dwmw2@...radead.org>,
"H. Peter Anvin" <hpa@...or.com>,
Peter Zijlstra <peterz@...radead.org>,
Sohil Mehta <sohil.mehta@...el.com>,
John Ogness <john.ogness@...utronix.de>,
x86@...nel.org,
x86-cpuid@...ts.linux.dev,
LKML <linux-kernel@...r.kernel.org>,
"Ahmed S. Darwish" <darwi@...utronix.de>
Subject: [PATCH v5 27/35] x86/cacheinfo: Use parsed CPUID(0x8000001d)
In the AMD cacheinfo code, use parsed CPUID(0x8000001d) access instead of
issuing direct CPUID queries.
Besides the CPUID parser centralization benefits, this allows using the
auto-generated <asm/cpuid/leaf_types.h> data types, and their full C99
bitfields, instead of doing ugly bitwise operations on CPUID register
output.
Since parsed CPUID access requires a 'struct cpuinfo_x86' reference,
trickle it down to relevant functions.
Use the parsed CPUID API:
cpuid_subleaf_count(c, 0x8000001d)
to find the number of cache leaves, replacing amd_find_num_cache_leaves()
and its direct CPUID queries. Drop that function completely as it is no
longer needed.
For now, keep using the 'union _cpuid4_leaf_eax/ebx/ecx' structures as
they are required by the AMD CPUID(0x4) emulation code paths. A follow-up
commit will replace them with their auto-generated equivalents.
Signed-off-by: Ahmed S. Darwish <darwi@...utronix.de>
---
arch/x86/kernel/cpu/cacheinfo.c | 41 +++++++++++++--------------------
1 file changed, 16 insertions(+), 25 deletions(-)
diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c
index 76fa3a01a34b..3e1ccab56e4c 100644
--- a/arch/x86/kernel/cpu/cacheinfo.c
+++ b/arch/x86/kernel/cpu/cacheinfo.c
@@ -237,16 +237,22 @@ static int cpuid4_info_fill_done(struct _cpuid4_info *id4, union _cpuid4_leaf_ea
return 0;
}
-static int amd_fill_cpuid4_info(int index, struct _cpuid4_info *id4)
+static int amd_fill_cpuid4_info(struct cpuinfo_x86 *c, int index, struct _cpuid4_info *id4)
{
union _cpuid4_leaf_eax eax;
union _cpuid4_leaf_ebx ebx;
union _cpuid4_leaf_ecx ecx;
- u32 ignored;
- if (boot_cpu_has(X86_FEATURE_TOPOEXT) || boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
- cpuid_count(0x8000001d, index, &eax.full, &ebx.full, &ecx.full, &ignored);
- else
+ if (boot_cpu_has(X86_FEATURE_TOPOEXT) || boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
+ const struct cpuid_regs *regs = cpuid_subleaf_n_raw(c, 0x8000001d, index);
+
+ if (!regs)
+ return -EIO;
+
+ eax.full = regs->eax;
+ ebx.full = regs->ebx;
+ ecx.full = regs->ecx;
+ } else
legacy_amd_cpuid4(index, &eax, &ebx, &ecx);
return cpuid4_info_fill_done(id4, eax, ebx, ecx);
@@ -270,25 +276,10 @@ static int fill_cpuid4_info(struct cpuinfo_x86 *c, int index, struct _cpuid4_inf
u8 cpu_vendor = boot_cpu_data.x86_vendor;
return (cpu_vendor == X86_VENDOR_AMD || cpu_vendor == X86_VENDOR_HYGON) ?
- amd_fill_cpuid4_info(index, id4) :
+ amd_fill_cpuid4_info(c, index, id4) :
intel_fill_cpuid4_info(c, index, id4);
}
-static int amd_find_num_cache_leaves(struct cpuinfo_x86 *c)
-{
- union _cpuid4_leaf_eax cache_eax;
- unsigned int eax, ebx, ecx, edx;
- int i = -1;
-
- /* Do a CPUID(0x8000001d) loop to calculate num_cache_leaves */
- do {
- ++i;
- cpuid_count(0x8000001d, i, &eax, &ebx, &ecx, &edx);
- cache_eax.full = eax;
- } while (cache_eax.split.type != CTYPE_NULL);
- return i;
-}
-
/*
* The max shared threads number comes from CPUID(0x4) EAX[25-14] with input
* ECX as cache index. Then right shift apicid by the number's order to get
@@ -328,10 +319,10 @@ void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, u16 die_id)
* Newer families: LLC ID is calculated from the number
* of threads sharing the L3 cache.
*/
- u32 llc_index = amd_find_num_cache_leaves(c) - 1;
+ u32 llc_index = cpuid_subleaf_count(c, 0x8000001d) - 1;
struct _cpuid4_info id4 = {};
- if (!amd_fill_cpuid4_info(llc_index, &id4))
+ if (!amd_fill_cpuid4_info(c, llc_index, &id4))
c->topo.llc_id = get_cache_id(c->topo.apicid, &id4);
}
}
@@ -353,7 +344,7 @@ void init_amd_cacheinfo(struct cpuinfo_x86 *c)
struct cpu_cacheinfo *ci = get_cpu_cacheinfo(c->cpu_index);
if (boot_cpu_has(X86_FEATURE_TOPOEXT))
- ci->num_leaves = amd_find_num_cache_leaves(c);
+ ci->num_leaves = cpuid_subleaf_count(c, 0x8000001d);
else if (c->extended_cpuid_level >= 0x80000006)
ci->num_leaves = (cpuid_edx(0x80000006) & 0xf000) ? 4 : 3;
}
@@ -362,7 +353,7 @@ void init_hygon_cacheinfo(struct cpuinfo_x86 *c)
{
struct cpu_cacheinfo *ci = get_cpu_cacheinfo(c->cpu_index);
- ci->num_leaves = amd_find_num_cache_leaves(c);
+ ci->num_leaves = cpuid_subleaf_count(c, 0x8000001d);
}
static void intel_cacheinfo_done(struct cpuinfo_x86 *c, unsigned int l3,
--
2.50.1
Powered by blists - more mailing lists