Message-ID: <20250304085152.51092-15-darwi@linutronix.de>
Date: Tue, 4 Mar 2025 09:51:25 +0100
From: "Ahmed S. Darwish" <darwi@...utronix.de>
To: Borislav Petkov <bp@...en8.de>,
Ingo Molnar <mingo@...hat.com>,
Dave Hansen <dave.hansen@...ux.intel.com>
Cc: Thomas Gleixner <tglx@...utronix.de>,
John Ogness <john.ogness@...utronix.de>,
"H. Peter Anvin" <hpa@...or.com>,
Andrew Cooper <andrew.cooper3@...rix.com>,
x86@...nel.org,
x86-cpuid@...ts.linux.dev,
LKML <linux-kernel@...r.kernel.org>,
"Ahmed S. Darwish" <darwi@...utronix.de>
Subject: [PATCH v1 14/40] x86/cacheinfo: Refactor leaf 0x2 cache descriptor lookup
From: Thomas Gleixner <tglx@...utronix.de>
Extract the cache descriptor lookup logic out of the leaf 0x2 parsing
code and into a dedicated function. This disentangles the lookup from
the deeply nested leaf 0x2 parsing loop.

Remove the cache table termination entry; it is no longer needed now
that the lookup is bounded by ARRAY_SIZE().
[darwi: Move refactoring logic into this separate commit + commit log.
Remove the cache table termination entry.]
Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
Signed-off-by: Ahmed S. Darwish <darwi@...utronix.de>
---
arch/x86/kernel/cpu/cacheinfo.c | 45 +++++++++++++++------------------
1 file changed, 20 insertions(+), 25 deletions(-)
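
As a reviewer aid, here is a minimal standalone sketch of the
ARRAY_SIZE()-bounded lookup pattern this patch switches to. The struct
layout, descriptor values, and helper names below are simplified
stand-ins for illustration only, not the kernel's _cache_table:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define ARRAY_SIZE(arr)	(sizeof(arr) / sizeof((arr)[0]))

struct cache_entry {
	uint8_t		descriptor;
	unsigned int	size_kb;
};

/* No { 0x00, ... } terminator needed: the loop is bounded by ARRAY_SIZE(). */
static const struct cache_entry table[] = {
	{ 0x2c, 32    },	/* illustrative L1d descriptor */
	{ 0x30, 32    },	/* illustrative L1i descriptor */
	{ 0xea, 12288 },	/* illustrative L3 descriptor (12 MB) */
};

static const struct cache_entry *lookup(uint8_t desc)
{
	for (size_t i = 0; i < ARRAY_SIZE(table); i++) {
		if (table[i].descriptor == desc)
			return &table[i];
	}

	return NULL;	/* unknown descriptor: caller just skips it */
}

int main(void)
{
	const struct cache_entry *e = lookup(0xea);

	if (e)
		printf("descriptor 0x%02x -> %u KB\n", e->descriptor, e->size_kb);

	return 0;
}

Bounding the loop with ARRAY_SIZE() is what allows the table to drop
its sentinel entry, as the commit message notes.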
diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c
index ebd72016e7a2..3be7ea8444ec 100644
--- a/arch/x86/kernel/cpu/cacheinfo.c
+++ b/arch/x86/kernel/cpu/cacheinfo.c
@@ -123,7 +123,6 @@ static const struct _cache_table cache_table[] =
	{ 0xea, LVL_3, MB(12) },	/* 24-way set assoc, 64 byte line size */
	{ 0xeb, LVL_3, MB(18) },	/* 24-way set assoc, 64 byte line size */
	{ 0xec, LVL_3, MB(24) },	/* 24-way set assoc, 64 byte line size */
-	{ 0x00, 0, 0}
};
@@ -728,6 +727,16 @@ void init_hygon_cacheinfo(struct cpuinfo_x86 *c)
	ci->num_leaves = find_num_cache_leaves(c);
}
+static const struct _cache_table *cache_table_get(u8 desc)
+{
+	for (int i = 0; i < ARRAY_SIZE(cache_table); i++) {
+		if (cache_table[i].descriptor == desc)
+			return &cache_table[i];
+	}
+
+	return NULL;
+}
+
void init_intel_cacheinfo(struct cpuinfo_x86 *c)
{
	/* Cache sizes */
@@ -784,35 +793,21 @@ void init_intel_cacheinfo(struct cpuinfo_x86 *c)
	/* Don't use CPUID(2) if CPUID(4) is supported. */
	if (!ci->num_leaves && c->cpuid_level > 1) {
+		const struct _cache_table *entry;
		union leaf_0x2_regs regs;
		u8 *desc;
		get_leaf_0x2_regs(&regs);
		for_each_leaf_0x2_desc(regs, desc) {
-			unsigned char k = 0;
-
-			/* look up this descriptor in the table */
-			while (cache_table[k].descriptor != 0) {
-				if (cache_table[k].descriptor == *desc) {
-					switch (cache_table[k].cache_type) {
-					case LVL_1_INST:
-						l1i += cache_table[k].size;
-						break;
-					case LVL_1_DATA:
-						l1d += cache_table[k].size;
-						break;
-					case LVL_2:
-						l2 += cache_table[k].size;
-						break;
-					case LVL_3:
-						l3 += cache_table[k].size;
-						break;
-					}
-
-					break;
-				}
-
-				k++;
+			entry = cache_table_get(*desc);
+			if (!entry)
+				continue;
+
+			switch (entry->cache_type) {
+			case LVL_1_INST: l1i += entry->size; break;
+			case LVL_1_DATA: l1d += entry->size; break;
+			case LVL_2:	 l2 += entry->size; break;
+			case LVL_3:	 l3 += entry->size; break;
			}
		}
	}
--
2.48.1