Date:	Thu, 7 Jun 2012 18:45:55 +0200
From:	Hans Rosenfeld <hans.rosenfeld@....com>
To:	<hpa@...or.com>
CC:	<tglx@...utronix.de>, <mingo@...e.hu>,
	<linux-kernel@...r.kernel.org>, <x86@...nel.org>,
	Hans Rosenfeld <hans.rosenfeld@....com>
Subject: [PATCH 1/5] x86, cacheinfo, amd: use cpuid topology extensions to enumerate caches

AMD CPU family 0x15 introduced a new CPUID leaf for topology extensions,
which can be used to enumerate the system's caches. Although a different
CPUID leaf number is used, it is essentially compatible with Intel's
CPUID leaf 4, so the existing code that parses CPUID leaf 4 can be
reused here. The old code emulating Intel's CPUID leaf 4 for AMD systems
is still used on AMD CPUs that don't support the topology extensions
leaf.
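
For illustration only, a minimal userspace sketch (not part of this
patch) of how the new leaf can be walked, assuming GCC's <cpuid.h>;
the field layout below mirrors Intel's CPUID leaf 4:

	#include <stdio.h>
	#include <cpuid.h>

	int main(void)
	{
		unsigned int eax, ebx, ecx, edx;
		int index;

		for (index = 0; ; index++) {
			/* leaf 0x8000001d, subleaf selects the cache index */
			__cpuid_count(0x8000001d, index, eax, ebx, ecx, edx);
			if ((eax & 0x1f) == 0)	/* cache type 0: no more caches */
				break;
			printf("index %d: level %u, type %u, %u thread(s) sharing\n",
			       index, (eax >> 5) & 0x7, eax & 0x1f,
			       ((eax >> 14) & 0xfff) + 1);
		}
		return 0;
	}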

There are two differences from the information in Intel's CPUID leaf 4:
- the "number of cores on the die" field is missing
- AMD returns extra information in the EDX register about WBINVD
  behaviour and cache inclusiveness
Both are currently unused in Linux.
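
Should they ever be needed, decoding the extra EDX bits would look
roughly like the sketch below (an illustration, not part of this patch;
bit positions as documented for the topology extensions leaf):

	/* edx as returned by CPUID leaf 0x8000001d */
	int wbinvd_scope    = edx & (1 << 0);	/* WBINVD/INVD behaviour */
	int cache_inclusive = edx & (1 << 1);	/* inclusive of lower levels */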

The shared_cpu_map can now be set up using the extended topology
information on systems that support it; the old special-case code for
family 0x15 cache indices 1 and 2 is gone. The old code for cache
index 3 stays for CPUs that don't support the topology extensions.
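
The grouping in the new code boils down to simple APIC ID arithmetic.
As a standalone sketch (shares_cache() is a hypothetical helper; the
variable names follow the patch):

	/* nshared = num_threads_sharing + 1, from the cache leaf's EAX */
	static int shares_cache(unsigned int apicid, unsigned int other_apicid,
				int nshared)
	{
		/* APIC ID of the first CPU in this cache-sharing group */
		unsigned int first = apicid - apicid % nshared;

		return other_apicid >= first && other_apicid < first + nshared;
	}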

Signed-off-by: Hans Rosenfeld <hans.rosenfeld@....com>
---
 arch/x86/include/asm/cpufeature.h     |    1 +
 arch/x86/kernel/cpu/intel_cacheinfo.c |   47 +++++++++++++++++++++++----------
 2 files changed, 34 insertions(+), 14 deletions(-)

diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 340ee49..080bfc7 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -303,6 +303,7 @@ extern const char * const x86_power_flags[32];
 #define cpu_has_perfctr_core	boot_cpu_has(X86_FEATURE_PERFCTR_CORE)
 #define cpu_has_cx8		boot_cpu_has(X86_FEATURE_CX8)
 #define cpu_has_cx16		boot_cpu_has(X86_FEATURE_CX16)
+#define cpu_has_topoext	boot_cpu_has(X86_FEATURE_TOPOEXT)
 
 #if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64)
 # define cpu_has_invlpg		1
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 9a7c90d..0973ca1 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -538,7 +538,11 @@ __cpuinit cpuid4_cache_lookup_regs(int index,
 	unsigned		edx;
 
 	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
-		amd_cpuid4(index, &eax, &ebx, &ecx);
+		if (cpu_has_topoext)
+			cpuid_count(0x8000001d, index, &eax.full, &ebx.full,
+				    &ecx.full, &edx);
+		else
+			amd_cpuid4(index, &eax, &ebx, &ecx);
 		amd_init_l3_cache(this_leaf, index);
 	} else {
 		cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
@@ -728,37 +732,52 @@ static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info);
 static int __cpuinit cache_shared_amd_cpu_map_setup(unsigned int cpu, int index)
 {
 	struct _cpuid4_info *this_leaf;
-	int ret, i, sibling;
-	struct cpuinfo_x86 *c = &cpu_data(cpu);
+	int i, sibling;
+
+	if (cpu_has_topoext) {
+		unsigned int apicid = cpu_data(cpu).apicid;
+		int nshared, first;
+
+		if (!per_cpu(ici_cpuid4_info, cpu))
+			return 0;
+
+		this_leaf = CPUID4_INFO_IDX(cpu, index);
+		nshared = 1 + this_leaf->base.eax.split.num_threads_sharing;
+		first = apicid - apicid % nshared;
+
+		for_each_online_cpu(i) {
+			if (cpu_data(i).apicid < first ||
+			    cpu_data(i).apicid >= first + nshared)
+				continue;
 
-	ret = 0;
-	if (index == 3) {
-		ret = 1;
-		for_each_cpu(i, cpu_llc_shared_mask(cpu)) {
 			if (!per_cpu(ici_cpuid4_info, i))
 				continue;
+
 			this_leaf = CPUID4_INFO_IDX(i, index);
-			for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) {
-				if (!cpu_online(sibling))
+			for_each_online_cpu(sibling) {
+				if (cpu_data(sibling).apicid < first ||
+				    cpu_data(sibling).apicid >= first + nshared)
 					continue;
+
 				set_bit(sibling, this_leaf->shared_cpu_map);
 			}
 		}
-	} else if ((c->x86 == 0x15) && ((index == 1) || (index == 2))) {
-		ret = 1;
-		for_each_cpu(i, cpu_sibling_mask(cpu)) {
+	} else if (index == 3) {
+		for_each_cpu(i, cpu_llc_shared_mask(cpu)) {
 			if (!per_cpu(ici_cpuid4_info, i))
 				continue;
 			this_leaf = CPUID4_INFO_IDX(i, index);
-			for_each_cpu(sibling, cpu_sibling_mask(cpu)) {
+			for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) {
 				if (!cpu_online(sibling))
 					continue;
 				set_bit(sibling, this_leaf->shared_cpu_map);
 			}
 		}
+	} else {
+		return 0;
 	}
 
-	return ret;
+	return 1;
 }
 
 static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
-- 
1.7.7

