lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date: Thu, 22 Feb 2024 10:39:44 -0800
From: Dave Hansen <dave.hansen@...ux.intel.com>
To: linux-kernel@...r.kernel.org
Cc: kirill.shutemov@...ux.intel.com,pbonzini@...hat.com,tglx@...utronix.de,x86@...nel.org,bp@...en8.de,Dave Hansen <dave.hansen@...ux.intel.com>
Subject: [RFC][PATCH 13/34] x86/boot: Use address reduction config to handle erratum


From: Dave Hansen <dave.hansen@...ux.intel.com>

Continue to chip away at sites that muck with ->x86_phys_bits.  This
one is an oldie but a goodie:

	af9c142de94e ("[PATCH] x86_64: Force correct address space size for MTRR on some 64bit Intel Xeons")

Evidently, the CPUs in question "report 40bit, but only have 36bits
of physical address space."  Since there's now a handy way to reduce
the number of physical address bits, use that to reduce from 40->36.

This means there are now two (Intel) users of the address bits
reduction feature.  There is no way a 2005-era CPU will ever have TME,
but it is still nice to be more explicit about the possibility of a
collision.

Signed-off-by: Dave Hansen <dave.hansen@...ux.intel.com>
---

 b/arch/x86/kernel/cpu/intel.c |   27 +++++++++++++++++----------
 1 file changed, 17 insertions(+), 10 deletions(-)

diff -puN arch/x86/kernel/cpu/intel.c~intel-phys-addr-errata arch/x86/kernel/cpu/intel.c
--- a/arch/x86/kernel/cpu/intel.c~intel-phys-addr-errata	2024-02-22 10:08:54.772701150 -0800
+++ b/arch/x86/kernel/cpu/intel.c	2024-02-22 10:08:54.776701306 -0800
@@ -242,11 +242,6 @@ static void early_init_intel(struct cpui
 		c->x86_cache_alignment = 128;
 #endif
 
-	/* CPUID workaround for 0F33/0F34 CPU */
-	if (c->x86 == 0xF && c->x86_model == 0x3
-	    && (c->x86_stepping == 0x3 || c->x86_stepping == 0x4))
-		c->x86_phys_bits = 36;
-
 	/*
 	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
 	 * with P/T states and does not stop in deep C-states.
@@ -344,7 +339,7 @@ static void early_init_intel(struct cpui
 #define MKTME_UNINITIALIZED		2
 static int mktme_status = MKTME_UNINITIALIZED;
 
-static void detect_tme(struct cpuinfo_x86 *c)
+static int detect_tme(struct cpuinfo_x86 *c)
 {
 	u64 tme_activate, tme_policy, tme_crypto_algs;
 	int keyid_bits = 0, nr_keyids = 0;
@@ -368,7 +363,7 @@ static void detect_tme(struct cpuinfo_x8
 	if (!TME_ACTIVATE_LOCKED(tme_activate) || !TME_ACTIVATE_ENABLED(tme_activate)) {
 		pr_info_once("x86/tme: not enabled by BIOS\n");
 		mktme_status = MKTME_DISABLED;
-		return;
+		return 0;
 	}
 
 	if (mktme_status != MKTME_UNINITIALIZED)
@@ -401,16 +396,28 @@ detect_keyid_bits:
 		mktme_status = MKTME_ENABLED;
 	}
 
-	/* KeyID bits effectively lower the number of physical address bits */
-	bsp_addr_config.phys_addr_reduction_bits = keyid_bits;
+	return keyid_bits;
 }
 
 static void bsp_init_intel(struct cpuinfo_x86 *c)
 {
+	int keyid_bits = 0;
+
 	resctrl_cpu_detect(c);
 
 	if (cpu_has(c, X86_FEATURE_TME))
-		detect_tme(c);
+		keyid_bits = detect_tme(c);
+
+	/* KeyID bits effectively lower the number of physical address bits */
+	bsp_addr_config.phys_addr_reduction_bits = keyid_bits;
+
+	/* CPUID workaround for 0F33/0F34 CPU */
+	if (c->x86 == 0xF && c->x86_model == 0x3
+	    && (c->x86_stepping == 0x3 || c->x86_stepping == 0x4)) {
+		/* Warn if MKTME and this workaround collide: */
+		WARN_ON_ONCE(keyid_bits);
+		bsp_addr_config.phys_addr_reduction_bits = 4;
+	}
 }
 
 #ifdef CONFIG_X86_32
_

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ