lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date: Thu, 22 Feb 2024 10:40:02 -0800
From: Dave Hansen <dave.hansen@...ux.intel.com>
To: linux-kernel@...r.kernel.org
Cc: kirill.shutemov@...ux.intel.com,pbonzini@...hat.com,tglx@...utronix.de,x86@...nel.org,bp@...en8.de,Dave Hansen <dave.hansen@...ux.intel.com>
Subject: [RFC][PATCH 27/34] x86/cpu: Enforce read-only x86_config state (lightly)


From: Dave Hansen <dave.hansen@...ux.intel.com>

Part of the reason that this all turned into such a mess is that there
were no rules around when 'struct cpuinfo_x86' was written.  It was
(and is) just a free-for-all.

Establish that 'x86_config' has two phases in its lifetime: a
C_INITIALIZING phase where it can be written and a later C_FINALIZED
phase where it can only be read.  It is simple to audit that this
state transition happens just where the comment says it should.

Check that the config is C_FINALIZED in each of the wrappers that
read an 'x86_config' value.  If something reads too early, stash
some information about the caller so that a warning can be spit
out later.

This goofy stash-then-warn construct is necessary here because any
hapless readers are likely to be in a spot where they cannot easily
WARN() themselves, like the early Xen PV boot that has caused so many
problems.

This also moves one x86_clflush_size() reference over to the more
direct x86_config.cache_alignment because otherwise it would trip
the !C_FINALIZED check.

Signed-off-by: Dave Hansen <dave.hansen@...ux.intel.com>
---

 b/arch/x86/include/asm/processor.h |   22 ++++++++++++++++++++++
 b/arch/x86/kernel/cpu/common.c     |    5 ++++-
 b/arch/x86/kernel/setup.c          |    7 +++++++
 3 files changed, 33 insertions(+), 1 deletion(-)

diff -puN arch/x86/include/asm/processor.h~x86_config-finalize arch/x86/include/asm/processor.h
--- a/arch/x86/include/asm/processor.h~x86_config-finalize	2024-02-22 10:09:01.772975960 -0800
+++ b/arch/x86/include/asm/processor.h	2024-02-22 10:09:01.780976274 -0800
@@ -183,6 +183,11 @@ struct x86_addr_config {
 	u8 min_cache_bits;
 };
 
+enum x86_sys_config_state {
+	C_INITIALIZING,
+	C_FINALIZED
+};
+
 /*
  * System-wide configuration that is shared by all processors.
  *
@@ -190,6 +195,9 @@ struct x86_addr_config {
  * modified after that.
  */
 struct x86_sys_config {
+	enum x86_sys_config_state conf_state;
+	void *early_reader;
+
 	/* Address bits supported by all processors */
 	u8	phys_bits;
 	u8	virt_bits;
@@ -805,23 +813,37 @@ static inline void weak_wrmsr_fence(void
 	alternative("mfence; lfence", "", ALT_NOT(X86_FEATURE_APIC_MSRS_FENCE));
 }
 
+static inline void read_x86_config(void)
+{
+	if (x86_config.conf_state == C_FINALIZED)
+		return;
+
+	/* Only record the first one: */
+	if (!x86_config.early_reader)
+		x86_config.early_reader = __builtin_return_address(0);
+}
+
 static inline u8 x86_phys_bits(void)
 {
+	read_x86_config();
 	return x86_config.phys_bits;
 }
 
 static inline u8 x86_virt_bits(void)
 {
+	read_x86_config();
 	return x86_config.virt_bits;
 }
 
 static inline u8 x86_cache_bits(void)
 {
+	read_x86_config();
 	return x86_config.cache_bits;
 }
 
 static inline u8 x86_clflush_size(void)
 {
+	read_x86_config();
 	return x86_config.clflush_size;
 }
 
diff -puN arch/x86/kernel/cpu/common.c~x86_config-finalize arch/x86/kernel/cpu/common.c
--- a/arch/x86/kernel/cpu/common.c~x86_config-finalize	2024-02-22 10:09:01.776976117 -0800
+++ b/arch/x86/kernel/cpu/common.c	2024-02-22 10:09:01.780976274 -0800
@@ -1114,6 +1114,9 @@ void get_cpu_address_sizes(struct cpuinf
 	u32 eax, ebx, ecx, edx;
 	bool vp_bits_from_cpuid = true;
 
+	WARN_ON_ONCE(x86_config.conf_state > C_INITIALIZING);
+	x86_config.conf_state = C_INITIALIZING;
+
 	if (!cpu_has(c, X86_FEATURE_CPUID) ||
 	    (c->extended_cpuid_level < 0x80000008))
 		vp_bits_from_cpuid = false;
@@ -1142,7 +1145,7 @@ void get_cpu_address_sizes(struct cpuinf
 	if (x86_config.cache_bits < bsp_addr_config.min_cache_bits)
 		x86_config.cache_bits = bsp_addr_config.min_cache_bits;
 
-	x86_config.cache_alignment = x86_clflush_size();
+	x86_config.cache_alignment = x86_config.clflush_size;
 	if (bsp_addr_config.cache_align_mult)
 		x86_config.cache_alignment *= bsp_addr_config.cache_align_mult;
 
diff -puN arch/x86/kernel/setup.c~x86_config-finalize arch/x86/kernel/setup.c
--- a/arch/x86/kernel/setup.c~x86_config-finalize	2024-02-22 10:09:01.776976117 -0800
+++ b/arch/x86/kernel/setup.c	2024-02-22 10:09:01.780976274 -0800
@@ -762,7 +762,14 @@ void __init setup_arch(char **cmdline_p)
 	olpc_ofw_detect();
 
 	idt_setup_early_traps();
+
 	early_cpu_init();
+
+	/* Ensure no readers snuck in before the config was finished: */
+	WARN_ONCE(x86_config.early_reader, "x86_config.early_reader: %pS\n",
+		  x86_config.early_reader);
+	x86_config.conf_state = C_FINALIZED;
+
 	jump_label_init();
 	static_call_init();
 	early_ioremap_init();
_

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ