Message-Id: <20220820092533.29420-2-jgross@suse.com>
Date:   Sat, 20 Aug 2022 11:25:24 +0200
From:   Juergen Gross <jgross@...e.com>
To:     xen-devel@...ts.xenproject.org, x86@...nel.org,
        linux-kernel@...r.kernel.org
Cc:     Juergen Gross <jgross@...e.com>,
        Thomas Gleixner <tglx@...utronix.de>,
        Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
        Dave Hansen <dave.hansen@...ux.intel.com>,
        "H. Peter Anvin" <hpa@...or.com>, stable@...r.kernel.org
Subject: [PATCH v2 01/10] x86/mtrr: fix MTRR fixup on APs

When booting or resuming the system, the MTRR state is saved on the
boot processor and then loaded into the MTRRs of all other CPUs.
While the MTRRs are being updated, the MTRR mechanism needs to be
disabled by writing the related MSR. The old contents of this MSR are
saved in a set of static variables, which are later used to restore
the MSR.
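
For reference, the save/restore pattern described above looks roughly
like this in the pre-patch code (simplified from
arch/x86/kernel/cpu/mtrr/generic.c, with locking and cache/TLB
flushing omitted):

  static u32 deftype_lo, deftype_hi;

  static void prepare_set(void)
  {
          /* Save the old MTRRdefType MSR contents ... */
          rdmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);

          /* ... then disable MTRRs and set the default type to uncached. */
          mtrr_wrmsr(MSR_MTRRdefType, deftype_lo & ~0xcff, deftype_hi);
  }

  static void post_set(void)
  {
          /* Restore the saved MSR contents, re-enabling the MTRRs. */
          mtrr_wrmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);
  }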

In case the MSR contents need to be modified on a CPU because the MSR
was not properly initialized by the BIOS, the related update function
modifies the static variables accordingly.

Unfortunately, the MTRR state update usually runs on all CPUs at the
same time, so using just one set of static variables for all CPUs is
racy if the MSR contents differ across CPUs.
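
One possible interleaving illustrating the race described above
(assuming the MSR contents of CPU0 and CPU1 differ):

  CPU0: rdmsr(MSR_MTRRdefType, ...)       saves CPU0's MSR contents
  CPU1: rdmsr(MSR_MTRRdefType, ...)       overwrites them with CPU1's contents
  CPU0: mtrr_wrmsr(MSR_MTRRdefType, ...)  "restores" CPU1's contents on CPU0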

Fix that by using percpu variables for saving the MSR contents.

Cc: stable@...r.kernel.org
Signed-off-by: Juergen Gross <jgross@...e.com>
---
I thought about adding a "Fixes:" tag for the kernel's initial git
commit, which might have been entertaining, but not really helpful.
Percpu variables were preferred over on-stack ones in order to avoid
more code churn in the followup patches decoupling PAT from MTRR
support.
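
(For comparison, the on-stack variant would look roughly like the
hypothetical sketch below; every caller of prepare_set()/post_set()
would have to be changed to pass the saved values through, hence the
churn mentioned above.)

  static void prepare_set(u32 *saved_lo, u32 *saved_hi)
  {
          /* Save the old MSR contents into the caller's stack frame. */
          rdmsr(MSR_MTRRdefType, *saved_lo, *saved_hi);
          mtrr_wrmsr(MSR_MTRRdefType, *saved_lo & ~0xcff, *saved_hi);
  }

  static void post_set(u32 saved_lo, u32 saved_hi)
  {
          mtrr_wrmsr(MSR_MTRRdefType, saved_lo, saved_hi);
  }
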
V2:
- new patch
---
 arch/x86/kernel/cpu/mtrr/generic.c | 21 ++++++++++++++-------
 1 file changed, 14 insertions(+), 7 deletions(-)

diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index 558108296f3c..3d185fcf08ca 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -679,7 +679,8 @@ static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
 	return changed;
 }
 
-static u32 deftype_lo, deftype_hi;
+static DEFINE_PER_CPU(u32, deftype_lo);
+static DEFINE_PER_CPU(u32, deftype_hi);
 
 /**
  * set_mtrr_state - Set the MTRR state for this CPU.
@@ -691,6 +692,7 @@ static unsigned long set_mtrr_state(void)
 {
 	unsigned long change_mask = 0;
 	unsigned int i;
+	u32 *lo = this_cpu_ptr(&deftype_lo);
 
 	for (i = 0; i < num_var_ranges; i++) {
 		if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
@@ -704,10 +706,10 @@ static unsigned long set_mtrr_state(void)
 	 * Set_mtrr_restore restores the old value of MTRRdefType,
 	 * so to set it we fiddle with the saved value:
 	 */
-	if ((deftype_lo & 0xff) != mtrr_state.def_type
-	    || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {
+	if ((*lo & 0xff) != mtrr_state.def_type
+	    || ((*lo & 0xc00) >> 10) != mtrr_state.enabled) {
 
-		deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type |
+		*lo = (*lo & ~0xcff) | mtrr_state.def_type |
 			     (mtrr_state.enabled << 10);
 		change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
 	}
@@ -729,6 +731,8 @@ static DEFINE_RAW_SPINLOCK(set_atomicity_lock);
 static void prepare_set(void) __acquires(set_atomicity_lock)
 {
 	unsigned long cr0;
+	u32 *lo = this_cpu_ptr(&deftype_lo);
+	u32 *hi = this_cpu_ptr(&deftype_hi);
 
 	/*
 	 * Note that this is not ideal
@@ -763,10 +767,10 @@ static void prepare_set(void) __acquires(set_atomicity_lock)
 	flush_tlb_local();
 
 	/* Save MTRR state */
-	rdmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);
+	rdmsr(MSR_MTRRdefType, *lo, *hi);
 
 	/* Disable MTRRs, and set the default type to uncached */
-	mtrr_wrmsr(MSR_MTRRdefType, deftype_lo & ~0xcff, deftype_hi);
+	mtrr_wrmsr(MSR_MTRRdefType, *lo & ~0xcff, *hi);
 
 	/* Again, only flush caches if we have to. */
 	if (!static_cpu_has(X86_FEATURE_SELFSNOOP))
@@ -775,12 +779,15 @@ static void prepare_set(void) __acquires(set_atomicity_lock)
 
 static void post_set(void) __releases(set_atomicity_lock)
 {
+	u32 *lo = this_cpu_ptr(&deftype_lo);
+	u32 *hi = this_cpu_ptr(&deftype_hi);
+
 	/* Flush TLBs (no need to flush caches - they are disabled) */
 	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
 	flush_tlb_local();
 
 	/* Intel (P6) standard MTRRs */
-	mtrr_wrmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);
+	mtrr_wrmsr(MSR_MTRRdefType, *lo, *hi);
 
 	/* Enable caches */
 	write_cr0(read_cr0() & ~X86_CR0_CD);
-- 
2.35.3
