Message-ID: <20260121141106.755458-2-jgross@suse.com>
Date: Wed, 21 Jan 2026 15:11:03 +0100
From: Juergen Gross <jgross@...e.com>
To: linux-kernel@...r.kernel.org,
	x86@...nel.org
Cc: Juergen Gross <jgross@...e.com>,
	Thomas Gleixner <tglx@...nel.org>,
	Ingo Molnar <mingo@...hat.com>,
	Borislav Petkov <bp@...en8.de>,
	Dave Hansen <dave.hansen@...ux.intel.com>,
	"H. Peter Anvin" <hpa@...or.com>
Subject: [PATCH 1/4] x86/mtrr: Move cache_enable() and cache_disable() to mtrr/generic.c

cache_enable() and cache_disable() are used by the generic MTRR code only.
Move them and the related helpers to mtrr/generic.c, allowing them to be
made static. This requires moving the cache_enable() and cache_disable()
calls from cache_cpu_init() into mtrr_generic_set_state().

mtrr_enable() and mtrr_disable() can then be made static, too.
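
Roughly, the change looks like this (a simplified sketch, not the exact
code; see the diff below for the details):

	/* Before: the caller brackets the MTRR update itself. */
	static void cache_cpu_init(void)
	{
		...
		if (memory_caching_control & CACHE_MTRR) {
			cache_disable();
			mtrr_generic_set_state();
			cache_enable();
		}
		...
	}

	/* After: mtrr_generic_set_state() does the bracketing internally,
	 * so cache_disable()/cache_enable() (and mtrr_disable()/
	 * mtrr_enable()) can become static in mtrr/generic.c.
	 */
	void mtrr_generic_set_state(void)
	{
		cache_disable();
		/* program the MTRRs ... */
		cache_enable();
	}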

While moving the code, drop the comment referring to the PAT MSR, as it
no longer applies.

No functional change intended.

Signed-off-by: Juergen Gross <jgross@...e.com>
---
 arch/x86/include/asm/cacheinfo.h   |  2 -
 arch/x86/include/asm/mtrr.h        |  2 -
 arch/x86/kernel/cpu/cacheinfo.c    | 80 +---------------------------
 arch/x86/kernel/cpu/mtrr/generic.c | 83 +++++++++++++++++++++++++++++-
 4 files changed, 82 insertions(+), 85 deletions(-)

diff --git a/arch/x86/include/asm/cacheinfo.h b/arch/x86/include/asm/cacheinfo.h
index 5aa061199866..07b0e5e6d5bb 100644
--- a/arch/x86/include/asm/cacheinfo.h
+++ b/arch/x86/include/asm/cacheinfo.h
@@ -7,8 +7,6 @@ extern unsigned int memory_caching_control;
 #define CACHE_MTRR 0x01
 #define CACHE_PAT  0x02
 
-void cache_disable(void);
-void cache_enable(void);
 void set_cache_aps_delayed_init(bool val);
 bool get_cache_aps_delayed_init(void);
 void cache_bp_init(void);
diff --git a/arch/x86/include/asm/mtrr.h b/arch/x86/include/asm/mtrr.h
index 76b95bd1a405..d547b364ce65 100644
--- a/arch/x86/include/asm/mtrr.h
+++ b/arch/x86/include/asm/mtrr.h
@@ -58,8 +58,6 @@ extern int mtrr_del(int reg, unsigned long base, unsigned long size);
 extern int mtrr_del_page(int reg, unsigned long base, unsigned long size);
 extern int mtrr_trim_uncached_memory(unsigned long end_pfn);
 extern int amd_special_default_mtrr(void);
-void mtrr_disable(void);
-void mtrr_enable(void);
 void mtrr_generic_set_state(void);
 #  else
 static inline void guest_force_mtrr_state(struct mtrr_var_range *var,
diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c
index 51a95b07831f..0d2150de0120 100644
--- a/arch/x86/kernel/cpu/cacheinfo.c
+++ b/arch/x86/kernel/cpu/cacheinfo.c
@@ -635,92 +635,14 @@ int populate_cache_leaves(unsigned int cpu)
 	return 0;
 }
 
-/*
- * Disable and enable caches. Needed for changing MTRRs and the PAT MSR.
- *
- * Since we are disabling the cache don't allow any interrupts,
- * they would run extremely slow and would only increase the pain.
- *
- * The caller must ensure that local interrupts are disabled and
- * are reenabled after cache_enable() has been called.
- */
-static unsigned long saved_cr4;
-static DEFINE_RAW_SPINLOCK(cache_disable_lock);
-
-/*
- * Cache flushing is the most time-consuming step when programming the
- * MTRRs.  On many Intel CPUs without known erratas, it can be skipped
- * if the CPU declares cache self-snooping support.
- */
-static void maybe_flush_caches(void)
-{
-	if (!static_cpu_has(X86_FEATURE_SELFSNOOP))
-		wbinvd();
-}
-
-void cache_disable(void) __acquires(cache_disable_lock)
-{
-	unsigned long cr0;
-
-	/*
-	 * This is not ideal since the cache is only flushed/disabled
-	 * for this CPU while the MTRRs are changed, but changing this
-	 * requires more invasive changes to the way the kernel boots.
-	 */
-	raw_spin_lock(&cache_disable_lock);
-
-	/* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
-	cr0 = read_cr0() | X86_CR0_CD;
-	write_cr0(cr0);
-
-	maybe_flush_caches();
-
-	/* Save value of CR4 and clear Page Global Enable (bit 7) */
-	if (cpu_feature_enabled(X86_FEATURE_PGE)) {
-		saved_cr4 = __read_cr4();
-		__write_cr4(saved_cr4 & ~X86_CR4_PGE);
-	}
-
-	/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
-	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
-	flush_tlb_local();
-
-	if (cpu_feature_enabled(X86_FEATURE_MTRR))
-		mtrr_disable();
-
-	maybe_flush_caches();
-}
-
-void cache_enable(void) __releases(cache_disable_lock)
-{
-	/* Flush TLBs (no need to flush caches - they are disabled) */
-	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
-	flush_tlb_local();
-
-	if (cpu_feature_enabled(X86_FEATURE_MTRR))
-		mtrr_enable();
-
-	/* Enable caches */
-	write_cr0(read_cr0() & ~X86_CR0_CD);
-
-	/* Restore value of CR4 */
-	if (cpu_feature_enabled(X86_FEATURE_PGE))
-		__write_cr4(saved_cr4);
-
-	raw_spin_unlock(&cache_disable_lock);
-}
-
 static void cache_cpu_init(void)
 {
 	unsigned long flags;
 
 	local_irq_save(flags);
 
-	if (memory_caching_control & CACHE_MTRR) {
-		cache_disable();
+	if (memory_caching_control & CACHE_MTRR)
 		mtrr_generic_set_state();
-		cache_enable();
-	}
 
 	if (memory_caching_control & CACHE_PAT)
 		pat_cpu_init();
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index 0863733858dc..2c874b88e12c 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -945,7 +945,7 @@ static unsigned long set_mtrr_state(void)
 	return change_mask;
 }
 
-void mtrr_disable(void)
+static void mtrr_disable(void)
 {
 	/* Save MTRR state */
 	rdmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);
@@ -954,16 +954,93 @@ void mtrr_disable(void)
 	mtrr_wrmsr(MSR_MTRRdefType, deftype_lo & MTRR_DEF_TYPE_DISABLE, deftype_hi);
 }
 
-void mtrr_enable(void)
+static void mtrr_enable(void)
 {
 	/* Intel (P6) standard MTRRs */
 	mtrr_wrmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);
 }
 
+/*
+ * Disable and enable caches. Needed for changing MTRRs.
+ *
+ * Since we are disabling the cache don't allow any interrupts,
+ * they would run extremely slow and would only increase the pain.
+ *
+ * The caller must ensure that local interrupts are disabled and
+ * are reenabled after cache_enable() has been called.
+ */
+static unsigned long saved_cr4;
+static DEFINE_RAW_SPINLOCK(cache_disable_lock);
+
+/*
+ * Cache flushing is the most time-consuming step when programming the
+ * MTRRs.  On many Intel CPUs without known erratas, it can be skipped
+ * if the CPU declares cache self-snooping support.
+ */
+static void maybe_flush_caches(void)
+{
+	if (!static_cpu_has(X86_FEATURE_SELFSNOOP))
+		wbinvd();
+}
+
+static void cache_disable(void) __acquires(cache_disable_lock)
+{
+	unsigned long cr0;
+
+	/*
+	 * This is not ideal since the cache is only flushed/disabled
+	 * for this CPU while the MTRRs are changed, but changing this
+	 * requires more invasive changes to the way the kernel boots.
+	 */
+	raw_spin_lock(&cache_disable_lock);
+
+	/* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
+	cr0 = read_cr0() | X86_CR0_CD;
+	write_cr0(cr0);
+
+	maybe_flush_caches();
+
+	/* Save value of CR4 and clear Page Global Enable (bit 7) */
+	if (cpu_feature_enabled(X86_FEATURE_PGE)) {
+		saved_cr4 = __read_cr4();
+		__write_cr4(saved_cr4 & ~X86_CR4_PGE);
+	}
+
+	/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
+	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
+	flush_tlb_local();
+
+	if (cpu_feature_enabled(X86_FEATURE_MTRR))
+		mtrr_disable();
+
+	maybe_flush_caches();
+}
+
+static void cache_enable(void) __releases(cache_disable_lock)
+{
+	/* Flush TLBs (no need to flush caches - they are disabled) */
+	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
+	flush_tlb_local();
+
+	if (cpu_feature_enabled(X86_FEATURE_MTRR))
+		mtrr_enable();
+
+	/* Enable caches */
+	write_cr0(read_cr0() & ~X86_CR0_CD);
+
+	/* Restore value of CR4 */
+	if (cpu_feature_enabled(X86_FEATURE_PGE))
+		__write_cr4(saved_cr4);
+
+	raw_spin_unlock(&cache_disable_lock);
+}
+
 void mtrr_generic_set_state(void)
 {
 	unsigned long mask, count;
 
+	cache_disable();
+
 	/* Actually set the state */
 	mask = set_mtrr_state();
 
@@ -973,6 +1050,8 @@ void mtrr_generic_set_state(void)
 			set_bit(count, &smp_changes_mask);
 		mask >>= 1;
 	}
+
+	cache_enable();
 }
 
 /**
-- 
2.52.0

