Message-ID: <20260121141106.755458-3-jgross@suse.com>
Date: Wed, 21 Jan 2026 15:11:04 +0100
From: Juergen Gross <jgross@...e.com>
To: linux-kernel@...r.kernel.org,
x86@...nel.org
Cc: Juergen Gross <jgross@...e.com>,
Thomas Gleixner <tglx@...nel.org>,
Ingo Molnar <mingo@...hat.com>,
Borislav Petkov <bp@...en8.de>,
Dave Hansen <dave.hansen@...ux.intel.com>,
"H. Peter Anvin" <hpa@...or.com>
Subject: [PATCH 2/4] x86/mtrr: Introduce MTRR work state structure
Instead of using static variables to carry local state across a
cache_disable() ... cache_enable() sequence, use a structure allocated
on the caller's stack for that purpose.
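To illustrate the pattern outside of any kernel context, here is a
minimal userspace sketch of paired begin/end helpers that take a
caller-provided state structure instead of communicating via file-scope
statics. The names work_state, work_begin() and work_end() are made up
for illustration only; they are not the interfaces touched by this
patch:

  #include <stdio.h>

  struct work_state {
          unsigned long saved_flags;      /* analogous to the saved CR4 */
          unsigned int lo, hi;            /* analogous to MTRRdefType lo/hi */
  };

  static unsigned long hw_flags = 0x80;  /* stands in for a control register */

  /* Save the current hardware state into *state, then modify it. */
  static void work_begin(struct work_state *state)
  {
          state->saved_flags = hw_flags;
          hw_flags &= ~0x80UL;            /* e.g. clear a "global enable" bit */
  }

  /* Restore the hardware state saved in *state. */
  static void work_end(struct work_state *state)
  {
          hw_flags = state->saved_flags;
  }

  int main(void)
  {
          struct work_state state;        /* per-caller, no shared statics */

          work_begin(&state);
          printf("flags while disabled: %#lx\n", hw_flags);
          work_end(&state);
          printf("flags restored:       %#lx\n", hw_flags);
          return 0;
  }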
Signed-off-by: Juergen Gross <jgross@...e.com>
---
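Note on locking: with the saved MSR value and CR4 living on each
caller's stack, cache_disable_lock is no longer needed to protect that
state; it still serializes the cache_disable() ... cache_enable()
sequence itself, which is why the NOTE above set_mtrr_state() could be
shortened.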
arch/x86/kernel/cpu/mtrr/generic.c | 58 ++++++++++++++++--------------
1 file changed, 31 insertions(+), 27 deletions(-)
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index 2c874b88e12c..ac95b19b01d0 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -905,18 +905,19 @@ static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
return changed;
}
-static u32 deftype_lo, deftype_hi;
+struct mtrr_work_state {
+ unsigned long cr4;
+ u32 lo;
+ u32 hi;
+};
/**
* set_mtrr_state - Set the MTRR state for this CPU.
*
- * NOTE: The CPU must already be in a safe state for MTRR changes, including
- * measures that only a single CPU can be active in set_mtrr_state() in
- * order to not be subject to races for usage of deftype_lo. This is
- * accomplished by taking cache_disable_lock.
+ * NOTE: The CPU must already be in a safe state for MTRR changes.
* RETURNS: 0 if no changes made, else a mask indicating what was changed.
*/
-static unsigned long set_mtrr_state(void)
+static unsigned long set_mtrr_state(struct mtrr_work_state *state)
{
unsigned long change_mask = 0;
unsigned int i;
@@ -933,10 +934,10 @@ static unsigned long set_mtrr_state(void)
* Set_mtrr_restore restores the old value of MTRRdefType,
* so to set it we fiddle with the saved value:
*/
- if ((deftype_lo & MTRR_DEF_TYPE_TYPE) != mtrr_state.def_type ||
- ((deftype_lo & MTRR_DEF_TYPE_ENABLE) >> MTRR_STATE_SHIFT) != mtrr_state.enabled) {
+ if ((state->lo & MTRR_DEF_TYPE_TYPE) != mtrr_state.def_type ||
+ ((state->lo & MTRR_DEF_TYPE_ENABLE) >> MTRR_STATE_SHIFT) != mtrr_state.enabled) {
- deftype_lo = (deftype_lo & MTRR_DEF_TYPE_DISABLE) |
+ state->lo = (state->lo & MTRR_DEF_TYPE_DISABLE) |
mtrr_state.def_type |
(mtrr_state.enabled << MTRR_STATE_SHIFT);
change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
@@ -945,19 +946,19 @@ static unsigned long set_mtrr_state(void)
return change_mask;
}
-static void mtrr_disable(void)
+static void mtrr_disable(struct mtrr_work_state *state)
{
/* Save MTRR state */
- rdmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);
+ rdmsr(MSR_MTRRdefType, state->lo, state->hi);
/* Disable MTRRs, and set the default type to uncached */
- mtrr_wrmsr(MSR_MTRRdefType, deftype_lo & MTRR_DEF_TYPE_DISABLE, deftype_hi);
+ mtrr_wrmsr(MSR_MTRRdefType, state->lo & MTRR_DEF_TYPE_DISABLE, state->hi);
}
-static void mtrr_enable(void)
+static void mtrr_enable(struct mtrr_work_state *state)
{
/* Intel (P6) standard MTRRs */
- mtrr_wrmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);
+ mtrr_wrmsr(MSR_MTRRdefType, state->lo, state->hi);
}
/*
@@ -969,7 +970,6 @@ static void mtrr_enable(void)
* The caller must ensure that local interrupts are disabled and
* are reenabled after cache_enable() has been called.
*/
-static unsigned long saved_cr4;
static DEFINE_RAW_SPINLOCK(cache_disable_lock);
/*
@@ -983,7 +983,8 @@ static void maybe_flush_caches(void)
wbinvd();
}
-static void cache_disable(void) __acquires(cache_disable_lock)
+static void cache_disable(struct mtrr_work_state *state)
+ __acquires(cache_disable_lock)
{
unsigned long cr0;
@@ -1002,8 +1003,8 @@ static void cache_disable(void) __acquires(cache_disable_lock)
/* Save value of CR4 and clear Page Global Enable (bit 7) */
if (cpu_feature_enabled(X86_FEATURE_PGE)) {
- saved_cr4 = __read_cr4();
- __write_cr4(saved_cr4 & ~X86_CR4_PGE);
+ state->cr4 = __read_cr4();
+ __write_cr4(state->cr4 & ~X86_CR4_PGE);
}
/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
@@ -1011,26 +1012,27 @@ static void cache_disable(void) __acquires(cache_disable_lock)
flush_tlb_local();
if (cpu_feature_enabled(X86_FEATURE_MTRR))
- mtrr_disable();
+ mtrr_disable(state);
maybe_flush_caches();
}
-static void cache_enable(void) __releases(cache_disable_lock)
+static void cache_enable(struct mtrr_work_state *state)
+ __releases(cache_disable_lock)
{
/* Flush TLBs (no need to flush caches - they are disabled) */
count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
flush_tlb_local();
if (cpu_feature_enabled(X86_FEATURE_MTRR))
- mtrr_enable();
+ mtrr_enable(state);
/* Enable caches */
write_cr0(read_cr0() & ~X86_CR0_CD);
/* Restore value of CR4 */
if (cpu_feature_enabled(X86_FEATURE_PGE))
- __write_cr4(saved_cr4);
+ __write_cr4(state->cr4);
raw_spin_unlock(&cache_disable_lock);
}
@@ -1038,11 +1040,12 @@ static void cache_enable(void) __releases(cache_disable_lock)
void mtrr_generic_set_state(void)
{
unsigned long mask, count;
+ struct mtrr_work_state state;
- cache_disable();
+ cache_disable(&state);
/* Actually set the state */
- mask = set_mtrr_state();
+ mask = set_mtrr_state(&state);
/* Use the atomic bitops to update the global mask */
for (count = 0; count < sizeof(mask) * 8; ++count) {
@@ -1051,7 +1054,7 @@ void mtrr_generic_set_state(void)
mask >>= 1;
}
- cache_enable();
+ cache_enable(&state);
}
/**
@@ -1069,11 +1072,12 @@ static void generic_set_mtrr(unsigned int reg, unsigned long base,
{
unsigned long flags;
struct mtrr_var_range *vr;
+ struct mtrr_work_state state;
vr = &mtrr_state.var_ranges[reg];
local_irq_save(flags);
- cache_disable();
+ cache_disable(&state);
if (size == 0) {
/*
@@ -1092,7 +1096,7 @@ static void generic_set_mtrr(unsigned int reg, unsigned long base,
mtrr_wrmsr(MTRRphysMask_MSR(reg), vr->mask_lo, vr->mask_hi);
}
- cache_enable();
+ cache_enable(&state);
local_irq_restore(flags);
}
--
2.52.0