Message-Id: <20220908084914.21703-9-jgross@suse.com>
Date: Thu, 8 Sep 2022 10:49:12 +0200
From: Juergen Gross <jgross@...e.com>
To: xen-devel@...ts.xenproject.org, x86@...nel.org,
linux-kernel@...r.kernel.org
Cc: Juergen Gross <jgross@...e.com>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
Dave Hansen <dave.hansen@...ux.intel.com>,
"H. Peter Anvin" <hpa@...or.com>
Subject: [PATCH v3 08/10] x86/mtrr: let cache_aps_delayed_init replace mtrr_aps_delayed_init
In order to prepare for decoupling MTRR and PAT, replace the MTRR-specific
mtrr_aps_delayed_init flag with a more generic cache_aps_delayed_init one.
Signed-off-by: Juergen Gross <jgross@...e.com>
---
V2:
- new patch
---
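Not part of the patch, just a condensed sketch of the resulting flow, stitched
together from the hunks below; cache_generic, cache_cpu_init() and set_mtrr()
are stubbed stand-ins for the real kernel symbols:

  #include <stdbool.h>

  static unsigned int cache_generic = 1;        /* stand-in for the real flag */
  static void cache_cpu_init(void) { }          /* stub */
  static void set_mtrr(unsigned int reg, unsigned long base,
                       unsigned long size, unsigned char type) { }  /* stub */

  bool cache_aps_delayed_init;                  /* now defined in cacheinfo.c */

  /* BP, before onlining the APs (cf. native_smp_prepare_cpus()): */
  static void prepare_cpus_flow(void)
  {
          cache_aps_delayed_init = true;        /* defer per-AP cache setup */
  }

  /* Each AP during bring-up (cf. mtrr_ap_init()): */
  static void ap_init_flow(void)
  {
          if (!cache_generic || cache_aps_delayed_init)
                  return;                       /* init happens later, centrally */
          cache_cpu_init();
  }

  /* BP, once all APs are online (cf. mtrr_aps_init()): */
  static void aps_init_flow(void)
  {
          if (!cache_aps_delayed_init)
                  return;
          set_mtrr(~0U, 0, 0, 0);               /* one rendezvous for all CPUs */
          cache_aps_delayed_init = false;
  }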
 arch/x86/include/asm/cacheinfo.h |  2 ++
 arch/x86/include/asm/mtrr.h      |  2 --
 arch/x86/kernel/cpu/cacheinfo.c  |  2 ++
 arch/x86/kernel/cpu/mtrr/mtrr.c  | 17 ++++-------------
 arch/x86/kernel/smpboot.c        |  5 +++--
 5 files changed, 11 insertions(+), 17 deletions(-)
diff --git a/arch/x86/include/asm/cacheinfo.h b/arch/x86/include/asm/cacheinfo.h
index 563d9cb5fcf5..e80ed3c523c8 100644
--- a/arch/x86/include/asm/cacheinfo.h
+++ b/arch/x86/include/asm/cacheinfo.h
@@ -7,6 +7,8 @@ extern unsigned int cache_generic;
#define CACHE_GENERIC_MTRR 0x01
#define CACHE_GENERIC_PAT 0x02
+extern bool cache_aps_delayed_init;
+
void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu);
void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu);
diff --git a/arch/x86/include/asm/mtrr.h b/arch/x86/include/asm/mtrr.h
index 986249a2b9b6..5d31219c8529 100644
--- a/arch/x86/include/asm/mtrr.h
+++ b/arch/x86/include/asm/mtrr.h
@@ -43,7 +43,6 @@ extern int mtrr_del(int reg, unsigned long base, unsigned long size);
extern int mtrr_del_page(int reg, unsigned long base, unsigned long size);
extern void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi);
extern void mtrr_ap_init(void);
-extern void set_mtrr_aps_delayed_init(void);
extern void mtrr_aps_init(void);
extern void mtrr_bp_restore(void);
extern int mtrr_trim_uncached_memory(unsigned long end_pfn);
@@ -87,7 +86,6 @@ static inline void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi)
{
}
#define mtrr_ap_init() do {} while (0)
-#define set_mtrr_aps_delayed_init() do {} while (0)
#define mtrr_aps_init() do {} while (0)
#define mtrr_bp_restore() do {} while (0)
#define mtrr_disable() do {} while (0)
diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c
index 36378604ec61..c6e7c93e45e8 100644
--- a/arch/x86/kernel/cpu/cacheinfo.c
+++ b/arch/x86/kernel/cpu/cacheinfo.c
@@ -1139,3 +1139,5 @@ void cache_cpu_init(void)
cache_enable();
local_irq_restore(flags);
}
+
+bool cache_aps_delayed_init;
diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.c b/arch/x86/kernel/cpu/mtrr/mtrr.c
index 956838bb4481..a47d46035240 100644
--- a/arch/x86/kernel/cpu/mtrr/mtrr.c
+++ b/arch/x86/kernel/cpu/mtrr/mtrr.c
@@ -65,7 +65,6 @@ unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
static DEFINE_MUTEX(mtrr_mutex);
u64 size_or_mask, size_and_mask;
-static bool mtrr_aps_delayed_init;
static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __ro_after_init;
@@ -172,7 +171,7 @@ static int mtrr_rendezvous_handler(void *info)
if (data->smp_reg != ~0U) {
mtrr_if->set(data->smp_reg, data->smp_base,
data->smp_size, data->smp_type);
- } else if (mtrr_aps_delayed_init || !cpu_online(smp_processor_id())) {
+ } else if (cache_aps_delayed_init || !cpu_online(smp_processor_id())) {
cache_cpu_init();
}
return 0;
@@ -784,7 +783,7 @@ void __init mtrr_bp_init(void)
void mtrr_ap_init(void)
{
- if (!cache_generic || mtrr_aps_delayed_init)
+ if (!cache_generic || cache_aps_delayed_init)
return;
/*
@@ -818,14 +817,6 @@ void mtrr_save_state(void)
smp_call_function_single(first_cpu, mtrr_save_fixed_ranges, NULL, 1);
}
-void set_mtrr_aps_delayed_init(void)
-{
- if (!cache_generic)
- return;
-
- mtrr_aps_delayed_init = true;
-}
-
/*
* Delayed MTRR initialization for all AP's
*/
@@ -839,11 +830,11 @@ void mtrr_aps_init(void)
* by doing set_mtrr_aps_delayed_init(), prior to this point. If not,
* then we are done.
*/
- if (!mtrr_aps_delayed_init)
+ if (!cache_aps_delayed_init)
return;
set_mtrr(~0U, 0, 0, 0);
- mtrr_aps_delayed_init = false;
+ cache_aps_delayed_init = false;
}
void mtrr_bp_restore(void)
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index f24227bc3220..ef7bce21cbe8 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -58,6 +58,7 @@
#include <linux/overflow.h>
#include <asm/acpi.h>
+#include <asm/cacheinfo.h>
#include <asm/desc.h>
#include <asm/nmi.h>
#include <asm/irq.h>
@@ -1428,7 +1429,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
uv_system_init();
- set_mtrr_aps_delayed_init();
+ cache_aps_delayed_init = true;
smp_quirk_init_udelay();
@@ -1439,7 +1440,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
void arch_thaw_secondary_cpus_begin(void)
{
- set_mtrr_aps_delayed_init();
+ cache_aps_delayed_init = true;
}
void arch_thaw_secondary_cpus_end(void)
--
2.35.3