Message-ID: <fa56c7d9-cd93-47a5-bc48-0962b364d8be@suse.com>
Date: Wed, 25 Jun 2025 14:04:24 +0300
From: Nikolay Borisov <nik.borisov@...e.com>
To: Yazen Ghannam <yazen.ghannam@....com>, x86@...nel.org,
Tony Luck <tony.luck@...el.com>, "Rafael J. Wysocki" <rafael@...nel.org>,
Len Brown <lenb@...nel.org>
Cc: linux-kernel@...r.kernel.org, linux-edac@...r.kernel.org,
Smita.KoralahalliChannabasappa@....com, Qiuxu Zhuo <qiuxu.zhuo@...el.com>,
linux-acpi@...r.kernel.org
Subject: Re: [PATCH v4 11/22] x86/mce: Define BSP-only init
On 6/24/25 17:16, Yazen Ghannam wrote:
> Currently, MCA initialization is executed identically on each CPU as
> they are brought online. However, a number of MCA initialization tasks
> only need to be done once.
>
> Define a function to collect all 'global' init tasks and call this from
> the BSP only. Start with CPU features.
>
> Reviewed-by: Qiuxu Zhuo <qiuxu.zhuo@...el.com>
> Tested-by: Tony Luck <tony.luck@...el.com>
> Reviewed-by: Tony Luck <tony.luck@...el.com>
> Signed-off-by: Yazen Ghannam <yazen.ghannam@....com>
> ---
>
> Notes:
> Link:
> https://lore.kernel.org/r/20250415-wip-mca-updates-v3-7-8ffd9eb4aa56@amd.com
>
> v3->v4:
> * Change cpu_mca_init() to mca_bsp_init().
> * Drop code comment.
>
> v2->v3:
> * Add tags from Qiuxu and Tony.
>
> v1->v2:
> * New in v2.
>
> arch/x86/include/asm/mce.h | 2 ++
> arch/x86/kernel/cpu/common.c | 1 +
> arch/x86/kernel/cpu/mce/amd.c | 3 ---
> arch/x86/kernel/cpu/mce/core.c | 28 +++++++++++++++++++++-------
> 4 files changed, 24 insertions(+), 10 deletions(-)
>
> diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
> index 3224f3862dc8..31e3cb550fb3 100644
> --- a/arch/x86/include/asm/mce.h
> +++ b/arch/x86/include/asm/mce.h
> @@ -241,12 +241,14 @@ struct cper_ia_proc_ctx;
>
> #ifdef CONFIG_X86_MCE
> int mcheck_init(void);
> +void mca_bsp_init(struct cpuinfo_x86 *c);
> void mcheck_cpu_init(struct cpuinfo_x86 *c);
> void mcheck_cpu_clear(struct cpuinfo_x86 *c);
> int apei_smca_report_x86_error(struct cper_ia_proc_ctx *ctx_info,
> u64 lapic_id);
> #else
> static inline int mcheck_init(void) { return 0; }
> +static inline void mca_bsp_init(struct cpuinfo_x86 *c) {}
> static inline void mcheck_cpu_init(struct cpuinfo_x86 *c) {}
> static inline void mcheck_cpu_clear(struct cpuinfo_x86 *c) {}
> static inline int apei_smca_report_x86_error(struct cper_ia_proc_ctx *ctx_info,
> diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
> index 8feb8fd2957a..8a00faa1042a 100644
> --- a/arch/x86/kernel/cpu/common.c
> +++ b/arch/x86/kernel/cpu/common.c
> @@ -1771,6 +1771,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
> setup_clear_cpu_cap(X86_FEATURE_LA57);
>
> detect_nopl();
> + mca_bsp_init(c);
> }
>
> void __init init_cpu_devs(void)
> diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c
> index 292109e46a94..25a24d0b9cf9 100644
> --- a/arch/x86/kernel/cpu/mce/amd.c
> +++ b/arch/x86/kernel/cpu/mce/amd.c
> @@ -655,9 +655,6 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
> u32 low = 0, high = 0, address = 0;
> int offset = -1;
>
> - mce_flags.overflow_recov = cpu_feature_enabled(X86_FEATURE_OVERFLOW_RECOV);
> - mce_flags.succor = cpu_feature_enabled(X86_FEATURE_SUCCOR);
> - mce_flags.smca = cpu_feature_enabled(X86_FEATURE_SMCA);
> mce_flags.amd_threshold = 1;
>
> for (bank = 0; bank < this_cpu_read(mce_num_banks); ++bank) {
> diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
> index ebe3e98f7606..c55462e6af1c 100644
> --- a/arch/x86/kernel/cpu/mce/core.c
> +++ b/arch/x86/kernel/cpu/mce/core.c
> @@ -1837,13 +1837,6 @@ static void __mcheck_cpu_cap_init(void)
> this_cpu_write(mce_num_banks, b);
>
> __mcheck_cpu_mce_banks_init();
> -
> - /* Use accurate RIP reporting if available. */
> - if ((cap & MCG_EXT_P) && MCG_EXT_CNT(cap) >= 9)
> - mca_cfg.rip_msr = MSR_IA32_MCG_EIP;
> -
> - if (cap & MCG_SER_P)
> - mca_cfg.ser = 1;
> }
>
> static void __mcheck_cpu_init_generic(void)
> @@ -2243,6 +2236,27 @@ DEFINE_IDTENTRY_RAW(exc_machine_check)
> }
> #endif
>
> +void mca_bsp_init(struct cpuinfo_x86 *c)
> +{
> + u64 cap;
> +
> + if (!mce_available(c))
> + return;
> +
> + mce_flags.overflow_recov = cpu_feature_enabled(X86_FEATURE_OVERFLOW_RECOV);
> + mce_flags.succor = cpu_feature_enabled(X86_FEATURE_SUCCOR);
> + mce_flags.smca = cpu_feature_enabled(X86_FEATURE_SMCA);
nit: Why use cpu_feature_enabled() vs., say, boot_cpu_has()? None of the
three features is defined in cpufeaturemasks.h, meaning that
cpu_feature_enabled() is essentially static_cpu_has(), and this is not a
fast path.

It's not wrong per se, but I think the cpu_feature_enabled() API is
somewhat of a trainwreck, i.e. we ought to have a version that uses
boot_cpu_has() for "ordinary uses" and probably a
cpu_feature_enabled_fast() for fast paths.
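For illustration, roughly the split I have in mind, leaving aside the
compile-time mask checks that cpu_feature_enabled() also does (both
names below are made up, neither exists in-tree today):

	/* Hot paths: alternatives-patched test, cheap once patching is done. */
	#define cpu_feature_enabled_fast(bit)	static_cpu_has(bit)

	/* Ordinary uses: plain test of the boot CPU's capability bitmap. */
	#define cpu_feature_enabled(bit)	boot_cpu_has(bit)

One-time init code like mca_bsp_init() would then simply do:

	mce_flags.smca = boot_cpu_has(X86_FEATURE_SMCA);

and the static_cpu_has() patching machinery would be kept for code that
actually runs hot.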
> +
> + rdmsrq(MSR_IA32_MCG_CAP, cap);
> +
> + /* Use accurate RIP reporting if available. */
> + if ((cap & MCG_EXT_P) && MCG_EXT_CNT(cap) >= 9)
> + mca_cfg.rip_msr = MSR_IA32_MCG_EIP;
> +
> + if (cap & MCG_SER_P)
> + mca_cfg.ser = 1;
> +}
> +
> /*
> * Called for each booted CPU to set up machine checks.
> * Must be called with preempt off:
>