Message-ID: <27f10680-e0df-7da3-8ef3-22e1b9476728@amd.com>
Date: Tue, 4 Feb 2025 10:59:01 -0600
From: Tom Lendacky <thomas.lendacky@....com>
To: Kevin Loughlin <kevinloughlin@...gle.com>, linux-kernel@...r.kernel.org
Cc: tglx@...utronix.de, mingo@...hat.com, bp@...en8.de,
dave.hansen@...ux.intel.com, x86@...nel.org, hpa@...or.com,
seanjc@...gle.com, pbonzini@...hat.com, kirill.shutemov@...ux.intel.com,
kai.huang@...el.com, ubizjak@...il.com, jgross@...e.com,
kvm@...r.kernel.org, pgonda@...gle.com, sidtelang@...gle.com,
mizhang@...gle.com, rientjes@...gle.com, manalinandan@...gle.com,
szy0127@...u.edu.cn
Subject: Re: [PATCH v6 1/2] x86, lib: Add WBNOINVD helper functions
On 1/31/25 18:02, Kevin Loughlin wrote:
> In line with WBINVD usage, add WBNOINVD helper functions. For the
> wbnoinvd() helper, fall back to WBINVD via alternative() if
> X86_FEATURE_WBNOINVD is not present. alternative() ensures
> compatibility with early boot code if needed.
>
> Signed-off-by: Kevin Loughlin <kevinloughlin@...gle.com>
Reviewed-by: Tom Lendacky <thomas.lendacky@....com>
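
As an aside (not part of this patch, just for anyone trying it out): a
minimal user-space sketch to check the CPUID bit that
X86_FEATURE_WBNOINVD is derived from, assuming leaf 0x80000008 EBX
bit 9 as on current AMD parts, to see whether wbnoinvd() would execute
WBNOINVD or take the WBINVD fallback:

  /* Query CPUID leaf 0x80000008; WBNOINVD support is EBX bit 9. */
  #include <stdio.h>
  #include <cpuid.h>

  int main(void)
  {
          unsigned int eax, ebx, ecx, edx;

          if (!__get_cpuid(0x80000008, &eax, &ebx, &ecx, &edx)) {
                  puts("CPUID leaf 0x80000008 not available");
                  return 1;
          }

          printf("WBNOINVD %s\n",
                 (ebx & (1u << 9)) ? "supported"
                                   : "not supported (WBINVD fallback)");
          return 0;
  }
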
> ---
> arch/x86/include/asm/smp.h | 7 +++++++
> arch/x86/include/asm/special_insns.h | 19 ++++++++++++++++++-
> arch/x86/lib/cache-smp.c | 12 ++++++++++++
> 3 files changed, 37 insertions(+), 1 deletion(-)
>
> diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
> index ca073f40698f..ecf93a243b83 100644
> --- a/arch/x86/include/asm/smp.h
> +++ b/arch/x86/include/asm/smp.h
> @@ -112,6 +112,7 @@ void native_play_dead(void);
> void play_dead_common(void);
> void wbinvd_on_cpu(int cpu);
> int wbinvd_on_all_cpus(void);
> +int wbnoinvd_on_all_cpus(void);
>
> void smp_kick_mwait_play_dead(void);
>
> @@ -160,6 +161,12 @@ static inline int wbinvd_on_all_cpus(void)
> return 0;
> }
>
> +static inline int wbnoinvd_on_all_cpus(void)
> +{
> + wbnoinvd();
> + return 0;
> +}
> +
> static inline struct cpumask *cpu_llc_shared_mask(int cpu)
> {
> return (struct cpumask *)cpumask_of(0);
> diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
> index 03e7c2d49559..86a903742139 100644
> --- a/arch/x86/include/asm/special_insns.h
> +++ b/arch/x86/include/asm/special_insns.h
> @@ -117,7 +117,24 @@ static inline void wrpkru(u32 pkru)
>
> static __always_inline void wbinvd(void)
> {
> - asm volatile("wbinvd": : :"memory");
> + asm volatile("wbinvd" : : : "memory");
> +}
> +
> +/* Instruction encoding provided for binutils backwards compatibility. */
> +#define WBNOINVD ".byte 0xf3,0x0f,0x09"
> +
> +/*
> + * Cheaper version of wbinvd(). Call when caches
> + * need to be written back but not invalidated.
> + */
> +static __always_inline void wbnoinvd(void)
> +{
> + /*
> + * If WBNOINVD is unavailable, fall back to the compatible but
> + * more destructive WBINVD (which still writes the caches back
> + * but also invalidates them).
> + */
> + alternative("wbinvd", WBNOINVD, X86_FEATURE_WBNOINVD);
> }
>
> static inline unsigned long __read_cr4(void)
> diff --git a/arch/x86/lib/cache-smp.c b/arch/x86/lib/cache-smp.c
> index 7af743bd3b13..7ac5cca53031 100644
> --- a/arch/x86/lib/cache-smp.c
> +++ b/arch/x86/lib/cache-smp.c
> @@ -20,3 +20,15 @@ int wbinvd_on_all_cpus(void)
> return 0;
> }
> EXPORT_SYMBOL(wbinvd_on_all_cpus);
> +
> +static void __wbnoinvd(void *dummy)
> +{
> + wbnoinvd();
> +}
> +
> +int wbnoinvd_on_all_cpus(void)
> +{
> + on_each_cpu(__wbnoinvd, NULL, 1);
> + return 0;
> +}
> +EXPORT_SYMBOL(wbnoinvd_on_all_cpus);
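
For reference, a hypothetical caller sketch (not taken from this
series): write dirty cache lines back on all CPUs before memory is
handed back, without paying the invalidate cost of
wbinvd_on_all_cpus(). On CPUs without X86_FEATURE_WBNOINVD the
alternative() inside wbnoinvd() still falls back to a full WBINVD.

  #include <asm/smp.h>

  /* Hypothetical helper: ensure all dirty lines reach memory before
   * the pages are reused; cache contents remain valid afterwards.
   */
  static void writeback_caches_before_reuse(void)
  {
          wbnoinvd_on_all_cpus();
  }
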