Message-ID: <Z8EWEe-zdPzKlOD8@Mac.home>
Date: Thu, 27 Feb 2025 17:49:05 -0800
From: Boqun Feng <boqun.feng@...il.com>
To: Lyude Paul <lyude@...hat.com>
Cc: rust-for-linux@...r.kernel.org, Thomas Gleixner <tglx@...utronix.de>,
Catalin Marinas <catalin.marinas@....com>,
Will Deacon <will@...nel.org>, Heiko Carstens <hca@...ux.ibm.com>,
Vasily Gorbik <gor@...ux.ibm.com>,
Alexander Gordeev <agordeev@...ux.ibm.com>,
Christian Borntraeger <borntraeger@...ux.ibm.com>,
Sven Schnelle <svens@...ux.ibm.com>, Ingo Molnar <mingo@...hat.com>,
Borislav Petkov <bp@...en8.de>,
Dave Hansen <dave.hansen@...ux.intel.com>,
"maintainer:X86 ARCHITECTURE (32-BIT AND 64-BIT)" <x86@...nel.org>,
"H. Peter Anvin" <hpa@...or.com>, Arnd Bergmann <arnd@...db.de>,
Juergen Christ <jchrist@...ux.ibm.com>,
Ilya Leoshkevich <iii@...ux.ibm.com>,
"moderated list:ARM64 PORT (AARCH64 ARCHITECTURE)" <linux-arm-kernel@...ts.infradead.org>,
open list <linux-kernel@...r.kernel.org>,
"open list:S390 ARCHITECTURE" <linux-s390@...r.kernel.org>,
"open list:GENERIC INCLUDE/ASM HEADER FILES" <linux-arch@...r.kernel.org>
Subject: Re: [PATCH v9 2/9] preempt: Introduce __preempt_count_{sub,add}_return()
On Thu, Feb 27, 2025 at 05:10:13PM -0500, Lyude Paul wrote:
> From: Boqun Feng <boqun.feng@...il.com>
>
Lyude, please add something like the following as the changelog in a
future version.

In order to use preempt_count() to track the interrupt disable nesting
level, __preempt_count_{add,sub}_return() are introduced. As their
names suggest, these primitives return the new value of preempt_count()
after changing it. The following example shows their usage in
local_interrupt_disable():

    // Increase the HARDIRQ_DISABLE bit.
    new_count = __preempt_count_add_return(HARDIRQ_DISABLE_OFFSET);

    // If it's the first-time increment, then disable interrupts at
    // the hardware level.
    if ((new_count & HARDIRQ_DISABLE_MASK) == HARDIRQ_DISABLE_OFFSET) {
        local_irq_save(flags);
        raw_cpu_write(local_interrupt_disable_state.flags, flags);
    }
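
The enable side would be symmetric, using __preempt_count_sub_return()
(a sketch only; local_interrupt_disable_state and the mask/offset names
are taken from the example above, not from this patch):

    // Decrease the HARDIRQ_DISABLE bit.
    new_count = __preempt_count_sub_return(HARDIRQ_DISABLE_OFFSET);

    // If this was the outermost nesting level, restore the interrupt
    // state saved by the first-time disable.
    if ((new_count & HARDIRQ_DISABLE_MASK) == 0) {
        flags = raw_cpu_read(local_interrupt_disable_state.flags);
        local_irq_restore(flags);
    }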

Having these primitives avoids a separate read of preempt_count() after
changing it on certain architectures.
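
Without the *_return() variants, the same sequence would need a
separate load after the update, e.g. (illustrative sketch):

    /* Two accesses: modify the count, then read it back. */
    __preempt_count_add(HARDIRQ_DISABLE_OFFSET);
    new_count = preempt_count();

With __preempt_count_add_return(), an architecture such as x86 can
combine the update and the read of the new value in a single
instruction (see the raw_cpu_add_return_4() implementation below).
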
Regards,
Boqun
> Signed-off-by: Boqun Feng <boqun.feng@...il.com>
> Signed-off-by: Lyude Paul <lyude@...hat.com>
> ---
> arch/arm64/include/asm/preempt.h | 18 ++++++++++++++++++
> arch/s390/include/asm/preempt.h | 19 +++++++++++++++++++
> arch/x86/include/asm/preempt.h | 10 ++++++++++
> include/asm-generic/preempt.h | 14 ++++++++++++++
> 4 files changed, 61 insertions(+)
>
> diff --git a/arch/arm64/include/asm/preempt.h b/arch/arm64/include/asm/preempt.h
> index 0159b625cc7f0..49cb886c8e1dd 100644
> --- a/arch/arm64/include/asm/preempt.h
> +++ b/arch/arm64/include/asm/preempt.h
> @@ -56,6 +56,24 @@ static inline void __preempt_count_sub(int val)
> WRITE_ONCE(current_thread_info()->preempt.count, pc);
> }
>
> +static inline int __preempt_count_add_return(int val)
> +{
> + u32 pc = READ_ONCE(current_thread_info()->preempt.count);
> + pc += val;
> + WRITE_ONCE(current_thread_info()->preempt.count, pc);
> +
> + return pc;
> +}
> +
> +static inline int __preempt_count_sub_return(int val)
> +{
> + u32 pc = READ_ONCE(current_thread_info()->preempt.count);
> + pc -= val;
> + WRITE_ONCE(current_thread_info()->preempt.count, pc);
> +
> + return pc;
> +}
> +
> static inline bool __preempt_count_dec_and_test(void)
> {
> struct thread_info *ti = current_thread_info();
> diff --git a/arch/s390/include/asm/preempt.h b/arch/s390/include/asm/preempt.h
> index 6ccd033acfe52..67a6e265e9fff 100644
> --- a/arch/s390/include/asm/preempt.h
> +++ b/arch/s390/include/asm/preempt.h
> @@ -98,6 +98,25 @@ static __always_inline bool should_resched(int preempt_offset)
> return unlikely(READ_ONCE(get_lowcore()->preempt_count) == preempt_offset);
> }
>
> +static __always_inline int __preempt_count_add_return(int val)
> +{
> + /*
> + * With some obscure config options and CONFIG_PROFILE_ALL_BRANCHES
> + * enabled, gcc 12 fails to handle __builtin_constant_p().
> + */
> + if (!IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES)) {
> + if (__builtin_constant_p(val) && (val >= -128) && (val <= 127)) {
> + return val + __atomic_add_const(val, &get_lowcore()->preempt_count);
> + }
> + }
> + return val + __atomic_add(val, &get_lowcore()->preempt_count);
> +}
> +
> +static __always_inline int __preempt_count_sub_return(int val)
> +{
> + return __preempt_count_add_return(-val);
> +}
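
Side note: the s390 __atomic_add*() helpers return the *old* value of
the counter, hence the "val +" to reconstruct the new value. The shape,
in terms of the generic GCC builtin (an illustration, not the actual
s390 implementation):

    /* fetch-and-add returns the pre-add value, so add val back on. */
    static inline int add_return_sketch(int *p, int val)
    {
        return val + __atomic_fetch_add(p, val, __ATOMIC_RELAXED);
    }
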
> +
> #define init_task_preempt_count(p) do { } while (0)
> /* Deferred to CPU bringup time */
> #define init_idle_preempt_count(p, cpu) do { } while (0)
> diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
> index 919909d8cb77e..405e60f4e1a77 100644
> --- a/arch/x86/include/asm/preempt.h
> +++ b/arch/x86/include/asm/preempt.h
> @@ -84,6 +84,16 @@ static __always_inline void __preempt_count_sub(int val)
> raw_cpu_add_4(pcpu_hot.preempt_count, -val);
> }
>
> +static __always_inline int __preempt_count_add_return(int val)
> +{
> + return raw_cpu_add_return_4(pcpu_hot.preempt_count, val);
> +}
> +
> +static __always_inline int __preempt_count_sub_return(int val)
> +{
> + return raw_cpu_add_return_4(pcpu_hot.preempt_count, -val);
> +}
> +
> /*
> * Because we keep PREEMPT_NEED_RESCHED set when we do _not_ need to reschedule
> * a decrement which hits zero means we have no preempt_count and should
> diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
> index 51f8f3881523a..c8683c046615d 100644
> --- a/include/asm-generic/preempt.h
> +++ b/include/asm-generic/preempt.h
> @@ -59,6 +59,20 @@ static __always_inline void __preempt_count_sub(int val)
> *preempt_count_ptr() -= val;
> }
>
> +static __always_inline int __preempt_count_add_return(int val)
> +{
> + *preempt_count_ptr() += val;
> +
> + return *preempt_count_ptr();
> +}
> +
> +static __always_inline int __preempt_count_sub_return(int val)
> +{
> + *preempt_count_ptr() -= val;
> +
> + return *preempt_count_ptr();
> +}
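
Nit: the generic helpers dereference preempt_count_ptr() twice; a local
variable would mirror the arm64 style. Something like (a sketch; the
plain non-atomic RMW is fine here, as in the existing helpers, since
the preempt count is only modified by the local CPU):

    static __always_inline int __preempt_count_add_return(int val)
    {
        int pc = *preempt_count_ptr() + val;

        *preempt_count_ptr() = pc;

        return pc;
    }
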
> +
> static __always_inline bool __preempt_count_dec_and_test(void)
> {
> /*
> --
> 2.48.1
>