Message-ID: <7c342b54-553c-013e-bed0-bc723c20464a@huawei.com>
Date: Thu, 14 Aug 2025 17:31:30 +0800
From: Jinjie Ruan <ruanjinjie@...wei.com>
To: Mark Rutland <mark.rutland@....com>
CC: <catalin.marinas@....com>, <will@...nel.org>, <oleg@...hat.com>,
<sstabellini@...nel.org>, <puranjay@...nel.org>, <broonie@...nel.org>,
<mbenes@...e.cz>, <ryan.roberts@....com>, <akpm@...ux-foundation.org>,
<chenl311@...natelecom.cn>, <ada.coupriediaz@....com>,
<anshuman.khandual@....com>, <kristina.martsenko@....com>,
<liaochang1@...wei.com>, <ardb@...nel.org>, <leitao@...ian.org>,
<linux-arm-kernel@...ts.infradead.org>, <linux-kernel@...r.kernel.org>,
<xen-devel@...ts.xenproject.org>
Subject: Re: [PATCH -next v7 5/7] arm64: entry: Refactor
preempt_schedule_irq() check code
On 2025/8/12 19:13, Mark Rutland wrote:
> On Tue, Jul 29, 2025 at 09:54:54AM +0800, Jinjie Ruan wrote:
>> ARM64 requires an additional check to decide whether to reschedule on
>> return from an interrupt. So add arch_irqentry_exit_need_resched() as a
>> default NOP implementation and hook it into the need_resched() condition
>> in raw_irqentry_exit_cond_resched(). This allows ARM64 to implement an
>> architecture-specific version for switching over to the generic entry
>> code.
>>
>> To align the structure of the code with irqentry_exit_cond_resched()
>> from the generic entry code, hoist the need_irq_preemption() and
>> IS_ENABLED() checks earlier, and define separate preemption check
>> functions depending on whether dynamic preemption is enabled.
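
Side note: once arm64 actually switches to the generic entry code, the
arch override would plug in via the usual #define convention of the
generic entry headers. A rough sketch (hypothetical, not part of this
patch; placement and naming are assumptions):

    /* e.g. in an arm64 asm/entry-common.h */
    static inline bool arch_irqentry_exit_need_resched(void)
    {
            /* Reuse the DAIF/GIC priority masking check added here. */
            return arm64_preempt_schedule_irq();
    }
    #define arch_irqentry_exit_need_resched arch_irqentry_exit_need_resched
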
>>
>> Suggested-by: Mark Rutland <mark.rutland@....com>
>> Suggested-by: Kevin Brodsky <kevin.brodsky@....com>
>> Suggested-by: Thomas Gleixner <tglx@...utronix.de>
>> Signed-off-by: Jinjie Ruan <ruanjinjie@...wei.com>
>> ---
>> arch/arm64/include/asm/preempt.h | 4 ++++
>> arch/arm64/kernel/entry-common.c | 35 ++++++++++++++++++--------------
>> kernel/entry/common.c | 16 ++++++++++++++-
>> 3 files changed, 39 insertions(+), 16 deletions(-)
>
> Can you please split the change to kernel/entry/common.c into a separate
> patch? That doesn't depend on the arm64-specific changes, and it'll make
> it easier to handle any conflicts when merging this.
Sure, I'll split the kernel/entry/common.c change into a separate patch.
>
> Mark.
>
>>
>> diff --git a/arch/arm64/include/asm/preempt.h b/arch/arm64/include/asm/preempt.h
>> index 0159b625cc7f..0f0ba250efe8 100644
>> --- a/arch/arm64/include/asm/preempt.h
>> +++ b/arch/arm64/include/asm/preempt.h
>> @@ -85,6 +85,7 @@ static inline bool should_resched(int preempt_offset)
>> void preempt_schedule(void);
>> void preempt_schedule_notrace(void);
>>
>> +void raw_irqentry_exit_cond_resched(void);
>> #ifdef CONFIG_PREEMPT_DYNAMIC
>>
>> DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
>> @@ -92,11 +93,14 @@ void dynamic_preempt_schedule(void);
>> #define __preempt_schedule() dynamic_preempt_schedule()
>> void dynamic_preempt_schedule_notrace(void);
>> #define __preempt_schedule_notrace() dynamic_preempt_schedule_notrace()
>> +void dynamic_irqentry_exit_cond_resched(void);
>> +#define irqentry_exit_cond_resched() dynamic_irqentry_exit_cond_resched()
>>
>> #else /* CONFIG_PREEMPT_DYNAMIC */
>>
>> #define __preempt_schedule() preempt_schedule()
>> #define __preempt_schedule_notrace() preempt_schedule_notrace()
>> +#define irqentry_exit_cond_resched() raw_irqentry_exit_cond_resched()
>>
>> #endif /* CONFIG_PREEMPT_DYNAMIC */
>> #endif /* CONFIG_PREEMPTION */
>> diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
>> index 7c2299c1ba79..4f92664fd46c 100644
>> --- a/arch/arm64/kernel/entry-common.c
>> +++ b/arch/arm64/kernel/entry-common.c
>> @@ -285,19 +285,8 @@ static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs,
>> lockdep_hardirqs_on(CALLER_ADDR0);
>> }
>>
>> -#ifdef CONFIG_PREEMPT_DYNAMIC
>> -DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
>> -#define need_irq_preemption() \
>> - (static_branch_unlikely(&sk_dynamic_irqentry_exit_cond_resched))
>> -#else
>> -#define need_irq_preemption() (IS_ENABLED(CONFIG_PREEMPTION))
>> -#endif
>> -
>> static inline bool arm64_preempt_schedule_irq(void)
>> {
>> - if (!need_irq_preemption())
>> - return false;
>> -
>> /*
>> * DAIF.DA are cleared at the start of IRQ/FIQ handling, and when GIC
>> * priority masking is used the GIC irqchip driver will clear DAIF.IF
>> @@ -672,6 +661,24 @@ static __always_inline void __el1_pnmi(struct pt_regs *regs,
>> arm64_exit_nmi(regs, state);
>> }
>>
>> +void raw_irqentry_exit_cond_resched(void)
>> +{
>> + if (!preempt_count()) {
>> + if (need_resched() && arm64_preempt_schedule_irq())
>> + preempt_schedule_irq();
>> + }
>> +}
>> +
>> +#ifdef CONFIG_PREEMPT_DYNAMIC
>> +DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
>> +void dynamic_irqentry_exit_cond_resched(void)
>> +{
>> + if (!static_branch_unlikely(&sk_dynamic_irqentry_exit_cond_resched))
>> + return;
>> + raw_irqentry_exit_cond_resched();
>> +}
>> +#endif
>> +
>> static __always_inline void __el1_irq(struct pt_regs *regs,
>> void (*handler)(struct pt_regs *))
>> {
>> @@ -681,10 +688,8 @@ static __always_inline void __el1_irq(struct pt_regs *regs,
>> do_interrupt_handler(regs, handler);
>> irq_exit_rcu();
>>
>> - if (!preempt_count() && need_resched()) {
>> - if (arm64_preempt_schedule_irq())
>> - preempt_schedule_irq();
>> - }
>> + if (IS_ENABLED(CONFIG_PREEMPTION))
>> + irqentry_exit_cond_resched();
>>
>> exit_to_kernel_mode(regs, state);
>> }
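
For reference, the resulting call flow on arm64 after this patch is
(reconstructed from the hunks above):

    __el1_irq()
      -> irqentry_exit_cond_resched()   /* only if CONFIG_PREEMPTION */
           = dynamic_irqentry_exit_cond_resched() with CONFIG_PREEMPT_DYNAMIC
             (a no-op unless the static key is enabled)
           = raw_irqentry_exit_cond_resched() otherwise
      -> preempt_schedule_irq() when !preempt_count() && need_resched()
                                && arm64_preempt_schedule_irq()
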
>> diff --git a/kernel/entry/common.c b/kernel/entry/common.c
>> index b82032777310..4aa9656fa1b4 100644
>> --- a/kernel/entry/common.c
>> +++ b/kernel/entry/common.c
>> @@ -142,6 +142,20 @@ noinstr irqentry_state_t irqentry_enter(struct pt_regs *regs)
>> return ret;
>> }
>>
>> +/**
>> + * arch_irqentry_exit_need_resched - Architecture-specific need_resched() check
>> + *
>> + * Invoked from raw_irqentry_exit_cond_resched() to check whether to reschedule.
>> + * Defaults to returning true.
>> + *
>> + * The main purpose is to permit an arch to skip preempting a task from an IRQ.
>> + */
>> +static inline bool arch_irqentry_exit_need_resched(void);
>> +
>> +#ifndef arch_irqentry_exit_need_resched
>> +static inline bool arch_irqentry_exit_need_resched(void) { return true; }
>> +#endif
>> +
>> void raw_irqentry_exit_cond_resched(void)
>> {
>> if (!preempt_count()) {
>> @@ -149,7 +163,7 @@ void raw_irqentry_exit_cond_resched(void)
>> rcu_irq_exit_check_preempt();
>> if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
>> WARN_ON_ONCE(!on_thread_stack());
>> - if (need_resched())
>> + if (need_resched() && arch_irqentry_exit_need_resched())
>> preempt_schedule_irq();
>> }
>> }
>> --
>> 2.34.1
>>
>
>
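
P.S. With CONFIG_PREEMPT_DYNAMIC, the sk_dynamic_irqentry_exit_cond_resched
key is flipped by the scheduler's "preempt=" handling. From memory of
mainline (the static-key flavour, CONFIG_HAVE_PREEMPT_DYNAMIC_KEY), it is
roughly the following -- please double-check before relying on it:

    /* kernel/sched/core.c */
    #define preempt_dynamic_enable(f)  static_key_enable(&sk_dynamic_##f.key)
    #define preempt_dynamic_disable(f) static_key_disable(&sk_dynamic_##f.key)

    /* e.g. booting with "preempt=full" ends up doing: */
    preempt_dynamic_enable(irqentry_exit_cond_resched);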