Message-ID: <ZUus3jr6gXV6ZG1T@FVFF77S0Q05N>
Date: Wed, 8 Nov 2023 15:44:30 +0000
From: Mark Rutland <mark.rutland@....com>
To: Ankur Arora <ankur.a.arora@...cle.com>
Cc: linux-kernel@...r.kernel.org, tglx@...utronix.de,
peterz@...radead.org, torvalds@...ux-foundation.org,
paulmck@...nel.org, linux-mm@...ck.org, x86@...nel.org,
akpm@...ux-foundation.org, luto@...nel.org, bp@...en8.de,
dave.hansen@...ux.intel.com, hpa@...or.com, mingo@...hat.com,
juri.lelli@...hat.com, vincent.guittot@...aro.org,
willy@...radead.org, mgorman@...e.de, jon.grimm@....com,
bharata@....com, raghavendra.kt@....com,
boris.ostrovsky@...cle.com, konrad.wilk@...cle.com,
jgross@...e.com, andrew.cooper3@...rix.com, mingo@...nel.org,
bristot@...nel.org, mathieu.desnoyers@...icios.com,
geert@...ux-m68k.org, glaubitz@...sik.fu-berlin.de,
anton.ivanov@...bridgegreys.com, mattst88@...il.com,
krypton@...ich-teichert.org, rostedt@...dmis.org,
David.Laight@...lab.com, richard@....at, mjguzik@...il.com
Subject: Re: [RFC PATCH 08/86] Revert "arm64: Support PREEMPT_DYNAMIC"
On Tue, Nov 07, 2023 at 01:56:54PM -0800, Ankur Arora wrote:
> This reverts commit 1b2d3451ee50a0968cb9933f726e50b368ba5073.
As the author of the commit being reverted, I'd appreciate being Cc'd on
subsequent versions of this patch (and ideally, for the series as a whole).
Mark.
>
> Signed-off-by: Ankur Arora <ankur.a.arora@...cle.com>
> ---
> arch/arm64/Kconfig | 1 -
> arch/arm64/include/asm/preempt.h | 19 ++-----------------
> arch/arm64/kernel/entry-common.c | 10 +---------
> 3 files changed, 3 insertions(+), 27 deletions(-)
>
> diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
> index 78f20e632712..856d7be2ee45 100644
> --- a/arch/arm64/Kconfig
> +++ b/arch/arm64/Kconfig
> @@ -221,7 +221,6 @@ config ARM64
> select HAVE_PERF_EVENTS_NMI if ARM64_PSEUDO_NMI
> select HAVE_PERF_REGS
> select HAVE_PERF_USER_STACK_DUMP
> - select HAVE_PREEMPT_DYNAMIC_KEY
> select HAVE_REGS_AND_STACK_ACCESS_API
> select HAVE_POSIX_CPU_TIMERS_TASK_WORK
> select HAVE_FUNCTION_ARG_ACCESS_API
> diff --git a/arch/arm64/include/asm/preempt.h b/arch/arm64/include/asm/preempt.h
> index 0159b625cc7f..e83f0982b99c 100644
> --- a/arch/arm64/include/asm/preempt.h
> +++ b/arch/arm64/include/asm/preempt.h
> @@ -2,7 +2,6 @@
> #ifndef __ASM_PREEMPT_H
> #define __ASM_PREEMPT_H
>
> -#include <linux/jump_label.h>
> #include <linux/thread_info.h>
>
> #define PREEMPT_NEED_RESCHED BIT(32)
> @@ -81,24 +80,10 @@ static inline bool should_resched(int preempt_offset)
> }
>
> #ifdef CONFIG_PREEMPTION
> -
> void preempt_schedule(void);
> +#define __preempt_schedule() preempt_schedule()
> void preempt_schedule_notrace(void);
> -
> -#ifdef CONFIG_PREEMPT_DYNAMIC
> -
> -DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
> -void dynamic_preempt_schedule(void);
> -#define __preempt_schedule() dynamic_preempt_schedule()
> -void dynamic_preempt_schedule_notrace(void);
> -#define __preempt_schedule_notrace() dynamic_preempt_schedule_notrace()
> -
> -#else /* CONFIG_PREEMPT_DYNAMIC */
> -
> -#define __preempt_schedule() preempt_schedule()
> -#define __preempt_schedule_notrace() preempt_schedule_notrace()
> -
> -#endif /* CONFIG_PREEMPT_DYNAMIC */
> +#define __preempt_schedule_notrace() preempt_schedule_notrace()
> #endif /* CONFIG_PREEMPTION */
>
> #endif /* __ASM_PREEMPT_H */
> diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
> index 0fc94207e69a..5d9c9951562b 100644
> --- a/arch/arm64/kernel/entry-common.c
> +++ b/arch/arm64/kernel/entry-common.c
> @@ -225,17 +225,9 @@ static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs)
> lockdep_hardirqs_on(CALLER_ADDR0);
> }
>
> -#ifdef CONFIG_PREEMPT_DYNAMIC
> -DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
> -#define need_irq_preemption() \
> - (static_branch_unlikely(&sk_dynamic_irqentry_exit_cond_resched))
> -#else
> -#define need_irq_preemption() (IS_ENABLED(CONFIG_PREEMPTION))
> -#endif
> -
> static void __sched arm64_preempt_schedule_irq(void)
> {
> - if (!need_irq_preemption())
> + if (!IS_ENABLED(CONFIG_PREEMPTION))
> return;
>
> /*
> --
> 2.31.1
>