Message-Id: <20211025122102.46089-4-frederic@kernel.org>
Date: Mon, 25 Oct 2021 14:21:01 +0200
From: Frederic Weisbecker <frederic@...nel.org>
To: Peter Zijlstra <peterz@...radead.org>,
	Ard Biesheuvel <ardb@...nel.org>
Cc: LKML <linux-kernel@...r.kernel.org>,
	Frederic Weisbecker <frederic@...nel.org>,
	James Morse <james.morse@....com>,
	David Laight <David.Laight@...LAB.COM>,
	Quentin Perret <qperret@...gle.com>,
	Catalin Marinas <catalin.marinas@....com>,
	Will Deacon <will@...nel.org>,
	Mark Rutland <mark.rutland@....com>
Subject: [PATCH 3/4] arm64: Implement IRQ exit preemption static call for dynamic preemption
arm64 doesn't support generic entry yet, so the architecture's own IRQ
exit preemption path needs to be exposed through the relevant static
call.
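
For context, this is roughly how the scheduler core is expected to
retarget the exposed call when the "preempt=" boot option selects a
preemption model (an illustrative sketch modeled on the dynamic
preemption switch in kernel/sched/core.c; the function name below is
made up, and only the static_call_update() calls reflect the actual
mechanism):

	#include <linux/types.h>
	#include <linux/preempt.h>	/* arch DECLARE_STATIC_CALL() */
	#include <linux/static_call.h>

	/* Sketch only, not part of this patch. */
	static void example_dynamic_irq_exit_update(bool full)
	{
		if (full)
			/* "preempt=full": preempt from IRQ exit */
			static_call_update(irqentry_exit_cond_resched,
					   __irqentry_exit_cond_resched_func);
		else
			/* "preempt=none"/"voluntary": a NULL target
			 * turns the call site into a NOP */
			static_call_update(irqentry_exit_cond_resched, NULL);
	}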
Signed-off-by: Frederic Weisbecker <frederic@...nel.org>
Cc: Mark Rutland <mark.rutland@....com>
Cc: Quentin Perret <qperret@...gle.com>
Cc: Peter Zijlstra <peterz@...radead.org>
Cc: Catalin Marinas <catalin.marinas@....com>
Cc: James Morse <james.morse@....com>
Cc: Will Deacon <will@...nel.org>
Cc: Ard Biesheuvel <ardb@...nel.org>
Cc: David Laight <David.Laight@...LAB.COM>
---
 arch/arm64/include/asm/preempt.h |  7 +++++++
 arch/arm64/kernel/entry-common.c | 15 ++++++++++++---
 2 files changed, 19 insertions(+), 3 deletions(-)
diff --git a/arch/arm64/include/asm/preempt.h b/arch/arm64/include/asm/preempt.h
index e83f0982b99c..4fbbe644532f 100644
--- a/arch/arm64/include/asm/preempt.h
+++ b/arch/arm64/include/asm/preempt.h
@@ -3,6 +3,7 @@
 #define __ASM_PREEMPT_H
 
 #include <linux/thread_info.h>
+#include <linux/static_call_types.h>
 
 #define PREEMPT_NEED_RESCHED	BIT(32)
 #define PREEMPT_ENABLED	(PREEMPT_NEED_RESCHED)
@@ -86,4 +87,10 @@ void preempt_schedule_notrace(void);
 #define __preempt_schedule_notrace() preempt_schedule_notrace()
 #endif /* CONFIG_PREEMPTION */
 
+#ifdef CONFIG_PREEMPT_DYNAMIC
+void arm64_preempt_schedule_irq(void);
+#define __irqentry_exit_cond_resched_func arm64_preempt_schedule_irq
+DECLARE_STATIC_CALL(irqentry_exit_cond_resched, __irqentry_exit_cond_resched_func);
+#endif /* CONFIG_PREEMPT_DYNAMIC */
+
 #endif /* __ASM_PREEMPT_H */
diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index 32f9796c4ffe..f1c739dd874d 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -12,6 +12,7 @@
 #include <linux/sched.h>
 #include <linux/sched/debug.h>
 #include <linux/thread_info.h>
+#include <linux/static_call.h>
 
 #include <asm/cpufeature.h>
 #include <asm/daifflags.h>
@@ -235,7 +236,7 @@ static void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs)
 		exit_to_kernel_mode(regs);
 }
 
-static void __sched arm64_preempt_schedule_irq(void)
+void __sched arm64_preempt_schedule_irq(void)
 {
 	lockdep_assert_irqs_disabled();
 
@@ -259,6 +260,9 @@ static void __sched arm64_preempt_schedule_irq(void)
 	if (system_capabilities_finalized())
 		preempt_schedule_irq();
 }
+#ifdef CONFIG_PREEMPT_DYNAMIC
+DEFINE_STATIC_CALL(irqentry_exit_cond_resched, arm64_preempt_schedule_irq);
+#endif
 
 static void do_interrupt_handler(struct pt_regs *regs,
 				 void (*handler)(struct pt_regs *))
@@ -446,8 +450,13 @@ static void noinstr el1_interrupt(struct pt_regs *regs,
 	 * preempt_count().
 	 */
 	if (IS_ENABLED(CONFIG_PREEMPTION) &&
-	    READ_ONCE(current_thread_info()->preempt_count) == 0)
-		arm64_preempt_schedule_irq();
+	    READ_ONCE(current_thread_info()->preempt_count) == 0) {
+#ifdef CONFIG_PREEMPT_DYNAMIC
+		static_call(irqentry_exit_cond_resched)();
+#else
+		arm64_preempt_schedule_irq();
+#endif
+	}
 
 	exit_el1_irq_or_nmi(regs);
 }
--
2.25.1