Message-ID: <20260123073916.956498-5-mkchauras@linux.ibm.com>
Date: Fri, 23 Jan 2026 13:09:12 +0530
From: Mukesh Kumar Chaurasiya <mkchauras@...ux.ibm.com>
To: maddy@...ux.ibm.com, mpe@...erman.id.au, npiggin@...il.com,
chleroy@...nel.org, ryabinin.a.a@...il.com, glider@...gle.com,
andreyknvl@...il.com, dvyukov@...gle.com, vincenzo.frascino@....com,
oleg@...hat.com, kees@...nel.org, luto@...capital.net,
wad@...omium.org, mchauras@...ux.ibm.com, thuth@...hat.com,
ruanjinjie@...wei.com, sshegde@...ux.ibm.com,
akpm@...ux-foundation.org, charlie@...osinc.com, deller@....de,
ldv@...ace.io, macro@...am.me.uk, segher@...nel.crashing.org,
peterz@...radead.org, bigeasy@...utronix.de, namcao@...utronix.de,
tglx@...utronix.de, mark.barnett@....com,
linuxppc-dev@...ts.ozlabs.org, linux-kernel@...r.kernel.org,
kasan-dev@...glegroups.com
Subject: [PATCH v4 4/8] powerpc: Introduce syscall exit arch functions
From: Mukesh Kumar Chaurasiya <mchauras@...ux.ibm.com>
Add PowerPC-specific implementations of the syscall exit hooks used by
the generic entry/exit framework:

- arch_exit_to_user_mode_prepare()
- arch_exit_to_user_mode()

These helpers restore user state when returning from the kernel to
userspace, including FPU/VMX/VSX register state, transactional memory
state, KUAP state, and CPU time accounting.

Additionally, move check_return_regs_valid() from interrupt.c to
interrupt.h so it can be shared by the new entry/exit logic.
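
For context, the generic entry code invokes these hooks roughly as
follows. This is a simplified sketch of the generic exit path (as in
kernel/entry/common.c), with lockdep, tracing and context-tracking
details elided; it is not part of this patch:

  static __always_inline void exit_to_user_mode_prepare(struct pt_regs *regs)
  {
          unsigned long ti_work = read_thread_flags();

          /* Process pending work (signals, resched, ...) first */
          if (unlikely(ti_work & EXIT_TO_USER_MODE_WORK))
                  ti_work = exit_to_user_mode_loop(regs, ti_work);

          /* Arch hook added here: FP/VMX/VSX, TM and KUAP restore */
          arch_exit_to_user_mode_prepare(regs, ti_work);
  }

  static __always_inline void exit_to_user_mode(void)
  {
          user_enter_irqoff();
          /* Arch hook added here: DBCR0 load and CPU time accounting */
          arch_exit_to_user_mode();
          lockdep_hardirqs_on(CALLER_ADDR0);
  }
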
No functional change is intended with this patch.
Signed-off-by: Mukesh Kumar Chaurasiya <mchauras@...ux.ibm.com>
---
arch/powerpc/include/asm/entry-common.h | 49 +++++++++++++++++++++++++
 1 file changed, 49 insertions(+)

diff --git a/arch/powerpc/include/asm/entry-common.h b/arch/powerpc/include/asm/entry-common.h
index 837a7e020e82..ff0625e04778 100644
--- a/arch/powerpc/include/asm/entry-common.h
+++ b/arch/powerpc/include/asm/entry-common.h
@@ -6,6 +6,7 @@
#include <asm/cputime.h>
#include <asm/interrupt.h>
#include <asm/stacktrace.h>
+#include <asm/switch_to.h>
#include <asm/tm.h>

static __always_inline void booke_load_dbcr0(void)
@@ -123,4 +124,52 @@ static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs)

#define arch_enter_from_user_mode arch_enter_from_user_mode

+static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
+ unsigned long ti_work)
+{
+ unsigned long mathflags;
+
+ if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && IS_ENABLED(CONFIG_PPC_FPU)) {
+ if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
+ unlikely((ti_work & _TIF_RESTORE_TM))) {
+ restore_tm_state(regs);
+ } else {
+ mathflags = MSR_FP;
+
+ if (cpu_has_feature(CPU_FTR_VSX))
+ mathflags |= MSR_VEC | MSR_VSX;
+ else if (cpu_has_feature(CPU_FTR_ALTIVEC))
+ mathflags |= MSR_VEC;
+
+ /*
+ * If userspace MSR has all available FP bits set,
+ * then they are live and no need to restore. If not,
+ * it means the regs were given up and restore_math
+ * may decide to restore them (to avoid taking an FP
+ * fault).
+ */
+ if ((regs->msr & mathflags) != mathflags)
+ restore_math(regs);
+ }
+ }
+
+ check_return_regs_valid(regs);
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ local_paca->tm_scratch = regs->msr;
+#endif
+ /* Restore user access locks last */
+ kuap_user_restore(regs);
+}
+
+#define arch_exit_to_user_mode_prepare arch_exit_to_user_mode_prepare
+
+static __always_inline void arch_exit_to_user_mode(void)
+{
+ booke_load_dbcr0();
+
+ account_cpu_user_exit();
+}
+
+#define arch_exit_to_user_mode arch_exit_to_user_mode
+
#endif /* _ASM_PPC_ENTRY_COMMON_H */
--
2.52.0