Message-ID: <164681250198.16921.8901861440480788939.tip-bot2@tip-bot2>
Date: Wed, 09 Mar 2022 07:55:01 -0000
From: "tip-bot2 for Peter Zijlstra" <tip-bot2@...utronix.de>
To: linux-tip-commits@...r.kernel.org
Cc: Josh Poimboeuf <jpoimboe@...hat.com>,
"Peter Zijlstra (Intel)" <peterz@...radead.org>, x86@...nel.org,
linux-kernel@...r.kernel.org
Subject: [tip: x86/core] x86/livepatch: Validate __fentry__ location
The following commit has been merged into the x86/core branch of tip:
Commit-ID: a557abfd1a16072e4d25bdf226e3b761e6f10741
Gitweb: https://git.kernel.org/tip/a557abfd1a16072e4d25bdf226e3b761e6f10741
Author: Peter Zijlstra <peterz@...radead.org>
AuthorDate: Tue, 08 Mar 2022 16:30:30 +01:00
Committer: Peter Zijlstra <peterz@...radead.org>
CommitterDate: Tue, 08 Mar 2022 23:53:31 +01:00
x86/livepatch: Validate __fentry__ location
Currently livepatch assumes __fentry__ lives at func+0, which is most
likely untrue with IBT enabled. Instead, have it use ftrace_location(),
which both validates the location and finds the actual ip if there is
one within the same symbol.
Suggested-by: Josh Poimboeuf <jpoimboe@...hat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
Acked-by: Josh Poimboeuf <jpoimboe@...hat.com>
Link: https://lore.kernel.org/r/20220308154318.285971256@infradead.org
---
arch/powerpc/include/asm/livepatch.h | 10 ----------
kernel/livepatch/patch.c | 19 ++-----------------
2 files changed, 2 insertions(+), 27 deletions(-)
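(Not part of the patch: a minimal sketch of the behavioural change, to go
with the commit message above. The helper names old_klp_lookup() and
new_klp_lookup() are made up for illustration; the real call sites are
klp_patch_func() and klp_unpatch_func() in the diff below.)

	/*
	 * Before: the ftrace site was assumed to be at func+0 (generic
	 * fallback), or within the first 16 bytes via the powerpc
	 * klp_get_ftrace_location() override. With IBT the function
	 * typically starts with ENDBR, so func+0 is not the call site.
	 */
	static unsigned long old_klp_lookup(struct klp_func *func)
	{
		return klp_get_ftrace_location((unsigned long)func->old_func);
	}

	/*
	 * After: ftrace_location() both validates the address and returns
	 * the actual __fentry__ ip within the same symbol, if one exists.
	 */
	static unsigned long new_klp_lookup(struct klp_func *func)
	{
		return ftrace_location((unsigned long)func->old_func);
	}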
diff --git a/arch/powerpc/include/asm/livepatch.h b/arch/powerpc/include/asm/livepatch.h
index 4fe018c..7b9dcd5 100644
--- a/arch/powerpc/include/asm/livepatch.h
+++ b/arch/powerpc/include/asm/livepatch.h
@@ -19,16 +19,6 @@ static inline void klp_arch_set_pc(struct ftrace_regs *fregs, unsigned long ip)
regs_set_return_ip(regs, ip);
}
-#define klp_get_ftrace_location klp_get_ftrace_location
-static inline unsigned long klp_get_ftrace_location(unsigned long faddr)
-{
- /*
- * Live patch works only with -mprofile-kernel on PPC. In this case,
- * the ftrace location is always within the first 16 bytes.
- */
- return ftrace_location_range(faddr, faddr + 16);
-}
-
static inline void klp_init_thread_info(struct task_struct *p)
{
/* + 1 to account for STACK_END_MAGIC */
diff --git a/kernel/livepatch/patch.c b/kernel/livepatch/patch.c
index fe316c0..c172bf9 100644
--- a/kernel/livepatch/patch.c
+++ b/kernel/livepatch/patch.c
@@ -124,19 +124,6 @@ unlock:
ftrace_test_recursion_unlock(bit);
}
-/*
- * Convert a function address into the appropriate ftrace location.
- *
- * Usually this is just the address of the function, but on some architectures
- * it's more complicated so allow them to provide a custom behaviour.
- */
-#ifndef klp_get_ftrace_location
-static unsigned long klp_get_ftrace_location(unsigned long faddr)
-{
- return faddr;
-}
-#endif
-
static void klp_unpatch_func(struct klp_func *func)
{
struct klp_ops *ops;
@@ -153,8 +140,7 @@ static void klp_unpatch_func(struct klp_func *func)
if (list_is_singular(&ops->func_stack)) {
unsigned long ftrace_loc;
- ftrace_loc =
- klp_get_ftrace_location((unsigned long)func->old_func);
+ ftrace_loc = ftrace_location((unsigned long)func->old_func);
if (WARN_ON(!ftrace_loc))
return;
@@ -186,8 +172,7 @@ static int klp_patch_func(struct klp_func *func)
if (!ops) {
unsigned long ftrace_loc;
- ftrace_loc =
- klp_get_ftrace_location((unsigned long)func->old_func);
+ ftrace_loc = ftrace_location((unsigned long)func->old_func);
if (!ftrace_loc) {
pr_err("failed to find location for function '%s'\n",
func->old_name);
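(Also not part of the patch: this change relies on the companion patch
earlier in the series that teaches ftrace_location() to search the whole
symbol. Only as a rough illustration in terms of ftrace_location_range()
and kallsyms, that lookup behaves approximately like the sketch below;
klp_fentry_location() is a made-up name, not the real implementation.)

	static unsigned long klp_fentry_location(unsigned long faddr)
	{
		unsigned long size, offset, loc;

		/* Exact hit: faddr already is a recorded __fentry__ site. */
		loc = ftrace_location_range(faddr, faddr);
		if (loc)
			return loc;

		/*
		 * For sym+0, scan the whole symbol so the call site is
		 * still found when it sits past e.g. an ENDBR instruction.
		 */
		if (kallsyms_lookup_size_offset(faddr, &size, &offset) && !offset)
			return ftrace_location_range(faddr, faddr + size - 1);

		return 0;	/* no __fentry__ in this symbol: not patchable */
	}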