Message-ID: <20220224230203.a5z3mppfcywuvogq@treble>
Date: Thu, 24 Feb 2022 15:02:03 -0800
From: Josh Poimboeuf <jpoimboe@...hat.com>
To: Peter Zijlstra <peterz@...radead.org>
Cc: x86@...nel.org, joao@...rdrivepizza.com, hjl.tools@...il.com,
andrew.cooper3@...rix.com, linux-kernel@...r.kernel.org,
ndesaulniers@...gle.com, keescook@...omium.org,
samitolvanen@...gle.com, mark.rutland@....com,
alyssa.milburn@...el.com, mbenes@...e.cz, rostedt@...dmis.org,
mhiramat@...nel.org, alexei.starovoitov@...il.com
Subject: Re: [PATCH v2 13/39] x86/livepatch: Validate __fentry__ location
On Thu, Feb 24, 2022 at 03:51:51PM +0100, Peter Zijlstra wrote:
> Currently livepatch assumes __fentry__ lives at func+0, which is most
> likely untrue with IBT on. Instead, make it use ftrace_location() by
> default, which both validates the address and finds the actual ip if
> there is one within the same symbol.
>
> Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
> ---
> arch/x86/include/asm/livepatch.h | 9 +++++++++
> kernel/livepatch/patch.c | 2 +-
> 2 files changed, 10 insertions(+), 1 deletion(-)
>
> --- a/kernel/livepatch/patch.c
> +++ b/kernel/livepatch/patch.c
> @@ -133,7 +133,7 @@ static void notrace klp_ftrace_handler(u
> #ifndef klp_get_ftrace_location
> static unsigned long klp_get_ftrace_location(unsigned long faddr)
> {
> - return faddr;
> + return ftrace_location(faddr);
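
For context, with CONFIG_X86_KERNEL_IBT the function typically starts
with an endbr instruction and the __fentry__ call only comes after it,
so func+0 stops being the ftrace location:

  <func>:
    endbr64              <- func+0 when IBT is enabled
    call __fentry__      <- the actual ftrace location
    ...

A minimal sketch of the lookup the changelog describes (the helper name
is made up, kernel context assumed):

  #include <linux/ftrace.h>

  static unsigned long lookup_fentry(unsigned long faddr)
  {
          /*
           * ftrace_location() validates faddr and, per the changelog,
           * finds the __fentry__ site anywhere within the same symbol,
           * returning 0 if the symbol has no ftrace location at all.
           */
          return ftrace_location(faddr);
  }
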
Now that ftrace is doing the dirty work, I think this means we can get
rid of klp_get_ftrace_location() altogether:
diff --git a/arch/powerpc/include/asm/livepatch.h b/arch/powerpc/include/asm/livepatch.h
index 4fe018cc207b..7b9dcd51af32 100644
--- a/arch/powerpc/include/asm/livepatch.h
+++ b/arch/powerpc/include/asm/livepatch.h
@@ -19,16 +19,6 @@ static inline void klp_arch_set_pc(struct ftrace_regs *fregs, unsigned long ip)
regs_set_return_ip(regs, ip);
}
-#define klp_get_ftrace_location klp_get_ftrace_location
-static inline unsigned long klp_get_ftrace_location(unsigned long faddr)
-{
- /*
- * Live patch works only with -mprofile-kernel on PPC. In this case,
- * the ftrace location is always within the first 16 bytes.
- */
- return ftrace_location_range(faddr, faddr + 16);
-}
-
static inline void klp_init_thread_info(struct task_struct *p)
{
/* + 1 to account for STACK_END_MAGIC */
diff --git a/kernel/livepatch/patch.c b/kernel/livepatch/patch.c
index fd295bbbcbc7..ed3464e68bda 100644
--- a/kernel/livepatch/patch.c
+++ b/kernel/livepatch/patch.c
@@ -124,19 +124,6 @@ static void notrace klp_ftrace_handler(unsigned long ip,
ftrace_test_recursion_unlock(bit);
}
-/*
- * Convert a function address into the appropriate ftrace location.
- *
- * Usually this is just the address of the function, but on some architectures
- * it's more complicated so allow them to provide a custom behaviour.
- */
-#ifndef klp_get_ftrace_location
-static unsigned long klp_get_ftrace_location(unsigned long faddr)
-{
- return ftrace_location(faddr);
-}
-#endif
-
static void klp_unpatch_func(struct klp_func *func)
{
struct klp_ops *ops;
@@ -153,8 +140,7 @@ static void klp_unpatch_func(struct klp_func *func)
if (list_is_singular(&ops->func_stack)) {
unsigned long ftrace_loc;
- ftrace_loc =
- klp_get_ftrace_location((unsigned long)func->old_func);
+ ftrace_loc = ftrace_location((unsigned long)func->old_func);
if (WARN_ON(!ftrace_loc))
return;
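
The diff above only shows the klp_unpatch_func() hunk; klp_patch_func()
has the matching lookup and would presumably get the same one-line
conversion. A sketch under that assumption (struct klp_func field names
as in include/linux/livepatch.h, the helper name is made up):

  #include <linux/ftrace.h>
  #include <linux/livepatch.h>
  #include <linux/printk.h>

  /* Hypothetical helper mirroring the lookup done in klp_patch_func(). */
  static int klp_resolve_ftrace_loc(struct klp_func *func, unsigned long *loc)
  {
          *loc = ftrace_location((unsigned long)func->old_func);
          if (!*loc) {
                  pr_err("failed to find ftrace location for function '%s'\n",
                         func->old_name);
                  return -EINVAL;
          }
          return 0;
  }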