Message-ID: <20250225104726.5e4eed32@gandalf.local.home>
Date: Tue, 25 Feb 2025 10:47:26 -0500
From: Steven Rostedt <rostedt@...dmis.org>
To: Nathan Chancellor <nathan@...nel.org>
Cc: linux-kernel@...r.kernel.org, Masami Hiramatsu <mhiramat@...nel.org>,
Mark Rutland <mark.rutland@....com>, Mathieu Desnoyers
<mathieu.desnoyers@...icios.com>, Andrew Morton
<akpm@...ux-foundation.org>, bpf <bpf@...r.kernel.org>, Peter Zijlstra
<peterz@...radead.org>, Linus Torvalds <torvalds@...ux-foundation.org>,
Masahiro Yamada <masahiroy@...nel.org>, Nicolas Schier <nicolas@...sle.eu>,
Zheng Yejian <zhengyejian1@...wei.com>, Martin Kelly
<martin.kelly@...wdstrike.com>, Christophe Leroy
<christophe.leroy@...roup.eu>, Josh Poimboeuf <jpoimboe@...hat.com>, Heiko
Carstens <hca@...ux.ibm.com>, Catalin Marinas <catalin.marinas@....com>,
Will Deacon <will@...nel.org>, Vasily Gorbik <gor@...ux.ibm.com>, Alexander
Gordeev <agordeev@...ux.ibm.com>
Subject: Re: [for-next][PATCH 4/6] scripts/sorttable: Zero out weak
functions in mcount_loc table

On Mon, 24 Feb 2025 22:28:33 -0500
Steven Rostedt <rostedt@...dmis.org> wrote:

> Thanks, I'm about to go to bed soon and I'll take a look more into it tomorrow.

Can you try this patch? (It has the clang fix too.)

-- Steve

diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 27c8def2139d..bec7b5dbdb3b 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -7004,7 +7004,6 @@ static int ftrace_process_locs(struct module *mod,
 	unsigned long count;
 	unsigned long *p;
 	unsigned long addr;
-	unsigned long kaslr;
 	unsigned long flags = 0; /* Shut up gcc */
 	unsigned long pages;
 	int ret = -ENOMEM;
@@ -7056,25 +7055,37 @@ static int ftrace_process_locs(struct module *mod,
 		ftrace_pages->next = start_pg;
 	}
 
-	/* For zeroed locations that were shifted for core kernel */
-	kaslr = !mod ? kaslr_offset() : 0;
-
 	p = start;
 	pg = start_pg;
 	while (p < end) {
 		unsigned long end_offset;
-		addr = ftrace_call_adjust(*p++);
+
+		addr = *p++;
+
 		/*
 		 * Some architecture linkers will pad between
 		 * the different mcount_loc sections of different
 		 * object files to satisfy alignments.
 		 * Skip any NULL pointers.
 		 */
-		if (!addr || addr == kaslr) {
+		if (!addr) {
+			skipped++;
+			continue;
+		}
+
+		/*
+		 * If this is core kernel, make sure the address is in core
+		 * or inittext, as weak functions get zeroed and KASLR can
+		 * move them to something other than zero. It just will not
+		 * move it to an area where kernel text is.
+		 */
+		if (!mod && !(is_kernel_text(addr) || is_kernel_inittext(addr))) {
 			skipped++;
 			continue;
 		}
 
+		addr = ftrace_call_adjust(addr);
+
 		end_offset = (pg->index+1) * sizeof(pg->records[0]);
 		if (end_offset > PAGE_SIZE << pg->order) {
 			/* We should have allocated enough */
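
(For anyone following along, here is a minimal userspace sketch, not kernel
code, of what the new check above does: a zeroed weak-function entry that the
relocation pass shifts by KASLR ends up at the KASLR offset, which is never
inside kernel text, so a text-range check filters it out without having to
compare against kaslr_offset() directly. The text bounds and offset below are
invented stand-ins for is_kernel_text(), is_kernel_inittext() and
kaslr_offset().)

#include <stdio.h>

#define STEXT	0xffffffff81000000UL	/* pretend start of kernel text */
#define ETEXT	0xffffffff82000000UL	/* pretend end of kernel text */
#define KASLR	0x0000000001000000UL	/* pretend kaslr_offset() */

static int in_kernel_text(unsigned long addr)
{
	return addr >= STEXT && addr < ETEXT;
}

int main(void)
{
	/* real call site, linker padding, zeroed weak function shifted by KASLR */
	unsigned long mcount_loc[] = { STEXT + 0x1234, 0, KASLR };
	int skipped = 0;

	for (unsigned long i = 0; i < sizeof(mcount_loc) / sizeof(mcount_loc[0]); i++) {
		unsigned long addr = mcount_loc[i];

		/* NULL slots are linker padding between object files */
		if (!addr) {
			skipped++;
			continue;
		}

		/* a weak function zeroed in mcount_loc never lands in text */
		if (!in_kernel_text(addr)) {
			skipped++;
			continue;
		}

		printf("keep    %#lx\n", addr);
	}
	printf("skipped %d\n", skipped);
	return 0;
}

Run as-is it keeps only the first entry and skips the padding slot and the
KASLR-shifted one.
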
diff --git a/scripts/sorttable.c b/scripts/sorttable.c
index 23c7e0e6c024..7b4b3714b1af 100644
--- a/scripts/sorttable.c
+++ b/scripts/sorttable.c
@@ -611,13 +611,16 @@ static int add_field(uint64_t addr, uint64_t size)
 	return 0;
 }
 
+/* Used for when mcount/fentry is before the function entry */
+static int before_func;
+
 /* Only return match if the address lies inside the function size */
 static int cmp_func_addr(const void *K, const void *A)
 {
 	uint64_t key = *(const uint64_t *)K;
 	const struct func_info *a = A;
 
-	if (key < a->addr)
+	if (key + before_func < a->addr)
 		return -1;
 	return key >= a->addr + a->size;
 }
@@ -827,9 +830,14 @@ static void *sort_mcount_loc(void *arg)
 		pthread_exit(m_err);
 	}
 
-	if (sort_reloc)
+	if (sort_reloc) {
 		count = fill_relocs(vals, size, ehdr, emloc->start_mcount_loc);
-	else
+		/* gcc may use relocs to save the addresses, but clang does not. */
+		if (!count) {
+			count = fill_addrs(vals, size, start_loc);
+			sort_reloc = 0;
+		}
+	} else
 		count = fill_addrs(vals, size, start_loc);
 
 	if (count < 0) {
@@ -1248,6 +1256,8 @@ static int do_file(char const *const fname, void *addr)
 #ifdef MCOUNT_SORT_ENABLED
 		sort_reloc = true;
 		rela_type = 0x403;
+		/* arm64 uses patchable function entry placing before function */
+		before_func = 8;
 #endif
 		/* fallthrough */
 	case EM_386:
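
For reference, a minimal userspace sketch of how before_func widens the
cmp_func_addr() match used with bsearch(): arm64's patchable function entry
records the patch site a couple of instructions before the function's symbol
address, so the key is allowed to land up to before_func (8) bytes in front of
it. The struct layout and addresses below are invented for illustration; only
cmp_func_addr() mirrors the patch.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>

struct func_info {
	uint64_t addr;
	uint64_t size;
};

static int before_func = 8;	/* the arm64 case from the patch */

/* Only return match if the address lies inside (or just before) the function */
static int cmp_func_addr(const void *K, const void *A)
{
	uint64_t key = *(const uint64_t *)K;
	const struct func_info *a = A;

	if (key + before_func < a->addr)
		return -1;
	return key >= a->addr + a->size;
}

int main(void)
{
	/* pretend function list built from the symbol table, sorted by address */
	struct func_info funcs[] = {
		{ 0x1000, 0x40 },
		{ 0x2000, 0x80 },
	};
	/* patch site recorded 8 bytes before the second function's entry */
	uint64_t key = 0x2000 - 8;
	struct func_info *f;

	f = bsearch(&key, funcs, 2, sizeof(funcs[0]), cmp_func_addr);
	if (f)
		printf("%#" PRIx64 " matches function at %#" PRIx64 "\n",
		       key, f->addr);
	return 0;
}

With before_func = 0 the same lookup would miss the function entirely.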
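
And a minimal sketch of the fill_relocs()/fill_addrs() fallback in
sort_mcount_loc(): gcc typically records the __mcount_loc entries via
relocations while clang writes the addresses straight into the section, so an
empty relocation scan falls back to reading the section data. The two helpers
below are stand-ins, not the real sorttable.c functions.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* pretend no relocations cover mcount_loc (the clang case) */
static long fill_relocs(uint64_t *vals, size_t size)
{
	(void)vals;
	(void)size;
	return 0;
}

/* pretend the addresses are read directly out of the section data */
static long fill_addrs(uint64_t *vals, size_t size)
{
	(void)size;
	vals[0] = 0x1000;
	vals[1] = 0x2000;
	return 2;
}

int main(void)
{
	uint64_t vals[16];
	int sort_reloc = 1;	/* the architecture said "expect relocations" */
	long count;

	if (sort_reloc) {
		count = fill_relocs(vals, sizeof(vals));
		/* gcc may use relocs to save the addresses, but clang does not. */
		if (!count) {
			count = fill_addrs(vals, sizeof(vals));
			sort_reloc = 0;
		}
	} else {
		count = fill_addrs(vals, sizeof(vals));
	}

	printf("filled %ld entries, sort_reloc=%d\n", count, sort_reloc);
	return 0;
}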