Message-Id: <1646236764.vx04n8yp12.naveen@linux.ibm.com>
Date:   Wed, 02 Mar 2022 21:47:03 +0530
From:   "Naveen N. Rao" <naveen.n.rao@...ux.vnet.ibm.com>
To:     Peter Zijlstra <peterz@...radead.org>
Cc:     alexei.starovoitov@...il.com, alyssa.milburn@...el.com,
        andrew.cooper3@...rix.com, hjl.tools@...il.com,
        joao@...rdrivepizza.com, jpoimboe@...hat.com,
        keescook@...omium.org, linux-kernel@...r.kernel.org,
        mark.rutland@....com, mbenes@...e.cz,
        Masami Hiramatsu <mhiramat@...nel.org>,
        ndesaulniers@...gle.com, rostedt@...dmis.org,
        samitolvanen@...gle.com, x86@...nel.org
Subject: Re: [PATCH v2 15/39] x86/ibt,kprobes: Fix more +0 assumptions

Peter Zijlstra wrote:
> --- a/arch/powerpc/kernel/kprobes.c
> +++ b/arch/powerpc/kernel/kprobes.c
> @@ -105,6 +105,27 @@ kprobe_opcode_t *kprobe_lookup_name(cons
>  	return addr;
>  }
> 
> +static bool arch_kprobe_on_func_entry(unsigned long offset)
> +{
> +#ifdef PPC64_ELF_ABI_v2
> +#ifdef CONFIG_KPROBES_ON_FTRACE
> +	return offset <= 16;
> +#else
> +	return offset <= 8;
> +#endif
> +#else
> +	return !offset;
> +#endif
> +}
> +
> +/* XXX try and fold the magic of kprobe_lookup_name() in this */
> +kprobe_opcode_t *arch_adjust_kprobe_addr(unsigned long addr, unsigned long offset,
> +					 bool *on_func_entry)
> +{
> +	*on_func_entry = arch_kprobe_on_func_entry(offset);
> +	return (kprobe_opcode_t *)(addr + offset);
> +}
> +

With respect to kprobe_lookup_name(), one of the primary motivations there was 
the issue with function descriptors for the older ELF v1 ABI (it likely also 
affects ia64/parisc). I'm thinking it'll be simpler if we have a way to obtain 
the function entry point. Something like this:

diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
index 4176c7eca7b5aa..8c57cc5b77f9ae 100644
--- a/include/linux/kallsyms.h
+++ b/include/linux/kallsyms.h
@@ -73,6 +73,12 @@ int kallsyms_on_each_symbol(int (*fn)(void *, const char *, struct module *,
 /* Lookup the address for a symbol. Returns 0 if not found. */
 unsigned long kallsyms_lookup_name(const char *name);
 
+/* Return function entry point by additionally dereferencing function descriptor */
+static inline unsigned long kallsyms_lookup_function(const char *name)
+{
+	return (unsigned long)dereference_symbol_descriptor((void *)kallsyms_lookup_name(name));
+}
+
 extern int kallsyms_lookup_size_offset(unsigned long addr,
 				  unsigned long *symbolsize,
 				  unsigned long *offset);
@@ -103,6 +109,11 @@ static inline unsigned long kallsyms_lookup_name(const char *name)
 	return 0;
 }
 
+static inline unsigned long kallsyms_lookup_function(const char *name)
+{
+	return 0;
+}
+
 static inline int kallsyms_lookup_size_offset(unsigned long addr,
 					      unsigned long *symbolsize,
 					      unsigned long *offset)
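
To illustrate what the helper buys us (example only, not part of the patch; the 
symbol name is arbitrary): on a function-descriptor ABI such as ppc64 ELF v1, a 
plain kallsyms_lookup_name() can hand back the address of the function 
descriptor rather than the code, while kallsyms_lookup_function() resolves it 
to the actual entry point:

	/*
	 * Example only.  If the looked-up address falls within the function
	 * descriptor area, dereference_symbol_descriptor() follows it to the
	 * real entry point; otherwise the address is returned unchanged, so
	 * the helper should be safe to use unconditionally on every ABI.
	 */
	unsigned long addr  = kallsyms_lookup_name("kernel_clone");
	unsigned long entry = kallsyms_lookup_function("kernel_clone");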


With that, we can fold some of the code from kprobe_lookup_name() into 
arch_adjust_kprobe_addr() and remove kprobe_lookup_name() entirely. This should 
also address Masami's concern about powerpc promoting all probes at function 
entry into probes at the ftrace location.
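
As a sanity check of the intended behaviour, here is a rough sketch (not part 
of the patch; the probed symbol and the module boilerplate are made up purely 
for illustration): with the change below, a probe requested at <symbol>+0 on 
ppc64 ELF ABI v2 should end up on the function's local entry point via 
arch_adjust_kprobe_addr(), rather than being moved to the ftrace location as 
kprobe_lookup_name() used to do.

	#include <linux/kprobes.h>
	#include <linux/module.h>

	/* Probe target is arbitrary; any non-inlined kernel function works. */
	static struct kprobe kp = {
		.symbol_name	= "kernel_clone",
	};

	static int __init kp_demo_init(void)
	{
		int ret = register_kprobe(&kp);

		if (ret < 0)
			return ret;

		/*
		 * With the proposed arch_adjust_kprobe_addr(), kp.addr should
		 * be the local entry point on ELF ABI v2 (typically GEP + 8),
		 * not the ftrace location that zero-offset probes used to be
		 * promoted to.  %pS prints the resolved symbol plus offset.
		 */
		pr_info("probe placed at %pS\n", kp.addr);
		return 0;
	}

	static void __exit kp_demo_exit(void)
	{
		unregister_kprobe(&kp);
	}

	module_init(kp_demo_init);
	module_exit(kp_demo_exit);
	MODULE_LICENSE("GPL");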


- Naveen


---
 arch/powerpc/kernel/kprobes.c | 70 +++--------------------------------
 include/linux/kprobes.h       |  1 -
 kernel/kprobes.c              | 19 ++--------
 kernel/trace/trace_kprobe.c   |  2 +-
 4 files changed, 9 insertions(+), 83 deletions(-)

diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 7dae0b01abfbd6..46aa2b9e44c27c 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -41,70 +41,6 @@ bool arch_within_kprobe_blacklist(unsigned long addr)
 		 addr < (unsigned long)__head_end);
 }
 
-kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset)
-{
-	kprobe_opcode_t *addr = NULL;
-
-#ifdef PPC64_ELF_ABI_v2
-	/* PPC64 ABIv2 needs local entry point */
-	addr = (kprobe_opcode_t *)kallsyms_lookup_name(name);
-	if (addr && !offset) {
-#ifdef CONFIG_KPROBES_ON_FTRACE
-		unsigned long faddr;
-		/*
-		 * Per livepatch.h, ftrace location is always within the first
-		 * 16 bytes of a function on powerpc with -mprofile-kernel.
-		 */
-		faddr = ftrace_location_range((unsigned long)addr,
-					      (unsigned long)addr + 16);
-		if (faddr)
-			addr = (kprobe_opcode_t *)faddr;
-		else
-#endif
-			addr = (kprobe_opcode_t *)ppc_function_entry(addr);
-	}
-#elif defined(PPC64_ELF_ABI_v1)
-	/*
-	 * 64bit powerpc ABIv1 uses function descriptors:
-	 * - Check for the dot variant of the symbol first.
-	 * - If that fails, try looking up the symbol provided.
-	 *
-	 * This ensures we always get to the actual symbol and not
-	 * the descriptor.
-	 *
-	 * Also handle <module:symbol> format.
-	 */
-	char dot_name[MODULE_NAME_LEN + 1 + KSYM_NAME_LEN];
-	bool dot_appended = false;
-	const char *c;
-	ssize_t ret = 0;
-	int len = 0;
-
-	if ((c = strnchr(name, MODULE_NAME_LEN, ':')) != NULL) {
-		c++;
-		len = c - name;
-		memcpy(dot_name, name, len);
-	} else
-		c = name;
-
-	if (*c != '\0' && *c != '.') {
-		dot_name[len++] = '.';
-		dot_appended = true;
-	}
-	ret = strscpy(dot_name + len, c, KSYM_NAME_LEN);
-	if (ret > 0)
-		addr = (kprobe_opcode_t *)kallsyms_lookup_name(dot_name);
-
-	/* Fallback to the original non-dot symbol lookup */
-	if (!addr && dot_appended)
-		addr = (kprobe_opcode_t *)kallsyms_lookup_name(name);
-#else
-	addr = (kprobe_opcode_t *)kallsyms_lookup_name(name);
-#endif
-
-	return addr;
-}
-
 static bool arch_kprobe_on_func_entry(unsigned long offset)
 {
 #ifdef PPC64_ELF_ABI_v2
@@ -118,11 +54,15 @@ static bool arch_kprobe_on_func_entry(unsigned long offset)
 #endif
 }
 
-/* XXX try and fold the magic of kprobe_lookup_name() in this */
 kprobe_opcode_t *arch_adjust_kprobe_addr(unsigned long addr, unsigned long offset,
 					 bool *on_func_entry)
 {
 	*on_func_entry = arch_kprobe_on_func_entry(offset);
+#ifdef PPC64_ELF_ABI_v2
+	/* Promote probes on the GEP to the LEP */
+	if (!offset)
+		addr = ppc_function_entry((void *)addr);
+#endif
 	return (kprobe_opcode_t *)(addr + offset);
 }
 
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 9c28f7a0ef4268..dad375056ba049 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -382,7 +382,6 @@ static inline struct kprobe_ctlblk *get_kprobe_ctlblk(void)
 	return this_cpu_ptr(&kprobe_ctlblk);
 }
 
-kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset);
 kprobe_opcode_t *arch_adjust_kprobe_addr(unsigned long addr, unsigned long offset, bool *on_func_entry);
 
 int register_kprobe(struct kprobe *p);
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 8be57fdc19bdc0..066fa644e9dfa3 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -67,12 +67,6 @@ static bool kprobes_all_disarmed;
 static DEFINE_MUTEX(kprobe_mutex);
 static DEFINE_PER_CPU(struct kprobe *, kprobe_instance);
 
-kprobe_opcode_t * __weak kprobe_lookup_name(const char *name,
-					unsigned int __unused)
-{
-	return ((kprobe_opcode_t *)(kallsyms_lookup_name(name)));
-}
-
 /*
  * Blacklist -- list of 'struct kprobe_blacklist_entry' to store info where
  * kprobes can not probe.
@@ -1481,7 +1475,7 @@ bool within_kprobe_blacklist(unsigned long addr)
 		if (!p)
 			return false;
 		*p = '\0';
-		addr = (unsigned long)kprobe_lookup_name(symname, 0);
+		addr = kallsyms_lookup_function(symname);
 		if (addr)
 			return __within_kprobe_blacklist(addr);
 	}
@@ -1524,14 +1518,7 @@ _kprobe_addr(kprobe_opcode_t *addr, const char *symbol_name,
 		goto invalid;
 
 	if (symbol_name) {
-		/*
-		 * Input: @sym + @offset
-		 * Output: @addr + @offset
-		 *
-		 * NOTE: kprobe_lookup_name() does *NOT* fold the offset
-		 *       argument into it's output!
-		 */
-		addr = kprobe_lookup_name(symbol_name, offset);
+		addr = (kprobe_opcode_t *)kallsyms_lookup_function(symbol_name);
 		if (!addr)
 			return ERR_PTR(-ENOENT);
 	}
@@ -2621,7 +2608,7 @@ static int __init init_kprobes(void)
 		/* lookup the function address from its name */
 		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
 			kretprobe_blacklist[i].addr =
-				kprobe_lookup_name(kretprobe_blacklist[i].name, 0);
+				(void *)kallsyms_lookup_function(kretprobe_blacklist[i].name);
 			if (!kretprobe_blacklist[i].addr)
 				pr_err("Failed to lookup symbol '%s' for kretprobe blacklist. Maybe the target function is removed or renamed.\n",
 				       kretprobe_blacklist[i].name);
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 508f14af4f2c7e..a8d01954051e60 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -461,7 +461,7 @@ static bool within_notrace_func(struct trace_kprobe *tk)
 		if (!p)
 			return true;
 		*p = '\0';
-		addr = (unsigned long)kprobe_lookup_name(symname, 0);
+		addr = kallsyms_lookup_function(symname);
 		if (addr)
 			return __within_notrace_func(addr);
 	}
-- 
2.35.1

