Date:   Thu, 16 Jan 2020 23:45:12 +0900
From:   Masami Hiramatsu <mhiramat@...nel.org>
To:     Brendan Gregg <brendan.d.gregg@...il.com>,
        Steven Rostedt <rostedt@...dmis.org>,
        Alexei Starovoitov <ast@...nel.org>
Cc:     mhiramat@...nel.org, Ingo Molnar <mingo@...nel.org>,
        bpf@...r.kernel.org, linux-kernel@...r.kernel.org,
        Daniel Borkmann <daniel@...earbox.net>,
        Arnaldo Carvalho de Melo <acme@...nel.org>,
        "David S . Miller" <davem@...emloft.net>, paulmck@...nel.org,
        joel@...lfernandes.org,
        "Naveen N . Rao" <naveen.n.rao@...ux.ibm.com>,
        Anil S Keshavamurthy <anil.s.keshavamurthy@...el.com>
Subject: [RFT PATCH 06/13] kprobes: Enable kprobe-booster with CONFIG_PREEMPT=y

As we did in commit a30b85df7d59 ("kprobes: Use synchronize_rcu_tasks()
for optprobe with CONFIG_PREEMPT=y"), we can also enable the kprobe-
booster, which depends on a trampoline execution buffer just as
optprobe does. Before releasing a trampoline buffer (kprobe_insn_page),
the garbage collector now waits for all tasks that may have been
preempted on the trampoline buffer, using synchronize_rcu_tasks()
instead of synchronize_rcu().

This requires CONFIG_TASKS_RCU=y to be enabled as well, so this also
introduces HAVE_KPROBES_BOOSTER for the archs which support the
kprobe-booster (currently only x86 and ia64).

If both CONFIG_PREEMPTION and HAVE_KPROBES_BOOSTER are y,
CONFIG_KPROBES selects CONFIG_TASKS_RCU=y.
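
To illustrate the idea, here is a hypothetical, simplified sketch (not
the code touched by this patch; the names insn_page, garbage_pages and
collect_garbage_pages() are made up for illustration) of the reclaim
pattern the booster now relies on:

#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Hypothetical stand-in for struct kprobe_insn_page. */
struct insn_page {
	struct list_head list;
	int nused;		/* slots still owned by some kprobe */
	void *insns;		/* trampoline execution buffer */
};

static LIST_HEAD(garbage_pages);

static void collect_garbage_pages(void)
{
	struct insn_page *kip, *next;

	/*
	 * With CONFIG_PREEMPT=y a task can be scheduled out while it is
	 * still executing instructions in a trampoline buffer.  A plain
	 * synchronize_rcu() does not wait for such a preempted task, but
	 * synchronize_rcu_tasks() returns only after every task has gone
	 * through a voluntary context switch (or run in userspace), so
	 * no task can still be on the buffers freed below.
	 */
	synchronize_rcu_tasks();

	list_for_each_entry_safe(kip, next, &garbage_pages, list) {
		if (!kip->nused) {
			list_del(&kip->list);
			free_page((unsigned long)kip->insns);
			kfree(kip);
		}
	}
}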

Signed-off-by: Masami Hiramatsu <mhiramat@...nel.org>
---
 arch/Kconfig                   |    4 ++++
 arch/ia64/Kconfig              |    1 +
 arch/ia64/kernel/kprobes.c     |    3 +--
 arch/x86/Kconfig               |    1 +
 arch/x86/kernel/kprobes/core.c |    2 --
 kernel/kprobes.c               |    4 ++--
 6 files changed, 9 insertions(+), 6 deletions(-)

diff --git a/arch/Kconfig b/arch/Kconfig
index 48b5e103bdb0..ead87084c8bf 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -64,6 +64,7 @@ config KPROBES
 	depends on MODULES
 	depends on HAVE_KPROBES
 	select KALLSYMS
+	select TASKS_RCU if PREEMPTION && HAVE_KPROBES_BOOSTER
 	help
 	  Kprobes allows you to trap at almost any kernel address and
 	  execute a callback function.  register_kprobe() establishes
@@ -189,6 +190,9 @@ config HAVE_KPROBES
 config HAVE_KRETPROBES
 	bool
 
+config HAVE_KPROBES_BOOSTER
+	bool
+
 config HAVE_OPTPROBES
 	bool
 
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index bab7cd878464..341f9ca8a745 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -25,6 +25,7 @@ config IA64
 	select HAVE_IDE
 	select HAVE_OPROFILE
 	select HAVE_KPROBES
+	select HAVE_KPROBES_BOOSTER
 	select HAVE_KRETPROBES
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_DYNAMIC_FTRACE if (!ITANIUM)
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index a6d6a0556f08..1680a10c9f49 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -841,7 +841,6 @@ static int __kprobes pre_kprobes_handler(struct die_args *args)
 		return 1;
 	}
 
-#if !defined(CONFIG_PREEMPTION)
 	if (p->ainsn.inst_flag == INST_FLAG_BOOSTABLE && !p->post_handler) {
 		/* Boost up -- we can execute copied instructions directly */
 		ia64_psr(regs)->ri = p->ainsn.slot;
@@ -853,7 +852,7 @@ static int __kprobes pre_kprobes_handler(struct die_args *args)
 		preempt_enable_no_resched();
 		return 1;
 	}
-#endif
+
 	prepare_ss(p, regs);
 	kcb->kprobe_status = KPROBE_HIT_SS;
 	return 1;
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index e5800e52a59a..d509578d824b 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -181,6 +181,7 @@ config X86
 	select HAVE_KERNEL_LZO
 	select HAVE_KERNEL_XZ
 	select HAVE_KPROBES
+	select HAVE_KPROBES_BOOSTER
 	select HAVE_KPROBES_ON_FTRACE
 	select HAVE_FUNCTION_ERROR_INJECTION
 	select HAVE_KRETPROBES
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 4d7022a740ab..7aba45037885 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -587,7 +587,6 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
 	if (setup_detour_execution(p, regs, reenter))
 		return;
 
-#if !defined(CONFIG_PREEMPTION)
 	if (p->ainsn.boostable && !p->post_handler) {
 		/* Boost up -- we can execute copied instructions directly */
 		if (!reenter)
@@ -600,7 +599,6 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
 		regs->ip = (unsigned long)p->ainsn.insn;
 		return;
 	}
-#endif
 	if (reenter) {
 		save_previous_kprobe(kcb);
 		set_current_kprobe(p, regs, kcb);
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 9c6e230852ad..848c14e92ccc 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -220,8 +220,8 @@ static int collect_garbage_slots(struct kprobe_insn_cache *c)
 {
 	struct kprobe_insn_page *kip, *next;
 
-	/* Ensure no-one is interrupted on the garbages */
-	synchronize_rcu();
+	/* Ensure no-one is running on the garbages. */
+	synchronize_rcu_tasks();
 
 	list_for_each_entry_safe(kip, next, &c->pages, list) {
 		int i;
