Message-ID: <20200409201336.GH3309111@krava>
Date:   Thu, 9 Apr 2020 22:13:36 +0200
From:   Jiri Olsa <jolsa@...hat.com>
To:     Masami Hiramatsu <mhiramat@...nel.org>
Cc:     Jiri Olsa <jolsa@...nel.org>,
        "Naveen N. Rao" <naveen.n.rao@...ux.ibm.com>,
        Anil S Keshavamurthy <anil.s.keshavamurthy@...el.com>,
        "David S. Miller" <davem@...emloft.net>,
        Peter Zijlstra <peterz@...radead.org>,
        lkml <linux-kernel@...r.kernel.org>,
        "bibo,mao" <bibo.mao@...el.com>,
        "Ziqian SUN (Zamir)" <zsun@...hat.com>
Subject: Re: [RFC] kretprobe: Prevent triggering kretprobe from within
 kprobe_flush_task

On Thu, Apr 09, 2020 at 08:45:01PM +0200, Jiri Olsa wrote:
> On Thu, Apr 09, 2020 at 11:41:01PM +0900, Masami Hiramatsu wrote:
> 
> SNIP
> 
> > > ---
> > >  kernel/kprobes.c | 6 ++++++
> > >  1 file changed, 6 insertions(+)
> > > 
> > > diff --git a/kernel/kprobes.c b/kernel/kprobes.c
> > > index 2625c241ac00..b13247cae752 100644
> > > --- a/kernel/kprobes.c
> > > +++ b/kernel/kprobes.c
> > > @@ -1236,6 +1236,10 @@ __releases(hlist_lock)
> > >  }
> > >  NOKPROBE_SYMBOL(kretprobe_table_unlock);
> > >  
> > > +static struct kprobe kretprobe_dummy = {
> > > +        .addr = (void *)kretprobe_trampoline,
> > > +};
> > > +
> > >  /*
> > >   * This function is called from finish_task_switch when task tk becomes dead,
> > >   * so that we can recycle any function-return probe instances associated
> > > @@ -1256,12 +1260,14 @@ void kprobe_flush_task(struct task_struct *tk)
> > >  	INIT_HLIST_HEAD(&empty_rp);
> > >  	hash = hash_ptr(tk, KPROBE_HASH_BITS);
> > >  	head = &kretprobe_inst_table[hash];
> > > +	__this_cpu_write(current_kprobe, &kretprobe_dummy);
> > 
> > Can you also set the kcb->kprobe_status = KPROBE_HIT_ACTIVE?
> > 
> > BTW, it may be better to introduce a common kprobe_reject_section_start()
> > and kprobe_reject_section_end() so that users don't need to prepare
> > dummy kprobes.
> 
> sure, will do
> 
> thank you both for review

ok, found out it's actually arch code.. would you guys be ok with something like the change below?

jirka


---
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 4d7022a740ab..081d0f366c99 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -757,12 +757,33 @@ static struct kprobe kretprobe_kprobe = {
 	.addr = (void *)kretprobe_trampoline,
 };
 
+void arch_kprobe_reject_section_start(void)
+{
+	struct kprobe_ctlblk *kcb;
+
+	preempt_disable();
+
+	/*
+	 * Set a dummy kprobe for avoiding kretprobe recursion.
+	 * Since a kretprobe never runs inside a kprobe handler, no kprobe
+	 * can be running at this point.
+	 */
+	__this_cpu_write(current_kprobe, &kretprobe_kprobe);
+	kcb = get_kprobe_ctlblk();
+	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+}
+
+void arch_kprobe_reject_section_end(void)
+{
+	__this_cpu_write(current_kprobe, NULL);
+	preempt_enable();
+}
+
 /*
  * Called from kretprobe_trampoline
  */
 __used __visible void *trampoline_handler(struct pt_regs *regs)
 {
-	struct kprobe_ctlblk *kcb;
 	struct kretprobe_instance *ri = NULL;
 	struct hlist_head *head, empty_rp;
 	struct hlist_node *tmp;
@@ -772,16 +793,7 @@ __used __visible void *trampoline_handler(struct pt_regs *regs)
 	void *frame_pointer;
 	bool skipped = false;
 
-	preempt_disable();
-
-	/*
-	 * Set a dummy kprobe for avoiding kretprobe recursion.
-	 * Since kretprobe never run in kprobe handler, kprobe must not
-	 * be running at this point.
-	 */
-	kcb = get_kprobe_ctlblk();
-	__this_cpu_write(current_kprobe, &kretprobe_kprobe);
-	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+	arch_kprobe_reject_section_start();
 
 	INIT_HLIST_HEAD(&empty_rp);
 	kretprobe_hash_lock(current, &head, &flags);
@@ -873,8 +885,7 @@ __used __visible void *trampoline_handler(struct pt_regs *regs)
 
 	kretprobe_hash_unlock(current, &flags);
 
-	__this_cpu_write(current_kprobe, NULL);
-	preempt_enable();
+	arch_kprobe_reject_section_end();
 
 	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
 		hlist_del(&ri->hlist);
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 2625c241ac00..935dd8c87705 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -1236,6 +1236,14 @@ __releases(hlist_lock)
 }
 NOKPROBE_SYMBOL(kretprobe_table_unlock);
 
+void __weak arch_kprobe_reject_section_start(void)
+{
+}
+
+void __weak arch_kprobe_reject_section_end(void)
+{
+}
+
 /*
  * This function is called from finish_task_switch when task tk becomes dead,
  * so that we can recycle any function-return probe instances associated
@@ -1253,6 +1261,8 @@ void kprobe_flush_task(struct task_struct *tk)
 		/* Early boot.  kretprobe_table_locks not yet initialized. */
 		return;
 
+	arch_kprobe_reject_section_start();
+
 	INIT_HLIST_HEAD(&empty_rp);
 	hash = hash_ptr(tk, KPROBE_HASH_BITS);
 	head = &kretprobe_inst_table[hash];
@@ -1266,6 +1276,8 @@ void kprobe_flush_task(struct task_struct *tk)
 		hlist_del(&ri->hlist);
 		kfree(ri);
 	}
+
+	arch_kprobe_reject_section_end();
 }
 NOKPROBE_SYMBOL(kprobe_flush_task);
 
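
For illustration, a rough userspace model of why the dummy kprobe blocks
nested kretprobes: the arch breakpoint handler refuses to invoke a new
probe while current_kprobe is set on the CPU, so marking the flush path
as "inside a probe" makes any kretprobe that fires there get rejected.
The names below (hit_probe, kprobe_flush_task_model) are made up for the
sketch and are not kernel symbols; this is toy code, not the kernel logic.

/* toy_reject.c - simplified model of the "dummy kprobe" guard above. */
#include <stdio.h>
#include <stddef.h>

struct kprobe { const char *name; };

static struct kprobe *current_kprobe;            /* per-CPU in the kernel */
static struct kprobe dummy = { .name = "dummy" };

static int kprobe_running(void)
{
	return current_kprobe != NULL;
}

/* stand-in for the breakpoint handler deciding whether to run a probe */
static void hit_probe(struct kprobe *p)
{
	if (kprobe_running()) {
		printf("'%s' rejected: another kprobe is active on this CPU\n",
		       p->name);
		return;
	}
	current_kprobe = p;
	printf("'%s' handler runs\n", p->name);
	current_kprobe = NULL;
}

static void kprobe_flush_task_model(void)
{
	struct kprobe kret = { .name = "kretprobe hitting kprobe_flush_task" };

	current_kprobe = &dummy;   /* arch_kprobe_reject_section_start() */
	hit_probe(&kret);          /* would otherwise recurse and deadlock
	                            * on the kretprobe hash lock */
	current_kprobe = NULL;     /* arch_kprobe_reject_section_end() */
}

int main(void)
{
	kprobe_flush_task_model();
	return 0;
}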

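The kernel/kprobes.c half relies on the usual weak-symbol pattern: the empty
__weak definitions act as the default, and an architecture that provides
strong arch_kprobe_reject_section_start()/_end() (as the x86 hunk above does)
overrides them at link time. A minimal standalone illustration of that
linkage pattern in plain userspace C, not kernel code:

/* weak_default.c - illustration of the __weak override pattern. */
#include <stdio.h>

/* Weak default: kept by the linker only if no strong definition of the
 * same symbol exists in another object file. */
__attribute__((weak)) void arch_kprobe_reject_section_start(void)
{
	printf("weak default: nothing to do on this architecture\n");
}

int main(void)
{
	arch_kprobe_reject_section_start();
	return 0;
}

Compiled alone, the weak default runs; linking in an object file that
defines a strong arch_kprobe_reject_section_start() silently replaces it,
which is what lets individual architectures opt in without touching the
generic code.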