Message-Id: <20240708092415.906217175@infradead.org>
Date: Mon, 08 Jul 2024 11:12:48 +0200
From: Peter Zijlstra <peterz@...radead.org>
To: mingo@...nel.org,
 andrii@...nel.org
Cc: linux-kernel@...r.kernel.org,
 peterz@...radead.org,
 rostedt@...dmis.org,
 mhiramat@...nel.org,
 oleg@...hat.com,
 jolsa@...nel.org,
 clm@...a.com,
 paulmck@...nel.org
Subject: [PATCH 07/10] perf/uprobe: Convert (some) uprobe->refcount to SRCU

With handle_swbp() hitting concurrently on (all) CPUs, potentially on
the same uprobe, the uprobe->refcount can get *very* hot. Move the
struct uprobe lifetime into uprobes_srcu such that it covers both the
uprobe and the uprobe->consumers list.
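
For illustration only (not part of the patch), the lifetime rule this
establishes looks roughly like the sketch below; it assumes the
uprobes_srcu domain introduced earlier in the series and leaves out the
delayed_uprobe handling that the real put_uprobe() hunk keeps:

	/*
	 * Sketch: the final put defers the actual free until all SRCU
	 * readers of uprobes_srcu have finished.
	 */
	static void uprobe_free_rcu(struct rcu_head *rcu)
	{
		struct uprobe *uprobe = container_of(rcu, struct uprobe, rcu);
		kfree(uprobe);
	}

	static void put_uprobe_sketch(struct uprobe *uprobe)
	{
		if (!refcount_dec_and_test(&uprobe->ref))
			return;
		/*
		 * Anybody who looked the uprobe up under
		 * srcu_read_lock(&uprobes_srcu) keeps a valid pointer until
		 * their srcu_read_unlock(); only then does uprobe_free_rcu()
		 * run.
		 */
		call_srcu(&uprobes_srcu, &uprobe->rcu, uprobe_free_rcu);
	}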

With this, handle_swbp() can use a single large SRCU critical section
to avoid taking a refcount on the uprobe for its duration.
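
Concretely, the hot path goes from a get/put pair per hit to one SRCU
read-side section. The stand-in below is a simplification of
handle_swbp(); the real function also deals with UPROBE_COPY_INSN,
utask setup and single-stepping (see the diff):

	static void handle_swbp_sketch(struct pt_regs *regs, unsigned long bp_vaddr)
	{
		struct uprobe *uprobe;
		int is_swbp;

		/* One read-side section covers the uprobe and its ->consumers list. */
		guard(srcu)(&uprobes_srcu);

		uprobe = find_active_uprobe(bp_vaddr, &is_swbp);	/* no refcount taken */
		if (!uprobe)
			return;

		handler_chain(uprobe, regs);	/* consumers walked under the same section */

		/* No put_uprobe() on the way out; the guard drops the SRCU lock on return. */
	}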

Notably, the single-step and uretprobe paths need a reference that
outlives handle_swbp() and will, for now, still use ->refcount.
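
Sketched (again simplified, not the patch itself): pre_ssout() runs
inside handle_swbp()'s SRCU section, but the step only finishes after
the task returns to user space, i.e. after that section ends, so the
uprobe is pinned the old way and dropped once the step completes:

	static int pre_ssout_sketch(struct uprobe *uprobe, struct uprobe_task *utask)
	{
		/* Pin the uprobe beyond the end of the SRCU section. */
		utask->active_uprobe = get_uprobe(uprobe);
		utask->state = UTASK_SSTEP;
		return 0;
	}

	static void handle_singlestep_sketch(struct uprobe_task *utask)
	{
		/* Step done; drop the reference taken above. */
		put_uprobe(utask->active_uprobe);
		utask->active_uprobe = NULL;
	}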

Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
---
 kernel/events/uprobes.c |   31 +++++++++++++++----------------
 1 file changed, 15 insertions(+), 16 deletions(-)

--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -615,7 +615,7 @@ static void put_uprobe(struct uprobe *up
 		mutex_lock(&delayed_uprobe_lock);
 		delayed_uprobe_remove(uprobe, NULL);
 		mutex_unlock(&delayed_uprobe_lock);
-		call_rcu(&uprobe->rcu, uprobe_free_rcu);
+		call_srcu(&uprobes_srcu, &uprobe->rcu, uprobe_free_rcu);
 	}
 }
 
@@ -667,7 +667,7 @@ static struct uprobe *__find_uprobe(stru
 	struct rb_node *node = rb_find_rcu(&key, &uprobes_tree, __uprobe_cmp_key);
 
 	if (node)
-		return get_uprobe(__node_2_uprobe(node));
+		return __node_2_uprobe(node);
 
 	return NULL;
 }
@@ -680,7 +680,7 @@ static struct uprobe *find_uprobe(struct
 {
 	unsigned int seq;
 
-	guard(rcu)();
+	lockdep_assert(srcu_read_lock_held(&uprobes_srcu));
 
 	do {
 		seq = read_seqcount_begin(&uprobes_seqcount);
@@ -1130,6 +1130,8 @@ void uprobe_unregister_nosync(struct ino
 {
 	struct uprobe *uprobe;
 
+	guard(srcu)(&uprobes_srcu);
+
 	uprobe = find_uprobe(inode, offset);
 	if (WARN_ON(!uprobe))
 		return;
@@ -1137,7 +1139,6 @@ void uprobe_unregister_nosync(struct ino
 	mutex_lock(&uprobe->register_mutex);
 	__uprobe_unregister(uprobe, uc);
 	mutex_unlock(&uprobe->register_mutex);
-	put_uprobe(uprobe);
 }
 EXPORT_SYMBOL_GPL(uprobe_unregister_nosync);
 
@@ -1247,6 +1248,8 @@ int uprobe_apply(struct inode *inode, lo
 	struct uprobe_consumer *con;
 	int ret = -ENOENT;
 
+	guard(srcu)(&uprobes_srcu);
+
 	uprobe = find_uprobe(inode, offset);
 	if (WARN_ON(!uprobe))
 		return ret;
@@ -1257,7 +1260,6 @@ int uprobe_apply(struct inode *inode, lo
 	if (con)
 		ret = register_for_each_vma(uprobe, add ? uc : NULL);
 	mutex_unlock(&uprobe->register_mutex);
-	put_uprobe(uprobe);
 
 	return ret;
 }
@@ -1977,7 +1979,7 @@ pre_ssout(struct uprobe *uprobe, struct
 		return err;
 	}
 
-	utask->active_uprobe = uprobe;
+	utask->active_uprobe = get_uprobe(uprobe);
 	utask->state = UTASK_SSTEP;
 	return 0;
 }
@@ -2108,7 +2110,7 @@ static void handler_chain(struct uprobe
 	int remove = UPROBE_HANDLER_REMOVE;
 	bool need_prep = false; /* prepare return uprobe, when needed */
 
-	guard(srcu)(&uprobes_srcu);
+	lockdep_assert(srcu_read_lock_held(&uprobes_srcu));
 
 	for_each_consumer_rcu(uc, uprobe->consumers) {
 		int rc = 0;
@@ -2227,6 +2229,8 @@ static void handle_swbp(struct pt_regs *
 	if (bp_vaddr == get_trampoline_vaddr())
 		return handle_trampoline(regs);
 
+	guard(srcu)(&uprobes_srcu);
+
 	uprobe = find_active_uprobe(bp_vaddr, &is_swbp);
 	if (!uprobe) {
 		if (is_swbp > 0) {
@@ -2255,7 +2259,7 @@ static void handle_swbp(struct pt_regs *
 	 * new and not-yet-analyzed uprobe at the same address, restart.
 	 */
 	if (unlikely(!test_bit(UPROBE_COPY_INSN, &uprobe->flags)))
-		goto out;
+		return;
 
 	/*
 	 * Pairs with the smp_wmb() in prepare_uprobe().
@@ -2268,22 +2272,17 @@ static void handle_swbp(struct pt_regs *
 
 	/* Tracing handlers use ->utask to communicate with fetch methods */
 	if (!get_utask())
-		goto out;
+		return;
 
 	if (arch_uprobe_ignore(&uprobe->arch, regs))
-		goto out;
+		return;
 
 	handler_chain(uprobe, regs);
 
 	if (arch_uprobe_skip_sstep(&uprobe->arch, regs))
-		goto out;
-
-	if (!pre_ssout(uprobe, regs, bp_vaddr))
 		return;
 
-	/* arch_uprobe_skip_sstep() succeeded, or restart if can't singlestep */
-out:
-	put_uprobe(uprobe);
+	pre_ssout(uprobe, regs, bp_vaddr);
 }
 
 /*


