lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20240813042917.506057-14-andrii@kernel.org>
Date: Mon, 12 Aug 2024 21:29:17 -0700
From: Andrii Nakryiko <andrii@...nel.org>
To: linux-trace-kernel@...r.kernel.org,
	peterz@...radead.org,
	oleg@...hat.com
Cc: rostedt@...dmis.org,
	mhiramat@...nel.org,
	bpf@...r.kernel.org,
	linux-kernel@...r.kernel.org,
	jolsa@...nel.org,
	paulmck@...nel.org,
	willy@...radead.org,
	surenb@...gle.com,
	akpm@...ux-foundation.org,
	linux-mm@...ck.org,
	Andrii Nakryiko <andrii@...nel.org>
Subject: [PATCH RFC v3 13/13] uprobes: add speculative lockless VMA to inode resolution

Now that files_cachep is SLAB_TYPESAFE_BY_RCU, we can safely access
vma->vm_file->f_inode locklessly under just rcu_read_lock() protection,
attempting uprobe lookup speculatively.

We rely on the newly added mmap_lock_speculation_{start,end}() helpers to
validate that mm_struct stays intact for the entire duration of this
speculation. If not, we fall back to mmap_lock-protected lookup.

This allows us to avoid contention on mmap_lock in the absolute majority of
cases, nicely improving uprobe/uretprobe scalability.

BEFORE
======
uprobe-nop            ( 1 cpus):    3.417 ± 0.013M/s  (  3.417M/s/cpu)
uprobe-nop            ( 2 cpus):    5.724 ± 0.006M/s  (  2.862M/s/cpu)
uprobe-nop            ( 3 cpus):    8.543 ± 0.012M/s  (  2.848M/s/cpu)
uprobe-nop            ( 4 cpus):   11.094 ± 0.004M/s  (  2.774M/s/cpu)
uprobe-nop            ( 5 cpus):   13.703 ± 0.006M/s  (  2.741M/s/cpu)
uprobe-nop            ( 6 cpus):   16.350 ± 0.010M/s  (  2.725M/s/cpu)
uprobe-nop            ( 7 cpus):   19.100 ± 0.031M/s  (  2.729M/s/cpu)
uprobe-nop            ( 8 cpus):   20.138 ± 0.029M/s  (  2.517M/s/cpu)
uprobe-nop            (10 cpus):   20.161 ± 0.020M/s  (  2.016M/s/cpu)
uprobe-nop            (12 cpus):   15.129 ± 0.011M/s  (  1.261M/s/cpu)
uprobe-nop            (14 cpus):   15.013 ± 0.013M/s  (  1.072M/s/cpu)
uprobe-nop            (16 cpus):   13.352 ± 0.007M/s  (  0.834M/s/cpu)
uprobe-nop            (24 cpus):   12.470 ± 0.005M/s  (  0.520M/s/cpu)
uprobe-nop            (32 cpus):   11.252 ± 0.042M/s  (  0.352M/s/cpu)
uprobe-nop            (40 cpus):   10.308 ± 0.001M/s  (  0.258M/s/cpu)
uprobe-nop            (48 cpus):   11.037 ± 0.007M/s  (  0.230M/s/cpu)
uprobe-nop            (56 cpus):   12.055 ± 0.002M/s  (  0.215M/s/cpu)
uprobe-nop            (64 cpus):   12.895 ± 0.004M/s  (  0.201M/s/cpu)
uprobe-nop            (72 cpus):   13.995 ± 0.005M/s  (  0.194M/s/cpu)
uprobe-nop            (80 cpus):   15.224 ± 0.030M/s  (  0.190M/s/cpu)

AFTER
=====
uprobe-nop            ( 1 cpus):    3.562 ± 0.006M/s  (  3.562M/s/cpu)
uprobe-nop            ( 2 cpus):    6.751 ± 0.007M/s  (  3.376M/s/cpu)
uprobe-nop            ( 3 cpus):   10.121 ± 0.007M/s  (  3.374M/s/cpu)
uprobe-nop            ( 4 cpus):   13.100 ± 0.007M/s  (  3.275M/s/cpu)
uprobe-nop            ( 5 cpus):   16.321 ± 0.008M/s  (  3.264M/s/cpu)
uprobe-nop            ( 6 cpus):   19.612 ± 0.004M/s  (  3.269M/s/cpu)
uprobe-nop            ( 7 cpus):   22.910 ± 0.037M/s  (  3.273M/s/cpu)
uprobe-nop            ( 8 cpus):   24.705 ± 0.011M/s  (  3.088M/s/cpu)
uprobe-nop            (10 cpus):   30.772 ± 0.020M/s  (  3.077M/s/cpu)
uprobe-nop            (12 cpus):   33.614 ± 0.009M/s  (  2.801M/s/cpu)
uprobe-nop            (14 cpus):   39.166 ± 0.004M/s  (  2.798M/s/cpu)
uprobe-nop            (16 cpus):   41.692 ± 0.014M/s  (  2.606M/s/cpu)
uprobe-nop            (24 cpus):   64.802 ± 0.048M/s  (  2.700M/s/cpu)
uprobe-nop            (32 cpus):   84.226 ± 0.223M/s  (  2.632M/s/cpu)
uprobe-nop            (40 cpus):  102.071 ± 0.067M/s  (  2.552M/s/cpu)
uprobe-nop            (48 cpus):  106.603 ± 1.198M/s  (  2.221M/s/cpu)
uprobe-nop            (56 cpus):  117.695 ± 0.059M/s  (  2.102M/s/cpu)
uprobe-nop            (64 cpus):  124.291 ± 0.485M/s  (  1.942M/s/cpu)
uprobe-nop            (72 cpus):  135.527 ± 0.134M/s  (  1.882M/s/cpu)
uprobe-nop            (80 cpus):  146.195 ± 0.230M/s  (  1.827M/s/cpu)

Previously total throughput was maxing out at 20mln/s with 8-10 cores,
declining afterwards. With this change, it now keeps growing with each
added CPU, reaching 146mln/s at 80 CPUs (this was measured on an 80-core
Intel(R) Xeon(R) Gold 6138 CPU @ 2.00GHz).

Suggested-by: Matthew Wilcox <willy@...radead.org>
Signed-off-by: Andrii Nakryiko <andrii@...nel.org>
---
 kernel/events/uprobes.c | 51 +++++++++++++++++++++++++++++++++++++++++
 1 file changed, 51 insertions(+)

diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 713824c8ca77..12f3edf2ffb1 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -2286,6 +2286,53 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
 	return is_trap_insn(&opcode);
 }
 
+/*
+ * Attempt a speculative, lockless lookup of the uprobe hit at @bp_vaddr
+ * in the current task's mm.  VMA/file/inode fields are read under
+ * rcu_read_lock() only (per the commit message, files_cachep is
+ * SLAB_TYPESAFE_BY_RCU, so the struct file memory stays type-stable
+ * under RCU), and the entire speculation is validated against concurrent
+ * mm changes via mmap_lock_speculation_{start,end}().
+ *
+ * Returns the uprobe on success, or NULL if speculation could not be
+ * started or validated — the caller then falls back to the
+ * mmap_lock-protected slow path.
+ */
+static struct uprobe *find_active_uprobe_speculative(unsigned long bp_vaddr)
+{
+	const vm_flags_t flags = VM_HUGETLB | VM_MAYEXEC | VM_MAYSHARE;
+	struct mm_struct *mm = current->mm;
+	struct uprobe *uprobe;
+	struct vm_area_struct *vma;
+	struct file *vm_file;
+	struct inode *vm_inode;
+	unsigned long vm_pgoff, vm_start;
+	int seq;
+	loff_t offset;
+
+	/* mm is being modified concurrently — don't even try to speculate */
+	if (!mmap_lock_speculation_start(mm, &seq))
+		return NULL;
+
+	rcu_read_lock();
+
+	vma = vma_lookup(mm, bp_vaddr);
+	if (!vma)
+		goto bail;
+
+	/* require VM_MAYEXEC set and VM_MAYSHARE/VM_HUGETLB clear */
+	vm_file = data_race(vma->vm_file);
+	if (!vm_file || (vma->vm_flags & flags) != VM_MAYEXEC)
+		goto bail;
+
+	/*
+	 * Racy unsynchronized reads (hence data_race()); any stale or torn
+	 * value is caught by mmap_lock_speculation_end() below and we bail.
+	 */
+	vm_inode = data_race(vm_file->f_inode);
+	vm_pgoff = data_race(vma->vm_pgoff);
+	vm_start = data_race(vma->vm_start);
+
+	/* translate the trap address into a file offset for uprobe lookup */
+	offset = (loff_t)(vm_pgoff << PAGE_SHIFT) + (bp_vaddr - vm_start);
+	uprobe = find_uprobe_rcu(vm_inode, offset);
+	if (!uprobe)
+		goto bail;
+
+	/* now double check that nothing about MM changed */
+	if (!mmap_lock_speculation_end(mm, seq))
+		goto bail;
+
+	rcu_read_unlock();
+
+	/* happy case, we speculated successfully */
+	return uprobe;
+bail:
+	rcu_read_unlock();
+	return NULL;
+}
+
 /* assumes being inside RCU protected region */
 static struct uprobe *find_active_uprobe_rcu(unsigned long bp_vaddr, int *is_swbp)
 {
@@ -2293,6 +2340,10 @@ static struct uprobe *find_active_uprobe_rcu(unsigned long bp_vaddr, int *is_swb
 	struct uprobe *uprobe = NULL;
 	struct vm_area_struct *vma;
 
+	uprobe = find_active_uprobe_speculative(bp_vaddr);
+	if (uprobe)
+		return uprobe;
+
 	mmap_read_lock(mm);
 	vma = vma_lookup(mm, bp_vaddr);
 	if (vma) {
-- 
2.43.5


Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ