Message-Id: <20211124084119.260239-3-jolsa@kernel.org>
Date:   Wed, 24 Nov 2021 09:41:13 +0100
From:   Jiri Olsa <jolsa@...hat.com>
To:     Alexei Starovoitov <ast@...nel.org>,
        Daniel Borkmann <daniel@...earbox.net>,
        Andrii Nakryiko <andrii@...nel.org>,
        Arnaldo Carvalho de Melo <acme@...nel.org>,
        Peter Zijlstra <a.p.zijlstra@...llo.nl>,
        Masami Hiramatsu <mhiramat@...nel.org>,
        Steven Rostedt <rostedt@...dmis.org>
Cc:     netdev@...r.kernel.org, bpf@...r.kernel.org,
        lkml <linux-kernel@...r.kernel.org>,
        Ingo Molnar <mingo@...nel.org>,
        Mark Rutland <mark.rutland@....com>,
        Martin KaFai Lau <kafai@...com>,
        Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
        Song Liu <songliubraving@...com>, Yonghong Song <yhs@...com>,
        John Fastabend <john.fastabend@...il.com>,
        KP Singh <kpsingh@...omium.org>,
        Ravi Bangoria <ravi.bangoria@....com>
Subject: [PATCH 2/8] perf/uprobe: Add support to create multiple probes

Adding support to create multiple probes within a single perf event.
This way we can associate a single bpf program with multiple uprobes,
because the bpf program gets associated with the perf event.
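
For context, a minimal user-space sketch of that association, assuming
perf_fd is an event opened on the "uprobe" PMU and prog_fd is a loaded
BPF program (both placeholders, not part of this patch):

   #include <sys/ioctl.h>
   #include <linux/perf_event.h>

   /* perf_fd and prog_fd are placeholders obtained elsewhere. */
   static int attach_bpf(int perf_fd, int prog_fd)
   {
           if (ioctl(perf_fd, PERF_EVENT_IOC_SET_BPF, prog_fd))
                   return -1;
           /* Enabling the event arms every probe it carries. */
           return ioctl(perf_fd, PERF_EVENT_IOC_ENABLE, 0);
   }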

The perf_event_attr is not extended; the current fields for uprobe
attachment are reused for the multi attachment.

For the current uprobe attachment we use:

   uprobe_path (in config1) + probe_offset (in config2)

to define the uprobe by executable path and offset.
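
For illustration, a minimal sketch of that single-probe setup from user
space (pmu_type, pid, path and offset are placeholders, not part of
this patch):

   #include <linux/perf_event.h>
   #include <sys/syscall.h>
   #include <sys/types.h>
   #include <unistd.h>
   #include <string.h>

   /* pmu_type comes from /sys/bus/event_source/devices/uprobe/type. */
   static int uprobe_open(int pmu_type, pid_t pid, const char *path, __u64 off)
   {
           struct perf_event_attr attr;

           memset(&attr, 0, sizeof(attr));
           attr.size = sizeof(attr);
           attr.type = pmu_type;
           attr.uprobe_path = (__u64)(unsigned long)path;  /* config1 */
           attr.probe_offset = off;                        /* config2 */

           return syscall(SYS_perf_event_open, &attr, pid, -1, -1, 0);
   }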

For multi probe attach the same fields point to arrays of values
with the same semantics. Each probe is defined by the set of values
with the same array index (idx):

   uprobe_path[idx] (in config1) + probe_offset[idx] (in config2)

to define the uprobe executable path and offset.

The number of probes is passed in the probe_cnt value, which shares
a union with the wakeup_events/wakeup_watermark values that are
not used for uprobes.
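
Put together, a hypothetical multi-attach call could look like the
sketch below (the binary, offsets, pmu_type and pid are placeholders;
probe_cnt and the array semantics of uprobe_path/probe_offset are what
this series introduces, so this does not build against current uapi
headers):

   #include <linux/perf_event.h>
   #include <sys/syscall.h>
   #include <sys/types.h>
   #include <unistd.h>
   #include <string.h>

   static int uprobe_open_multi(int pmu_type, pid_t pid)
   {
           /* Placeholder binary and offsets, purely for illustration. */
           const char *bin = "/usr/bin/foo";
           __u64 paths[2], offs[2] = { 0x1234, 0x5678 };
           struct perf_event_attr attr;

           /* Each array slot holds a user pointer to a path string. */
           paths[0] = (__u64)(unsigned long)bin;
           paths[1] = (__u64)(unsigned long)bin;

           memset(&attr, 0, sizeof(attr));
           attr.size = sizeof(attr);
           attr.type = pmu_type;
           attr.uprobe_path  = (__u64)(unsigned long)paths; /* config1 -> u64 array */
           attr.probe_offset = (__u64)(unsigned long)offs;  /* config2 -> u64 array */
           attr.probe_cnt    = 2;    /* new field added by this series */

           return syscall(SYS_perf_event_open, &attr, pid, -1, -1, 0);
   }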

Since [1] it's possible to stack multiple probe events under
one head event. The same code is used to allow that for probes
defined under the perf uprobe interface.

[1] https://lore.kernel.org/lkml/156095682948.28024.14190188071338900568.stgit@devnote2/

Signed-off-by: Jiri Olsa <jolsa@...nel.org>
---
 kernel/trace/trace_event_perf.c | 108 +++++++++++++++++++++++++++-----
 kernel/trace/trace_probe.h      |   3 +-
 kernel/trace/trace_uprobe.c     |  43 +++++++++++--
 3 files changed, 133 insertions(+), 21 deletions(-)

diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index 26078e40c299..fb5db6a43d37 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -379,34 +379,114 @@ void perf_kprobe_destroy(struct perf_event *p_event)
 #endif /* CONFIG_KPROBE_EVENTS */
 
 #ifdef CONFIG_UPROBE_EVENTS
-int perf_uprobe_init(struct perf_event *p_event,
-		     unsigned long ref_ctr_offset, bool is_retprobe)
+static struct trace_event_call*
+uprobe_init(u64 uprobe_path, u64 probe_offset, unsigned long ref_ctr_offset,
+	    bool is_retprobe, struct trace_event_call *old)
 {
 	int ret;
 	char *path = NULL;
 	struct trace_event_call *tp_event;
 
-	if (!p_event->attr.uprobe_path)
-		return -EINVAL;
+	if (!uprobe_path)
+		return ERR_PTR(-EINVAL);
 
-	path = strndup_user(u64_to_user_ptr(p_event->attr.uprobe_path),
+	path = strndup_user(u64_to_user_ptr(uprobe_path),
 			    PATH_MAX);
 	if (IS_ERR(path)) {
 		ret = PTR_ERR(path);
-		return (ret == -EINVAL) ? -E2BIG : ret;
+		return ERR_PTR((ret == -EINVAL) ? -E2BIG : ret);
 	}
 	if (path[0] == '\0') {
-		ret = -EINVAL;
-		goto out;
+		kfree(path);
+		return ERR_PTR(-EINVAL);
 	}
 
-	tp_event = create_local_trace_uprobe(path, p_event->attr.probe_offset,
-					     ref_ctr_offset, is_retprobe);
-	if (IS_ERR(tp_event)) {
-		ret = PTR_ERR(tp_event);
-		goto out;
+	tp_event = create_local_trace_uprobe(path, probe_offset,
+				ref_ctr_offset, is_retprobe, old);
+	kfree(path);
+	return tp_event;
+}
+
+static struct trace_event_call*
+uprobe_init_multi(struct perf_event *p_event, unsigned long ref_ctr_offset,
+		  bool is_retprobe)
+{
+	void __user *probe_offset = u64_to_user_ptr(p_event->attr.probe_offset);
+	void __user *uprobe_path = u64_to_user_ptr(p_event->attr.uprobe_path);
+	struct trace_event_call *tp_event, *tp_old = NULL;
+	u32 i, cnt = p_event->attr.probe_cnt;
+	u64 *paths = NULL, *offs = NULL;
+	int ret = -EINVAL;
+	size_t size;
+
+	if (!cnt)
+		return ERR_PTR(-EINVAL);
+
+	size = cnt * sizeof(u64);
+	if (uprobe_path) {
+		ret = -ENOMEM;
+		paths = kmalloc(size, GFP_KERNEL);
+		if (!paths)
+			goto out;
+		ret = -EFAULT;
+		if (copy_from_user(paths, uprobe_path, size))
+			goto out;
 	}
 
+	if (probe_offset) {
+		ret = -ENOMEM;
+		offs = kmalloc(size, GFP_KERNEL);
+		if (!offs)
+			goto out;
+		ret = -EFAULT;
+		if (copy_from_user(offs, probe_offset, size))
+			goto out;
+	}
+
+	for (i = 0; i < cnt; i++) {
+		tp_event = uprobe_init(paths ? paths[i] : 0, offs ? offs[i] : 0,
+				       ref_ctr_offset, is_retprobe, tp_old);
+		if (IS_ERR(tp_event)) {
+			if (tp_old)
+				destroy_local_trace_uprobe(tp_old);
+			ret = PTR_ERR(tp_event);
+			goto out;
+		}
+		if (!tp_old)
+			tp_old = tp_event;
+	}
+	ret = 0;
+
+out:
+	kfree(paths);
+	kfree(offs);
+	return ret ? ERR_PTR(ret) : tp_old;
+}
+
+static struct trace_event_call*
+uprobe_init_single(struct perf_event *p_event, unsigned long ref_ctr_offset,
+		   bool is_retprobe)
+{
+	struct perf_event_attr *attr = &p_event->attr;
+
+	return uprobe_init(attr->uprobe_path, attr->probe_offset,
+			   ref_ctr_offset, is_retprobe, NULL);
+}
+
+int perf_uprobe_init(struct perf_event *p_event,
+		     unsigned long ref_ctr_offset, bool is_retprobe)
+{
+	struct trace_event_call *tp_event;
+	int ret;
+
+	if (p_event->attr.probe_cnt)
+		tp_event = uprobe_init_multi(p_event, ref_ctr_offset, is_retprobe);
+	else
+		tp_event = uprobe_init_single(p_event, ref_ctr_offset, is_retprobe);
+
+	if (IS_ERR(tp_event))
+		return PTR_ERR(tp_event);
+
 	/*
 	 * local trace_uprobe need to hold event_mutex to call
 	 * uprobe_buffer_enable() and uprobe_buffer_disable().
@@ -417,8 +497,6 @@ int perf_uprobe_init(struct perf_event *p_event,
 	if (ret)
 		destroy_local_trace_uprobe(tp_event);
 	mutex_unlock(&event_mutex);
-out:
-	kfree(path);
 	return ret;
 }
 
diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
index ba8e46c7efe8..6c81926874ff 100644
--- a/kernel/trace/trace_probe.h
+++ b/kernel/trace/trace_probe.h
@@ -383,7 +383,8 @@ extern void destroy_local_trace_kprobe(struct trace_event_call *event_call);
 
 extern struct trace_event_call *
 create_local_trace_uprobe(char *name, unsigned long offs,
-			  unsigned long ref_ctr_offset, bool is_return);
+			  unsigned long ref_ctr_offset, bool is_return,
+			  struct trace_event_call *old);
 extern void destroy_local_trace_uprobe(struct trace_event_call *event_call);
 #endif
 extern int traceprobe_define_arg_fields(struct trace_event_call *event_call,
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index f5f0039d31e5..ca76f9ab6811 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -358,15 +358,20 @@ alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
 	return ERR_PTR(ret);
 }
 
+static void __free_trace_uprobe(struct trace_uprobe *tu)
+{
+	path_put(&tu->path);
+	kfree(tu->filename);
+	kfree(tu);
+}
+
 static void free_trace_uprobe(struct trace_uprobe *tu)
 {
 	if (!tu)
 		return;
 
-	path_put(&tu->path);
 	trace_probe_cleanup(&tu->tp);
-	kfree(tu->filename);
-	kfree(tu);
+	__free_trace_uprobe(tu);
 }
 
 static struct trace_uprobe *find_probe_event(const char *event, const char *group)
@@ -1584,7 +1589,8 @@ static int unregister_uprobe_event(struct trace_uprobe *tu)
 #ifdef CONFIG_PERF_EVENTS
 struct trace_event_call *
 create_local_trace_uprobe(char *name, unsigned long offs,
-			  unsigned long ref_ctr_offset, bool is_return)
+			  unsigned long ref_ctr_offset, bool is_return,
+			  struct trace_event_call *old)
 {
 	enum probe_print_type ptype;
 	struct trace_uprobe *tu;
@@ -1619,6 +1625,24 @@ create_local_trace_uprobe(char *name, unsigned long offs,
 	tu->path = path;
 	tu->ref_ctr_offset = ref_ctr_offset;
 	tu->filename = kstrdup(name, GFP_KERNEL);
+
+	if (old) {
+		struct trace_uprobe *tu_old;
+
+		tu_old = trace_uprobe_primary_from_call(old);
+		if (!tu_old) {
+			ret = -EINVAL;
+			goto error;
+		}
+
+		/* Append to existing event */
+		ret = trace_probe_append(&tu->tp, &tu_old->tp);
+		if (ret)
+			goto error;
+
+		return trace_probe_event_call(&tu->tp);
+	}
+
 	init_trace_event_call(tu);
 
 	ptype = is_ret_probe(tu) ? PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL;
@@ -1635,11 +1659,20 @@ create_local_trace_uprobe(char *name, unsigned long offs,
 
 void destroy_local_trace_uprobe(struct trace_event_call *event_call)
 {
+	struct trace_probe_event *event;
+	struct trace_probe *pos, *tmp;
 	struct trace_uprobe *tu;
 
 	tu = trace_uprobe_primary_from_call(event_call);
 
-	free_trace_uprobe(tu);
+	event = tu->tp.event;
+	list_for_each_entry_safe(pos, tmp, &event->probes, list) {
+		tu = container_of(pos, struct trace_uprobe, tp);
+		list_del_init(&pos->list);
+		__free_trace_uprobe(tu);
+	}
+
+	trace_probe_event_free(event);
 }
 #endif /* CONFIG_PERF_EVENTS */
 
-- 
2.33.1
