Message-ID: <20240815013626.935097-9-howardchu95@gmail.com>
Date: Thu, 15 Aug 2024 09:36:24 +0800
From: Howard Chu <howardchu95@...il.com>
To: acme@...nel.org
Cc: adrian.hunter@...el.com,
	irogers@...gle.com,
	jolsa@...nel.org,
	kan.liang@...ux.intel.com,
	namhyung@...nel.org,
	linux-perf-users@...r.kernel.org,
	linux-kernel@...r.kernel.org
Subject: [PATCH v2 08/10] perf trace: Add pids_allowed and rename pids_filtered

Add a pids_allowed map so that only the PIDs in it get traced. Rename
pids_filtered to pids_filtered_out to avoid confusion: pids_filtered_out
is an exclude list, used to reduce the observer effect, while
pids_allowed is an allow list.

Write the PIDs passed via the -p option, as well as the workload PID,
to pids_allowed, so that only the PIDs we are interested in get traced.
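
For example, with an illustrative invocation (PIDs made up):

  # perf trace -p 1234,5678 -- sleep 1

1234, 5678 and the PID of the forked 'sleep' workload end up in
pids_allowed, and task_specific is set, so the BPF programs drop
events from every other task.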

Signed-off-by: Howard Chu <howardchu95@...il.com>
---
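
Note, not part of the commit message: a minimal userspace sketch of the
resulting filtering semantics. The helpers and PID values below are made
up for illustration, standing in for the pids_filtered_out and
pids_allowed map lookups done in the BPF program:

	/* filter_sketch.c: models the task_can_trace() decision, illustrative only */
	#include <stdbool.h>
	#include <stdio.h>

	static bool task_specific;	/* true when -p and/or a workload is given */

	/* stand-ins for the two BPF hash map lookups */
	static bool filtered_out(int pid) { return pid == 42; }	/* e.g. perf itself */
	static bool allowed(int pid) { return pid == 1234 || pid == 5678; }

	static bool task_can_trace(int pid)
	{
		if (filtered_out(pid))
			return false;	/* exclude list wins: observer effect */
		if (task_specific && !allowed(pid))
			return false;	/* allow list active, pid not in it */
		return true;
	}

	int main(void)
	{
		task_specific = true;	/* as if 'perf trace -p 1234,5678' */
		printf("%d %d %d\n", task_can_trace(1234),	/* 1: allowed */
		       task_can_trace(42),	/* 0: filtered out */
		       task_can_trace(999));	/* 0: not in allow list */
		return 0;
	}
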
 tools/perf/builtin-trace.c                    | 53 +++++++++++++++++++-
 .../bpf_skel/augmented_raw_syscalls.bpf.c     | 40 ++++++++++++---
 2 files changed, 85 insertions(+), 8 deletions(-)

diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 593b0b8724d0..e7574146165e 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -3922,6 +3922,49 @@ static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace)
 
 	return err;
 }
+
+static int trace__set_allowed_pids(struct trace *trace)
+{
+	int err = 0, pids_allowed_fd = bpf_map__fd(trace->skel->maps.pids_allowed);
+	bool exists = true;
+	struct str_node *pos;
+	struct strlist *pids_slist = strlist__new(trace->opts.target.pid, NULL);
+
+	trace->skel->bss->task_specific = false;
+
+	if (pids_slist) {
+		strlist__for_each_entry(pos, pids_slist) {
+			char *end_ptr;
+			int pid;
+			long val = strtol(pos->s, &end_ptr, 10);
+
+			/* Skip entries that are not valid PIDs */
+			if (val <= 0 || val > INT_MAX ||
+			    (*end_ptr != '\0' && *end_ptr != ','))
+				continue;
+
+			pid = val;
+			err = bpf_map_update_elem(pids_allowed_fd, &pid, &exists, BPF_ANY);
+			if (err)
+				goto out;
+
+			trace->skel->bss->task_specific = true;
+		}
+	}
+
+	if (workload_pid != -1) {
+		err = bpf_map_update_elem(pids_allowed_fd, &workload_pid, &exists, BPF_ANY);
+		if (err)
+			goto out;
+
+		trace->skel->bss->task_specific = true;
+	}
+
+out:
+	strlist__delete(pids_slist);
+	return err;
+}
+
 #endif // HAVE_BPF_SKEL
 
 static int trace__set_ev_qualifier_filter(struct trace *trace)
@@ -3980,7 +4019,7 @@ static int trace__set_filter_loop_pids(struct trace *trace)
 	return err;
 }
 
-static int trace__set_filter_pids(struct trace *trace)
+static int trace__set_filtered_out_pids(struct trace *trace)
 {
 	int err = 0;
 	/*
@@ -4309,13 +4348,19 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
 		}
 	}
 #endif
-	err = trace__set_filter_pids(trace);
+	err = trace__set_filtered_out_pids(trace);
 	if (err < 0)
 		goto out_error_mem;
 
 #ifdef HAVE_BPF_SKEL
 	if (trace->skel && trace->skel->progs.sys_enter)
 		trace__init_syscalls_bpf_prog_array_maps(trace);
+
+	if (trace->skel) {
+		err = trace__set_allowed_pids(trace);
+		if (err)
+			goto out_error_mem;
+	}
 #endif
 
 	if (trace->ev_qualifier_ids.nr > 0) {
diff --git a/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c b/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c
index 0acbd74e8c76..c7b9f80239c7 100644
--- a/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c
+++ b/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c
@@ -22,6 +22,9 @@
 
 #define MAX_CPUS  4096
 
+/* Set from user space when tracing is restricted to specific tasks */
+volatile bool task_specific;
+
 /* bpf-output associated map */
 struct __augmented_syscalls__ {
 	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
@@ -72,12 +74,21 @@ struct augmented_arg {
 	char		value[PATH_MAX];
 };
 
-struct pids_filtered {
+/* Do not trace these PIDs, to reduce the observer effect */
+struct pids_filtered_out {
 	__uint(type, BPF_MAP_TYPE_HASH);
 	__type(key, pid_t);
 	__type(value, bool);
 	__uint(max_entries, 64);
-} pids_filtered SEC(".maps");
+} pids_filtered_out SEC(".maps");
+
+/* Only trace these PIDs, disregard the rest */
+struct pids_allowed {
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__type(key, pid_t);
+	__type(value, bool);
+	__uint(max_entries, 512);
+} pids_allowed SEC(".maps");
 
 /*
  * Desired design of maximum size and alignment (see RFC2553)
@@ -367,18 +378,34 @@ static pid_t getpid(void)
 	return bpf_get_current_pid_tgid();
 }
 
-static bool pid_filter__has(struct pids_filtered *pids, pid_t pid)
+static inline bool pid_filtered_out__has(struct pids_filtered_out *pids, pid_t pid)
 {
 	return bpf_map_lookup_elem(pids, &pid) != NULL;
 }
 
+static inline bool pid_allowed__has(struct pids_allowed *pids, pid_t pid)
+{
+	return bpf_map_lookup_elem(pids, &pid) != NULL;
+}
+
+static inline bool task_can_trace(void)
+{
+	if (pid_filtered_out__has(&pids_filtered_out, getpid()))
+		return false;
+
+	if (task_specific && !pid_allowed__has(&pids_allowed, getpid()))
+		return false;
+
+	return true;
+}
+
 SEC("tp/raw_syscalls/sys_enter")
 int sys_enter(struct syscall_enter_args *args)
 {
 	struct augmented_args_payload *augmented_args;
 	/*
 	 * We start len, the amount of data that will be in the perf ring
-	 * buffer, if this is not filtered out by one of pid_filter__has(),
+	 * buffer, if this is not filtered out by one of pid_filtered_out__has(),
 	 * syscall->enabled, etc, with the non-augmented raw syscall payload,
 	 * i.e. sizeof(augmented_args->args).
 	 *
@@ -386,7 +413,7 @@ int sys_enter(struct syscall_enter_args *args)
 	 * initial, non-augmented raw_syscalls:sys_enter payload.
 	 */
 
-	if (pid_filter__has(&pids_filtered, getpid()))
+	if (!task_can_trace())
 		return 0;
 
 	augmented_args = augmented_args_payload();
@@ -411,7 +438,7 @@ int sys_exit(struct syscall_exit_args *args)
 {
 	struct syscall_exit_args exit_args;
 
-	if (pid_filter__has(&pids_filtered, getpid()))
+	if (!task_can_trace())
 		return 0;
 
 	bpf_probe_read_kernel(&exit_args, sizeof(exit_args), args);
-- 
2.45.2

