Message-ID: <20190322223848.3338614-2-javierhonduco@fb.com>
Date: Fri, 22 Mar 2019 15:38:46 -0700
From: Javier Honduvilla Coto <javierhonduco@...com>
To: <netdev@...r.kernel.org>
CC: <yhs@...com>, <kernel-team@...com>
Subject: [PATCH v4 bpf-next 1/3] bpf: add bpf_progenyof helper

This patch adds the bpf_progenyof helper, which receives a PID and
returns 1 if the currently executing process belongs to that PID's
process hierarchy (the PID itself included), or 0 otherwise.

This is very useful in tracing programs when we want to filter by a
given PID and all the children it might spawn; a usage sketch is
included below. The workarounds most people implement for this purpose
today have issues:
- Attaching to process-spawning syscalls and dynamically adding those
  PIDs to a BPF map used for filtering is cumbersome and potentially
  racy.
- Unrolling a loop to perform what this helper does consumes a lot of
  instructions. That, together with the inability to jump backwards,
  makes it very hard to get right for large process chains.
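
For illustration only (not part of this patch), a tracing program
could use the helper along these lines. The attach point, the target
PID and the BPF_FUNC_progenyof declaration follow the style of the BPF
selftests and are just assumptions of this sketch:

    /* Only act on events from PID 1234 or any of its descendants. */
    static int (*bpf_progenyof)(int pid) = (void *) BPF_FUNC_progenyof;

    SEC("kprobe/do_sys_open")
    int trace_open(struct pt_regs *ctx)
    {
            if (!bpf_progenyof(1234))
                    return 0;   /* current task is outside the hierarchy */

            /* ... record the event here ... */
            return 0;
    }
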
Signed-off-by: Javier Honduvilla Coto <javierhonduco@...com>
---
include/linux/bpf.h | 1 +
include/uapi/linux/bpf.h | 10 +++++++++-
kernel/bpf/core.c | 1 +
kernel/bpf/helpers.c | 32 ++++++++++++++++++++++++++++++++
kernel/trace/bpf_trace.c | 2 ++
5 files changed, 45 insertions(+), 1 deletion(-)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index f62897198844..bd0d2b38e7d5 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -930,6 +930,7 @@ extern const struct bpf_func_proto bpf_sk_redirect_map_proto;
extern const struct bpf_func_proto bpf_spin_lock_proto;
extern const struct bpf_func_proto bpf_spin_unlock_proto;
extern const struct bpf_func_proto bpf_get_local_storage_proto;
+extern const struct bpf_func_proto bpf_progenyof_proto;
/* Shared helpers among cBPF and eBPF. */
void bpf_user_rnd_init_once(void);
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 3c04410137d9..cf54cc739bf4 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -2463,6 +2463,13 @@ union bpf_attr {
* Return
* 0 if iph and th are a valid SYN cookie ACK, or a negative error
* otherwise.
+ * int bpf_progenyof(int pid)
+ * Description
+ * This helper is useful in programs that want to filter events
+ * happening to a given pid or to any of its descendants.
+ * Return
+ * 1 if the currently executing process' pid is in the process
+ * hierarchy of the passed pid, 0 otherwise.
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -2565,7 +2572,8 @@ union bpf_attr {
FN(skb_ecn_set_ce), \
FN(get_listener_sock), \
FN(skc_lookup_tcp), \
- FN(tcp_check_syncookie),
+ FN(tcp_check_syncookie), \
+ FN(progenyof),
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index ff09d32a8a1b..437986497468 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -2044,6 +2044,7 @@ const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
const struct bpf_func_proto bpf_get_current_comm_proto __weak;
const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
const struct bpf_func_proto bpf_get_local_storage_proto __weak;
+const struct bpf_func_proto bpf_progenyof_proto __weak;
const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
{
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index a411fc17d265..f093b35d1ba8 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -18,6 +18,7 @@
#include <linux/sched.h>
#include <linux/uidgid.h>
#include <linux/filter.h>
+#include <linux/init_task.h>
/* If kernel subsystem is allowing eBPF programs to call this function,
* inside its own verifier_ops->get_func_proto() callback it should return
@@ -364,3 +365,34 @@ const struct bpf_func_proto bpf_get_local_storage_proto = {
};
#endif
#endif
+
+BPF_CALL_1(bpf_progenyof, int, pid)
+{
+	int result = 0;
+	struct task_struct *task = current;
+
+	WARN_ON(!rcu_read_lock_held());
+
+	if (unlikely(!task))
+		return -EINVAL;
+
+	if (pid == 0)
+		return 1;
+
+	while (task != &init_task) {
+		if (task->pid == pid) {
+			result = 1;
+			break;
+		}
+		task = rcu_dereference(task->real_parent);
+	}
+
+	return result;
+}
+
+const struct bpf_func_proto bpf_progenyof_proto = {
+	.func		= bpf_progenyof,
+	.gpl_only	= false,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_ANYTHING,
+};
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index d64c00afceb5..e69283d423ff 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -599,6 +599,8 @@ tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_get_prandom_u32_proto;
case BPF_FUNC_probe_read_str:
return &bpf_probe_read_str_proto;
+ case BPF_FUNC_progenyof:
+ return &bpf_progenyof_proto;
#ifdef CONFIG_CGROUPS
case BPF_FUNC_get_current_cgroup_id:
return &bpf_get_current_cgroup_id_proto;
--
2.17.1