Message-Id: <20191216152715.711308-1-toke@redhat.com>
Date:   Mon, 16 Dec 2019 16:27:15 +0100
From:   Toke Høiland-Jørgensen <toke@...hat.com>
To:     Alexei Starovoitov <ast@...nel.org>,
        Daniel Borkmann <daniel@...earbox.net>
Cc:     Toke Høiland-Jørgensen <toke@...hat.com>,
        netdev@...r.kernel.org, bpf@...r.kernel.org,
        Jesper Dangaard Brouer <brouer@...hat.com>,
        Ido Schimmel <idosch@...sch.org>
Subject: [RFC PATCH bpf-next] xdp: Add tracepoint on XDP program return

This adds a new tracepoint, xdp_prog_return, which is triggered at every
XDP program return. This was first discussed back in August[0] as a way to
hook XDP into the kernel drop_monitor framework, so there is a one-stop
place to find all packet drops in the system.
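
As an illustration of the intended usage (a sketch only, not part of this
patch; the map and program names are made up, and it assumes BTF-enabled
raw tracepoint attachment via tp_btf is available), a consumer could count
XDP return codes system-wide like this:

  #include "vmlinux.h"
  #include <bpf/bpf_helpers.h>
  #include <bpf/bpf_tracing.h>

  /* one counter slot per XDP action code */
  struct {
          __uint(type, BPF_MAP_TYPE_ARRAY);
          __uint(max_entries, 5);  /* XDP_ABORTED .. XDP_REDIRECT */
          __type(key, __u32);
          __type(value, __u64);
  } xdp_act_count SEC(".maps");

  SEC("tp_btf/xdp_prog_return")
  int BPF_PROG(count_xdp_return, const struct xdp_buff *xdp,
               const struct bpf_prog *prog, u32 act)
  {
          __u32 key = act;
          __u64 *cnt = bpf_map_lookup_elem(&xdp_act_count, &key);

          if (cnt)  /* array lookup returns NULL for out-of-range keys */
                  __sync_fetch_and_add(cnt, 1);
          return 0;
  }

  char LICENSE[] SEC("license") = "GPL";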

Because trace/events/xdp.h includes filter.h, some ifdef guarding is needed
to be able to use the tracepoint from bpf_prog_run_xdp(): the call goes
through a call_trace_xdp_prog_return() shim, which compiles to an empty
stub in translation units that never include the tracepoint header, so the
circular include is avoided. If anyone has ideas for how to improve on
this, please speak up. I'm sending this as an RFC because of that issue,
and to get some feedback from Ido on whether this tracepoint carries
enough data for drop_monitor usage.

[0] https://lore.kernel.org/netdev/20190809125418.GB2931@splinter/
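
For reference, with the tracepoint enabled through tracefs
(events/xdp/xdp_prog_return/enable), each event renders according to the
TP_printk() format below; a sample trace line (all values made up) would
look like:

  prog_id=42 action=DROP ifindex=2 queue_index=0 data_addr=0x0000000031f1a33c data_len=64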

Cc: Ido Schimmel <idosch@...sch.org>
Signed-off-by: Toke Høiland-Jørgensen <toke@...hat.com>
---
 include/linux/filter.h     | 22 +++++++++++++++++--
 include/trace/events/xdp.h | 45 ++++++++++++++++++++++++++++++++++++++
 kernel/bpf/core.c          |  2 ++
 3 files changed, 67 insertions(+), 2 deletions(-)

diff --git a/include/linux/filter.h b/include/linux/filter.h
index 37ac7025031d..f5e79171902f 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -704,19 +704,37 @@ static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog,
 
 DECLARE_BPF_DISPATCHER(bpf_dispatcher_xdp)
 
+#if defined(_XDP_TRACE_DEF) || defined(_TRACE_XDP_H)
+static void call_trace_xdp_prog_return(const struct xdp_buff *xdp,
+				       const struct bpf_prog *prog,
+				       u32 act);
+#else
+#ifndef _CALL_TRACE_XDP
+#define _CALL_TRACE_XDP
+static inline void call_trace_xdp_prog_return(const struct xdp_buff *xdp,
+					      const struct bpf_prog *prog,
+					      u32 act) {}
+#endif
+#endif
+
 static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
 					    struct xdp_buff *xdp)
 {
+	u32 ret;
+
 	/* Caller needs to hold rcu_read_lock() (!), otherwise program
 	 * can be released while still running, or map elements could be
 	 * freed early while still having concurrent users. XDP fastpath
 	 * already takes rcu_read_lock() when fetching the program, so
 	 * it's not necessary here anymore.
 	 */
-	return __BPF_PROG_RUN(prog, xdp,
-			      BPF_DISPATCHER_FUNC(bpf_dispatcher_xdp));
+	ret = __BPF_PROG_RUN(prog, xdp,
+			     BPF_DISPATCHER_FUNC(bpf_dispatcher_xdp));
+	call_trace_xdp_prog_return(xdp, prog, ret);
+	return ret;
 }
 
+
 void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog);
 
 static inline u32 bpf_prog_insn_size(const struct bpf_prog *prog)
diff --git a/include/trace/events/xdp.h b/include/trace/events/xdp.h
index a7378bcd9928..e64f4221bd2e 100644
--- a/include/trace/events/xdp.h
+++ b/include/trace/events/xdp.h
@@ -50,6 +50,51 @@ TRACE_EVENT(xdp_exception,
 		  __entry->ifindex)
 );
 
+TRACE_EVENT(xdp_prog_return,
+
+	TP_PROTO(const struct xdp_buff *xdp,
+		 const struct bpf_prog *pr, u32 act),
+
+	TP_ARGS(xdp, pr, act),
+
+	TP_STRUCT__entry(
+		__field(int, prog_id)
+		__field(u32, act)
+		__field(int, ifindex)
+		__field(int, queue_index)
+		__field(const void *, data_addr)
+		__field(unsigned int, data_len)
+	),
+
+	TP_fast_assign(
+		__entry->prog_id	= pr->aux->id;
+		__entry->act		= act;
+		__entry->ifindex	= xdp->rxq->dev->ifindex;
+		__entry->queue_index	= xdp->rxq->queue_index;
+		__entry->data_addr	= xdp->data;
+		__entry->data_len	= (unsigned int)(xdp->data_end - xdp->data);
+	),
+
+	TP_printk("prog_id=%d action=%s ifindex=%d queue_index=%d data_addr=%p data_len=%u",
+		  __entry->prog_id,
+		  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
+		  __entry->ifindex,
+		  __entry->queue_index,
+		  __entry->data_addr,
+		  __entry->data_len)
+);
+
+#ifndef _CALL_TRACE_XDP
+#define _CALL_TRACE_XDP
+static inline void call_trace_xdp_prog_return(const struct xdp_buff *xdp,
+					      const struct bpf_prog *prog,
+					      u32 act)
+{
+	trace_xdp_prog_return(xdp, prog, act);
+}
+#endif
+
+
 TRACE_EVENT(xdp_bulk_tx,
 
 	TP_PROTO(const struct net_device *dev,
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 2ff01a716128..a81d3b8d8e5c 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -17,6 +17,8 @@
  * Kris Katterjohn - Added many additional checks in bpf_check_classic()
  */
 
+#define _XDP_TRACE_DEF
+
 #include <uapi/linux/btf.h>
 #include <linux/filter.h>
 #include <linux/skbuff.h>
-- 
2.24.1
