Message-Id: <158453677145.3043.1585141550260881967.stgit@xdp-tutorial>
Date: Wed, 18 Mar 2020 13:06:11 +0000
From: Eelco Chaudron <echaudro@...hat.com>
To: bpf@...r.kernel.org
Cc: davem@...emloft.net, netdev@...r.kernel.org, ast@...nel.org,
daniel@...earbox.net, kafai@...com, songliubraving@...com,
yhs@...com, andriin@...com
Subject: [RFC PATCH bpf-next 1/3] bpf: introduce trace option to the BPF_PROG_TEST_RUN command API
This patch adds a flags field to the existing BPF_PROG_TEST_RUN command,
together with a BPF_F_TEST_ENABLE_TRACE flag that allows tracing to be
enabled when test-running XDP programs.
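
As an illustration only (not part of this patch), below is a minimal
user-space sketch of how the new flag could be used, assuming the updated
UAPI header is in place; prog_fd and pkt are placeholders for an
already-loaded XDP program and a test Ethernet frame, and error handling
is omitted:

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

/* Run an already-loaded XDP program once through BPF_PROG_TEST_RUN,
 * requesting tracing via the new flags field.
 */
static int test_run_xdp_trace(int prog_fd, void *pkt, __u32 pkt_len)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.test.prog_fd = prog_fd;
	attr.test.data_in = (__u64)(unsigned long)pkt;
	attr.test.data_size_in = pkt_len;
	attr.test.repeat = 1;
	attr.test.flags = BPF_F_TEST_ENABLE_TRACE;

	return syscall(__NR_bpf, BPF_PROG_TEST_RUN, &attr, sizeof(attr));
}

As before, the XDP return code is reported back in attr.test.retval; the
only user-visible change is the extra flags field.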
Signed-off-by: Eelco Chaudron <echaudro@...hat.com>
---
 include/linux/filter.h         |   13 +++++++++++++
 include/uapi/linux/bpf.h       |    4 ++++
 kernel/bpf/syscall.c           |    2 +-
 net/bpf/test_run.c             |   17 +++++++++++------
 tools/include/uapi/linux/bpf.h |    4 ++++
 5 files changed, 33 insertions(+), 7 deletions(-)
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 43b5e455d2f5..f95f9ad45ad6 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -737,6 +737,19 @@ static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
			      BPF_DISPATCHER_FUNC(bpf_dispatcher_xdp));
}

+static __always_inline u32 bpf_prog_run_xdp_trace(const struct bpf_prog *prog,
+						   struct xdp_buff *xdp)
+{
+	/* Caller needs to hold rcu_read_lock() (!), otherwise program
+	 * can be released while still running, or map elements could be
+	 * freed early while still having concurrent users. XDP fastpath
+	 * already takes rcu_read_lock() when fetching the program, so
+	 * it's not necessary here anymore.
+	 */
+	return __BPF_PROG_RUN(prog, xdp,
+			      BPF_DISPATCHER_FUNC(bpf_dispatcher_xdp));
+}
+
void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog);

static inline u32 bpf_prog_insn_size(const struct bpf_prog *prog)
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 40b2d9476268..ac5c89903550 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -375,6 +375,9 @@ enum {
 */
#define BPF_F_QUERY_EFFECTIVE	(1U << 0)

+/* Flags for BPF_PROG_TEST_RUN. */
+#define BPF_F_TEST_ENABLE_TRACE	(1U << 0)
+
enum bpf_stack_build_id_status {
	/* user space need an empty entry to identify end of a trace */
	BPF_STACK_BUILD_ID_EMPTY = 0,
@@ -511,6 +514,7 @@ union bpf_attr {
						 */
		__aligned_u64	ctx_in;
		__aligned_u64	ctx_out;
+		__u32		flags;
	} test;

	struct { /* anonymous struct used by BPF_*_GET_*_ID */
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 7ce0815793dd..9a6fae428976 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -2680,7 +2680,7 @@ static int bpf_prog_query(const union bpf_attr *attr,
	return cgroup_bpf_prog_query(attr, uattr);
}

-#define BPF_PROG_TEST_RUN_LAST_FIELD test.ctx_out
+#define BPF_PROG_TEST_RUN_LAST_FIELD test.flags

static int bpf_prog_test_run(const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index 4c921f5154e0..061cad840b05 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -16,7 +16,7 @@
#include <trace/events/bpf_test_run.h>

static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
-			u32 *retval, u32 *time, bool xdp)
+			u32 *retval, u32 *time, bool xdp, bool trace)
{
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { NULL };
	enum bpf_cgroup_storage_type stype;
@@ -43,10 +43,14 @@ static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
	for (i = 0; i < repeat; i++) {
		bpf_cgroup_storage_set(storage);

-		if (xdp)
-			*retval = bpf_prog_run_xdp(prog, ctx);
-		else
+		if (xdp) {
+			if (trace)
+				*retval = bpf_prog_run_xdp_trace(prog, ctx);
+			else
+				*retval = bpf_prog_run_xdp(prog, ctx);
+		} else {
			*retval = BPF_PROG_RUN(prog, ctx);
+		}

		if (signal_pending(current)) {
			ret = -EINTR;
@@ -431,7 +435,7 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
	ret = convert___skb_to_skb(skb, ctx);
	if (ret)
		goto out;
-	ret = bpf_test_run(prog, skb, repeat, &retval, &duration, false);
+	ret = bpf_test_run(prog, skb, repeat, &retval, &duration, false, false);
	if (ret)
		goto out;
	if (!is_l2) {
@@ -468,6 +472,7 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
{
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
+	bool trace = kattr->test.flags & BPF_F_TEST_ENABLE_TRACE;
	struct netdev_rx_queue *rxqueue;
	struct xdp_buff xdp = {};
	u32 retval, duration;
@@ -489,7 +494,7 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
	xdp.rxq = &rxqueue->xdp_rxq;
	bpf_prog_change_xdp(NULL, prog);
-	ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true);
+	ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true, trace);
	if (ret)
		goto out;
	if (xdp.data != data + XDP_PACKET_HEADROOM + NET_IP_ALIGN ||
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 40b2d9476268..ac5c89903550 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -375,6 +375,9 @@ enum {
 */
#define BPF_F_QUERY_EFFECTIVE	(1U << 0)

+/* Flags for BPF_PROG_TEST_RUN. */
+#define BPF_F_TEST_ENABLE_TRACE	(1U << 0)
+
enum bpf_stack_build_id_status {
	/* user space need an empty entry to identify end of a trace */
	BPF_STACK_BUILD_ID_EMPTY = 0,
@@ -511,6 +514,7 @@ union bpf_attr {
						 */
		__aligned_u64	ctx_in;
		__aligned_u64	ctx_out;
+		__u32		flags;
	} test;

	struct { /* anonymous struct used by BPF_*_GET_*_ID */