Message-Id: <20250716-skb-metadata-thru-dynptr-v2-13-5f580447e1df@cloudflare.com>
Date: Wed, 16 Jul 2025 18:16:57 +0200
From: Jakub Sitnicki <jakub@...udflare.com>
To: bpf@...r.kernel.org
Cc: Alexei Starovoitov <ast@...nel.org>,
Andrii Nakryiko <andrii@...nel.org>, Arthur Fabre <arthur@...hurfabre.com>,
Daniel Borkmann <daniel@...earbox.net>, Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>, Jesper Dangaard Brouer <hawk@...nel.org>,
Jesse Brandeburg <jbrandeburg@...udflare.com>,
Joanne Koong <joannelkoong@...il.com>,
Lorenzo Bianconi <lorenzo@...nel.org>,
Toke Høiland-Jørgensen <thoiland@...hat.com>,
Yan Zhai <yan@...udflare.com>, kernel-team@...udflare.com,
netdev@...r.kernel.org, Stanislav Fomichev <sdf@...ichev.me>
Subject: [PATCH bpf-next v2 13/13] selftests/bpf: Count successful bpf
program runs

The skb metadata tests for BPF programs which don't yet have metadata
access produce no observable side effects, so we can't detect breakage
in them. Count each successful BPF program run, where the program takes
its expected path, as a side effect the test can check for.

Signed-off-by: Jakub Sitnicki <jakub@...udflare.com>
---
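Note for reviewers (not part of the commit): a condensed sketch of the
counting pattern this patch adds, with identifiers matching the diff
below. The BPF side bumps a global counter on its expected exit path;
user space atomically swaps the counter back to zero, so a single
operation both reads and resets it between subtests that reuse the
same skeleton.

	/* BPF side (progs/test_xdp_meta.c): make every successful run
	 * observable by incrementing a global counter before returning.
	 */
	__u32 prog_run_cnt = 0;

	static __always_inline int run_ok(int retval)
	{
		__sync_fetch_and_add(&prog_run_cnt, 1);
		return retval;
	}

	/* User space (prog_tests/xdp_context_test_run.c): read-and-reset
	 * the counter via the skeleton's bss, then compare against the
	 * number of programs attached for this subtest.
	 */
	have_run_cnt = __atomic_exchange_n(&skel->bss->prog_run_cnt, 0,
					   __ATOMIC_SEQ_CST);
	ASSERT_EQ(have_run_cnt, want_run_cnt, "unexpected bpf prog runs");
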
.../bpf/prog_tests/xdp_context_test_run.c | 20 +++++++++++++++-
tools/testing/selftests/bpf/progs/test_xdp_meta.c | 28 ++++++++++++++--------
2 files changed, 37 insertions(+), 11 deletions(-)
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_context_test_run.c b/tools/testing/selftests/bpf/prog_tests/xdp_context_test_run.c
index 97e1dae9a2e7..a28bf67b6d8d 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_context_test_run.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_context_test_run.c
@@ -294,13 +294,15 @@ void test_xdp_context_veth(void)
static void test_tuntap(struct bpf_program *xdp_prog,
struct bpf_program *tc_prio_1_prog,
struct bpf_program *tc_prio_2_prog,
- struct bpf_program *nf_prog,
+ struct bpf_program *nf_prog, __u32 *prog_run_cnt,
struct bpf_map *result_map)
{
LIBBPF_OPTS(bpf_tc_hook, tc_hook, .attach_point = BPF_TC_INGRESS);
struct bpf_link *nf_link = NULL;
struct netns_obj *ns = NULL;
__u8 packet[PACKET_LEN];
+ __u32 want_run_cnt = 0;
+ __u32 have_run_cnt;
int tap_fd = -1;
int tap_ifindex;
int ret;
@@ -336,6 +338,7 @@ static void test_tuntap(struct bpf_program *xdp_prog,
ret = bpf_tc_attach(&tc_hook, &tc_opts);
if (!ASSERT_OK(ret, "bpf_tc_attach"))
goto close;
+ want_run_cnt++;
}
if (tc_prio_2_prog) {
@@ -345,6 +348,7 @@ static void test_tuntap(struct bpf_program *xdp_prog,
ret = bpf_tc_attach(&tc_hook, &tc_opts);
if (!ASSERT_OK(ret, "bpf_tc_attach"))
goto close;
+ want_run_cnt++;
}
if (nf_prog) {
@@ -354,18 +358,25 @@ static void test_tuntap(struct bpf_program *xdp_prog,
nf_link = bpf_program__attach_netfilter(nf_prog, &nf_opts);
if (!ASSERT_OK_PTR(nf_link, "attach_netfilter"))
goto close;
+ want_run_cnt++;
}
ret = bpf_xdp_attach(tap_ifindex, bpf_program__fd(xdp_prog),
0, NULL);
if (!ASSERT_GE(ret, 0, "bpf_xdp_attach"))
goto close;
+ want_run_cnt++;
init_test_packet(packet);
ret = write(tap_fd, packet, sizeof(packet));
if (!ASSERT_EQ(ret, sizeof(packet), "write packet"))
goto close;
+ have_run_cnt = __atomic_exchange_n(prog_run_cnt, 0, __ATOMIC_SEQ_CST);
+ if (!ASSERT_EQ(have_run_cnt, want_run_cnt,
+ "unexpected bpf prog runs"))
+ goto close;
+
if (result_map)
assert_test_result(result_map);
@@ -390,42 +401,49 @@ void test_xdp_context_tuntap(void)
skel->progs.ing_cls,
NULL, /* tc prio 2 */
NULL, /* netfilter */
+ &skel->bss->prog_run_cnt,
skel->maps.test_result);
if (test__start_subtest("dynptr_read"))
test_tuntap(skel->progs.ing_xdp,
skel->progs.ing_cls_dynptr_read,
NULL, /* tc prio 2 */
NULL, /* netfilter */
+ &skel->bss->prog_run_cnt,
skel->maps.test_result);
if (test__start_subtest("dynptr_slice"))
test_tuntap(skel->progs.ing_xdp,
skel->progs.ing_cls_dynptr_slice,
NULL, /* tc prio 2 */
NULL, /* netfilter */
+ &skel->bss->prog_run_cnt,
skel->maps.test_result);
if (test__start_subtest("dynptr_write"))
test_tuntap(skel->progs.ing_xdp_zalloc_meta,
skel->progs.ing_cls_dynptr_write,
skel->progs.ing_cls_dynptr_read,
NULL, /* netfilter */
+ &skel->bss->prog_run_cnt,
skel->maps.test_result);
if (test__start_subtest("dynptr_slice_rdwr"))
test_tuntap(skel->progs.ing_xdp_zalloc_meta,
skel->progs.ing_cls_dynptr_slice_rdwr,
skel->progs.ing_cls_dynptr_slice,
NULL, /* netfilter */
+ &skel->bss->prog_run_cnt,
skel->maps.test_result);
if (test__start_subtest("dynptr_offset"))
test_tuntap(skel->progs.ing_xdp_zalloc_meta,
skel->progs.ing_cls_dynptr_offset_wr,
skel->progs.ing_cls_dynptr_offset_rd,
NULL, /* netfilter */
+ &skel->bss->prog_run_cnt,
skel->maps.test_result);
if (test__start_subtest("dynptr_nf_hook"))
test_tuntap(skel->progs.ing_xdp,
NULL, /* tc prio 1 */
NULL, /* tc prio 2 */
skel->progs.ing_nf,
+ &skel->bss->prog_run_cnt,
NULL /* ignore result for now */);
test_xdp_meta__destroy(skel);
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_meta.c b/tools/testing/selftests/bpf/progs/test_xdp_meta.c
index a42a6dd4343c..3731baf37e06 100644
--- a/tools/testing/selftests/bpf/progs/test_xdp_meta.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_meta.c
@@ -35,6 +35,14 @@ struct {
__uint(value_size, META_SIZE);
} test_result SEC(".maps");
+__u32 prog_run_cnt = 0;
+
+static __always_inline int run_ok(int retval)
+{
+ __sync_fetch_and_add(&prog_run_cnt, 1);
+ return retval;
+}
+
SEC("tc")
int ing_cls(struct __sk_buff *ctx)
{
@@ -49,7 +57,7 @@ int ing_cls(struct __sk_buff *ctx)
bpf_map_update_elem(&test_result, &key, data_meta, BPF_ANY);
- return TC_ACT_SHOT;
+ return run_ok(TC_ACT_SHOT);
}
/* Read from metadata using bpf_dynptr_read helper */
@@ -67,7 +75,7 @@ int ing_cls_dynptr_read(struct __sk_buff *ctx)
bpf_dynptr_from_skb_meta(ctx, 0, &meta);
bpf_dynptr_read(dst, META_SIZE, &meta, 0, 0);
- return TC_ACT_SHOT;
+ return run_ok(TC_ACT_SHOT);
}
/* Check that we can't get a dynptr slice to skb metadata yet */
@@ -81,7 +89,7 @@ int ing_nf(struct bpf_nf_ctx *ctx)
if (bpf_dynptr_size(&meta) != 0)
return NF_DROP;
- return NF_ACCEPT;
+ return run_ok(NF_ACCEPT);
}
/* Write to metadata using bpf_dynptr_write helper */
@@ -99,7 +107,7 @@ int ing_cls_dynptr_write(struct __sk_buff *ctx)
bpf_dynptr_from_skb_meta(ctx, 0, &meta);
bpf_dynptr_write(&meta, 0, src, META_SIZE, 0);
- return TC_ACT_UNSPEC; /* pass */
+ return run_ok(TC_ACT_UNSPEC); /* pass */
}
/* Read from metadata using read-only dynptr slice */
@@ -121,7 +129,7 @@ int ing_cls_dynptr_slice(struct __sk_buff *ctx)
__builtin_memcpy(dst, src, META_SIZE);
- return TC_ACT_SHOT;
+ return run_ok(TC_ACT_SHOT);
}
/* Write to metadata using writeable dynptr slice */
@@ -143,7 +151,7 @@ int ing_cls_dynptr_slice_rdwr(struct __sk_buff *ctx)
__builtin_memcpy(dst, src, META_SIZE);
- return TC_ACT_UNSPEC; /* pass */
+ return run_ok(TC_ACT_UNSPEC); /* pass */
}
/*
@@ -181,7 +189,7 @@ int ing_cls_dynptr_offset_rd(struct __sk_buff *ctx)
return TC_ACT_SHOT;
__builtin_memcpy(dst, src, chunk_len);
- return TC_ACT_SHOT;
+ return run_ok(TC_ACT_SHOT);
}
/* Write skb metadata in chunks at various offsets in different ways. */
@@ -216,7 +224,7 @@ int ing_cls_dynptr_offset_wr(struct __sk_buff *ctx)
return TC_ACT_SHOT;
__builtin_memcpy(dst, src, chunk_len);
- return TC_ACT_UNSPEC; /* pass */
+ return run_ok(TC_ACT_UNSPEC); /* pass */
}
/* Reserve and clear space for metadata but don't populate it */
@@ -247,7 +255,7 @@ int ing_xdp_zalloc_meta(struct xdp_md *ctx)
__builtin_memset(meta, 0, META_SIZE);
- return XDP_PASS;
+ return run_ok(XDP_PASS);
}
SEC("xdp")
@@ -278,7 +286,7 @@ int ing_xdp(struct xdp_md *ctx)
return XDP_DROP;
__builtin_memcpy(data_meta, payload, META_SIZE);
- return XDP_PASS;
+ return run_ok(XDP_PASS);
}
char _license[] SEC("license") = "GPL";
--
2.43.0