Message-ID: <56db5e4bb54678e0615501693a5eee0b88e2bf3e@linux.dev>
Date: Sat, 24 Jan 2026 10:47:51 +0000
From: "Jiayuan Chen" <jiayuan.chen@...ux.dev>
To: bpf@...r.kernel.org
Cc: "John Fastabend" <john.fastabend@...il.com>, "Jakub Sitnicki"
<jakub@...udflare.com>, "David S. Miller" <davem@...emloft.net>, "Eric
Dumazet" <edumazet@...gle.com>, "Jakub Kicinski" <kuba@...nel.org>,
"Paolo Abeni" <pabeni@...hat.com>, "Simon Horman" <horms@...nel.org>,
"Neal Cardwell" <ncardwell@...gle.com>, "Kuniyuki Iwashima"
<kuniyu@...gle.com>, "David Ahern" <dsahern@...nel.org>, "Andrii
Nakryiko" <andrii@...nel.org>, "Eduard Zingerman" <eddyz87@...il.com>,
"Alexei Starovoitov" <ast@...nel.org>, "Daniel Borkmann"
<daniel@...earbox.net>, "Martin KaFai Lau" <martin.lau@...ux.dev>, "Song
Liu" <song@...nel.org>, "Yonghong Song" <yonghong.song@...ux.dev>, "KP
Singh" <kpsingh@...nel.org>, "Stanislav Fomichev" <sdf@...ichev.me>, "Hao
Luo" <haoluo@...gle.com>, "Jiri Olsa" <jolsa@...nel.org>, "Shuah Khan"
<shuah@...nel.org>, "Stefano Garzarella" <sgarzare@...hat.com>, "Michal
Luczaj" <mhal@...x.co>, "Cong Wang" <cong.wang@...edance.com>,
netdev@...r.kernel.org, linux-kernel@...r.kernel.org,
linux-kselftest@...r.kernel.org
Subject: Re: [PATCH bpf-next v8 3/3] bpf, selftest: Add tests for FIONREAD
and copied_seq
January 24, 2026 at 14:14, "Jiayuan Chen" <jiayuan.chen@...ux.dev> wrote:
[...]
> +
> +/* send data via both the native stack and BPF redirection */
> +static void test_sockmap_multi_channels(int sotype)
> +{
> + int map, err, sent, recvd, zero = 0, one = 1, avail = 0;
> + struct test_sockmap_pass_prog *skel = NULL;
> + int c0 = -1, p0 = -1, c1 = -1, p1 = -1;
> + char buf[10] = "0123456789", rcv[11];
> + struct bpf_program *prog;
> +
> + skel = test_sockmap_pass_prog__open_and_load();
> + if (!ASSERT_OK_PTR(skel, "open_and_load"))
> + return;
> +
> + err = create_socket_pairs(AF_INET, sotype, &c0, &c1, &p0, &p1);
> + if (err)
> + goto end;
> +
> + prog = skel->progs.prog_skb_verdict_ingress;
> + map = bpf_map__fd(skel->maps.sock_map_rx);
> +
> + err = bpf_prog_attach(bpf_program__fd(prog), map, BPF_SK_SKB_STREAM_VERDICT, 0);
> + if (!ASSERT_OK(err, "bpf_prog_attach verdict"))
> + goto end;
> +
> + err = bpf_map_update_elem(map, &zero, &p0, BPF_ANY);
> + if (!ASSERT_OK(err, "bpf_map_update_elem(p0)"))
> + goto end;
> +
> + err = bpf_map_update_elem(map, &one, &p1, BPF_ANY);
> + if (!ASSERT_OK(err, "bpf_map_update_elem"))
> + goto end;
> +
> + /* send data to p1 via native stack */
> + sent = xsend(c1, buf, 2, 0);
> + if (!ASSERT_EQ(sent, 2, "xsend(2)"))
> + goto end;
> +
> + poll_read(p1, IO_TIMEOUT_SEC);
> + err = ioctl(p1, FIONREAD, &avail);
> + ASSERT_OK(err, "ioctl(FIONREAD) partial call");
> + ASSERT_EQ(avail, 2, "ioctl(FIONREAD) partial return");
> +
> + /* send data to p1 via bpf redirecting */
> + sent = xsend(c0, buf + 2, sizeof(buf) - 2, 0);
> + if (!ASSERT_EQ(sent, sizeof(buf) - 2, "xsend(remain-data)"))
> + goto end;
> +
> + poll_read(p1, IO_TIMEOUT_SEC);
There's a race in the test: poll_read() returns early because the first 2 bytes
are already in the queue, but the second send hasn't been processed yet.
I'll fix it with a polling wait on FIONREAD.
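Roughly what I have in mind, as a minimal sketch (poll_fionread() is a
hypothetical helper, not something that exists in the selftests today; it
assumes the <sys/ioctl.h>/<unistd.h> includes the test already pulls in):

static int poll_fionread(int fd, int expected, int timeout_ms)
{
	int avail = 0;

	/* Re-read FIONREAD until the expected bytes are queued or we time
	 * out, so a wakeup from the first send can't race the second one.
	 */
	while (timeout_ms-- > 0) {
		if (ioctl(fd, FIONREAD, &avail) < 0)
			return -1;
		if (avail >= expected)
			break;
		usleep(1000);
	}
	return avail;
}

The second FIONREAD check would then become something like:

	int want = sotype == SOCK_DGRAM ? 2 : sizeof(buf);

	avail = poll_fionread(p1, want, IO_TIMEOUT_SEC * 1000);
	ASSERT_EQ(avail, want, "ioctl(FIONREAD) full return");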
pw-bot: cr
> + err = ioctl(p1, FIONREAD, &avail);
> + ASSERT_OK(err, "ioctl(FIONREAD) full call");
> + ASSERT_EQ(avail, sotype == SOCK_DGRAM ? 2 : sizeof(buf), "ioctl(FIONREAD) full return");
> +
> + recvd = recv_timeout(p1, rcv, sizeof(rcv), MSG_DONTWAIT, 1);
> + if (!ASSERT_EQ(recvd, sizeof(buf), "recv_timeout(p1)") ||
> + !ASSERT_OK(memcmp(buf, rcv, recvd), "data mismatch"))
> + goto end;
> +end:
> + if (c0 >= 0)
> + close(c0);
> + if (p0 >= 0)
> + close(p0);
> + if (c1 >= 0)
> + close(c1);
> + if (p1 >= 0)
> + close(p1);
> + test_sockmap_pass_prog__destroy(skel);
> +}
> +
> void test_sockmap_basic(void)
> {
> if (test__start_subtest("sockmap create_update_free"))
> @@ -1108,4 +1363,14 @@ void test_sockmap_basic(void)
> test_sockmap_skb_verdict_vsock_poll();
> if (test__start_subtest("sockmap vsock unconnected"))
> test_sockmap_vsock_unconnected();
> + if (test__start_subtest("sockmap with zc"))
> + test_sockmap_zc();
> + if (test__start_subtest("sockmap recover"))
> + test_sockmap_copied_seq(false);
> + if (test__start_subtest("sockmap recover with strp"))
> + test_sockmap_copied_seq(true);
> + if (test__start_subtest("sockmap tcp multi channels"))
> + test_sockmap_multi_channels(SOCK_STREAM);
> + if (test__start_subtest("sockmap udp multi channels"))
> + test_sockmap_multi_channels(SOCK_DGRAM);
> }
> diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_pass_prog.c b/tools/testing/selftests/bpf/progs/test_sockmap_pass_prog.c
> index 69aacc96db36..ef9edca184ea 100644
> --- a/tools/testing/selftests/bpf/progs/test_sockmap_pass_prog.c
> +++ b/tools/testing/selftests/bpf/progs/test_sockmap_pass_prog.c
> @@ -44,4 +44,18 @@ int prog_skb_parser(struct __sk_buff *skb)
> return SK_PASS;
> }
>
> +SEC("sk_skb/stream_verdict")
> +int prog_skb_verdict_ingress(struct __sk_buff *skb)
> +{
> + int one = 1;
> +
> + return bpf_sk_redirect_map(skb, &sock_map_rx, one, BPF_F_INGRESS);
> +}
> +
> +SEC("sk_skb/stream_parser")
> +int prog_skb_verdict_ingress_strp(struct __sk_buff *skb)
> +{
> + return skb->len;
> +}
> +
> char _license[] SEC("license") = "GPL";
> --
> 2.43.0
>