Message-ID: <871pjjux2u.fsf@cloudflare.com>
Date: Wed, 21 Jan 2026 10:36:09 +0100
From: Jakub Sitnicki <jakub@...udflare.com>
To: Jiayuan Chen <jiayuan.chen@...ux.dev>
Cc: bpf@...r.kernel.org, John Fastabend <john.fastabend@...il.com>, "David
S. Miller" <davem@...emloft.net>, Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>, Paolo Abeni <pabeni@...hat.com>,
Simon Horman <horms@...nel.org>, Neal Cardwell <ncardwell@...gle.com>,
Kuniyuki Iwashima <kuniyu@...gle.com>, David Ahern <dsahern@...nel.org>,
Andrii Nakryiko <andrii@...nel.org>, Eduard Zingerman
<eddyz87@...il.com>, Alexei Starovoitov <ast@...nel.org>, Daniel
Borkmann <daniel@...earbox.net>, Martin KaFai Lau <martin.lau@...ux.dev>,
Song Liu <song@...nel.org>, Yonghong Song <yonghong.song@...ux.dev>, KP
Singh <kpsingh@...nel.org>, Stanislav Fomichev <sdf@...ichev.me>, Hao
Luo <haoluo@...gle.com>, Jiri Olsa <jolsa@...nel.org>, Shuah Khan
<shuah@...nel.org>, Michal Luczaj <mhal@...x.co>, Cong Wang
<cong.wang@...edance.com>, netdev@...r.kernel.org,
linux-kernel@...r.kernel.org, linux-kselftest@...r.kernel.org
Subject: Re: [PATCH bpf-next v7 2/3] bpf, sockmap: Fix FIONREAD for sockmap
On Tue, Jan 20, 2026 at 04:00 PM +01, Jakub Sitnicki wrote:
> On Tue, Jan 13, 2026 at 10:50 AM +08, Jiayuan Chen wrote:
[...]
>> diff --git a/net/ipv4/udp_bpf.c b/net/ipv4/udp_bpf.c
>> index 0735d820e413..91233e37cd97 100644
>> --- a/net/ipv4/udp_bpf.c
>> +++ b/net/ipv4/udp_bpf.c
>> @@ -5,6 +5,7 @@
>> #include <net/sock.h>
>> #include <net/udp.h>
>> #include <net/inet_common.h>
>> +#include <asm/ioctls.h>
>>
>> #include "udp_impl.h"
>>
>> @@ -111,12 +112,26 @@ enum {
>> static DEFINE_SPINLOCK(udpv6_prot_lock);
>> static struct proto udp_bpf_prots[UDP_BPF_NUM_PROTS];
>>
>> +static int udp_bpf_ioctl(struct sock *sk, int cmd, int *karg)
>> +{
>> + if (cmd != SIOCINQ)
>> + return udp_ioctl(sk, cmd, karg);
>> +
>> + /* Since we don't hold a lock, sk_receive_queue may contain data.
>> + * BPF might only be processing this data at the moment. We only
>> + * care about the data in the ingress_msg here.
>> + */
>
> I think we should strive for a design where FIONREAD does not go down
> after you add your socket to sockmap, if there was no recvmsg call in
> between. To show what I mean, I added this test that's currently failing
> for udp:
>
> ---8<---
> diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
> index 1f1289f5a8c2..123c96fcaef0 100644
> --- a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
> +++ b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
> @@ -1229,6 +1229,66 @@ static void test_sockmap_copied_seq(bool strp)
> test_sockmap_pass_prog__destroy(skel);
> }
>
> +/* Test FIONREAD when data exists in sk_receive_queue before sockmap insertion */
> +static void test_sockmap_fionread_pre_insert(int sotype)
> +{
> + int map, err, sent, recvd, zero = 0, avail = 0;
> + struct test_sockmap_pass_prog *skel = NULL;
> + int c = -1, p = -1;
> + char buf[10] = "0123456789", rcv[11];
> + struct bpf_program *prog;
> +
> + skel = test_sockmap_pass_prog__open_and_load();
> + if (!ASSERT_OK_PTR(skel, "open_and_load"))
> + return;
> +
> + prog = skel->progs.prog_skb_verdict;
> + map = bpf_map__fd(skel->maps.sock_map_rx);
> +
> + err = bpf_prog_attach(bpf_program__fd(prog), map, BPF_SK_SKB_STREAM_VERDICT, 0);
> + if (!ASSERT_OK(err, "bpf_prog_attach verdict"))
> + goto end;
> +
> + err = create_pair(AF_INET, sotype, &c, &p);
> + if (!ASSERT_OK(err, "create_pair"))
> + goto end;
> +
> + /* Step 1: Send data BEFORE sockmap insertion - lands in sk_receive_queue */
> + sent = xsend(p, buf, sizeof(buf), 0);
> + if (!ASSERT_EQ(sent, sizeof(buf), "xsend pre-insert"))
> + goto end;
> +
> + /* Step 2: Verify FIONREAD reports data in sk_receive_queue */
> + err = poll_read(c, IO_TIMEOUT_SEC);
> + if (!ASSERT_OK(err, "poll_read pre-insert"))
> + goto end;
> + err = ioctl(c, FIONREAD, &avail);
> + ASSERT_OK(err, "ioctl(FIONREAD) pre-insert error");
> + ASSERT_EQ(avail, sizeof(buf), "ioctl(FIONREAD) pre-insert");
> +
> + /* Step 3: Insert socket into sockmap */
> + err = bpf_map_update_elem(map, &zero, &c, BPF_ANY);
> + if (!ASSERT_OK(err, "bpf_map_update_elem(c)"))
> + goto end;
> +
> + /* Step 4: FIONREAD should still report the data in sk_receive_queue */
> + err = ioctl(c, FIONREAD, &avail);
> + ASSERT_OK(err, "ioctl(FIONREAD) post-insert error");
> + ASSERT_EQ(avail, sizeof(buf), "ioctl(FIONREAD) post-insert");
> +
> + /* Verify we can still read the data */
> + recvd = recv_timeout(c, rcv, sizeof(rcv), MSG_DONTWAIT, IO_TIMEOUT_SEC);
> + ASSERT_EQ(recvd, sizeof(buf), "recv post-insert");
> + ASSERT_OK(memcmp(buf, rcv, recvd), "data mismatch");
> +
> +end:
> + if (c >= 0)
> + close(c);
> + if (p >= 0)
> + close(p);
> + test_sockmap_pass_prog__destroy(skel);
> +}
> +
> /* it is used to send data to via native stack and BPF redirecting */
> static void test_sockmap_multi_channels(int sotype)
> {
> @@ -1373,4 +1433,8 @@ void test_sockmap_basic(void)
> test_sockmap_multi_channels(SOCK_STREAM);
> if (test__start_subtest("sockmap udp multi channels"))
> test_sockmap_multi_channels(SOCK_DGRAM);
> + if (test__start_subtest("sockmap tcp fionread pre-insert"))
> + test_sockmap_fionread_pre_insert(SOCK_STREAM);
> + if (test__start_subtest("sockmap udp fionread pre-insert"))
> + test_sockmap_fionread_pre_insert(SOCK_DGRAM);
> }
> --->8---
>
>
>> + *karg = sk_msg_first_len(sk);
>> + return 0;
>> +}
>> +
I've been thinking about this some more and came to the conclusion that
this udp_bpf_ioctl implementation is actually what we want, while
tcp_bpf_ioctl *should not* be checking if the sk_receive_queue is
non-empty.
Why? Because the verdict prog might redirect or drop the skbs from
sk_receive_queue once it actually runs. Those messages might never
appear on the ingress_msg queue.
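
To make it concrete, this is the direction I have in mind for the TCP
side: mirror your udp_bpf_ioctl and reuse the sk_msg_first_len() helper
this patch introduces. Untested, just a sketch:

static int tcp_bpf_ioctl(struct sock *sk, int cmd, int *karg)
{
	if (cmd != SIOCINQ)
		return tcp_ioctl(sk, cmd, karg);

	/* Count only what the verdict prog has already queued on
	 * ingress_msg. Anything still sitting in sk_receive_queue
	 * may yet be redirected or dropped.
	 */
	*karg = sk_msg_first_len(sk);
	return 0;
}
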
What I think we should be doing, in the end, is kicking the
sk_receive_queue processing on bpf_map_update_elem, if there's data
ready.
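
The kick itself could be tiny. Something along these lines in
sock_map_update_common(), after the psock has been linked, might be
enough (untested, and the exact hook point and locking are debatable):

	/* Data may have queued up before the socket was added to the
	 * map. Nudge the freshly installed ->sk_data_ready so the
	 * verdict prog gets a chance to run over it.
	 */
	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
		sk->sk_data_ready(sk);
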
The API semantics I'm proposing are:
1. ioctl(FIONREAD) -> reports N bytes
2. bpf_map_update_elem(sk) -> socket inserted into sockmap
3. poll() for POLLIN -> wait for the socket to be ready to read
4. ioctl(FIONREAD) -> reports N bytes, unless the verdict prog
   redirected or dropped the data
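
In test-code form, picking up the c socket and map fd from the test
above, that ordering would look roughly like this (sketch only):

	int n_before = 0, n_after = 0, zero = 0;
	struct pollfd pfd = { .fd = c, .events = POLLIN };

	/* 1. FIONREAD before insertion reports the queued bytes */
	ioctl(c, FIONREAD, &n_before);

	/* 2. Insert the socket into the sockmap */
	bpf_map_update_elem(map, &zero, &c, BPF_ANY);

	/* 3. Wait for the socket to become readable again */
	poll(&pfd, 1, IO_TIMEOUT_SEC * 1000);

	/* 4. Same byte count, unless the verdict prog redirected
	 *    or dropped the data in the meantime.
	 */
	ioctl(c, FIONREAD, &n_after);
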
We don't have to add the queue kick on map update in this series.
If you decide to leave it for later, can I ask that you open an issue at
our GH project [1]?
I don't want it to fall through the cracks. And I sometimes have people
asking what they could help with in sockmap.
Thanks,
-jkbs
[1] https://github.com/sockmap-project/sockmap-project/issues