Message-ID: <aGLYO7XRafb9ROQi@mini-arch>
Date: Mon, 30 Jun 2025 11:32:27 -0700
From: Stanislav Fomichev <stfomichev@...il.com>
To: Jordan Rife <jordan@...fe.io>
Cc: netdev@...r.kernel.org, bpf@...r.kernel.org,
Daniel Borkmann <daniel@...earbox.net>,
Martin KaFai Lau <martin.lau@...ux.dev>,
Willem de Bruijn <willemdebruijn.kernel@...il.com>,
Kuniyuki Iwashima <kuniyu@...gle.com>,
Alexei Starovoitov <alexei.starovoitov@...il.com>
Subject: Re: [PATCH v3 bpf-next 05/12] bpf: tcp: Avoid socket skips and
repeats during iteration
On 06/30, Jordan Rife wrote:
> Replace the offset-based approach for tracking progress through a bucket
> in the TCP table with one based on socket cookies. Remember the cookies
> of unprocessed sockets from the last batch and use this list to
> pick up where we left off or, in the case that the next socket
> disappears between reads, find the first socket after that point that
> still exists in the bucket and resume from there.
>
> This approach guarantees that all sockets that existed when iteration
> began and continue to exist throughout will be visited exactly once.
> Sockets that are added to the table during iteration may or may not be
> seen, but if they are, they will be seen exactly once.
>
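Just to double-check my understanding of the resume step, here's the
mental model I have as a tiny userspace toy (everything below is made
up for illustration: the list layout, the names, and main(); locking,
refcounting, and the nulls-list details are all elided):

	#include <stdio.h>
	#include <stdint.h>

	/* Toy "socket": just a cookie in a singly linked bucket list. */
	struct tsk {
		uint64_t cookie;
		struct tsk *next;
	};

	/* Mirrors bpf_iter_tcp_resume_bucket(): walk the remembered
	 * cookies in batch order and resume from the first one whose
	 * socket still exists in the bucket.
	 */
	static struct tsk *resume_bucket(struct tsk *first,
					 const uint64_t *cookies, int n)
	{
		for (int i = 0; i < n; i++)
			for (struct tsk *sk = first; sk; sk = sk->next)
				if (sk->cookie == cookies[i])
					return sk;
		return NULL; /* every remembered socket is gone */
	}

	int main(void)
	{
		/* Bucket holds cookies 1 -> 3; the socket with cookie 2
		 * was closed between reads, so resume should land on 3.
		 */
		struct tsk c = { 3, NULL }, a = { 1, &c };
		uint64_t unseen[] = { 2, 3 }; /* leftovers from last batch */
		struct tsk *sk = resume_bucket(&a, unseen, 2);

		printf("resuming at cookie %llu\n",
		       sk ? (unsigned long long)sk->cookie : 0ULL);
		return 0;
	}

If that's right, a socket vanishing mid-iteration costs at most one
extra scan of its bucket, which seems fine for typical bucket sizes.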
> Signed-off-by: Jordan Rife <jordan@...fe.io>
> ---
> net/ipv4/tcp_ipv4.c | 147 ++++++++++++++++++++++++++++++++++----------
> 1 file changed, 115 insertions(+), 32 deletions(-)
>
> diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
> index bb51d62066a4..510053836a3c 100644
> --- a/net/ipv4/tcp_ipv4.c
> +++ b/net/ipv4/tcp_ipv4.c
> @@ -58,6 +58,7 @@
> #include <linux/times.h>
> #include <linux/slab.h>
> #include <linux/sched.h>
> +#include <linux/sock_diag.h>
>
> #include <net/net_namespace.h>
> #include <net/icmp.h>
> @@ -3016,6 +3017,7 @@ static int tcp4_seq_show(struct seq_file *seq, void *v)
> #ifdef CONFIG_BPF_SYSCALL
> union bpf_tcp_iter_batch_item {
> struct sock *sk;
> + __u64 cookie;
> };
>
> struct bpf_tcp_iter_state {
> @@ -3046,10 +3048,19 @@ static int tcp_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta,
>
> static void bpf_iter_tcp_put_batch(struct bpf_tcp_iter_state *iter)
> {
> + union bpf_tcp_iter_batch_item *item;
> unsigned int cur_sk = iter->cur_sk;
> + __u64 cookie;
>
> - while (cur_sk < iter->end_sk)
> - sock_gen_put(iter->batch[cur_sk++].sk);
> + /* Remember the cookies of the sockets we haven't seen yet, so we can
> + * pick up where we left off next time around.
> + */
> + while (cur_sk < iter->end_sk) {
> + item = &iter->batch[cur_sk++];
> + cookie = sock_gen_cookie(item->sk);
> + sock_gen_put(item->sk);
> + item->cookie = cookie;
> + }
> }
>
> static int bpf_iter_tcp_realloc_batch(struct bpf_tcp_iter_state *iter,
> @@ -3070,6 +3081,106 @@ static int bpf_iter_tcp_realloc_batch(struct bpf_tcp_iter_state *iter,
> return 0;
> }
>
> +static struct sock *bpf_iter_tcp_resume_bucket(struct sock *first_sk,
> + union bpf_tcp_iter_batch_item *cookies,
> + int n_cookies)
> +{
> + struct hlist_nulls_node *node;
> + struct sock *sk;
> + int i;
> +
> + for (i = 0; i < n_cookies; i++) {
> + sk = first_sk;
> + sk_nulls_for_each_from(sk, node) {
> + if (cookies[i].cookie == atomic64_read(&sk->sk_cookie))
> + return sk;
> + }
nit: let's drop {} around sk_nulls_for_each_from?
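i.e. something like:

	sk_nulls_for_each_from(sk, node)
		if (cookies[i].cookie == atomic64_read(&sk->sk_cookie))
			return sk;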
> + }
> +
> + return NULL;
> +}
> +
> +static struct sock *bpf_iter_tcp_resume_listening(struct seq_file *seq)
> +{
> + struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
> + struct bpf_tcp_iter_state *iter = seq->private;
> + struct tcp_iter_state *st = &iter->state;
> + unsigned int find_cookie = iter->cur_sk;
> + unsigned int end_cookie = iter->end_sk;
> + int resume_bucket = st->bucket;
> + struct sock *sk;
> +
> + if (end_cookie && find_cookie == end_cookie)
> + ++st->bucket;
> +
> + sk = listening_get_first(seq);
> + iter->cur_sk = 0;
> + iter->end_sk = 0;
> +
> + if (sk && st->bucket == resume_bucket && end_cookie) {
> + sk = bpf_iter_tcp_resume_bucket(sk, &iter->batch[find_cookie],
> + end_cookie - find_cookie);
> + if (!sk) {
> + spin_unlock(&hinfo->lhash2[st->bucket].lock);
> + ++st->bucket;
> + sk = listening_get_first(seq);
> + }
> + }
> +
> + return sk;
> +}
> +
> +static struct sock *bpf_iter_tcp_resume_established(struct seq_file *seq)
> +{
> + struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
> + struct bpf_tcp_iter_state *iter = seq->private;
> + struct tcp_iter_state *st = &iter->state;
> + unsigned int find_cookie = iter->cur_sk;
> + unsigned int end_cookie = iter->end_sk;
> + int resume_bucket = st->bucket;
> + struct sock *sk;
> +
> + if (end_cookie && find_cookie == end_cookie)
> + ++st->bucket;
> +
> + sk = established_get_first(seq);
> + iter->cur_sk = 0;
> + iter->end_sk = 0;
> +
> + if (sk && st->bucket == resume_bucket && end_cookie) {
> + sk = bpf_iter_tcp_resume_bucket(sk, &iter->batch[find_cookie],
> + end_cookie - find_cookie);
> + if (!sk) {
> + spin_unlock_bh(inet_ehash_lockp(hinfo, st->bucket));
> + ++st->bucket;
> + sk = established_get_first(seq);
> + }
> + }
> +
> + return sk;
> +}
> +
> +static struct sock *bpf_iter_tcp_resume(struct seq_file *seq)
> +{
> + struct bpf_tcp_iter_state *iter = seq->private;
> + struct tcp_iter_state *st = &iter->state;
> + struct sock *sk = NULL;
> +
> + switch (st->state) {
> + case TCP_SEQ_STATE_LISTENING:
> + sk = bpf_iter_tcp_resume_listening(seq);
> + if (sk)
> + break;
> + st->bucket = 0;
> + st->state = TCP_SEQ_STATE_ESTABLISHED;
> + fallthrough;
> + case TCP_SEQ_STATE_ESTABLISHED:
> + sk = bpf_iter_tcp_resume_established(seq);
nit: add break here for consistency?
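i.e.:

		sk = bpf_iter_tcp_resume_established(seq);
		break;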