Message-ID: <20250520145059.1773738-4-jordan@jrife.io>
Date: Tue, 20 May 2025 07:50:50 -0700
From: Jordan Rife <jordan@...fe.io>
To: netdev@...r.kernel.org,
bpf@...r.kernel.org
Cc: Jordan Rife <jordan@...fe.io>,
Daniel Borkmann <daniel@...earbox.net>,
Martin KaFai Lau <martin.lau@...ux.dev>,
Willem de Bruijn <willemdebruijn.kernel@...il.com>,
Kuniyuki Iwashima <kuniyu@...zon.com>,
Alexei Starovoitov <alexei.starovoitov@...il.com>
Subject: [PATCH v1 bpf-next 03/10] bpf: tcp: Get rid of st_bucket_done

Get rid of the st_bucket_done field to simplify TCP iterator state and
logic. Before, st_bucket_done could be false if bpf_iter_tcp_batch
returned a partial batch; however, with the last patch ("bpf: tcp: Make
sure iter->batch always contains a full bucket snapshot"),
st_bucket_done == true is equivalent to iter->cur_sk == iter->end_sk.
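
As an illustration only (a minimal user-space sketch, not kernel code;
the toy_iter and toy_bucket_done names are made up for this example),
once every batch holds a full bucket snapshot, "bucket done" can be
derived from the batching cursor alone instead of being tracked in a
separate flag, mirroring the new check in the diff below:

	#include <stdbool.h>

	/* Toy stand-in for the batching cursor kept by the iterator. */
	struct toy_iter {
		unsigned int cur_sk;	/* next entry to emit */
		unsigned int end_sk;	/* number of sockets batched */
	};

	/* A non-empty batch that has been fully walked means the
	 * bucket has been processed; no extra boolean is needed.
	 */
	static bool toy_bucket_done(const struct toy_iter *iter)
	{
		return iter->end_sk && iter->cur_sk == iter->end_sk;
	}
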
Signed-off-by: Jordan Rife <jordan@...fe.io>
---
 net/ipv4/tcp_ipv4.c | 14 ++++++--------
 1 file changed, 6 insertions(+), 8 deletions(-)

diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 27022018194a..20730723a02c 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -3020,7 +3020,6 @@ struct bpf_tcp_iter_state {
 	unsigned int end_sk;
 	unsigned int max_sk;
 	struct sock **batch;
-	bool st_bucket_done;
 };
 
 struct bpf_iter__tcp {
@@ -3043,8 +3042,10 @@ static int tcp_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta,
 
 static void bpf_iter_tcp_put_batch(struct bpf_tcp_iter_state *iter)
 {
-	while (iter->cur_sk < iter->end_sk)
-		sock_gen_put(iter->batch[iter->cur_sk++]);
+	unsigned int cur_sk = iter->cur_sk;
+
+	while (cur_sk < iter->end_sk)
+		sock_gen_put(iter->batch[cur_sk++]);
 }
 
 static int bpf_iter_tcp_realloc_batch(struct bpf_tcp_iter_state *iter,
@@ -3154,7 +3155,7 @@ static struct sock *bpf_iter_tcp_batch(struct seq_file *seq)
 	 * one by one in the current bucket and eventually find out
 	 * it has to advance to the next bucket.
 	 */
-	if (iter->st_bucket_done) {
+	if (iter->end_sk && iter->cur_sk == iter->end_sk) {
 		st->offset = 0;
 		st->bucket++;
 		if (st->state == TCP_SEQ_STATE_LISTENING &&
@@ -3168,7 +3169,6 @@ static struct sock *bpf_iter_tcp_batch(struct seq_file *seq)
 	/* Get a new batch */
 	iter->cur_sk = 0;
 	iter->end_sk = 0;
-	iter->st_bucket_done = true;
 
 	prev_bucket = st->bucket;
 	prev_state = st->state;
@@ -3316,10 +3316,8 @@ static void bpf_iter_tcp_seq_stop(struct seq_file *seq, void *v)
 			(void)tcp_prog_seq_show(prog, &meta, v, 0);
 	}
 
-	if (iter->cur_sk < iter->end_sk) {
+	if (iter->cur_sk < iter->end_sk)
 		bpf_iter_tcp_put_batch(iter);
-		iter->st_bucket_done = false;
-	}
 }
 
 static const struct seq_operations bpf_iter_tcp_seq_ops = {
--
2.43.0