Message-ID: <20250709230333.926222-5-jordan@jrife.io>
Date: Wed, 9 Jul 2025 16:03:24 -0700
From: Jordan Rife <jordan@...fe.io>
To: netdev@...r.kernel.org,
bpf@...r.kernel.org
Cc: Jordan Rife <jordan@...fe.io>,
Daniel Borkmann <daniel@...earbox.net>,
Martin KaFai Lau <martin.lau@...ux.dev>,
Willem de Bruijn <willemdebruijn.kernel@...il.com>,
Kuniyuki Iwashima <kuniyu@...gle.com>,
Alexei Starovoitov <alexei.starovoitov@...il.com>,
Stanislav Fomichev <stfomichev@...il.com>
Subject: [PATCH v5 bpf-next 04/12] bpf: tcp: Use bpf_tcp_iter_batch_item for bpf_tcp_iter_state batch items

Prepare for the next patch that tracks cookies between iterations by
converting struct sock **batch to union bpf_tcp_iter_batch_item *batch
inside struct bpf_tcp_iter_state.
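
After this patch the union has a single member, so a batch item is
layout-compatible with the old struct sock * element and
sizeof(*batch) is unchanged. For illustration only, the follow-up is
expected to extend the union along these lines (the cookie member
below is an assumption about the next patch, not part of this change):

  union bpf_tcp_iter_batch_item {
          struct sock *sk;  /* held reference while the batch is live */
          __u64 cookie;     /* assumed next-patch member: resume point,
                             * e.g. saved with sock_gen_cookie() before
                             * the reference is dropped
                             */
  };

Because both members would occupy the same 8-byte slot on 64-bit
kernels, the batch array's allocation and indexing stay as they are
today.
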
Signed-off-by: Jordan Rife <jordan@...fe.io>
Reviewed-by: Kuniyuki Iwashima <kuniyu@...zon.com>
---
 net/ipv4/tcp_ipv4.c | 24 ++++++++++++++----------
 1 file changed, 14 insertions(+), 10 deletions(-)

diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 50ef605dfa01..d2128a2b33bc 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -3014,12 +3014,16 @@ static int tcp4_seq_show(struct seq_file *seq, void *v)
 }
 
 #ifdef CONFIG_BPF_SYSCALL
+union bpf_tcp_iter_batch_item {
+	struct sock *sk;
+};
+
 struct bpf_tcp_iter_state {
 	struct tcp_iter_state state;
 	unsigned int cur_sk;
 	unsigned int end_sk;
 	unsigned int max_sk;
-	struct sock **batch;
+	union bpf_tcp_iter_batch_item *batch;
 };
 
 struct bpf_iter__tcp {
@@ -3045,13 +3049,13 @@ static void bpf_iter_tcp_put_batch(struct bpf_tcp_iter_state *iter)
 	unsigned int cur_sk = iter->cur_sk;
 
 	while (cur_sk < iter->end_sk)
-		sock_gen_put(iter->batch[cur_sk++]);
+		sock_gen_put(iter->batch[cur_sk++].sk);
 }
 
 static int bpf_iter_tcp_realloc_batch(struct bpf_tcp_iter_state *iter,
				       unsigned int new_batch_sz, gfp_t flags)
 {
-	struct sock **new_batch;
+	union bpf_tcp_iter_batch_item *new_batch;
 
 	new_batch = kvmalloc(sizeof(*new_batch) * new_batch_sz,
			      flags | __GFP_NOWARN);
@@ -3075,7 +3079,7 @@ static unsigned int bpf_iter_tcp_listening_batch(struct seq_file *seq,
 	struct sock *sk;
 
 	sock_hold(*start_sk);
-	iter->batch[iter->end_sk++] = *start_sk;
+	iter->batch[iter->end_sk++].sk = *start_sk;
 
 	sk = sk_nulls_next(*start_sk);
 	*start_sk = NULL;
@@ -3083,7 +3087,7 @@ static unsigned int bpf_iter_tcp_listening_batch(struct seq_file *seq,
 		if (seq_sk_match(seq, sk)) {
 			if (iter->end_sk < iter->max_sk) {
 				sock_hold(sk);
-				iter->batch[iter->end_sk++] = sk;
+				iter->batch[iter->end_sk++].sk = sk;
 			} else if (!*start_sk) {
 				/* Remember where we left off. */
 				*start_sk = sk;
@@ -3104,7 +3108,7 @@ static unsigned int bpf_iter_tcp_established_batch(struct seq_file *seq,
 	struct sock *sk;
 
 	sock_hold(*start_sk);
-	iter->batch[iter->end_sk++] = *start_sk;
+	iter->batch[iter->end_sk++].sk = *start_sk;
 
 	sk = sk_nulls_next(*start_sk);
 	*start_sk = NULL;
@@ -3112,7 +3116,7 @@ static unsigned int bpf_iter_tcp_established_batch(struct seq_file *seq,
 		if (seq_sk_match(seq, sk)) {
 			if (iter->end_sk < iter->max_sk) {
 				sock_hold(sk);
-				iter->batch[iter->end_sk++] = sk;
+				iter->batch[iter->end_sk++].sk = sk;
 			} else if (!*start_sk) {
 				/* Remember where we left off. */
 				*start_sk = sk;
@@ -3216,7 +3220,7 @@ static struct sock *bpf_iter_tcp_batch(struct seq_file *seq)
 	WARN_ON_ONCE(iter->end_sk != expected);
 done:
 	bpf_iter_tcp_unlock_bucket(seq);
-	return iter->batch[0];
+	return iter->batch[0].sk;
 }
 
 static void *bpf_iter_tcp_seq_start(struct seq_file *seq, loff_t *pos)
@@ -3251,11 +3255,11 @@ static void *bpf_iter_tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 		 * st->bucket. See tcp_seek_last_pos().
 		 */
 		st->offset++;
-		sock_gen_put(iter->batch[iter->cur_sk++]);
+		sock_gen_put(iter->batch[iter->cur_sk++].sk);
 	}
 
 	if (iter->cur_sk < iter->end_sk)
-		sk = iter->batch[iter->cur_sk];
+		sk = iter->batch[iter->cur_sk].sk;
 	else
 		sk = bpf_iter_tcp_batch(seq);
 
--
2.43.0
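
A rough sketch, for orientation only, of how a saved cookie could be
used to re-find the resume point in a bucket on a later iteration;
find_cookie() is a hypothetical helper, not code from this series:

  /* Hypothetical: walk a bucket's nulls list and return the socket
   * whose cookie matches the one saved before the previous batch was
   * released, or NULL if that socket is gone. The caller is assumed
   * to hold the bucket lock.
   */
  static struct sock *find_cookie(struct hlist_nulls_head *head, u64 cookie)
  {
          struct hlist_nulls_node *node;
          struct sock *sk;

          sk_nulls_for_each(sk, node, head)
                  if (sock_gen_cookie(sk) == cookie)
                          return sk;

          return NULL;
  }

If no socket matches, iteration can presumably resume from the
bucket's next position instead of repeating or skipping sockets, which
is what tracking cookies between iterations is meant to enable.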