Message-ID: <20250428180036.369192-6-jordan@jrife.io>
Date: Mon, 28 Apr 2025 11:00:29 -0700
From: Jordan Rife <jordan@...fe.io>
To: netdev@...r.kernel.org,
bpf@...r.kernel.org
Cc: Jordan Rife <jordan@...fe.io>,
Aditi Ghag <aditi.ghag@...valent.com>,
Daniel Borkmann <daniel@...earbox.net>,
Martin KaFai Lau <martin.lau@...ux.dev>,
Willem de Bruijn <willemdebruijn.kernel@...il.com>,
Kuniyuki Iwashima <kuniyu@...zon.com>,
Alexei Starovoitov <alexei.starovoitov@...il.com>
Subject: [PATCH v6 bpf-next 5/7] bpf: udp: Avoid socket skips and repeats during iteration
Replace the offset-based approach for tracking progress through a bucket
in the UDP table with one based on socket cookies. Remember the cookies
of the unprocessed sockets from the last batch and use this list to pick
up where we left off. If the socket we expected to resume from has
disappeared between reads, resume instead from the first socket after
that point that still exists in the bucket.
This approach guarantees that all sockets that existed when iteration
began and continue to exist throughout will be visited exactly once.
Sockets that are added to the table during iteration may or may not be
seen, but if they are, they will be seen exactly once.
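For illustration only, and not part of the patch: a minimal userspace
sketch of the resume-by-cookie idea. The names here (struct node,
resume_from_cookies) are made up; the kernel code below walks struct
sock entries in a hash slot and compares sk->sk_cookie values instead.

/* Toy model: nodes stand in for sockets, the uint64_t cookie stands in
 * for sk->sk_cookie, and the list stands in for one hash bucket.
 */
#include <stdint.h>
#include <stdio.h>

struct node {
	uint64_t cookie;
	struct node *next;
};

/* Try each remembered cookie in batch order and resume from the first
 * node that still exists in the list; NULL means none survived.
 */
static struct node *resume_from_cookies(struct node *first,
					const uint64_t *cookies, int n)
{
	struct node *pos = NULL;
	int i;

	for (i = 0; i < n; i++)
		for (pos = first; pos; pos = pos->next)
			if (pos->cookie == cookies[i])
				return pos;
	return pos;
}

int main(void)
{
	struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	uint64_t unseen[] = { 2, 3 };	/* batched but not yet shown */
	struct node *resume;

	a.next = &c;	/* node with cookie 2 vanished between reads */
	resume = resume_from_cookies(&a, unseen, 2);
	if (resume)
		printf("resume at cookie %llu\n",
		       (unsigned long long)resume->cookie);
	else
		printf("no unseen node survived; move to the next bucket\n");
	return 0;
}

Trying the remembered cookies in batch order means iteration usually
resumes exactly where it stopped, and only falls forward to a later
socket when the expected one has disappeared.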
Initialize iter->state.bucket to -1 so that the resume_bucket case is
not hit on the first call to bpf_iter_udp_batch. We are not actually
resuming from bucket zero when we create the first batch, and starting
at -1 avoids adding special-case logic just for that bucket.
Signed-off-by: Jordan Rife <jordan@...fe.io>
---
net/ipv4/udp.c | 63 ++++++++++++++++++++++++++++++++++++--------------
1 file changed, 46 insertions(+), 17 deletions(-)
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 866ad29e15bb..4e2aa9b9e52d 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -93,6 +93,7 @@
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
+#include <linux/sock_diag.h>
#include <net/tcp_states.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
@@ -3392,6 +3393,7 @@ struct bpf_iter__udp {
union bpf_udp_iter_batch_item {
struct sock *sock;
+ __u64 cookie;
};
struct bpf_udp_iter_state {
@@ -3399,26 +3401,42 @@ struct bpf_udp_iter_state {
unsigned int cur_sk;
unsigned int end_sk;
unsigned int max_sk;
- int offset;
union bpf_udp_iter_batch_item *batch;
};
static int bpf_iter_udp_realloc_batch(struct bpf_udp_iter_state *iter,
unsigned int new_batch_sz, int flags);
+static struct sock *bpf_iter_udp_resume(struct sock *first_sk,
+ union bpf_udp_iter_batch_item *cookies,
+ int n_cookies)
+{
+ struct sock *sk = NULL;
+ int i = 0;
+
+ for (; i < n_cookies; i++) {
+ sk = first_sk;
+ udp_portaddr_for_each_entry_from(sk)
+ if (cookies[i].cookie == atomic64_read(&sk->sk_cookie))
+ goto done;
+ }
+done:
+ return sk;
+}
+
static struct sock *bpf_iter_udp_batch(struct seq_file *seq)
{
struct bpf_udp_iter_state *iter = seq->private;
struct udp_iter_state *state = &iter->state;
+ unsigned int find_cookie, end_cookie = 0;
struct net *net = seq_file_net(seq);
- int resume_bucket, resume_offset;
struct udp_table *udptable;
unsigned int batch_sks = 0;
+ int resume_bucket;
int resizes = 0;
struct sock *sk;
int err = 0;
resume_bucket = state->bucket;
- resume_offset = iter->offset;
/* The current batch is done, so advance the bucket. */
if (iter->cur_sk == iter->end_sk)
@@ -3434,6 +3452,8 @@ static struct sock *bpf_iter_udp_batch(struct seq_file *seq)
* before releasing the bucket lock. This allows BPF programs that are
* called in seq_show to acquire the bucket lock if needed.
*/
+ find_cookie = iter->cur_sk;
+ end_cookie = iter->end_sk;
iter->cur_sk = 0;
iter->end_sk = 0;
batch_sks = 0;
@@ -3444,21 +3464,21 @@ static struct sock *bpf_iter_udp_batch(struct seq_file *seq)
if (hlist_empty(&hslot2->head))
goto next_bucket;
- iter->offset = 0;
spin_lock_bh(&hslot2->lock);
sk = hlist_entry_safe(hslot2->head.first, struct sock,
__sk_common.skc_portaddr_node);
+ /* Resume from the first (in iteration order) unseen socket from
+ * the last batch that still exists in resume_bucket. Most of
+ * the time this will just be where the last iteration left off
+ * in resume_bucket unless that socket disappeared between
+ * reads.
+ */
+ if (state->bucket == resume_bucket)
+ sk = bpf_iter_udp_resume(sk, &iter->batch[find_cookie],
+ end_cookie - find_cookie);
fill_batch:
udp_portaddr_for_each_entry_from(sk) {
if (seq_sk_match(seq, sk)) {
- /* Resume from the last iterated socket at the
- * offset in the bucket before iterator was stopped.
- */
- if (state->bucket == resume_bucket &&
- iter->offset < resume_offset) {
- ++iter->offset;
- continue;
- }
if (iter->end_sk < iter->max_sk) {
sock_hold(sk);
iter->batch[iter->end_sk++].sock = sk;
@@ -3525,10 +3545,8 @@ static void *bpf_iter_udp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
/* Whenever seq_next() is called, the iter->cur_sk is
* done with seq_show(), so unref the iter->cur_sk.
*/
- if (iter->cur_sk < iter->end_sk) {
+ if (iter->cur_sk < iter->end_sk)
sock_put(iter->batch[iter->cur_sk++].sock);
- ++iter->offset;
- }
/* After updating iter->cur_sk, check if there are more sockets
* available in the current bucket batch.
@@ -3598,10 +3616,19 @@ static int bpf_iter_udp_seq_show(struct seq_file *seq, void *v)
static void bpf_iter_udp_put_batch(struct bpf_udp_iter_state *iter)
{
+ union bpf_udp_iter_batch_item *item;
unsigned int cur_sk = iter->cur_sk;
+ __u64 cookie;
- while (cur_sk < iter->end_sk)
- sock_put(iter->batch[cur_sk++].sock);
+ /* Remember the cookies of the sockets we haven't seen yet, so we can
+ * pick up where we left off next time around.
+ */
+ while (cur_sk < iter->end_sk) {
+ item = &iter->batch[cur_sk++];
+ cookie = sock_gen_cookie(item->sock);
+ sock_put(item->sock);
+ item->cookie = cookie;
+ }
}
static void bpf_iter_udp_seq_stop(struct seq_file *seq, void *v)
@@ -3895,6 +3922,8 @@ static int bpf_iter_init_udp(void *priv_data, struct bpf_iter_aux_info *aux)
if (ret)
bpf_iter_fini_seq_net(priv_data);
+ iter->state.bucket = -1;
+
return ret;
}
--
2.43.0