Message-ID: <20250423235115.1885611-3-jordan@jrife.io>
Date: Wed, 23 Apr 2025 16:51:10 -0700
From: Jordan Rife <jordan@...fe.io>
To: netdev@...r.kernel.org,
bpf@...r.kernel.org
Cc: Jordan Rife <jordan@...fe.io>,
Aditi Ghag <aditi.ghag@...valent.com>,
Daniel Borkmann <daniel@...earbox.net>,
Martin KaFai Lau <martin.lau@...ux.dev>,
Willem de Bruijn <willemdebruijn.kernel@...il.com>,
Kuniyuki Iwashima <kuniyu@...zon.com>
Subject: [PATCH v5 bpf-next 2/6] bpf: udp: Make sure iter->batch always contains a full bucket snapshot

Require that iter->batch always contains a full bucket snapshot. This
invariant is important to avoid skipping or repeating sockets during
iteration when combined with the next few patches. Before, there were
two cases where a call to bpf_iter_udp_batch may only capture part of a
bucket:

1. When bpf_iter_udp_realloc_batch() returns -ENOMEM [1].
2. When more sockets are added to the bucket while calling
bpf_iter_udp_realloc_batch(), making the updated batch size
insufficient [2].

In cases where the batch size only covers part of a bucket, it is
possible to forget which sockets were already visited, especially if we
have to process a bucket in more than two batches. This forces us to
choose between repeating or skipping sockets, so don't allow this:

1. Stop iteration and propagate -ENOMEM up to userspace if reallocation
fails instead of continuing with a partial batch.
2. Retry bpf_iter_udp_realloc_batch() two times without holding onto the
bucket lock (hslot2->lock) so that we can use GFP_USER and maximize
the chances that memory allocation succeeds. On the third attempt, if
we still haven't been able to capture a full bucket snapshot, hold
onto the bucket lock through bpf_iter_udp_realloc_batch() to
guarantee that the bucket size doesn't change while we allocate more
memory and fill the batch. On the last pass, we must use GFP_ATOMIC
since we hold onto the spin lock.
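
Roughly, the batching loop then behaves like the sketch below. This is
only a simplified illustration of the retry policy: first_in_bucket()
and next_in_bucket() are placeholder helpers rather than real functions,
the "..." stands for the loop that appends sockets to iter->batch, and
the bucket walk, resume-offset handling, and empty-bucket case are all
elided. The actual code is in the diff further down.

  resizes = 0;
  again:
      spin_lock_bh(&hslot2->lock);
      sk = first_in_bucket(hslot2);                    /* placeholder */
  fill_batch:
      /* Append sockets starting at sk until the batch is full;
       * batch_sks counts every matching socket in the bucket,
       * whether or not it fit in the batch.
       */
      ...
      if (resizes == MAX_REALLOC_ATTEMPTS && iter->end_sk &&
          iter->end_sk != batch_sks) {
          /* Final attempt: keep holding the bucket lock so the bucket
           * cannot change, and grow the batch with GFP_ATOMIC.
           */
          err = bpf_iter_udp_realloc_batch(iter, batch_sks, GFP_ATOMIC);
          if (err) {
              spin_unlock_bh(&hslot2->lock);
              return ERR_PTR(err);
          }
          sk = next_in_bucket(iter->batch[iter->end_sk - 1]);
          batch_sks = iter->end_sk;  /* don't recount batched sockets */
          resizes++;
          goto fill_batch;
      }
      spin_unlock_bh(&hslot2->lock);
      if (iter->end_sk == batch_sks)
          return iter->batch[0];     /* full bucket snapshot captured */
      /* Batch was too small: grow it without the lock and retry. */
      err = bpf_iter_udp_realloc_batch(iter, batch_sks * 3 / 2, GFP_USER);
      if (err)
          return ERR_PTR(err);
      resizes++;
      goto again;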

Introduce the udp_portaddr_for_each_entry_from macro and use it instead
of udp_portaddr_for_each_entry to make it possible to continue iteration
from an arbitrary socket. This is required both for the GFP_ATOMIC case
in this patch, where we fill the rest of a batch starting from the
middle of a bucket, and for a later patch that skips sockets that were
already seen.
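
For illustration, here is the difference between the two macros. This
snippet is not from the patch; resume_sk is just a placeholder for
whatever socket the caller wants to continue from:

  /* Walks the whole bucket, starting from the head. */
  udp_portaddr_for_each_entry(sk, &hslot2->head) {
      ...
  }

  /* Continues from (and including) an arbitrary socket the caller
   * already holds a pointer to, e.g. the first socket that did not fit
   * into the previous batch.
   */
  sk = resume_sk;
  udp_portaddr_for_each_entry_from(sk) {
      ...
  }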

Testing all scenarios directly is a bit difficult, but I did some manual
testing to exercise the code paths where GFP_ATOMIC is used and where
ERR_PTR(err) is returned. I used the realloc test case included
later in this series to trigger a scenario where a realloc happens
inside bpf_iter_udp_batch and made a small code tweak to force the first
two realloc attempts to allocate a too-small buffer, thus requiring
another attempt until the GFP_ATOMIC case is hit. Some printks showed
three reallocs with the tests passing:

Apr 16 00:08:32 crow kernel: go again (mem_flags=GFP_USER)
Apr 16 00:08:32 crow kernel: go again (mem_flags=GFP_USER)
Apr 16 00:08:32 crow kernel: go again (mem_flags=GFP_ATOMIC)

With this setup, I also forced bpf_iter_udp_realloc_batch to return
-ENOMEM on one of the retries to ensure that iteration ends and that the
read() in userspace fails, and separately incremented batch_sks to hit
the WARN_ON_ONCE condition.
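
For anyone who wants to reproduce the GFP_ATOMIC path, a testing-only
tweak along the following lines should work, assuming the bucket used by
the test holds enough sockets. This is a hypothetical sketch, not
necessarily the tweak used above; it simply keeps the GFP_USER reallocs
from growing the batch enough, so the GFP_ATOMIC fallback eventually
runs:

  /* Hypothetical, testing-only hack inside bpf_iter_udp_realloc_batch():
   * grow the batch by only one slot per non-atomic realloc so that,
   * with a large enough bucket, the GFP_ATOMIC fallback is exercised.
   */
  if (flags != GFP_ATOMIC)
      new_batch_sz = iter->max_sk + 1;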

[1]: https://lore.kernel.org/bpf/CABi4-ogUtMrH8-NVB6W8Xg_F_KDLq=yy-yu-tKr2udXE2Mu1Lg@mail.gmail.com/
[2]: https://lore.kernel.org/bpf/7ed28273-a716-4638-912d-f86f965e54bb@linux.dev/

Signed-off-by: Jordan Rife <jordan@...fe.io>
Suggested-by: Martin KaFai Lau <martin.lau@...ux.dev>
---
include/linux/udp.h | 3 +++
net/ipv4/udp.c | 62 ++++++++++++++++++++++++++++++++++-----------
2 files changed, 50 insertions(+), 15 deletions(-)

diff --git a/include/linux/udp.h b/include/linux/udp.h
index 0807e21cfec9..a69da9c4c1c5 100644
--- a/include/linux/udp.h
+++ b/include/linux/udp.h
@@ -209,6 +209,9 @@ static inline void udp_allow_gso(struct sock *sk)
#define udp_portaddr_for_each_entry(__sk, list) \
hlist_for_each_entry(__sk, list, __sk_common.skc_portaddr_node)
+#define udp_portaddr_for_each_entry_from(__sk) \
+ hlist_for_each_entry_from(__sk, __sk_common.skc_portaddr_node)
+
#define udp_portaddr_for_each_entry_rcu(__sk, list) \
hlist_for_each_entry_rcu(__sk, list, __sk_common.skc_portaddr_node)
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 6a3c351aa06e..0960e42f2d2c 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -3383,6 +3383,7 @@ int udp4_seq_show(struct seq_file *seq, void *v)
}
#ifdef CONFIG_BPF_SYSCALL
+#define MAX_REALLOC_ATTEMPTS 2
struct bpf_iter__udp {
__bpf_md_ptr(struct bpf_iter_meta *, meta);
__bpf_md_ptr(struct udp_sock *, udp_sk);
@@ -3410,8 +3411,9 @@ static struct sock *bpf_iter_udp_batch(struct seq_file *seq)
int resume_bucket, resume_offset;
struct udp_table *udptable;
unsigned int batch_sks = 0;
- bool resized = false;
struct sock *sk;
+ int resizes = 0;
+ int err = 0;
resume_bucket = state->bucket;
resume_offset = iter->offset;
@@ -3439,11 +3441,14 @@ static struct sock *bpf_iter_udp_batch(struct seq_file *seq)
struct udp_hslot *hslot2 = &udptable->hash2[state->bucket].hslot;
if (hlist_empty(&hslot2->head))
- continue;
+ goto next_bucket;
iter->offset = 0;
spin_lock_bh(&hslot2->lock);
- udp_portaddr_for_each_entry(sk, &hslot2->head) {
+ sk = hlist_entry_safe(hslot2->head.first, struct sock,
+ __sk_common.skc_portaddr_node);
+fill_batch:
+ udp_portaddr_for_each_entry_from(sk) {
if (seq_sk_match(seq, sk)) {
/* Resume from the last iterated socket at the
* offset in the bucket before iterator was stopped.
@@ -3460,10 +3465,34 @@ static struct sock *bpf_iter_udp_batch(struct seq_file *seq)
batch_sks++;
}
}
+
+ if (unlikely(resizes == MAX_REALLOC_ATTEMPTS) && iter->end_sk &&
+ iter->end_sk != batch_sks) {
+ /* This is the last realloc attempt, so keep holding the
+ * lock to ensure that the bucket does not change.
+ */
+ err = bpf_iter_udp_realloc_batch(iter, batch_sks,
+ GFP_ATOMIC);
+ if (err) {
+ spin_unlock_bh(&hslot2->lock);
+ return ERR_PTR(err);
+ }
+
+ sk = iter->batch[iter->end_sk - 1];
+ sk = hlist_entry_safe(sk->__sk_common.skc_portaddr_node.next,
+ struct sock,
+ __sk_common.skc_portaddr_node);
+ batch_sks = iter->end_sk;
+ resizes++;
+ goto fill_batch;
+ }
+
spin_unlock_bh(&hslot2->lock);
if (iter->end_sk)
break;
+next_bucket:
+ resizes = 0;
}
/* All done: no batch made. */
@@ -3475,18 +3504,18 @@ static struct sock *bpf_iter_udp_batch(struct seq_file *seq)
* socket to be iterated from the batch.
*/
iter->st_bucket_done = true;
- goto done;
+ return iter->batch[0];
}
- if (!resized && !bpf_iter_udp_realloc_batch(iter, batch_sks * 3 / 2,
- GFP_USER)) {
- resized = true;
- /* After allocating a larger batch, retry one more time to grab
- * the whole bucket.
- */
- goto again;
- }
-done:
- return iter->batch[0];
+
+ if (WARN_ON_ONCE(resizes >= MAX_REALLOC_ATTEMPTS))
+ return iter->batch[0];
+
+ err = bpf_iter_udp_realloc_batch(iter, batch_sks * 3 / 2, GFP_USER);
+ if (err)
+ return ERR_PTR(err);
+
+ resizes++;
+ goto again;
}
static void *bpf_iter_udp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
@@ -3841,7 +3870,10 @@ static int bpf_iter_udp_realloc_batch(struct bpf_udp_iter_state *iter,
if (!new_batch)
return -ENOMEM;
- bpf_iter_udp_put_batch(iter);
+ if (flags != GFP_ATOMIC)
+ bpf_iter_udp_put_batch(iter);
+
+ memcpy(new_batch, iter->batch, sizeof(*iter->batch) * iter->end_sk);
kvfree(iter->batch);
iter->batch = new_batch;
iter->max_sk = new_batch_sz;
--
2.48.1