Message-Id: <20250107085559.3081563-6-houtao@huaweicloud.com>
Date: Tue, 7 Jan 2025 16:55:57 +0800
From: Hou Tao <houtao@...weicloud.com>
To: bpf@...r.kernel.org,
netdev@...r.kernel.org
Cc: Martin KaFai Lau <martin.lau@...ux.dev>,
Alexei Starovoitov <alexei.starovoitov@...il.com>,
Andrii Nakryiko <andrii@...nel.org>,
Eduard Zingerman <eddyz87@...il.com>,
Song Liu <song@...nel.org>,
Hao Luo <haoluo@...gle.com>,
Yonghong Song <yonghong.song@...ux.dev>,
Daniel Borkmann <daniel@...earbox.net>,
KP Singh <kpsingh@...nel.org>,
Stanislav Fomichev <sdf@...ichev.me>,
Jiri Olsa <jolsa@...nel.org>,
John Fastabend <john.fastabend@...il.com>,
Sebastian Andrzej Siewior <bigeasy@...utronix.de>,
houtao1@...wei.com,
xukuohai@...wei.com
Subject: [PATCH bpf-next 5/7] bpf: Factor out the element allocation for pre-allocated htab
From: Hou Tao <houtao1@...wei.com>
The element allocation for the pre-allocated htab consists of two cases:
1) when there is an old_element, directly reuse the per-cpu extra_elems
   and stash the old_element as the next per-cpu extra_elems
2) when there is no old_element, allocate from the per-cpu free list

The reuse and stash of the per-cpu extra_elems will be split into two
independent steps. After the split, the per-cpu extra_elems may be NULL
when trying to reuse it, and the allocation needs to fall back to the
per-cpu free list when that happens.

Therefore, factor out the element allocation into a helper to make the
following change straightforward.
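
For illustration only, the sketch below shows one possible shape of the
helper after the follow-up split, where a NULL per-cpu extra_elems slot
makes the allocation fall back to the free list. It is not part of this
patch; clearing the slot on reuse and the element-count handling are
assumptions made purely for illustration.

/* Illustrative sketch (not in this patch): assumes the per-cpu slot is
 * cleared when the stashed element is reused, and keeps the element
 * count handling of the free-list path as in this patch; the actual
 * follow-up change may differ.
 */
static struct htab_elem *alloc_preallocated_htab_elem(struct bpf_htab *htab,
						       struct htab_elem *old_elem)
{
	struct pcpu_freelist_node *l;
	struct htab_elem *l_new;

	if (old_elem) {
		struct htab_elem **pl_new = this_cpu_ptr(htab->extra_elems);

		/* reuse the stashed per-cpu extra element when available */
		l_new = *pl_new;
		if (l_new) {
			*pl_new = NULL;
			return l_new;
		}
		/* extra_elems is NULL: fall back to the free list below */
	}

	l = __pcpu_freelist_pop(&htab->freelist);
	if (!l)
		return ERR_PTR(-E2BIG);

	bpf_map_inc_elem_count(&htab->map);
	return container_of(l, struct htab_elem, fnode);
}
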
Signed-off-by: Hou Tao <houtao1@...wei.com>
---
kernel/bpf/hashtab.c | 49 +++++++++++++++++++++++++++++---------------
1 file changed, 32 insertions(+), 17 deletions(-)
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 3c6eebabb492..9211df2adda4 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -1021,6 +1021,34 @@ static bool fd_htab_map_needs_adjust(const struct bpf_htab *htab)
BITS_PER_LONG == 64;
}
+static struct htab_elem *alloc_preallocated_htab_elem(struct bpf_htab *htab,
+ struct htab_elem *old_elem)
+{
+ struct pcpu_freelist_node *l;
+ struct htab_elem *l_new;
+
+ if (old_elem) {
+ struct htab_elem **pl_new;
+
+ /* if we're updating the existing element,
+ * use per-cpu extra elems to avoid freelist_pop/push
+ */
+ pl_new = this_cpu_ptr(htab->extra_elems);
+ l_new = *pl_new;
+ *pl_new = old_elem;
+ return l_new;
+ }
+
+ l = __pcpu_freelist_pop(&htab->freelist);
+ if (!l)
+ return ERR_PTR(-E2BIG);
+
+ l_new = container_of(l, struct htab_elem, fnode);
+ bpf_map_inc_elem_count(&htab->map);
+
+ return l_new;
+}
+
static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
void *value, u32 key_size, u32 hash,
bool percpu, bool onallcpus,
@@ -1028,26 +1056,13 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
{
u32 size = htab->map.value_size;
bool prealloc = htab_is_prealloc(htab);
- struct htab_elem *l_new, **pl_new;
+ struct htab_elem *l_new;
void __percpu *pptr;
if (prealloc) {
- if (old_elem) {
- /* if we're updating the existing element,
- * use per-cpu extra elems to avoid freelist_pop/push
- */
- pl_new = this_cpu_ptr(htab->extra_elems);
- l_new = *pl_new;
- *pl_new = old_elem;
- } else {
- struct pcpu_freelist_node *l;
-
- l = __pcpu_freelist_pop(&htab->freelist);
- if (!l)
- return ERR_PTR(-E2BIG);
- l_new = container_of(l, struct htab_elem, fnode);
- bpf_map_inc_elem_count(&htab->map);
- }
+ l_new = alloc_preallocated_htab_elem(htab, old_elem);
+ if (IS_ERR(l_new))
+ return l_new;
} else {
if (is_map_full(htab))
if (!old_elem)
--
2.29.2