Message-Id: <20250106081900.1665573-2-houtao@huaweicloud.com>
Date: Mon, 6 Jan 2025 16:18:42 +0800
From: Hou Tao <houtao@...weicloud.com>
To: bpf@...r.kernel.org,
netdev@...r.kernel.org
Cc: Martin KaFai Lau <martin.lau@...ux.dev>,
Alexei Starovoitov <alexei.starovoitov@...il.com>,
Andrii Nakryiko <andrii@...nel.org>,
Eduard Zingerman <eddyz87@...il.com>,
Song Liu <song@...nel.org>,
Hao Luo <haoluo@...gle.com>,
Yonghong Song <yonghong.song@...ux.dev>,
Daniel Borkmann <daniel@...earbox.net>,
KP Singh <kpsingh@...nel.org>,
Stanislav Fomichev <sdf@...ichev.me>,
Jiri Olsa <jolsa@...nel.org>,
John Fastabend <john.fastabend@...il.com>,
houtao1@...wei.com,
xukuohai@...wei.com
Subject: [PATCH bpf-next 01/19] bpf: Remove migrate_{disable|enable} from LPM trie
From: Hou Tao <houtao1@...wei.com>

Both BPF programs and the bpf syscall may invoke the ->update or
->delete operations of an LPM trie. For BPF programs, the running
context has already disabled migration, either explicitly through
migrate_disable() or implicitly by disabling preemption or IRQs. For
the bpf syscall, migration is disabled by bpf_disable_instrumentation()
before the corresponding map operation callback is invoked.
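
For reference, the syscall-side guarantee comes from the
instrumentation guard that the bpf(2) syscall wraps around every map
operation callback. A simplified excerpt (based on include/linux/bpf.h;
exact details may vary by kernel version):

	/* The bpf syscall brackets ->map_update_elem() and
	 * ->map_delete_elem() with these helpers, so the callbacks
	 * never run in a migratable context.
	 */
	static inline void bpf_disable_instrumentation(void)
	{
		migrate_disable();
		this_cpu_inc(bpf_prog_active);
	}

	static inline void bpf_enable_instrumentation(void)
	{
		this_cpu_dec(bpf_prog_active);
		migrate_enable();
	}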

Therefore, it is safe to remove the migrate_{disable|enable}() pairs
from the LPM trie. To ensure this guarantee is not violated later, also
add a cant_migrate() check to both the update and delete operations.
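
As a sketch of the resulting contract (illustrative only, not part of
the diff below), the trie operations now assert the caller-provided
guarantee instead of re-disabling migration around the per-CPU
allocator:

	static long trie_update_elem(struct bpf_map *map, ...)
	{
		/* Callers guarantee migration is disabled: BPF programs
		 * run under migrate_disable() (or with preemption/IRQs
		 * off), and the syscall path went through
		 * bpf_disable_instrumentation().
		 */
		cant_migrate();

		/* Safe without a migrate_{disable|enable}() pair:
		 * bpf_mem_cache_alloc() works on this CPU's cache and
		 * the task cannot migrate away.
		 */
		new_node = lpm_trie_node_alloc(trie, value);
		...
	}

cant_migrate() expands to a debug-time assertion that splats if the
current context could migrate between CPUs (when atomic-sleep debugging
is enabled) and to a no-op otherwise, so the check is free in
production builds.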
Signed-off-by: Hou Tao <houtao1@...wei.com>
---
kernel/bpf/lpm_trie.c | 24 ++++++++----------------
1 file changed, 8 insertions(+), 16 deletions(-)
diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c
index f8bc1e096182..1a3585a485df 100644
--- a/kernel/bpf/lpm_trie.c
+++ b/kernel/bpf/lpm_trie.c
@@ -289,16 +289,11 @@ static void *trie_lookup_elem(struct bpf_map *map, void *_key)
 }
 
 static struct lpm_trie_node *lpm_trie_node_alloc(struct lpm_trie *trie,
-						 const void *value,
-						 bool disable_migration)
+						 const void *value)
 {
 	struct lpm_trie_node *node;
 
-	if (disable_migration)
-		migrate_disable();
 	node = bpf_mem_cache_alloc(&trie->ma);
-	if (disable_migration)
-		migrate_enable();
 	if (!node)
 		return NULL;
 
@@ -342,10 +337,10 @@ static long trie_update_elem(struct bpf_map *map,
 	if (key->prefixlen > trie->max_prefixlen)
 		return -EINVAL;
 
-	/* Allocate and fill a new node. Need to disable migration before
-	 * invoking bpf_mem_cache_alloc().
-	 */
-	new_node = lpm_trie_node_alloc(trie, value, true);
+	cant_migrate();
+
+	/* Allocate and fill a new node */
+	new_node = lpm_trie_node_alloc(trie, value);
 	if (!new_node)
 		return -ENOMEM;
 
@@ -425,8 +420,7 @@ static long trie_update_elem(struct bpf_map *map,
 		goto out;
 	}
 
-	/* migration is disabled within the locked scope */
-	im_node = lpm_trie_node_alloc(trie, NULL, false);
+	im_node = lpm_trie_node_alloc(trie, NULL);
 	if (!im_node) {
 		trie->n_entries--;
 		ret = -ENOMEM;
@@ -452,11 +446,9 @@ static long trie_update_elem(struct bpf_map *map,
 out:
 	raw_spin_unlock_irqrestore(&trie->lock, irq_flags);
 
-	migrate_disable();
 	if (ret)
 		bpf_mem_cache_free(&trie->ma, new_node);
 	bpf_mem_cache_free_rcu(&trie->ma, free_node);
-	migrate_enable();
 
 	return ret;
 }
@@ -477,6 +469,8 @@ static long trie_delete_elem(struct bpf_map *map, void *_key)
 	if (key->prefixlen > trie->max_prefixlen)
 		return -EINVAL;
 
+	cant_migrate();
+
 	raw_spin_lock_irqsave(&trie->lock, irq_flags);
 
 	/* Walk the tree looking for an exact key/length match and keeping
@@ -555,10 +549,8 @@ static long trie_delete_elem(struct bpf_map *map, void *_key)
 out:
 	raw_spin_unlock_irqrestore(&trie->lock, irq_flags);
 
-	migrate_disable();
 	bpf_mem_cache_free_rcu(&trie->ma, free_parent);
 	bpf_mem_cache_free_rcu(&trie->ma, free_node);
-	migrate_enable();
 
 	return ret;
 }
--
2.29.2