Message-Id: <20250106081900.1665573-16-houtao@huaweicloud.com>
Date: Mon,  6 Jan 2025 16:18:56 +0800
From: Hou Tao <houtao@...weicloud.com>
To: bpf@...r.kernel.org,
	netdev@...r.kernel.org
Cc: Martin KaFai Lau <martin.lau@...ux.dev>,
	Alexei Starovoitov <alexei.starovoitov@...il.com>,
	Andrii Nakryiko <andrii@...nel.org>,
	Eduard Zingerman <eddyz87@...il.com>,
	Song Liu <song@...nel.org>,
	Hao Luo <haoluo@...gle.com>,
	Yonghong Song <yonghong.song@...ux.dev>,
	Daniel Borkmann <daniel@...earbox.net>,
	KP Singh <kpsingh@...nel.org>,
	Stanislav Fomichev <sdf@...ichev.me>,
	Jiri Olsa <jolsa@...nel.org>,
	John Fastabend <john.fastabend@...il.com>,
	houtao1@...wei.com,
	xukuohai@...wei.com
Subject: [PATCH bpf-next 15/19] bpf: Disable migration before calling ops->map_free()

From: Hou Tao <houtao1@...wei.com>

Disable migration before calling ops->map_free() to simplify the
freeing of map values or special fields allocated from the bpf memory
allocator.

After disabling migration in bpf_map_free(), there is no need for
additional migrate_{disable|enable} pairs in the ->map_free()
callbacks. Remove these redundant invocations.
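
For reference, a minimal illustrative sketch of what this means for a
->map_free() callback. The example_map_free_*() functions and the
example_elem() helper below are hypothetical; only
migrate_{disable|enable}(), bpf_mem_free() and bpf_global_ma mirror
real kernel APIs:

	/* Before: each callback pinned the task itself, because
	 * bpf_mem_free() operates on per-CPU caches of the bpf memory
	 * allocator and the task must not migrate CPUs mid-call.
	 */
	static void example_map_free_old(struct bpf_map *map)
	{
		migrate_disable();
		bpf_mem_free(&bpf_global_ma, example_elem(map));
		migrate_enable();
	}

	/* After: bpf_map_free() wraps the ->map_free() call in a single
	 * migrate_disable()/migrate_enable() pair, so the callback can
	 * free directly.
	 */
	static void example_map_free_new(struct bpf_map *map)
	{
		bpf_mem_free(&bpf_global_ma, example_elem(map));
	}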

Signed-off-by: Hou Tao <houtao1@...wei.com>
---
 kernel/bpf/arraymap.c          | 2 --
 kernel/bpf/bpf_local_storage.c | 2 --
 kernel/bpf/hashtab.c           | 2 --
 kernel/bpf/range_tree.c        | 2 --
 kernel/bpf/syscall.c           | 8 +++++++-
 5 files changed, 7 insertions(+), 9 deletions(-)

diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 451737493b17..eb28c0f219ee 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -455,7 +455,6 @@ static void array_map_free(struct bpf_map *map)
 	struct bpf_array *array = container_of(map, struct bpf_array, map);
 	int i;
 
-	migrate_disable();
 	if (!IS_ERR_OR_NULL(map->record)) {
 		if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
 			for (i = 0; i < array->map.max_entries; i++) {
@@ -472,7 +471,6 @@ static void array_map_free(struct bpf_map *map)
 				bpf_obj_free_fields(map->record, array_map_elem_ptr(array, i));
 		}
 	}
-	migrate_enable();
 
 	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
 		bpf_array_free_percpu(array);
diff --git a/kernel/bpf/bpf_local_storage.c b/kernel/bpf/bpf_local_storage.c
index b649cf736438..12cf6382175e 100644
--- a/kernel/bpf/bpf_local_storage.c
+++ b/kernel/bpf/bpf_local_storage.c
@@ -905,13 +905,11 @@ void bpf_local_storage_map_free(struct bpf_map *map,
 		while ((selem = hlist_entry_safe(
 				rcu_dereference_raw(hlist_first_rcu(&b->list)),
 				struct bpf_local_storage_elem, map_node))) {
-			migrate_disable();
 			if (busy_counter)
 				this_cpu_inc(*busy_counter);
 			bpf_selem_unlink(selem, true);
 			if (busy_counter)
 				this_cpu_dec(*busy_counter);
-			migrate_enable();
 			cond_resched_rcu();
 		}
 		rcu_read_unlock();
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 8bf1ad326e02..6051f8a39fec 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -1570,14 +1570,12 @@ static void htab_map_free(struct bpf_map *map)
 	 * underneath and is responsible for waiting for callbacks to finish
 	 * during bpf_mem_alloc_destroy().
 	 */
-	migrate_disable();
 	if (!htab_is_prealloc(htab)) {
 		delete_all_elements(htab);
 	} else {
 		htab_free_prealloced_fields(htab);
 		prealloc_destroy(htab);
 	}
-	migrate_enable();
 
 	bpf_map_free_elem_count(map);
 	free_percpu(htab->extra_elems);
diff --git a/kernel/bpf/range_tree.c b/kernel/bpf/range_tree.c
index 5bdf9aadca3a..37b80a23ae1a 100644
--- a/kernel/bpf/range_tree.c
+++ b/kernel/bpf/range_tree.c
@@ -259,9 +259,7 @@ void range_tree_destroy(struct range_tree *rt)
 
 	while ((rn = range_it_iter_first(rt, 0, -1U))) {
 		range_it_remove(rn, rt);
-		migrate_disable();
 		bpf_mem_free(&bpf_global_ma, rn);
-		migrate_enable();
 	}
 }
 
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 0503ce1916b6..e7a41abe4809 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -835,8 +835,14 @@ static void bpf_map_free(struct bpf_map *map)
 	struct btf_record *rec = map->record;
 	struct btf *btf = map->btf;
 
-	/* implementation dependent freeing */
+	/* implementation dependent freeing. Disable migration to simplify
+	 * the freeing of values or special fields allocated from the bpf
+	 * memory allocator.
+	 */
+	migrate_disable();
 	map->ops->map_free(map);
+	migrate_enable();
+
 	/* Delay freeing of btf_record for maps, as map_free
 	 * callback usually needs access to them. It is better to do it here
 	 * than require each callback to do the free itself manually.
-- 
2.29.2
