Message-ID: <20260123055658.372869-2-leon.hwang@linux.dev>
Date: Fri, 23 Jan 2026 13:56:57 +0800
From: Leon Hwang <leon.hwang@...ux.dev>
To: bpf@...r.kernel.org
Cc: Alexei Starovoitov <ast@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>,
John Fastabend <john.fastabend@...il.com>,
Andrii Nakryiko <andrii@...nel.org>,
Martin KaFai Lau <martin.lau@...ux.dev>,
Eduard Zingerman <eddyz87@...il.com>,
Song Liu <song@...nel.org>,
Yonghong Song <yonghong.song@...ux.dev>,
KP Singh <kpsingh@...nel.org>,
Stanislav Fomichev <sdf@...ichev.me>,
Hao Luo <haoluo@...gle.com>,
Jiri Olsa <jolsa@...nel.org>,
Shuah Khan <shuah@...nel.org>,
Leon Hwang <leon.hwang@...ux.dev>,
linux-kernel@...r.kernel.org,
linux-kselftest@...r.kernel.org,
kernel-patches-bot@...com
Subject: [PATCH bpf-next v2 1/2] bpf: Disallow BPF_F_LOCK with mixed special fields and centralize flag checks

Disallow combining BPF_F_LOCK with map values that contain special BTF
fields other than bpf_spin_lock (e.g. kptr or uptr). Such mixing may lead
to subtle or undefined behavior in map value handling. Reject these
combinations early by returning -EOPNOTSUPP.
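
For illustration, a minimal sketch of a now-rejected combination, split
into its BPF and userspace halves (the value layout, try_locked_update()
and map_fd are hypothetical):

	/* BPF object, built with vmlinux.h and <bpf/bpf_helpers.h> */
	struct val {
		struct bpf_spin_lock lock;
		struct task_struct __kptr *task; /* special BTF field */
	};

	struct {
		__uint(type, BPF_MAP_TYPE_HASH);
		__uint(max_entries, 1);
		__type(key, __u32);
		__type(value, struct val);
	} m SEC(".maps");

	/* userspace side, using libbpf's <bpf/bpf.h> */
	int try_locked_update(int map_fd)
	{
		__u32 key = 0;
		struct val v = {};

		/* value mixes bpf_spin_lock with a kptr: now rejected */
		return bpf_map_update_elem(map_fd, &key, &v, BPF_F_LOCK);
		/* returns -EOPNOTSUPP with this patch applied */
	}
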
Centralize map update flag validation in bpf_map_check_op_flags() and
reuse it across array, hash, local-storage, and task-storage map update
paths. Explicitly reject incompatible BPF_NOEXIST/BPF_EXIST combinations
and invalid BPF_F_LOCK usage to keep flag validation consistent and
eliminate duplicated per-map checks.
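
For reference, bpf_map_check_op_flags() after this change would read
roughly as below (reconstructed from the include/linux/bpf.h hunk; the
trailing return and exact surrounding context are assumed):

	static inline int bpf_map_check_op_flags(struct bpf_map *map, u64 flags,
						 u64 allowed_flags)
	{
		/* No flag bits outside the caller's allowed set. */
		if ((u32)flags & ~allowed_flags)
			return -EINVAL;

		/* BPF_NOEXIST and BPF_EXIST are mutually exclusive. */
		if ((flags & (BPF_NOEXIST | BPF_EXIST)) == (BPF_NOEXIST | BPF_EXIST))
			return -EINVAL;

		/* BPF_F_LOCK requires a bpf_spin_lock in the value ... */
		if ((flags & BPF_F_LOCK) &&
		    !btf_record_has_field(map->record, BPF_SPIN_LOCK))
			return -EINVAL;

		/* ... and no other special BTF fields (kptr, uptr, etc.). */
		if ((flags & BPF_F_LOCK) &&
		    btf_record_has_field(map->record, ~BPF_SPIN_LOCK))
			return -EOPNOTSUPP;

		/* Upper 32 flag bits are only meaningful with BPF_F_CPU. */
		if (!(flags & BPF_F_CPU) && flags >> 32)
			return -EINVAL;

		return 0;
	}
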
Signed-off-by: Leon Hwang <leon.hwang@...ux.dev>
---
include/linux/bpf.h | 7 +++++++
kernel/bpf/arraymap.c | 11 ++++-------
kernel/bpf/bpf_local_storage.c | 7 -------
kernel/bpf/bpf_task_storage.c | 3 ---
kernel/bpf/hashtab.c | 8 +++-----
kernel/bpf/syscall.c | 4 +++-
6 files changed, 17 insertions(+), 23 deletions(-)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 5936f8e2996f..c5863487ee73 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -3935,9 +3935,16 @@ static inline int bpf_map_check_op_flags(struct bpf_map *map, u64 flags, u64 all
if ((u32)flags & ~allowed_flags)
return -EINVAL;
+ /* BPF_NOEXIST and BPF_EXIST are mutually exclusive. */
+ if ((flags & (BPF_NOEXIST | BPF_EXIST)) == (BPF_NOEXIST | BPF_EXIST))
+ return -EINVAL;
+
if ((flags & BPF_F_LOCK) && !btf_record_has_field(map->record, BPF_SPIN_LOCK))
return -EINVAL;
+ if ((flags & BPF_F_LOCK) && btf_record_has_field(map->record, ~BPF_SPIN_LOCK))
+ return -EOPNOTSUPP;
+
if (!(flags & BPF_F_CPU) && flags >> 32)
return -EINVAL;
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 67e9e811de3a..1cff40f109cd 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -366,10 +366,7 @@ static long array_map_update_elem(struct bpf_map *map, void *key, void *value,
struct bpf_array *array = container_of(map, struct bpf_array, map);
u32 index = *(u32 *)key;
char *val;
-
- if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST))
- /* unknown flags */
- return -EINVAL;
+ int err;
if (unlikely(index >= array->map.max_entries))
/* all elements were pre-allocated, cannot insert a new one */
@@ -379,9 +376,9 @@ static long array_map_update_elem(struct bpf_map *map, void *key, void *value,
/* all elements already exist */
return -EEXIST;
- if (unlikely((map_flags & BPF_F_LOCK) &&
- !btf_record_has_field(map->record, BPF_SPIN_LOCK)))
- return -EINVAL;
+ err = bpf_map_check_op_flags(map, map_flags, BPF_EXIST | BPF_F_LOCK);
+ if (unlikely(err))
+ return err;
if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
val = this_cpu_ptr(array->pptrs[index & array->index_mask]);
diff --git a/kernel/bpf/bpf_local_storage.c b/kernel/bpf/bpf_local_storage.c
index e2fe6c32822b..80b50091cbbf 100644
--- a/kernel/bpf/bpf_local_storage.c
+++ b/kernel/bpf/bpf_local_storage.c
@@ -493,13 +493,6 @@ bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
unsigned long flags;
int err;
- /* BPF_EXIST and BPF_NOEXIST cannot be both set */
- if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST) ||
- /* BPF_F_LOCK can only be used in a value with spin_lock */
- unlikely((map_flags & BPF_F_LOCK) &&
- !btf_record_has_field(smap->map.record, BPF_SPIN_LOCK)))
- return ERR_PTR(-EINVAL);
-
if (gfp_flags == GFP_KERNEL && (map_flags & ~BPF_F_LOCK) != BPF_NOEXIST)
return ERR_PTR(-EINVAL);
diff --git a/kernel/bpf/bpf_task_storage.c b/kernel/bpf/bpf_task_storage.c
index a1dc1bf0848a..21d84818e64e 100644
--- a/kernel/bpf/bpf_task_storage.c
+++ b/kernel/bpf/bpf_task_storage.c
@@ -125,9 +125,6 @@ static long bpf_pid_task_storage_update_elem(struct bpf_map *map, void *key,
struct pid *pid;
int fd, err;
- if ((map_flags & BPF_F_LOCK) && btf_record_has_field(map->record, BPF_UPTR))
- return -EOPNOTSUPP;
-
fd = *(int *)key;
pid = pidfd_get_pid(fd, &f_flags);
if (IS_ERR(pid))
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 3b9d297a53be..2f6ed3e80308 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -1093,9 +1093,9 @@ static long htab_map_update_elem(struct bpf_map *map, void *key, void *value,
u32 key_size, hash;
int ret;
- if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST))
- /* unknown flags */
- return -EINVAL;
+ ret = bpf_map_check_op_flags(map, map_flags, BPF_NOEXIST | BPF_EXIST | BPF_F_LOCK);
+ if (unlikely(ret))
+ return ret;
WARN_ON_ONCE(!bpf_rcu_lock_held());
@@ -1107,8 +1107,6 @@ static long htab_map_update_elem(struct bpf_map *map, void *key, void *value,
head = &b->head;
if (unlikely(map_flags & BPF_F_LOCK)) {
- if (unlikely(!btf_record_has_field(map->record, BPF_SPIN_LOCK)))
- return -EINVAL;
/* find an element without taking the bucket lock */
l_old = lookup_nulls_elem_raw(head, hash, key, key_size,
htab->n_buckets);
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 3c5c03d43f5f..49e424e5f492 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -1780,6 +1780,7 @@ static int map_update_elem(union bpf_attr *attr, bpfptr_t uattr)
bpfptr_t uvalue = make_bpfptr(attr->value, uattr.is_kernel);
struct bpf_map *map;
void *key, *value;
+ u64 allowed_flags;
u32 value_size;
int err;
@@ -1796,7 +1797,8 @@ static int map_update_elem(union bpf_attr *attr, bpfptr_t uattr)
goto err_put;
}
- err = bpf_map_check_op_flags(map, attr->flags, ~0);
+ allowed_flags = BPF_NOEXIST | BPF_EXIST | BPF_F_LOCK | BPF_F_CPU | BPF_F_ALL_CPUS;
+ err = bpf_map_check_op_flags(map, attr->flags, allowed_flags);
if (err)
goto err_put;
--
2.52.0