Message-ID: <tencent_C34B2C1BD325BFBAC0BB5C339C9EB01B4008@qq.com>
Date: Sat,  2 Nov 2024 11:59:15 +0800
From: Edward Adam Davis <eadavis@...com>
To: syzbot+d2adb332fe371b0595e3@...kaller.appspotmail.com
Cc: linux-kernel@...r.kernel.org,
	syzkaller-bugs@...glegroups.com
Subject: Re: [syzbot] [bpf?] WARNING: locking bug in bpf_map_put

The report shows a spinlock_t (map_idr_lock) being taken inside a raw_spinlock_t (b->raw_lock) critical section:

      raw_spin_lock(&b->raw_lock);
      spin_lock(&map_idr_lock);
      spin_unlock(&map_idr_lock);
      raw_spin_unlock(&b->raw_lock);

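For context, a minimal standalone sketch (module and lock names are mine, not from the report) of the nesting pattern lockdep flags: on PREEMPT_RT a spinlock_t becomes a sleeping lock, so acquiring one while a raw_spinlock_t is held is a wait-type violation reported with CONFIG_PROVE_RAW_LOCK_NESTING enabled.

/*
 * Illustrative only -- not the syzbot reproducer.  A raw_spinlock_t
 * critical section must not take a spinlock_t, because spinlock_t is a
 * sleeping lock on PREEMPT_RT.
 */
#include <linux/module.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(outer_raw_lock);	/* always a true spinning lock */
static DEFINE_SPINLOCK(inner_lock);		/* an rt_mutex on PREEMPT_RT */

static int __init nesting_demo_init(void)
{
	raw_spin_lock(&outer_raw_lock);
	spin_lock(&inner_lock);		/* invalid: sleeping lock under a raw lock */
	spin_unlock(&inner_lock);
	raw_spin_unlock(&outer_raw_lock);
	return 0;
}

static void __exit nesting_demo_exit(void)
{
}

module_init(nesting_demo_init);
module_exit(nesting_demo_exit);
MODULE_LICENSE("GPL");

The patch below avoids the nesting by converting map_idr_lock itself to a raw_spinlock_t and adding a raw variant of bpf_obj_get_next_id for the BPF_MAP_GET_NEXT_ID path.
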
#syz test: upstream master

diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index a8f1808a1ca5..4891904ee1fc 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -54,7 +54,7 @@ DEFINE_PER_CPU(int, bpf_prog_active);
 static DEFINE_IDR(prog_idr);
 static DEFINE_SPINLOCK(prog_idr_lock);
 static DEFINE_IDR(map_idr);
-static DEFINE_SPINLOCK(map_idr_lock);
+static DEFINE_RAW_SPINLOCK(map_idr_lock);
 static DEFINE_IDR(link_idr);
 static DEFINE_SPINLOCK(link_idr_lock);
 
@@ -352,11 +352,11 @@ static int bpf_map_alloc_id(struct bpf_map *map)
 	int id;
 
 	idr_preload(GFP_KERNEL);
-	spin_lock_bh(&map_idr_lock);
+	raw_spin_lock_bh(&map_idr_lock);
 	id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
 	if (id > 0)
 		map->id = id;
-	spin_unlock_bh(&map_idr_lock);
+	raw_spin_unlock_bh(&map_idr_lock);
 	idr_preload_end();
 
 	if (WARN_ON_ONCE(!id))
@@ -377,12 +377,12 @@ void bpf_map_free_id(struct bpf_map *map)
 	if (!map->id)
 		return;
 
-	spin_lock_irqsave(&map_idr_lock, flags);
+	raw_spin_lock_irqsave(&map_idr_lock, flags);
 
 	idr_remove(&map_idr, map->id);
 	map->id = 0;
 
-	spin_unlock_irqrestore(&map_idr_lock, flags);
+	raw_spin_unlock_irqrestore(&map_idr_lock, flags);
 }
 
 #ifdef CONFIG_MEMCG
@@ -1479,9 +1479,9 @@ struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref)
 
 struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map)
 {
-	spin_lock_bh(&map_idr_lock);
+	raw_spin_lock_bh(&map_idr_lock);
 	map = __bpf_map_inc_not_zero(map, false);
-	spin_unlock_bh(&map_idr_lock);
+	raw_spin_unlock_bh(&map_idr_lock);
 
 	return map;
 }
@@ -4278,11 +4278,37 @@ static int bpf_obj_get_next_id(const union bpf_attr *attr,
 	return err;
 }
 
+static int bpf_obj_get_next_id_raw(const union bpf_attr *attr,
+			       union bpf_attr __user *uattr,
+			       struct idr *idr,
+			       raw_spinlock_t *lock)
+{
+	u32 next_id = attr->start_id;
+	int err = 0;
+
+	if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX)
+		return -EINVAL;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	next_id++;
+	raw_spin_lock_bh(lock);
+	if (!idr_get_next(idr, &next_id))
+		err = -ENOENT;
+	raw_spin_unlock_bh(lock);
+
+	if (!err)
+		err = put_user(next_id, &uattr->next_id);
+
+	return err;
+}
+
 struct bpf_map *bpf_map_get_curr_or_next(u32 *id)
 {
 	struct bpf_map *map;
 
-	spin_lock_bh(&map_idr_lock);
+	raw_spin_lock_bh(&map_idr_lock);
 again:
 	map = idr_get_next(&map_idr, id);
 	if (map) {
@@ -4292,7 +4318,7 @@ struct bpf_map *bpf_map_get_curr_or_next(u32 *id)
 			goto again;
 		}
 	}
-	spin_unlock_bh(&map_idr_lock);
+	raw_spin_unlock_bh(&map_idr_lock);
 
 	return map;
 }
@@ -4378,13 +4404,13 @@ static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
 	if (f_flags < 0)
 		return f_flags;
 
-	spin_lock_bh(&map_idr_lock);
+	raw_spin_lock_bh(&map_idr_lock);
 	map = idr_find(&map_idr, id);
 	if (map)
 		map = __bpf_map_inc_not_zero(map, true);
 	else
 		map = ERR_PTR(-ENOENT);
-	spin_unlock_bh(&map_idr_lock);
+	raw_spin_unlock_bh(&map_idr_lock);
 
 	if (IS_ERR(map))
 		return PTR_ERR(map);
@@ -5656,7 +5682,7 @@ static int __sys_bpf(enum bpf_cmd cmd, bpfptr_t uattr, unsigned int size)
 					  &prog_idr, &prog_idr_lock);
 		break;
 	case BPF_MAP_GET_NEXT_ID:
-		err = bpf_obj_get_next_id(&attr, uattr.user,
+		err = bpf_obj_get_next_id_raw(&attr, uattr.user,
 					  &map_idr, &map_idr_lock);
 		break;
 	case BPF_BTF_GET_NEXT_ID:

