Message-ID: <20260119142120.28170-3-leon.hwang@linux.dev>
Date: Mon, 19 Jan 2026 22:21:19 +0800
From: Leon Hwang <leon.hwang@...ux.dev>
To: bpf@...r.kernel.org
Cc: Martin KaFai Lau <martin.lau@...ux.dev>,
	Alexei Starovoitov <ast@...nel.org>,
	Daniel Borkmann <daniel@...earbox.net>,
	Andrii Nakryiko <andrii@...nel.org>,
	Eduard Zingerman <eddyz87@...il.com>,
	Song Liu <song@...nel.org>,
	Yonghong Song <yonghong.song@...ux.dev>,
	John Fastabend <john.fastabend@...il.com>,
	KP Singh <kpsingh@...nel.org>,
	Stanislav Fomichev <sdf@...ichev.me>,
	Hao Luo <haoluo@...gle.com>,
	Jiri Olsa <jolsa@...nel.org>,
	Shuah Khan <shuah@...nel.org>,
	Leon Hwang <leon.hwang@...ux.dev>,
	linux-kernel@...r.kernel.org,
	linux-kselftest@...r.kernel.org,
	kernel-patches-bot@...com
Subject: [PATCH bpf-next 2/3] bpf: Avoid deadlock using trylock when popping LRU free nodes

Switch the free-node pop paths to raw_spin_trylock*() to avoid blocking
on contended LRU locks.

If the global or per-CPU LRU lock is unavailable, refuse to refill the
local free list and return NULL instead. This allows callers to back
off safely rather than blocking or re-entering the same lock context.
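For illustration, the resulting pattern reads roughly like the userspace
sketch below. This is not part of the patch: pthread_spin_trylock()
stands in for the kernel's raw_spin_trylock*(), and pop_free(),
struct free_list and struct node are made-up stand-ins for the bpf_lru
types.

    /* Illustration only: trylock-and-bail free-list pop. */
    #include <pthread.h>
    #include <stddef.h>

    struct node {
            struct node *next;
    };

    struct free_list {
            pthread_spinlock_t lock;
            struct node *head;
    };

    static struct node *pop_free(struct free_list *l)
    {
            struct node *n;

            /* Lock contended or re-entered: back off instead of spinning. */
            if (pthread_spin_trylock(&l->lock) != 0)
                    return NULL;

            n = l->head;
            if (n)
                    l->head = n->next;

            pthread_spin_unlock(&l->lock);
            return n; /* NULL tells the caller to fail or retry later */
    }

The key point is that a NULL return becomes a legitimate outcome on
contention, which the pop paths changed in the diff below handle by
simply propagating the failure to their callers.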

This change avoids lockdep warnings and potential deadlocks caused by
re-entrant LRU lock acquisition from NMI context, as shown below:

[  418.260323] bpf_testmod: oh no, recursing into test_1, recursion_misses 1
[  424.982207] ================================
[  424.982216] WARNING: inconsistent lock state
[  424.982223] inconsistent {INITIAL USE} -> {IN-NMI} usage.
[  424.982314]  *** DEADLOCK ***
[...]
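To see why blocking here is fatal, consider the userspace analogue
below (illustration only, not from the report): a SIGALRM handler
plays the role of the NMI-context BPF program, interrupting the very
thread that already holds the lock. A blocking acquisition in the
handler would spin forever; trylock detects the situation and backs
off, as the patched pop paths now do.

    #include <pthread.h>
    #include <signal.h>
    #include <unistd.h>

    static pthread_spinlock_t lock;

    static void handler(int sig)
    {
            (void)sig;
            /* Same thread already holds the lock: blocking would deadlock. */
            if (pthread_spin_trylock(&lock) == 0)
                    pthread_spin_unlock(&lock);
            else
                    write(STDOUT_FILENO, "busy, backing off\n", 18);
    }

    int main(void)
    {
            pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
            signal(SIGALRM, handler);

            pthread_spin_lock(&lock);       /* critical section begins */
            alarm(1);
            sleep(2);                       /* SIGALRM arrives while lock is held */
            pthread_spin_unlock(&lock);

            pthread_spin_destroy(&lock);
            return 0;
    }

Running this prints "busy, backing off" instead of hanging; with a
blocking pthread_spin_lock() in the handler it would never return,
which mirrors the in-kernel deadlock lockdep complains about above.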

Signed-off-by: Leon Hwang <leon.hwang@...ux.dev>
---
 kernel/bpf/bpf_lru_list.c | 17 ++++++++++-------
 1 file changed, 10 insertions(+), 7 deletions(-)

diff --git a/kernel/bpf/bpf_lru_list.c b/kernel/bpf/bpf_lru_list.c
index c091f3232cc5..03d37f72731a 100644
--- a/kernel/bpf/bpf_lru_list.c
+++ b/kernel/bpf/bpf_lru_list.c
@@ -312,14 +312,15 @@ static void bpf_lru_list_push_free(struct bpf_lru_list *l,
 	raw_spin_unlock_irqrestore(&l->lock, flags);
 }
 
-static void bpf_lru_list_pop_free_to_local(struct bpf_lru *lru,
+static bool bpf_lru_list_pop_free_to_local(struct bpf_lru *lru,
 					   struct bpf_lru_locallist *loc_l)
 {
 	struct bpf_lru_list *l = &lru->common_lru.lru_list;
 	struct bpf_lru_node *node, *tmp_node;
 	unsigned int nfree = 0;
 
-	raw_spin_lock(&l->lock);
+	if (!raw_spin_trylock(&l->lock))
+		return false;
 
 	__local_list_flush(l, loc_l);
 
@@ -339,6 +340,8 @@ static void bpf_lru_list_pop_free_to_local(struct bpf_lru *lru,
 				      BPF_LRU_LOCAL_LIST_T_FREE);
 
 	raw_spin_unlock(&l->lock);
+
+	return true;
 }
 
 /*
@@ -418,7 +421,8 @@ static struct bpf_lru_node *bpf_percpu_lru_pop_free(struct bpf_lru *lru,
 
 	l = per_cpu_ptr(lru->percpu_lru, cpu);
 
-	raw_spin_lock_irqsave(&l->lock, flags);
+	if (!raw_spin_trylock_irqsave(&l->lock, flags))
+		return NULL;
 
 	__bpf_lru_list_rotate(lru, l);
 
@@ -451,13 +455,12 @@ static struct bpf_lru_node *bpf_common_lru_pop_free(struct bpf_lru *lru,
 
 	loc_l = per_cpu_ptr(clru->local_list, cpu);
 
-	raw_spin_lock_irqsave(&loc_l->lock, flags);
+	if (!raw_spin_trylock_irqsave(&loc_l->lock, flags))
+		return NULL;
 
 	node = __local_list_pop_free(loc_l);
-	if (!node) {
-		bpf_lru_list_pop_free_to_local(lru, loc_l);
+	if (!node && bpf_lru_list_pop_free_to_local(lru, loc_l))
 		node = __local_list_pop_free(loc_l);
-	}
 
 	if (node)
 		__local_list_add_pending(lru, loc_l, cpu, node, hash);
-- 
2.52.0

