lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20251031154107.403054-2-kafai.wan@linux.dev>
Date: Fri, 31 Oct 2025 23:41:06 +0800
From: KaFai Wan <kafai.wan@...ux.dev>
To: ast@...nel.org,
	daniel@...earbox.net,
	john.fastabend@...il.com,
	andrii@...nel.org,
	martin.lau@...ux.dev,
	eddyz87@...il.com,
	song@...nel.org,
	yonghong.song@...ux.dev,
	kpsingh@...nel.org,
	sdf@...ichev.me,
	haoluo@...gle.com,
	jolsa@...nel.org,
	shuah@...nel.org,
	paul.chaignon@...il.com,
	m.shachnai@...il.com,
	henriette.herzog@....de,
	kafai.wan@...ux.dev,
	luis.gerhorst@....de,
	harishankar.vishwanathan@...il.com,
	colin.i.king@...il.com,
	bpf@...r.kernel.org,
	linux-kernel@...r.kernel.org,
	linux-kselftest@...r.kernel.org
Cc: Kaiyan Mei <M202472210@...t.edu.cn>,
	Yinhao Hu <dddddd@...t.edu.cn>
Subject: [PATCH bpf-next v3 1/2] bpf: Skip bounds adjustment for conditional jumps on same scalar register

When conditional jumps are performed on the same scalar register
(e.g., r0 <= r0, r0 > r0, r0 < r0), the BPF verifier incorrectly
attempts to adjust the register's min/max bounds. This leads to
invalid range bounds and triggers a BUG warning.

The problematic BPF program:
   0: call bpf_get_prandom_u32
   1: w8 = 0x80000000
   2: r0 &= r8
   3: if r0 > r0 goto <exit>

The instruction 3 triggers kernel warning:
   3: if r0 > r0 goto <exit>
   true_reg1: range bounds violation u64=[0x1, 0x0] s64=[0x1, 0x0] u32=[0x1, 0x0] s32=[0x1, 0x0] var_off=(0x0, 0x0)
   true_reg2: const tnum out of sync with range bounds u64=[0x0, 0xffffffffffffffff] s64=[0x8000000000000000, 0x7fffffffffffffff] var_off=(0x0, 0x0)

Comparing a register with itself should not change its bounds. Moreover,
for most comparison operations the result of comparing a register with
itself is known in advance (e.g., r0 == r0 is always true, and r0 < r0
is always false).

Fix this by:
1. Enhance is_scalar_branch_taken() to properly handle branch direction
   computation for same register comparisons across all BPF jump operations
2. Add an early return in reg_set_min_max() to avoid bounds adjustment
   for unknown branch directions (e.g., BPF_JSET) on the same register

The fix ensures that unnecessary bounds adjustments are skipped, preventing
the verifier bug while maintaining correct branch direction analysis.

Reported-by: Kaiyan Mei <M202472210@...t.edu.cn>
Reported-by: Yinhao Hu <dddddd@...t.edu.cn>
Closes: https://lore.kernel.org/all/1881f0f5.300df.199f2576a01.Coremail.kaiyanm@hust.edu.cn/
Signed-off-by: KaFai Wan <kafai.wan@...ux.dev>
---
 kernel/bpf/verifier.c | 33 +++++++++++++++++++++++++++++++++
 1 file changed, 33 insertions(+)

diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 542e23fb19c7..a571263f4ebe 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -15995,6 +15995,8 @@ static int is_scalar_branch_taken(struct bpf_reg_state *reg1, struct bpf_reg_sta
 
 	switch (opcode) {
 	case BPF_JEQ:
+		if (reg1 == reg2)
+			return 1;
 		/* constants, umin/umax and smin/smax checks would be
 		 * redundant in this case because they all should match
 		 */
@@ -16021,6 +16023,8 @@ static int is_scalar_branch_taken(struct bpf_reg_state *reg1, struct bpf_reg_sta
 		}
 		break;
 	case BPF_JNE:
+		if (reg1 == reg2)
+			return 0;
 		/* constants, umin/umax and smin/smax checks would be
 		 * redundant in this case because they all should match
 		 */
@@ -16047,6 +16051,12 @@ static int is_scalar_branch_taken(struct bpf_reg_state *reg1, struct bpf_reg_sta
 		}
 		break;
 	case BPF_JSET:
+		if (reg1 == reg2) {
+			if (tnum_is_const(t1))
+				return t1.value != 0;
+			else
+				return (smin1 <= 0 && smax1 >= 0) ? -1 : 1;
+		}
 		if (!is_reg_const(reg2, is_jmp32)) {
 			swap(reg1, reg2);
 			swap(t1, t2);
@@ -16059,48 +16069,64 @@ static int is_scalar_branch_taken(struct bpf_reg_state *reg1, struct bpf_reg_sta
 			return 0;
 		break;
 	case BPF_JGT:
+		if (reg1 == reg2)
+			return 0;
 		if (umin1 > umax2)
 			return 1;
 		else if (umax1 <= umin2)
 			return 0;
 		break;
 	case BPF_JSGT:
+		if (reg1 == reg2)
+			return 0;
 		if (smin1 > smax2)
 			return 1;
 		else if (smax1 <= smin2)
 			return 0;
 		break;
 	case BPF_JLT:
+		if (reg1 == reg2)
+			return 0;
 		if (umax1 < umin2)
 			return 1;
 		else if (umin1 >= umax2)
 			return 0;
 		break;
 	case BPF_JSLT:
+		if (reg1 == reg2)
+			return 0;
 		if (smax1 < smin2)
 			return 1;
 		else if (smin1 >= smax2)
 			return 0;
 		break;
 	case BPF_JGE:
+		if (reg1 == reg2)
+			return 1;
 		if (umin1 >= umax2)
 			return 1;
 		else if (umax1 < umin2)
 			return 0;
 		break;
 	case BPF_JSGE:
+		if (reg1 == reg2)
+			return 1;
 		if (smin1 >= smax2)
 			return 1;
 		else if (smax1 < smin2)
 			return 0;
 		break;
 	case BPF_JLE:
+		if (reg1 == reg2)
+			return 1;
 		if (umax1 <= umin2)
 			return 1;
 		else if (umin1 > umax2)
 			return 0;
 		break;
 	case BPF_JSLE:
+		if (reg1 == reg2)
+			return 1;
 		if (smax1 <= smin2)
 			return 1;
 		else if (smin1 > smax2)
@@ -16439,6 +16465,13 @@ static int reg_set_min_max(struct bpf_verifier_env *env,
 	if (false_reg1->type != SCALAR_VALUE || false_reg2->type != SCALAR_VALUE)
 		return 0;
 
+	/* We compute branch direction for same SCALAR_VALUE registers in
+	 * is_scalar_branch_taken(). For unknown branch directions (e.g., BPF_JSET)
+	 * on the same registers, we don't need to adjust the min/max values.
+	 */
+	if (false_reg1 == false_reg2)
+		return 0;
+
 	/* fallthrough (FALSE) branch */
 	regs_refine_cond_op(false_reg1, false_reg2, rev_opcode(opcode), is_jmp32);
 	reg_bounds_sync(false_reg1);
-- 
2.43.0


Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ