Message-ID: <20221114134720.1057939-3-xukuohai@huawei.com>
Date:   Mon, 14 Nov 2022 08:47:20 -0500
From:   Xu Kuohai <xukuohai@...wei.com>
To:     <bpf@...r.kernel.org>, <linux-kernel@...r.kernel.org>,
        <linux-kselftest@...r.kernel.org>
CC:     Martin KaFai Lau <martin.lau@...ux.dev>,
        Alexei Starovoitov <ast@...nel.org>,
        Daniel Borkmann <daniel@...earbox.net>,
        Andrii Nakryiko <andrii@...nel.org>,
        Song Liu <song@...nel.org>, Yonghong Song <yhs@...com>,
        John Fastabend <john.fastabend@...il.com>,
        KP Singh <kpsingh@...nel.org>,
        Stanislav Fomichev <sdf@...gle.com>,
        Hao Luo <haoluo@...gle.com>, Jiri Olsa <jolsa@...nel.org>,
        Kumar Kartikeya Dwivedi <memxor@...il.com>,
        Mykola Lysenko <mykolal@...com>, Shuah Khan <shuah@...nel.org>
Subject: [PATCH bpf 2/2] bpf: Set and check spin lock value in sk_storage_map_test

Update sk_storage_map_test to make sure the kernel neither copies a
non-zero user-supplied spin lock value into the kernel nor copies the
kernel's spin lock value back to user space.

If the user-supplied spin lock value were copied into the kernel, this
test case would make the kernel spin on the copied lock, resulting in
an RCU stall and a softlockup.

Signed-off-by: Xu Kuohai <xukuohai@...wei.com>
---
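For reference (not part of the patch), the contract the updated test
exercises is roughly the sketch below. It assumes map_fd and sk_fd have
already been set up with the spin-lock BTF and an existing element, as
test_sk_storage_map_basic() does; the helper name is made up for the
sketch.

#include <assert.h>
#include <bpf/bpf.h>

/* Sketch only: map_fd is a BPF_MAP_TYPE_SK_STORAGE map whose value embeds
 * a struct bpf_spin_lock, sk_fd is the socket key, and an element for
 * sk_fd already exists -- all set up as in test_sk_storage_map_basic().
 */
static void check_lock_value_not_copied(int map_fd, int sk_fd)
{
	struct {
		int cnt;
		int lock;	/* overlays struct bpf_spin_lock in the value BTF */
	} value = { .cnt = 0xeB9f, .lock = 1, }, lookup_value;

	/* Update under BPF_F_LOCK: the kernel must take its own internal
	 * lock and copy only the non-lock fields, ignoring value.lock.
	 */
	assert(!bpf_map_update_elem(map_fd, &sk_fd, &value,
				    BPF_EXIST | BPF_F_LOCK));

	/* Lookup under BPF_F_LOCK: the copy returned to user space must
	 * have the lock field zeroed, not the kernel's internal lock state.
	 */
	assert(!bpf_map_lookup_elem_flags(map_fd, &sk_fd, &lookup_value,
					  BPF_F_LOCK));
	assert(lookup_value.lock == 0 && lookup_value.cnt == value.cnt);
}
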
 .../selftests/bpf/map_tests/sk_storage_map.c  | 36 ++++++++++---------
 1 file changed, 20 insertions(+), 16 deletions(-)

diff --git a/tools/testing/selftests/bpf/map_tests/sk_storage_map.c b/tools/testing/selftests/bpf/map_tests/sk_storage_map.c
index 099eb4dfd4f7..18405c3b7cee 100644
--- a/tools/testing/selftests/bpf/map_tests/sk_storage_map.c
+++ b/tools/testing/selftests/bpf/map_tests/sk_storage_map.c
@@ -458,7 +458,7 @@ static void test_sk_storage_map_basic(void)
 	struct {
 		int cnt;
 		int lock;
-	} value = { .cnt = 0xeB9f, .lock = 0, }, lookup_value;
+	} value = { .cnt = 0xeB9f, .lock = 1, }, lookup_value;
 	struct bpf_map_create_opts bad_xattr;
 	int btf_fd, map_fd, sk_fd, err;
 
@@ -483,38 +483,41 @@ static void test_sk_storage_map_basic(void)
 	      "err:%d errno:%d\n", err, errno);
 	err = bpf_map_lookup_elem_flags(map_fd, &sk_fd, &lookup_value,
 					BPF_F_LOCK);
-	CHECK(err || lookup_value.cnt != value.cnt,
+	CHECK(err || lookup_value.lock || lookup_value.cnt != value.cnt,
 	      "bpf_map_lookup_elem_flags(BPF_F_LOCK)",
-	      "err:%d errno:%d cnt:%x(%x)\n",
-	      err, errno, lookup_value.cnt, value.cnt);
+	      "err:%d errno:%d lock:%x cnt:%x(%x)\n",
+	      err, errno, lookup_value.lock, lookup_value.cnt, value.cnt);
 
 	/* Bump the cnt and update with BPF_EXIST | BPF_F_LOCK */
 	value.cnt += 1;
+	value.lock = 2;
 	err = bpf_map_update_elem(map_fd, &sk_fd, &value,
 				  BPF_EXIST | BPF_F_LOCK);
 	CHECK(err, "bpf_map_update_elem(BPF_EXIST|BPF_F_LOCK)",
 	      "err:%d errno:%d\n", err, errno);
 	err = bpf_map_lookup_elem_flags(map_fd, &sk_fd, &lookup_value,
 					BPF_F_LOCK);
-	CHECK(err || lookup_value.cnt != value.cnt,
+	CHECK(err || lookup_value.lock || lookup_value.cnt != value.cnt,
 	      "bpf_map_lookup_elem_flags(BPF_F_LOCK)",
-	      "err:%d errno:%d cnt:%x(%x)\n",
-	      err, errno, lookup_value.cnt, value.cnt);
+	      "err:%d errno:%d lock:%x cnt:%x(%x)\n",
+	      err, errno, lookup_value.lock, lookup_value.cnt, value.cnt);
 
 	/* Bump the cnt and update with BPF_EXIST */
 	value.cnt += 1;
+	value.lock = 2;
 	err = bpf_map_update_elem(map_fd, &sk_fd, &value, BPF_EXIST);
 	CHECK(err, "bpf_map_update_elem(BPF_EXIST)",
 	      "err:%d errno:%d\n", err, errno);
 	err = bpf_map_lookup_elem_flags(map_fd, &sk_fd, &lookup_value,
 					BPF_F_LOCK);
-	CHECK(err || lookup_value.cnt != value.cnt,
+	CHECK(err || lookup_value.lock || lookup_value.cnt != value.cnt,
 	      "bpf_map_lookup_elem_flags(BPF_F_LOCK)",
-	      "err:%d errno:%d cnt:%x(%x)\n",
-	      err, errno, lookup_value.cnt, value.cnt);
+	      "err:%d errno:%d lock:%x cnt:%x(%x)\n",
+	      err, errno, lookup_value.lock, lookup_value.cnt, value.cnt);
 
 	/* Update with BPF_NOEXIST */
 	value.cnt += 1;
+	value.lock = 2;
 	err = bpf_map_update_elem(map_fd, &sk_fd, &value,
 				  BPF_NOEXIST | BPF_F_LOCK);
 	CHECK(!err || errno != EEXIST,
@@ -526,22 +529,23 @@ static void test_sk_storage_map_basic(void)
 	value.cnt -= 1;
 	err = bpf_map_lookup_elem_flags(map_fd, &sk_fd, &lookup_value,
 					BPF_F_LOCK);
-	CHECK(err || lookup_value.cnt != value.cnt,
+	CHECK(err || lookup_value.lock || lookup_value.cnt != value.cnt,
 	      "bpf_map_lookup_elem_flags(BPF_F_LOCK)",
-	      "err:%d errno:%d cnt:%x(%x)\n",
-	      err, errno, lookup_value.cnt, value.cnt);
+	      "err:%d errno:%d lock:%x cnt:%x(%x)\n",
+	      err, errno, lookup_value.lock, lookup_value.cnt, value.cnt);
 
 	/* Bump the cnt again and update with map_flags == 0 */
 	value.cnt += 1;
+	value.lock = 2;
 	err = bpf_map_update_elem(map_fd, &sk_fd, &value, 0);
 	CHECK(err, "bpf_map_update_elem()", "err:%d errno:%d\n",
 	      err, errno);
 	err = bpf_map_lookup_elem_flags(map_fd, &sk_fd, &lookup_value,
 					BPF_F_LOCK);
-	CHECK(err || lookup_value.cnt != value.cnt,
+	CHECK(err || lookup_value.lock || lookup_value.cnt != value.cnt,
 	      "bpf_map_lookup_elem_flags(BPF_F_LOCK)",
-	      "err:%d errno:%d cnt:%x(%x)\n",
-	      err, errno, lookup_value.cnt, value.cnt);
+	      "err:%d errno:%d lock:%x cnt:%x(%x)\n",
+	      err, errno, lookup_value.lock, lookup_value.cnt, value.cnt);
 
 	/* Test delete elem */
 	err = bpf_map_delete_elem(map_fd, &sk_fd);
-- 
2.30.2
