Message-ID: <2050196010b1bf1efa357cfddebd15a152582bb4.1737433945.git.dxu@dxuuu.xyz>
Date: Mon, 20 Jan 2025 21:35:11 -0700
From: Daniel Xu <dxu@...uu.xyz>
To: pabeni@...hat.com,
	kuba@...nel.org,
	hawk@...nel.org,
	maciej.fijalkowski@...el.com,
	ast@...nel.org,
	edumazet@...gle.com,
	daniel@...earbox.net,
	davem@...emloft.net,
	bjorn@...nel.org,
	john.fastabend@...il.com,
	magnus.karlsson@...el.com,
	andrii@...nel.org
Cc: martin.lau@...ux.dev,
	eddyz87@...il.com,
	song@...nel.org,
	yonghong.song@...ux.dev,
	kpsingh@...nel.org,
	sdf@...ichev.me,
	haoluo@...gle.com,
	jolsa@...nel.org,
	jonathan.lemon@...il.com,
	horms@...nel.org,
	bpf@...r.kernel.org,
	linux-kernel@...r.kernel.org,
	netdev@...r.kernel.org
Subject: [PATCH bpf-next 2/3] bpf: map: Thread null elision metadata to map_gen_lookup

Add an extra parameter to the map_gen_lookup callback so that the bounds
check can be omitted when the lookup is known to be in bounds.

The next commit will take advantage of this new information.
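As a rough illustration only (not the change made in the next commit;
the function name, register choices, and jump offset below are made up
for clarity), a map_gen_lookup implementation could consume the new
flag along these lines:

#include <linux/bpf.h>
#include <linux/filter.h>

/* Illustrative sketch: skip emitting the bounds-check instruction when
 * the verifier has already proven the key is within max_entries.
 */
static int example_map_gen_lookup(struct bpf_map *map,
				  struct bpf_insn *insn_buf,
				  bool inbounds)
{
	struct bpf_insn *insn = insn_buf;

	/* r0 = *(u32 *)(r2 + 0): load the index from the key pointer */
	*insn++ = BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0);

	/* Only emit the bounds check when the index is not provably
	 * in range.
	 */
	if (!inbounds)
		/* if r0 >= max_entries goto the "return NULL" path */
		*insn++ = BPF_JMP_IMM(BPF_JGE, BPF_REG_0,
				      map->max_entries, 2);

	/* ... emit the element address computation and NULL path ... */

	return insn - insn_buf;
}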

Signed-off-by: Daniel Xu <dxu@...uu.xyz>
---
 include/linux/bpf.h   |  2 +-
 kernel/bpf/arraymap.c | 11 ++++++++---
 kernel/bpf/hashtab.c  | 14 ++++++++++----
 kernel/bpf/verifier.c |  2 +-
 net/xdp/xskmap.c      |  4 +++-
 5 files changed, 23 insertions(+), 10 deletions(-)

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index feda0ce90f5a..da8b420095c9 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -117,7 +117,7 @@ struct bpf_map_ops {
 	 * may manipulate it, exists.
 	 */
 	void (*map_fd_put_ptr)(struct bpf_map *map, void *ptr, bool need_defer);
-	int (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
+	int (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf, bool inbounds);
 	u32 (*map_fd_sys_lookup_elem)(void *ptr);
 	void (*map_seq_show_elem)(struct bpf_map *map, void *key,
 				  struct seq_file *m);
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index eb28c0f219ee..8dbdceeead95 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -205,7 +205,9 @@ static int array_map_direct_value_meta(const struct bpf_map *map, u64 imm,
 }
 
 /* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
-static int array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
+static int array_map_gen_lookup(struct bpf_map *map,
+				struct bpf_insn *insn_buf,
+				bool inbounds)
 {
 	struct bpf_array *array = container_of(map, struct bpf_array, map);
 	struct bpf_insn *insn = insn_buf;
@@ -250,7 +252,9 @@ static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
 }
 
 /* emit BPF instructions equivalent to C code of percpu_array_map_lookup_elem() */
-static int percpu_array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
+static int percpu_array_map_gen_lookup(struct bpf_map *map,
+				       struct bpf_insn *insn_buf,
+				       bool inbounds)
 {
 	struct bpf_array *array = container_of(map, struct bpf_array, map);
 	struct bpf_insn *insn = insn_buf;
@@ -1392,7 +1396,8 @@ static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
 }
 
 static int array_of_map_gen_lookup(struct bpf_map *map,
-				   struct bpf_insn *insn_buf)
+				   struct bpf_insn *insn_buf,
+				   bool inbounds)
 {
 	struct bpf_array *array = container_of(map, struct bpf_array, map);
 	u32 elem_size = array->elem_size;
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 4a9eeb7aef85..103cdab85977 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -720,7 +720,9 @@ static void *htab_map_lookup_elem(struct bpf_map *map, void *key)
  * bpf_prog
  *   __htab_map_lookup_elem
  */
-static int htab_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
+static int htab_map_gen_lookup(struct bpf_map *map,
+			       struct bpf_insn *insn_buf,
+			       bool inbounds)
 {
 	struct bpf_insn *insn = insn_buf;
 	const int ret = BPF_REG_0;
@@ -760,7 +762,8 @@ static void *htab_lru_map_lookup_elem_sys(struct bpf_map *map, void *key)
 }
 
 static int htab_lru_map_gen_lookup(struct bpf_map *map,
-				   struct bpf_insn *insn_buf)
+				   struct bpf_insn *insn_buf,
+				   bool inbounds)
 {
 	struct bpf_insn *insn = insn_buf;
 	const int ret = BPF_REG_0;
@@ -2342,7 +2345,9 @@ static void *htab_percpu_map_lookup_elem(struct bpf_map *map, void *key)
 }
 
 /* inline bpf_map_lookup_elem() call for per-CPU hashmap */
-static int htab_percpu_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
+static int htab_percpu_map_gen_lookup(struct bpf_map *map,
+				      struct bpf_insn *insn_buf,
+				      bool inbounds)
 {
 	struct bpf_insn *insn = insn_buf;
 
@@ -2626,7 +2631,8 @@ static void *htab_of_map_lookup_elem(struct bpf_map *map, void *key)
 }
 
 static int htab_of_map_gen_lookup(struct bpf_map *map,
-				  struct bpf_insn *insn_buf)
+				  struct bpf_insn *insn_buf,
+				  bool inbounds)
 {
 	struct bpf_insn *insn = insn_buf;
 	const int ret = BPF_REG_0;
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index e83145c2260d..2ed2fd3c42f2 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -21582,7 +21582,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
 			ops = map_ptr->ops;
 			if (insn->imm == BPF_FUNC_map_lookup_elem &&
 			    ops->map_gen_lookup) {
-				cnt = ops->map_gen_lookup(map_ptr, insn_buf);
+				cnt = ops->map_gen_lookup(map_ptr, insn_buf, aux->map_ptr_state.inbounds);
 				if (cnt == -EOPNOTSUPP)
 					goto patch_map_ops_generic;
 				if (cnt <= 0 || cnt >= INSN_BUF_SIZE) {
diff --git a/net/xdp/xskmap.c b/net/xdp/xskmap.c
index afa457506274..78579583b0a1 100644
--- a/net/xdp/xskmap.c
+++ b/net/xdp/xskmap.c
@@ -118,7 +118,9 @@ static int xsk_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
 	return 0;
 }
 
-static int xsk_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
+static int xsk_map_gen_lookup(struct bpf_map *map,
+			      struct bpf_insn *insn_buf,
+			      bool inbounds)
 {
 	const int ret = BPF_REG_0, mp = BPF_REG_1, index = BPF_REG_2;
 	struct bpf_insn *insn = insn_buf;
-- 
2.47.1

