Message-Id: <20240730051625.14349-17-viro@kernel.org>
Date: Tue, 30 Jul 2024 01:16:03 -0400
From: viro@...nel.org
To: linux-fsdevel@...r.kernel.org
Cc: amir73il@...il.com,
bpf@...r.kernel.org,
brauner@...nel.org,
cgroups@...r.kernel.org,
kvm@...r.kernel.org,
netdev@...r.kernel.org,
torvalds@...ux-foundation.org
Subject: [PATCH 17/39] bpf: resolve_pseudo_ldimm64(): take handling of a single ldimm64 insn into helper
From: Al Viro <viro@...iv.linux.org.uk>

Equivalent transformation. For one thing, the logic is easier to follow
that way. For another, it simplifies the control flow in the vicinity of
the struct fd handling, which will allow a switch to CLASS(fd) and make
the code much easier to verify wrt leaks.
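
To illustrate the CLASS(fd) point: below is a rough sketch, not part of
this patch, of what the fd handling in the new helper could look like
after a conversion to the scope-based cleanup machinery from
<linux/cleanup.h>, assuming the CLASS(fd, ...) definition in
<linux/file.h> (it pairs fdget() with an automatic fdput() when the
variable goes out of scope):

	/* sketch only: fdput(f) runs automatically on every return path */
	CLASS(fd, f)(fd);

	map = __bpf_map_get(f);
	if (IS_ERR(map)) {
		verbose(env, "fd %d is not pointing to valid bpf_map\n", fd);
		return PTR_ERR(map);	/* no explicit fdput() needed */
	}

	err = check_map_prog_compatibility(env, map, env->prog);
	if (err)
		return err;		/* ditto */

Once every failure exit in the helper is a plain return, that kind of
conversion becomes a mechanical and easily audited change.
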
Signed-off-by: Al Viro <viro@...iv.linux.org.uk>
---
 kernel/bpf/verifier.c | 342 +++++++++++++++++++++---------------------
 1 file changed, 172 insertions(+), 170 deletions(-)

diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 4cb5441ad75f..d54f61e12062 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -18414,11 +18414,180 @@ static bool bpf_map_is_cgroup_storage(struct bpf_map *map)
  *
  * NOTE: btf_vmlinux is required for converting pseudo btf_id.
  */
+static int do_one_ldimm64(struct bpf_verifier_env *env,
+			  struct bpf_insn *insn,
+			  struct bpf_insn_aux_data *aux)
+{
+	struct bpf_map *map;
+	struct fd f;
+	u64 addr;
+	u32 fd;
+	int err;
+
+	if (insn[0].src_reg == 0)
+		/* valid generic load 64-bit imm */
+		return 0;
+
+	if (insn[0].src_reg == BPF_PSEUDO_BTF_ID)
+		return check_pseudo_btf_id(env, insn, aux);
+
+	if (insn[0].src_reg == BPF_PSEUDO_FUNC) {
+		aux->ptr_type = PTR_TO_FUNC;
+		return 0;
+	}
+
+	/* In final convert_pseudo_ld_imm64() step, this is
+	 * converted into regular 64-bit imm load insn.
+	 */
+	switch (insn[0].src_reg) {
+	case BPF_PSEUDO_MAP_VALUE:
+	case BPF_PSEUDO_MAP_IDX_VALUE:
+		break;
+	case BPF_PSEUDO_MAP_FD:
+	case BPF_PSEUDO_MAP_IDX:
+		if (insn[1].imm == 0)
+			break;
+		fallthrough;
+	default:
+		verbose(env, "unrecognized bpf_ld_imm64 insn\n");
+		return -EINVAL;
+	}
+
+	switch (insn[0].src_reg) {
+	case BPF_PSEUDO_MAP_IDX_VALUE:
+	case BPF_PSEUDO_MAP_IDX:
+		if (bpfptr_is_null(env->fd_array)) {
+			verbose(env, "fd_idx without fd_array is invalid\n");
+			return -EPROTO;
+		}
+		if (copy_from_bpfptr_offset(&fd, env->fd_array,
+					    insn[0].imm * sizeof(fd),
+					    sizeof(fd)))
+			return -EFAULT;
+		break;
+	default:
+		fd = insn[0].imm;
+		break;
+	}
+
+	f = fdget(fd);
+	map = __bpf_map_get(f);
+	if (IS_ERR(map)) {
+		verbose(env, "fd %d is not pointing to valid bpf_map\n", fd);
+		return PTR_ERR(map);
+	}
+
+	err = check_map_prog_compatibility(env, map, env->prog);
+	if (err) {
+		fdput(f);
+		return err;
+	}
+
+	if (insn[0].src_reg == BPF_PSEUDO_MAP_FD ||
+	    insn[0].src_reg == BPF_PSEUDO_MAP_IDX) {
+		addr = (unsigned long)map;
+	} else {
+		u32 off = insn[1].imm;
+
+		if (off >= BPF_MAX_VAR_OFF) {
+			verbose(env, "direct value offset of %u is not allowed\n", off);
+			fdput(f);
+			return -EINVAL;
+		}
+
+		if (!map->ops->map_direct_value_addr) {
+			verbose(env, "no direct value access support for this map type\n");
+			fdput(f);
+			return -EINVAL;
+		}
+
+		err = map->ops->map_direct_value_addr(map, &addr, off);
+		if (err) {
+			verbose(env, "invalid access to map value pointer, value_size=%u off=%u\n",
+				map->value_size, off);
+			fdput(f);
+			return err;
+		}
+
+		aux->map_off = off;
+		addr += off;
+	}
+
+	insn[0].imm = (u32)addr;
+	insn[1].imm = addr >> 32;
+
+	/* check whether we recorded this map already */
+	for (int i = 0; i < env->used_map_cnt; i++) {
+		if (env->used_maps[i] == map) {
+			aux->map_index = i;
+			fdput(f);
+			return 0;
+		}
+	}
+
+	if (env->used_map_cnt >= MAX_USED_MAPS) {
+		verbose(env, "The total number of maps per program has reached the limit of %u\n",
+			MAX_USED_MAPS);
+		fdput(f);
+		return -E2BIG;
+	}
+
+	if (env->prog->sleepable)
+		atomic64_inc(&map->sleepable_refcnt);
+	/* hold the map. If the program is rejected by verifier,
+	 * the map will be released by release_maps() or it
+	 * will be used by the valid program until it's unloaded
+	 * and all maps are released in bpf_free_used_maps()
+	 */
+	bpf_map_inc(map);
+
+	aux->map_index = env->used_map_cnt;
+	env->used_maps[env->used_map_cnt++] = map;
+
+	if (bpf_map_is_cgroup_storage(map) &&
+	    bpf_cgroup_storage_assign(env->prog->aux, map)) {
+		verbose(env, "only one cgroup storage of each type is allowed\n");
+		fdput(f);
+		return -EBUSY;
+	}
+	if (map->map_type == BPF_MAP_TYPE_ARENA) {
+		if (env->prog->aux->arena) {
+			verbose(env, "Only one arena per program\n");
+			fdput(f);
+			return -EBUSY;
+		}
+		if (!env->allow_ptr_leaks || !env->bpf_capable) {
+			verbose(env, "CAP_BPF and CAP_PERFMON are required to use arena\n");
+			fdput(f);
+			return -EPERM;
+		}
+		if (!env->prog->jit_requested) {
+			verbose(env, "JIT is required to use arena\n");
+			fdput(f);
+			return -EOPNOTSUPP;
+		}
+		if (!bpf_jit_supports_arena()) {
+			verbose(env, "JIT doesn't support arena\n");
+			fdput(f);
+			return -EOPNOTSUPP;
+		}
+		env->prog->aux->arena = (void *)map;
+		if (!bpf_arena_get_user_vm_start(env->prog->aux->arena)) {
+			verbose(env, "arena's user address must be set via map_extra or mmap()\n");
+			fdput(f);
+			return -EINVAL;
+		}
+	}
+
+	fdput(f);
+	return 0;
+}
+
 static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
 {
 	struct bpf_insn *insn = env->prog->insnsi;
 	int insn_cnt = env->prog->len;
-	int i, j, err;
+	int i, err;
 
 	err = bpf_prog_calc_tag(env->prog);
 	if (err)
@@ -18433,12 +18602,6 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
 		}
 
 		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
-			struct bpf_insn_aux_data *aux;
-			struct bpf_map *map;
-			struct fd f;
-			u64 addr;
-			u32 fd;
-
 			if (i == insn_cnt - 1 || insn[1].code != 0 ||
 			    insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
 			    insn[1].off != 0) {
@@ -18446,170 +18609,9 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
 				return -EINVAL;
 			}
 
-			if (insn[0].src_reg == 0)
-				/* valid generic load 64-bit imm */
-				goto next_insn;
-
-			if (insn[0].src_reg == BPF_PSEUDO_BTF_ID) {
-				aux = &env->insn_aux_data[i];
-				err = check_pseudo_btf_id(env, insn, aux);
-				if (err)
-					return err;
-				goto next_insn;
-			}
-
-			if (insn[0].src_reg == BPF_PSEUDO_FUNC) {
-				aux = &env->insn_aux_data[i];
-				aux->ptr_type = PTR_TO_FUNC;
-				goto next_insn;
-			}
-
-			/* In final convert_pseudo_ld_imm64() step, this is
-			 * converted into regular 64-bit imm load insn.
-			 */
-			switch (insn[0].src_reg) {
-			case BPF_PSEUDO_MAP_VALUE:
-			case BPF_PSEUDO_MAP_IDX_VALUE:
-				break;
-			case BPF_PSEUDO_MAP_FD:
-			case BPF_PSEUDO_MAP_IDX:
-				if (insn[1].imm == 0)
-					break;
-				fallthrough;
-			default:
-				verbose(env, "unrecognized bpf_ld_imm64 insn\n");
-				return -EINVAL;
-			}
-
-			switch (insn[0].src_reg) {
-			case BPF_PSEUDO_MAP_IDX_VALUE:
-			case BPF_PSEUDO_MAP_IDX:
-				if (bpfptr_is_null(env->fd_array)) {
-					verbose(env, "fd_idx without fd_array is invalid\n");
-					return -EPROTO;
-				}
-				if (copy_from_bpfptr_offset(&fd, env->fd_array,
-							    insn[0].imm * sizeof(fd),
-							    sizeof(fd)))
-					return -EFAULT;
-				break;
-			default:
-				fd = insn[0].imm;
-				break;
-			}
-
-			f = fdget(fd);
-			map = __bpf_map_get(f);
-			if (IS_ERR(map)) {
-				verbose(env, "fd %d is not pointing to valid bpf_map\n", fd);
-				return PTR_ERR(map);
-			}
-
-			err = check_map_prog_compatibility(env, map, env->prog);
-			if (err) {
-				fdput(f);
+			err = do_one_ldimm64(env, insn, &env->insn_aux_data[i]);
+			if (err)
 				return err;
-			}
-
-			aux = &env->insn_aux_data[i];
-			if (insn[0].src_reg == BPF_PSEUDO_MAP_FD ||
-			    insn[0].src_reg == BPF_PSEUDO_MAP_IDX) {
-				addr = (unsigned long)map;
-			} else {
-				u32 off = insn[1].imm;
-
-				if (off >= BPF_MAX_VAR_OFF) {
-					verbose(env, "direct value offset of %u is not allowed\n", off);
-					fdput(f);
-					return -EINVAL;
-				}
-
-				if (!map->ops->map_direct_value_addr) {
-					verbose(env, "no direct value access support for this map type\n");
-					fdput(f);
-					return -EINVAL;
-				}
-
-				err = map->ops->map_direct_value_addr(map, &addr, off);
-				if (err) {
-					verbose(env, "invalid access to map value pointer, value_size=%u off=%u\n",
-						map->value_size, off);
-					fdput(f);
-					return err;
-				}
-
-				aux->map_off = off;
-				addr += off;
-			}
-
-			insn[0].imm = (u32)addr;
-			insn[1].imm = addr >> 32;
-
-			/* check whether we recorded this map already */
-			for (j = 0; j < env->used_map_cnt; j++) {
-				if (env->used_maps[j] == map) {
-					aux->map_index = j;
-					fdput(f);
-					goto next_insn;
-				}
-			}
-
-			if (env->used_map_cnt >= MAX_USED_MAPS) {
-				verbose(env, "The total number of maps per program has reached the limit of %u\n",
-					MAX_USED_MAPS);
-				fdput(f);
-				return -E2BIG;
-			}
-
-			if (env->prog->sleepable)
-				atomic64_inc(&map->sleepable_refcnt);
-			/* hold the map. If the program is rejected by verifier,
-			 * the map will be released by release_maps() or it
-			 * will be used by the valid program until it's unloaded
-			 * and all maps are released in bpf_free_used_maps()
-			 */
-			bpf_map_inc(map);
-
-			aux->map_index = env->used_map_cnt;
-			env->used_maps[env->used_map_cnt++] = map;
-
-			if (bpf_map_is_cgroup_storage(map) &&
-			    bpf_cgroup_storage_assign(env->prog->aux, map)) {
-				verbose(env, "only one cgroup storage of each type is allowed\n");
-				fdput(f);
-				return -EBUSY;
-			}
-			if (map->map_type == BPF_MAP_TYPE_ARENA) {
-				if (env->prog->aux->arena) {
-					verbose(env, "Only one arena per program\n");
-					fdput(f);
-					return -EBUSY;
-				}
-				if (!env->allow_ptr_leaks || !env->bpf_capable) {
-					verbose(env, "CAP_BPF and CAP_PERFMON are required to use arena\n");
-					fdput(f);
-					return -EPERM;
-				}
-				if (!env->prog->jit_requested) {
-					verbose(env, "JIT is required to use arena\n");
-					fdput(f);
-					return -EOPNOTSUPP;
-				}
-				if (!bpf_jit_supports_arena()) {
-					verbose(env, "JIT doesn't support arena\n");
-					fdput(f);
-					return -EOPNOTSUPP;
-				}
-				env->prog->aux->arena = (void *)map;
-				if (!bpf_arena_get_user_vm_start(env->prog->aux->arena)) {
-					verbose(env, "arena's user address must be set via map_extra or mmap()\n");
-					fdput(f);
-					return -EINVAL;
-				}
-			}
-
-			fdput(f);
-next_insn:
 			insn++;
 			i++;
 			continue;
--
2.39.2