[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20220220134813.3411982-6-memxor@gmail.com>
Date: Sun, 20 Feb 2022 19:18:03 +0530
From: Kumar Kartikeya Dwivedi <memxor@...il.com>
To: bpf@...r.kernel.org
Cc: Hao Luo <haoluo@...gle.com>, Alexei Starovoitov <ast@...nel.org>,
Andrii Nakryiko <andrii@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>,
Toke Høiland-Jørgensen <toke@...hat.com>,
Jesper Dangaard Brouer <hawk@...nel.org>,
netfilter-devel@...r.kernel.org, netdev@...r.kernel.org
Subject: [PATCH bpf-next v1 05/15] bpf: Allow storing PTR_TO_PERCPU_BTF_ID in map
Make adjustments to the code to allow storing PTR_TO_PERCPU_BTF_ID in a
map. Note that these are not yet supported as referenced pointers, so
that is explicitly disallowed during BTF tag parsing. Similar to the 'ref'
tag, a new 'percpu' tag composes with the 'btf_id' tag on the pointed-to
type to hint that it is a percpu btf_id pointer.
Cc: Hao Luo <haoluo@...gle.com>
Signed-off-by: Kumar Kartikeya Dwivedi <memxor@...il.com>
---
include/linux/bpf.h | 3 ++-
kernel/bpf/btf.c | 27 ++++++++++++++++++++++-----
kernel/bpf/verifier.c | 37 ++++++++++++++++++++++++++++---------
3 files changed, 52 insertions(+), 15 deletions(-)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 923b9f36c275..843c8c01cf9d 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -161,7 +161,8 @@ enum {
};
enum {
- BPF_MAP_VALUE_OFF_F_REF = (1U << 0),
+ BPF_MAP_VALUE_OFF_F_REF = (1U << 0),
+ BPF_MAP_VALUE_OFF_F_PERCPU = (1U << 1),
};
struct bpf_map_value_off_desc {
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index 20124f4a421c..eb57584ee0a8 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -3146,12 +3146,12 @@ static s32 btf_find_by_name_kind_all(const char *name, u32 kind, struct btf **bt
static int btf_find_field_kptr(const struct btf *btf, const struct btf_type *t,
u32 off, int sz, void *data)
{
- bool btf_id_tag = false, ref_tag = false;
+ bool btf_id_tag = false, ref_tag = false, percpu_tag = false;
struct bpf_map_value_off *tab;
struct bpf_map *map = data;
+ int nr_off, ret, flags = 0;
struct module *mod = NULL;
struct btf *kernel_btf;
- int nr_off, ret;
s32 id;
/* For PTR, sz is always == 8 */
@@ -3174,6 +3174,13 @@ static int btf_find_field_kptr(const struct btf *btf, const struct btf_type *t,
goto end;
}
ref_tag = true;
+ } else if (!strcmp("kernel.bpf.percpu", __btf_name_by_offset(btf, t->name_off))) {
+ /* repeated tag */
+ if (percpu_tag) {
+ ret = -EINVAL;
+ goto end;
+ }
+ percpu_tag = true;
} else if (!strncmp("kernel.", __btf_name_by_offset(btf, t->name_off),
sizeof("kernel.") - 1)) {
/* TODO: Should we reject these when loading BTF? */
@@ -3185,13 +3192,18 @@ static int btf_find_field_kptr(const struct btf *btf, const struct btf_type *t,
t = btf_type_by_id(btf, t->type);
}
if (!btf_id_tag) {
- /* 'ref' tag must be specified together with 'btf_id' tag */
- if (ref_tag) {
+ /* 'ref' or 'percpu' tag must be specified together with 'btf_id' tag */
+ if (ref_tag || percpu_tag) {
ret = -EINVAL;
goto end;
}
return 0;
}
+ /* referenced percpu btf_id pointer is not yet supported */
+ if (ref_tag && percpu_tag) {
+ ret = -EINVAL;
+ goto end;
+ }
/* Get the base type */
if (btf_type_is_modifier(t))
@@ -3241,11 +3253,16 @@ static int btf_find_field_kptr(const struct btf *btf, const struct btf_type *t,
}
}
+ if (ref_tag)
+ flags |= BPF_MAP_VALUE_OFF_F_REF;
+ else if (percpu_tag)
+ flags |= BPF_MAP_VALUE_OFF_F_PERCPU;
+
tab->off[nr_off].offset = off;
tab->off[nr_off].btf_id = id;
tab->off[nr_off].btf = kernel_btf;
tab->off[nr_off].module = mod;
- tab->off[nr_off].flags = ref_tag ? BPF_MAP_VALUE_OFF_F_REF : 0;
+ tab->off[nr_off].flags = flags;
tab->nr_off++;
return 0;
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index a9d8c0d3c919..00d6ab49033d 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1559,12 +1559,13 @@ static void mark_btf_ld_reg(struct bpf_verifier_env *env,
struct btf *btf, u32 btf_id,
enum bpf_type_flag flag)
{
- if (reg_type == SCALAR_VALUE) {
+ if (reg_type == SCALAR_VALUE ||
+ WARN_ON_ONCE(reg_type != PTR_TO_BTF_ID && reg_type != PTR_TO_PERCPU_BTF_ID)) {
mark_reg_unknown(env, regs, regno);
return;
}
mark_reg_known_zero(env, regs, regno);
- regs[regno].type = PTR_TO_BTF_ID | flag;
+ regs[regno].type = reg_type | flag;
regs[regno].btf = btf;
regs[regno].btf_id = btf_id;
}
@@ -3478,10 +3479,18 @@ static int map_ptr_to_btf_id_match_type(struct bpf_verifier_env *env,
bool ref_ptr)
{
const char *targ_name = kernel_type_name(off_desc->btf, off_desc->btf_id);
+ enum bpf_reg_type reg_type;
const char *reg_name = "";
- if (reg->type != PTR_TO_BTF_ID && reg->type != PTR_TO_BTF_ID_OR_NULL)
- goto end;
+ if (off_desc->flags & BPF_MAP_VALUE_OFF_F_PERCPU) {
+ if (reg->type != PTR_TO_PERCPU_BTF_ID &&
+ reg->type != (PTR_TO_PERCPU_BTF_ID | PTR_MAYBE_NULL))
+ goto end;
+ } else { /* referenced and unreferenced case */
+ if (reg->type != PTR_TO_BTF_ID &&
+ reg->type != (PTR_TO_BTF_ID | PTR_MAYBE_NULL))
+ goto end;
+ }
if (!btf_is_kernel(reg->btf)) {
verbose(env, "R%d must point to kernel BTF\n", regno);
@@ -3524,11 +3533,16 @@ static int map_ptr_to_btf_id_match_type(struct bpf_verifier_env *env,
if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, reg->off,
off_desc->btf, off_desc->btf_id))
goto end;
+
return 0;
end:
+ if (off_desc->flags & BPF_MAP_VALUE_OFF_F_PERCPU)
+ reg_type = PTR_TO_PERCPU_BTF_ID | PTR_MAYBE_NULL;
+ else
+ reg_type = PTR_TO_BTF_ID | PTR_MAYBE_NULL;
verbose(env, "invalid btf_id pointer access, R%d type=%s%s ", regno,
reg_type_str(env, reg->type), reg_name);
- verbose(env, "expected=%s%s\n", reg_type_str(env, PTR_TO_BTF_ID), targ_name);
+ verbose(env, "expected=%s%s\n", reg_type_str(env, reg_type), targ_name);
return -EINVAL;
}
@@ -3543,10 +3557,11 @@ static int check_map_ptr_to_btf_id(struct bpf_verifier_env *env, u32 regno, int
{
struct bpf_reg_state *reg = reg_state(env, regno), *val_reg;
struct bpf_insn *insn = &env->prog->insnsi[insn_idx];
+ enum bpf_reg_type reg_type = PTR_TO_BTF_ID;
+ bool ref_ptr = false, percpu_ptr = false;
struct bpf_map_value_off_desc *off_desc;
int insn_class = BPF_CLASS(insn->code);
struct bpf_map *map = reg->map_ptr;
- bool ref_ptr = false;
u32 ref_obj_id = 0;
int ret;
@@ -3561,7 +3576,6 @@ static int check_map_ptr_to_btf_id(struct bpf_verifier_env *env, u32 regno, int
off_desc = bpf_map_ptr_off_contains(map, off + reg->var_off.value);
if (!off_desc)
return 0;
- ref_ptr = off_desc->flags & BPF_MAP_VALUE_OFF_F_REF;
if (WARN_ON_ONCE(size != bpf_size_to_bytes(BPF_DW)))
return -EACCES;
@@ -3574,6 +3588,11 @@ static int check_map_ptr_to_btf_id(struct bpf_verifier_env *env, u32 regno, int
return -EPERM;
}
+ ref_ptr = off_desc->flags & BPF_MAP_VALUE_OFF_F_REF;
+ percpu_ptr = off_desc->flags & BPF_MAP_VALUE_OFF_F_PERCPU;
+ if (percpu_ptr)
+ reg_type = PTR_TO_PERCPU_BTF_ID;
+
if (is_xchg_insn(insn)) {
/* We do checks and updates during register fill call for fetch case */
if (t != BPF_READ || value_regno < 0)
@@ -3603,7 +3622,7 @@ static int check_map_ptr_to_btf_id(struct bpf_verifier_env *env, u32 regno, int
ref_obj_id = ret;
}
/* val_reg might be NULL at this point */
- mark_btf_ld_reg(env, cur_regs(env), value_regno, PTR_TO_BTF_ID, off_desc->btf,
+ mark_btf_ld_reg(env, cur_regs(env), value_regno, reg_type, off_desc->btf,
off_desc->btf_id, PTR_MAYBE_NULL);
/* __mark_ptr_or_null_regs needs ref_obj_id == id to clear
* reference state for ptr == NULL branch.
@@ -3621,7 +3640,7 @@ static int check_map_ptr_to_btf_id(struct bpf_verifier_env *env, u32 regno, int
/* We can simply mark the value_regno receiving the pointer
* value from map as PTR_TO_BTF_ID, with the correct type.
*/
- mark_btf_ld_reg(env, cur_regs(env), value_regno, PTR_TO_BTF_ID, off_desc->btf,
+ mark_btf_ld_reg(env, cur_regs(env), value_regno, reg_type, off_desc->btf,
off_desc->btf_id, PTR_MAYBE_NULL);
val_reg->id = ++env->id_gen;
} else if (insn_class == BPF_STX) {
--
2.35.1
Powered by blists - more mailing lists