[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20200517195727.279322-4-andriin@fb.com>
Date: Sun, 17 May 2020 12:57:23 -0700
From: Andrii Nakryiko <andriin@...com>
To: <bpf@...r.kernel.org>, <netdev@...r.kernel.org>, <ast@...com>,
<daniel@...earbox.net>
CC: <andrii.nakryiko@...il.com>, <kernel-team@...com>,
Andrii Nakryiko <andriin@...com>,
"Paul E . McKenney" <paulmck@...nel.org>,
Jonathan Lemon <jonathan.lemon@...il.com>
Subject: [PATCH v2 bpf-next 3/7] bpf: track reference type in verifier
This patch extends the verifier's reference tracking logic by tracking
a reference type and ensuring acquire()/release() functions accept only the
right types of references. Currently, ambiguity between socket and ringbuf
record references is resolved through use of different types of input
arguments to bpf_sk_release() and bpf_ringbuf_commit(): ARG_PTR_TO_SOCK_COMMON
and ARG_PTR_TO_ALLOC_MEM, respectively. It is thus impossible to pass ringbuf
record pointer to bpf_sk_release() (and vice versa for socket).
On the other hand, patch #1 added ARG_PTR_TO_ALLOC_MEM arg type, which, from
the point of view of verifier, is a pointer to a fixed-sized allocated memory
region. This is a generic enough concept that it could be used for other BPF
helpers (e.g., a malloc/free pair, if added). Once we have that, there will be
nothing to prevent passing a ringbuf record to bpf_mem_free() (or whatever)
helper. To that end, this patch adds a capability to specify and track
reference types, that would be validated by verifier to ensure correct match
between acquire() and release() helpers.
This patch can be postponed for later, so it is posted separately from the
other verifier changes.
Signed-off-by: Andrii Nakryiko <andriin@...com>
---
include/linux/bpf_verifier.h | 8 +++
kernel/bpf/verifier.c | 96 +++++++++++++++++++++++++++++-------
2 files changed, 86 insertions(+), 18 deletions(-)
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index ca08db4ffb5f..26b2c4730f5a 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -164,6 +164,12 @@ struct bpf_stack_state {
u8 slot_type[BPF_REG_SIZE];
};
+enum bpf_ref_type {
+ BPF_REF_INVALID,
+ BPF_REF_SOCKET,
+ BPF_REF_RINGBUF,
+};
+
struct bpf_reference_state {
/* Track each reference created with a unique id, even if the same
* instruction creates the reference multiple times (eg, via CALL).
@@ -173,6 +179,8 @@ struct bpf_reference_state {
* is used purely to inform the user of a reference leak.
*/
int insn_idx;
+ /* Type of reference being tracked */
+ enum bpf_ref_type ref_type;
};
/* state of the program:
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 4ce3e031554d..2a6a48c1e10b 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -436,6 +436,19 @@ static bool is_release_function(enum bpf_func_id func_id)
func_id == BPF_FUNC_ringbuf_discard;
}
+static enum bpf_ref_type get_release_ref_type(enum bpf_func_id func_id)
+{
+ switch (func_id) {
+ case BPF_FUNC_sk_release:
+ return BPF_REF_SOCKET;
+ case BPF_FUNC_ringbuf_submit:
+ case BPF_FUNC_ringbuf_discard:
+ return BPF_REF_RINGBUF;
+ default:
+ return BPF_REF_INVALID;
+ }
+}
+
static bool may_be_acquire_function(enum bpf_func_id func_id)
{
return func_id == BPF_FUNC_sk_lookup_tcp ||
@@ -464,6 +477,28 @@ static bool is_acquire_function(enum bpf_func_id func_id,
return false;
}
+static enum bpf_ref_type get_acquire_ref_type(enum bpf_func_id func_id,
+ const struct bpf_map *map)
+{
+ enum bpf_map_type map_type = map ? map->map_type : BPF_MAP_TYPE_UNSPEC;
+
+ switch (func_id) {
+ case BPF_FUNC_sk_lookup_tcp:
+ case BPF_FUNC_sk_lookup_udp:
+ case BPF_FUNC_skc_lookup_tcp:
+ return BPF_REF_SOCKET;
+ case BPF_FUNC_map_lookup_elem:
+ if (map_type == BPF_MAP_TYPE_SOCKMAP ||
+ map_type == BPF_MAP_TYPE_SOCKHASH)
+ return BPF_REF_SOCKET;
+ return BPF_REF_INVALID;
+ case BPF_FUNC_ringbuf_reserve:
+ return BPF_REF_RINGBUF;
+ default:
+ return BPF_REF_INVALID;
+ }
+}
+
static bool is_ptr_cast_function(enum bpf_func_id func_id)
{
return func_id == BPF_FUNC_tcp_sock ||
@@ -736,7 +771,8 @@ static int realloc_func_state(struct bpf_func_state *state, int stack_size,
* On success, returns a valid pointer id to associate with the register
* On failure, returns a negative errno.
*/
-static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx)
+static int acquire_reference_state(struct bpf_verifier_env *env,
+ int insn_idx, enum bpf_ref_type ref_type)
{
struct bpf_func_state *state = cur_func(env);
int new_ofs = state->acquired_refs;
@@ -748,25 +784,32 @@ static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx)
id = ++env->id_gen;
state->refs[new_ofs].id = id;
state->refs[new_ofs].insn_idx = insn_idx;
+ state->refs[new_ofs].ref_type = ref_type;
return id;
}
/* release function corresponding to acquire_reference_state(). Idempotent. */
-static int release_reference_state(struct bpf_func_state *state, int ptr_id)
+static int release_reference_state(struct bpf_func_state *state, int ptr_id,
+ enum bpf_ref_type ref_type)
{
+ struct bpf_reference_state *ref;
int i, last_idx;
last_idx = state->acquired_refs - 1;
for (i = 0; i < state->acquired_refs; i++) {
- if (state->refs[i].id == ptr_id) {
- if (last_idx && i != last_idx)
- memcpy(&state->refs[i], &state->refs[last_idx],
- sizeof(*state->refs));
- memset(&state->refs[last_idx], 0, sizeof(*state->refs));
- state->acquired_refs--;
- return 0;
- }
+ ref = &state->refs[i];
+ if (ref->id != ptr_id)
+ continue;
+
+ if (ref_type != BPF_REF_INVALID && ref->ref_type != ref_type)
+ return -EINVAL;
+
+ if (i != last_idx)
+ memcpy(ref, &state->refs[last_idx], sizeof(*ref));
+ memset(&state->refs[last_idx], 0, sizeof(*ref));
+ state->acquired_refs--;
+ return 0;
}
return -EINVAL;
}
@@ -4296,14 +4339,13 @@ static void release_reg_references(struct bpf_verifier_env *env,
/* The pointer with the specified id has released its reference to kernel
* resources. Identify all copies of the same pointer and clear the reference.
*/
-static int release_reference(struct bpf_verifier_env *env,
- int ref_obj_id)
+static int release_reference(struct bpf_verifier_env *env, int ref_obj_id,
+ enum bpf_ref_type ref_type)
{
struct bpf_verifier_state *vstate = env->cur_state;
- int err;
- int i;
+ int err, i;
- err = release_reference_state(cur_func(env), ref_obj_id);
+ err = release_reference_state(cur_func(env), ref_obj_id, ref_type);
if (err)
return err;
@@ -4664,7 +4706,16 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
return err;
}
} else if (is_release_function(func_id)) {
- err = release_reference(env, meta.ref_obj_id);
+ enum bpf_ref_type ref_type;
+
+ ref_type = get_release_ref_type(func_id);
+ if (ref_type == BPF_REF_INVALID) {
+ verbose(env, "unrecognized reference accepted by func %s#%d\n",
+ func_id_name(func_id), func_id);
+ return -EFAULT;
+ }
+
+ err = release_reference(env, meta.ref_obj_id, ref_type);
if (err) {
verbose(env, "func %s#%d reference has not been acquired before\n",
func_id_name(func_id), func_id);
@@ -4747,8 +4798,17 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
/* For release_reference() */
regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id;
} else if (is_acquire_function(func_id, meta.map_ptr)) {
- int id = acquire_reference_state(env, insn_idx);
+ enum bpf_ref_type ref_type;
+ int id;
+
+ ref_type = get_acquire_ref_type(func_id, meta.map_ptr);
+ if (ref_type == BPF_REF_INVALID) {
+ verbose(env, "unrecognized reference returned by func %s#%d\n",
+ func_id_name(func_id), func_id);
+ return -EINVAL;
+ }
+ id = acquire_reference_state(env, insn_idx, ref_type);
if (id < 0)
return id;
/* For mark_ptr_or_null_reg() */
@@ -6731,7 +6791,7 @@ static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno,
* No one could have freed the reference state before
* doing the NULL check.
*/
- WARN_ON_ONCE(release_reference_state(state, id));
+ WARN_ON_ONCE(release_reference_state(state, id, BPF_REF_INVALID));
for (i = 0; i <= vstate->curframe; i++)
__mark_ptr_or_null_regs(vstate->frame[i], id, is_null);
--
2.24.1
Powered by blists - more mailing lists