Message-ID: <0d936412-a7fd-e861-f16b-91d970fd8896@solarflare.com>
Date: Fri, 6 Apr 2018 18:14:46 +0100
From: Edward Cree <ecree@...arflare.com>
To: Alexei Starovoitov <alexei.starovoitov@...il.com>,
Daniel Borkmann <daniel@...earbox.net>
CC: <netdev@...r.kernel.org>
Subject: [RFC PATCH v3 bpf-next 4/5] bpf/verifier: use call graph to
efficiently check max stack depth
Instead of recursively walking every possible call chain,
check_max_stack_depth() now uses an explicit call graph (recorded in
subprog_info.callees) which it topologically sorts, allowing it to find,
for each subprog, the maximum stack depth over all call chains reaching
it, and then use that depth to compute the stack depth it implies for
each of the subprog's callees. This is O(number of subprogs).
The call graph is populated as part of the check_subprogs() pass.
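As a hypothetical illustration (the subprog names and frame sizes below
are invented, not taken from any real program): if main() calls A and B,
and both A and B call C, the old recursive walk visits C once per call
chain, while the tsort processes C exactly once, after both A and B,
at which point its worst-case depth is already known:

    main (32 bytes) -> A (64 bytes)  -> C (96 bytes)
    main (32 bytes) -> B (128 bytes) -> C (96 bytes)

    total_stack_depth(C) = max(32 + 64, 32 + 128) + 96 = 256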
Signed-off-by: Edward Cree <ecree@...arflare.com>
---
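For reviewers who want to play with the algorithm outside the kernel,
here is a minimal userspace C sketch of the tsort-with-processing loop
used below.  The names, the bool-array graph representation and the
fixed sizes are illustrative only (the kernel code uses DECLARE_BITMAP,
find_first_bit and for_each_set_bit instead), and the rounding of frame
sizes to the interpreter's 32-byte granularity is omitted for brevity.

#include <stdbool.h>
#include <stdio.h>

#define MAX_SUBPROGS 8
#define MAX_STACK    512

static bool callees[MAX_SUBPROGS][MAX_SUBPROGS]; /* edge: caller -> callee */
static int stack_depth[MAX_SUBPROGS]; /* per-subprog frame size */
static int total_depth[MAX_SUBPROGS]; /* worst-case call-chain depth */

static int check_max_stack_depth(int nr_subprogs)
{
	bool frontier[MAX_SUBPROGS] = { [0] = true }; /* only main() to start */
	bool visited[MAX_SUBPROGS] = { false };
	int done = 0, i, j;

	total_depth[0] = stack_depth[0];
	while (done < nr_subprogs) {
		int cur = -1;

		/* select a frontier node: all of its callers are visited */
		for (i = 0; i < nr_subprogs; i++)
			if (frontier[i]) {
				cur = i;
				break;
			}
		if (cur < 0)
			return -1; /* frontier empty but nodes remain: cycle */
		frontier[cur] = false;
		visited[cur] = true;
		done++;
		/* total_depth[cur] is now final, so it can be checked */
		if (total_depth[cur] > MAX_STACK)
			return -1; /* deepest chain through cur is too large */
		for (i = 0; i < nr_subprogs; i++) {
			if (!callees[cur][i])
				continue;
			/* deepest known chain to i, going via cur */
			if (total_depth[cur] + stack_depth[i] > total_depth[i])
				total_depth[i] = total_depth[cur] + stack_depth[i];
			/* move i to the frontier once its last caller is done */
			for (j = 0; j < nr_subprogs; j++)
				if (!visited[j] && callees[j][i])
					break;
			if (j == nr_subprogs)
				frontier[i] = true;
		}
	}
	return 0;
}

int main(void)
{
	/* the graph from the commit message: main->A, main->B, A->C, B->C */
	stack_depth[0] = 32;  /* main */
	stack_depth[1] = 64;  /* A    */
	stack_depth[2] = 128; /* B    */
	stack_depth[3] = 96;  /* C    */
	callees[0][1] = callees[0][2] = true;
	callees[1][3] = callees[2][3] = true;

	if (!check_max_stack_depth(4))
		printf("total_depth(C) = %d\n", total_depth[3]); /* 256 */
	return 0;
}

On the example call graph from the commit message this prints
total_depth(C) = 256, matching the hand calculation above.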
 include/linux/bpf_verifier.h |   3 +
 kernel/bpf/verifier.c        | 168 +++++++++++++++++++++++++------------------
 2 files changed, 101 insertions(+), 70 deletions(-)
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 17990dd56e65..32f27cbe721b 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -175,8 +175,11 @@ static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log)
#define BPF_MAX_SUBPROGS 256
struct bpf_subprog_info {
+ /* which other subprogs does this one directly call? */
+ DECLARE_BITMAP(callees, BPF_MAX_SUBPROGS);
u32 start; /* insn idx of function entry point */
u16 stack_depth; /* max. stack depth used by this function */
+ u16 total_stack_depth; /* max. stack depth used by entire call chain */
};
/* single container for all structs
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 587eb445bfa2..45f530e4a65e 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -784,6 +784,7 @@ static int add_subprog(struct bpf_verifier_env *env, int off)
info = &env->subprog_info[ret];
info->start = off;
info->stack_depth = 0;
+ memset(info->callees, 0, sizeof(info->callees));
env->insn_aux_data[off].subprogno = ret;
}
return ret;
@@ -793,13 +794,13 @@ static int check_subprogs(struct bpf_verifier_env *env)
{
struct bpf_insn_aux_data *aux = env->insn_aux_data;
struct bpf_insn *insns = env->prog->insnsi;
- int insn_cnt = env->prog->len, i, err;
+ int insn_cnt = env->prog->len, i, subprog;
int cur_subprog;
/* Find subprog starts */
- err = add_subprog(env, 0); /* subprog 0, main(), starts at insn 0 */
- if (err < 0)
- return err;
+ subprog = add_subprog(env, 0); /* subprog 0, main(), starts at insn 0 */
+ if (subprog < 0)
+ return subprog;
for (i = 0; i < insn_cnt; i++)
if (insns[i].code == (BPF_JMP | BPF_CALL) &&
insns[i].src_reg == BPF_PSEUDO_CALL) {
@@ -814,9 +815,9 @@ static int check_subprogs(struct bpf_verifier_env *env)
/* add_subprog marks the callee entry point with the
* new subprogno.
*/
- err = add_subprog(env, i + insns[i].imm + 1);
- if (err < 0)
- return err;
+ subprog = add_subprog(env, i + insns[i].imm + 1);
+ if (subprog < 0)
+ return subprog;
}
if (env->log.level > 1)
@@ -842,6 +843,7 @@ static int check_subprogs(struct bpf_verifier_env *env)
u8 opcode = BPF_OP(insns[i].code);
int target;
+ cur_subprog = aux[i].subprogno;
/* Determine where control flow from this insn goes */
if (BPF_CLASS(insns[i].code) != BPF_JMP) {
fallthrough = true;
@@ -855,9 +857,18 @@ static int check_subprogs(struct bpf_verifier_env *env)
*/
continue;
case BPF_CALL:
- /* If pseudo-call, already handled marking the
- * callee. Both kinds of call will eventually
- * return and pass to the following insn.
+ if (insns[i].src_reg == BPF_PSEUDO_CALL) {
+ /* record edge in call graph */
+ subprog = find_subprog(env, i + insns[i].imm + 1);
+ if (subprog < 0)
+ return subprog;
+ __set_bit(subprog, env->subprog_info[cur_subprog].callees);
+ /* already handled marking the callee
+ * back in add_subprog, so jump is false
+ */
+ }
+ /* Call will eventually return and pass control
+ * to the following insn.
*/
fallthrough = true;
break;
@@ -876,7 +887,6 @@ static int check_subprogs(struct bpf_verifier_env *env)
}
/* Check that control flow doesn't leave the subprog */
- cur_subprog = aux[i].subprogno;
if (fallthrough) {
if (i + 1 >= insn_cnt) {
verbose(env, "no exit/jump at end of program (insn %d)\n",
@@ -1533,78 +1543,96 @@ static int update_stack_depth(struct bpf_verifier_env *env,
const struct bpf_func_state *func,
int off)
{
- u16 stack = env->subprog_info[func->subprogno].stack_depth;
+ struct bpf_subprog_info *info;
- if (stack >= -off)
- return 0;
+ if (!func)
+ return -EFAULT;
+ if (func->subprogno >= BPF_MAX_SUBPROGS)
+ return -E2BIG;
+ info = &env->subprog_info[func->subprogno];
/* update known max for given subprogram */
- env->subprog_info[func->subprogno].stack_depth = -off;
+ info->stack_depth = max_t(u16, info->stack_depth, -off);
return 0;
}
-/* starting from main bpf function walk all instructions of the function
- * and recursively walk all callees that given function can call.
- * Ignore jump and exit insns.
- * Since recursion is prevented by check_cfg() this algorithm
- * only needs a local stack of MAX_CALL_FRAMES to remember callsites
+/* Topologically sort the call graph, and thereby determine the maximum stack
+ * depth of each subprog's worst-case call chain. Store in total_stack_depth.
+ * The tsort is performed using Kahn's algorithm. Since that algorithm selects
+ * nodes in the order of the sorted output, we can do our processing in the loop
+ * that does the tsort, rather than storing the sorted list and then having a
+ * second loop to iterate over it and compute the total_stack_depth values.
*/
static int check_max_stack_depth(struct bpf_verifier_env *env)
{
- int depth = 0, frame = 0, subprog = 0, i = 0;
- struct bpf_insn_aux_data *aux = env->insn_aux_data;
- struct bpf_insn *insn = env->prog->insnsi;
- int insn_cnt = env->prog->len;
- int ret_insn[MAX_CALL_FRAMES];
- int ret_prog[MAX_CALL_FRAMES];
+ DECLARE_BITMAP(frontier, BPF_MAX_SUBPROGS) = {0};
+ DECLARE_BITMAP(visited, BPF_MAX_SUBPROGS) = {0};
+ int subprog, i, j;
+
+ /* subprog 0 has no incoming edges, and should be the only such */
+ __set_bit(0, frontier);
+ env->subprog_info[0].total_stack_depth = env->subprog_info[0].stack_depth;
+
+ while (true) {
+ /* select a frontier node */
+ subprog = find_first_bit(frontier, BPF_MAX_SUBPROGS);
+ if (subprog >= BPF_MAX_SUBPROGS) /* frontier is empty, done */
+ break;
+ /* remove it from the frontier */
+ __clear_bit(subprog, frontier);
+		/* validate its total stack depth.  Since all callers of this
+		 * subprog have already been processed, the value now in
+		 * subprog_info[subprog].total_stack_depth reflects the deepest
+		 * call chain of this subprog.
+		 */
+ if (env->subprog_info[subprog].total_stack_depth > MAX_BPF_STACK) {
+ verbose(env, "combined stack size of calls to %d (at insn %d) is %d. Too large\n",
+ subprog, env->subprog_info[subprog].start,
+ env->subprog_info[subprog].total_stack_depth);
+ return -EACCES;
+ }
+ if (env->log.level > 1) {
+ verbose(env, "combined stack size of calls to %d (at insn %d) is %d\n",
+ subprog, env->subprog_info[subprog].start,
+ env->subprog_info[subprog].total_stack_depth);
+ }
+ __set_bit(subprog, visited);
+ /* for each callee */
+ for_each_set_bit(i, env->subprog_info[subprog].callees,
+ BPF_MAX_SUBPROGS) {
+ /* round up to 32-bytes, since this is granularity of
+ * interpreter stack size
+ */
+ u16 stack_depth = round_up(max_t(u16, env->subprog_info[i].stack_depth, 1), 32);
-process_func:
- /* round up to 32-bytes, since this is granularity
- * of interpreter stack size
- */
- depth += round_up(max_t(u32, env->subprog_info[subprog].stack_depth, 1),
- 32);
- if (depth > MAX_BPF_STACK) {
- verbose(env, "combined stack size of %d calls is %d. Too large\n",
- frame + 1, depth);
- return -EACCES;
+ /* Update callee total stack depth. Since our own
+ * max total_stack_depth is known, the callee tsd is
+ * at least that plus the callee's stack frame size.
+ */
+ env->subprog_info[i].total_stack_depth = max_t(u16,
+ env->subprog_info[i].total_stack_depth,
+ env->subprog_info[subprog].total_stack_depth +
+ stack_depth);
+ /* does it have unvisited callers? */
+ for_each_clear_bit(j, visited, env->subprog_cnt) {
+ if (test_bit(i, env->subprog_info[j].callees))
+ break;
+ }
+ /* if not, add it to the frontier */
+ if (j >= env->subprog_cnt)
+ __set_bit(i, frontier);
+ }
}
-continue_func:
- for (; i < insn_cnt && aux[i].subprogno == subprog; i++) {
- if (insn[i].code != (BPF_JMP | BPF_CALL))
- continue;
- if (insn[i].src_reg != BPF_PSEUDO_CALL)
- continue;
- /* remember insn and function to return to */
- ret_insn[frame] = i + 1;
- ret_prog[frame] = subprog;
- /* find the callee */
- i = i + insn[i].imm + 1;
- subprog = find_subprog(env, i);
- if (subprog < 0) {
- WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
- i);
- return -EFAULT;
- }
- frame++;
- if (frame >= MAX_CALL_FRAMES) {
- WARN_ONCE(1, "verifier bug. Call stack is too deep\n");
- return -EFAULT;
- }
- goto process_func;
+ /* are any nodes left unvisited? */
+ subprog = find_first_zero_bit(visited, env->subprog_cnt);
+ if (subprog < env->subprog_cnt) {
+ /* then call graph is not acyclic, which shouldn't happen */
+ verbose(env, "verifier bug. Call graph has a cycle including subprog %d (at insn %d)\n",
+ subprog, env->subprog_info[subprog].start);
+ return -EFAULT;
}
- /* end of for() loop means the last insn of the 'subprog'
- * was reached. Doesn't matter whether it was JA or EXIT
- */
- if (frame == 0)
- return 0;
- depth -= round_up(max_t(u32, env->subprog_info[subprog].stack_depth, 1),
- 32);
- frame--;
- i = ret_insn[frame];
- subprog = ret_prog[frame];
- goto continue_func;
+ return 0;
}
#ifndef CONFIG_BPF_JIT_ALWAYS_ON