Message-ID: <20180601093253.15353.20744.stgit@john-Precision-Tower-5810>
Date:   Fri, 01 Jun 2018 02:32:53 -0700
From:   John Fastabend <john.fastabend@...il.com>
To:     alexei.starovoitov@...il.com, daniel@...earbox.net,
        davem@...emloft.net
Cc:     netdev@...r.kernel.org
Subject: [RFC PATCH 07/16] bpf: cfg: build call graph and detect
 unreachable/recursive call

From: Jiong Wang <jiong.wang@...ronome.com>

This patch builds the call graph during the instruction scan inside
check_subprogs, then uses that graph to detect recursive and unreachable
subprogs.
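
For reference, the check is a standard iterative depth-first walk over the
per-subprog callee lists with two-colour marking: a callee already in
EXPLORING state means a back edge (recursion), and any subprog that never
reaches EXPLORED after the walk from subprog 0 is unreachable. Below is a
minimal, self-contained userspace sketch of that idea, not the kernel code;
check_cgraph(), MAX_SUBPROGS and the fixed-size adjacency arrays are
illustrative only (the sketch also skips already-EXPLORED callees, whereas
the kernel walk simply revisits them):

#include <stdio.h>
#include <stdlib.h>

#define MAX_SUBPROGS	16
#define UNVISITED	0
#define EXPLORING	1
#define EXPLORED	2

static int ncallees[MAX_SUBPROGS];
static int callees[MAX_SUBPROGS][MAX_SUBPROGS];	/* adjacency lists */

static int check_cgraph(int subprog_cnt)
{
	int status[MAX_SUBPROGS] = { UNVISITED };
	int stack_node[MAX_SUBPROGS];	/* DFS stack: node ...          */
	int stack_edge[MAX_SUBPROGS];	/* ... and next callee to visit */
	int sp = 0, node = 0, edge = 0, i;

	status[0] = EXPLORING;
	while (1) {
		if (edge < ncallees[node]) {
			int callee = callees[node][edge];

			if (status[callee] == EXPLORING) {
				fprintf(stderr, "recursive call\n");
				return -1;
			}
			if (status[callee] == UNVISITED) {
				/* push current position and descend */
				stack_node[sp] = node;
				stack_edge[sp] = edge;
				sp++;
				status[callee] = EXPLORING;
				node = callee;
				edge = 0;
				continue;
			}
			edge++;	/* callee already EXPLORED, skip it */
			continue;
		}
		/* all callees of 'node' done */
		status[node] = EXPLORED;
		if (!sp)
			break;
		sp--;
		node = stack_node[sp];
		edge = stack_edge[sp] + 1;
	}

	for (i = 0; i < subprog_cnt; i++)
		if (status[i] != EXPLORED) {
			fprintf(stderr, "unreachable subprog %d\n", i);
			return -1;
		}

	return 0;
}

int main(void)
{
	/* subprog 0 calls 1 and 2, 2 calls 1; subprog 3 is unreachable */
	callees[0][ncallees[0]++] = 1;
	callees[0][ncallees[0]++] = 2;
	callees[2][ncallees[2]++] = 1;

	return check_cgraph(4) ? EXIT_FAILURE : EXIT_SUCCESS;
}

An explicit stack is used (both in this sketch and in the kernel code) so
the walk cannot recurse arbitrarily deep on the kernel stack.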

Signed-off-by: Jiong Wang <jiong.wang@...ronome.com>
Signed-off-by: John Fastabend <john.fastabend@...il.com>
---
 include/linux/bpf_verifier.h |    1 +
 kernel/bpf/cfg.c             |  133 ++++++++++++++++++++++++++++++++++++++++++
 kernel/bpf/cfg.h             |    4 +
 kernel/bpf/verifier.c        |   32 +++++++++-
 4 files changed, 166 insertions(+), 4 deletions(-)

diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 0eb838d..83ccbc4 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -177,6 +177,7 @@ static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log)
 struct bpf_subprog_info {
 	u32 start; /* insn idx of function entry point */
 	u16 stack_depth; /* max. stack depth used by this function */
+	struct list_head callees; /* callees list. */
 	struct list_head bbs; /* basic blocks list. */
 	u32 bb_num; /* total basic block num. */
 	unsigned long *dtree;
diff --git a/kernel/bpf/cfg.c b/kernel/bpf/cfg.c
index 5175aa7..56c08e8 100644
--- a/kernel/bpf/cfg.c
+++ b/kernel/bpf/cfg.c
@@ -13,6 +13,12 @@
 
 #include "cfg.h"
 
+struct cedge_node {
+	struct list_head l;
+	u8 caller_idx;
+	u8 callee_idx;
+};
+
 struct edge_node {
 	struct list_head l;
 	struct bb_node *src;
@@ -640,6 +646,126 @@ int add_subprog(struct bpf_verifier_env *env, int off)
 	return 0;
 }
 
+struct callee_iter {
+	struct list_head *list_head;
+	struct cedge_node *callee;
+};
+
+#define first_callee(c_list)	list_first_entry(c_list, struct cedge_node, l)
+#define next_callee(c)		list_next_entry(c, l)
+
+static bool ci_end_p(struct callee_iter *ci)
+{
+	return &ci->callee->l == ci->list_head;
+}
+
+static void ci_next(struct callee_iter *ci)
+{
+	struct cedge_node *c = ci->callee;
+
+	ci->callee = next_callee(c);
+}
+
+#define EXPLORING	1
+#define EXPLORED	2
+int cgraph_check_recursive_unreachable(struct bpf_verifier_env *env,
+				       struct bpf_subprog_info *subprog)
+{
+	struct callee_iter *stack;
+	struct cedge_node *callee;
+	int sp = 0, idx = 0, ret;
+	struct callee_iter ci;
+	int *status;
+
+	stack = kmalloc_array(128, sizeof(struct callee_iter), GFP_KERNEL);
+	if (!stack)
+		return -ENOMEM;
+	status = kcalloc(env->subprog_cnt, sizeof(int), GFP_KERNEL);
+	if (!status) {
+		kfree(stack);
+		return -ENOMEM;
+	}
+	ci.callee = first_callee(&subprog->callees);
+	ci.list_head = &subprog->callees;
+	status[0] = EXPLORING;
+
+	while (1) {
+		while (!ci_end_p(&ci)) {
+			callee = ci.callee;
+			idx = callee->callee_idx;
+			if (status[idx] == EXPLORING) {
+				bpf_verifier_log_write(env, "cgraph - recursive call\n");
+				ret = -EINVAL;
+				goto err_free;
+			}
+
+			status[idx] = EXPLORING;
+
+			if (sp == 127) {
+				bpf_verifier_log_write(env, "cgraph - call frame too deep\n");
+				ret = -EINVAL;
+				goto err_free;
+			}
+
+			stack[sp++] = ci;
+			ci.callee = first_callee(&subprog[idx].callees);
+			ci.list_head = &subprog[idx].callees;
+		}
+
+		if (!list_empty(ci.list_head))
+			status[first_callee(ci.list_head)->caller_idx] =
+				EXPLORED;
+		else
+			/* leaf func. */
+			status[idx] = EXPLORED;
+
+		if (!sp)
+			break;
+
+		ci = stack[--sp];
+		ci_next(&ci);
+	}
+
+	for (idx = 0; idx < env->subprog_cnt; idx++)
+		if (status[idx] != EXPLORED) {
+			bpf_verifier_log_write(env, "cgraph - unreachable subprog\n");
+			ret = -EINVAL;
+			goto err_free;
+		}
+
+	ret = 0;
+err_free:
+	kfree(status);
+	kfree(stack);
+	return ret;
+}
+
+int subprog_append_callee(struct bpf_verifier_env *env,
+			  struct list_head *callees_list,
+			  int caller_idx, int off)
+{
+	int callee_idx = find_subprog(env, off);
+	struct cedge_node *new_callee, *callee;
+
+	if (callee_idx < 0)
+		return callee_idx;
+
+	list_for_each_entry(callee, callees_list, l) {
+		if (callee->callee_idx == callee_idx)
+			return 0;
+	}
+
+	new_callee = kzalloc(sizeof(*new_callee), GFP_KERNEL);
+	if (!new_callee)
+		return -ENOMEM;
+
+	new_callee->caller_idx = caller_idx;
+	new_callee->callee_idx = callee_idx;
+	list_add_tail(&new_callee->l, callees_list);
+
+	return 0;
+}
+
 static void subprog_free_edge(struct bb_node *bb)
 {
 	struct list_head *succs = &bb->e_succs;
@@ -657,7 +783,9 @@ void subprog_free(struct bpf_subprog_info *subprog, int end_idx)
 	int i = 0;
 
 	for (; i <= end_idx; i++) {
+		struct list_head *callees = &subprog[i].callees;
 		struct list_head *bbs = &subprog[i].bbs;
+		struct cedge_node *callee, *tmp_callee;
 		struct bb_node *bb, *tmp, *exit;
 
 		bb = entry_bb(bbs);
@@ -668,6 +796,11 @@ void subprog_free(struct bpf_subprog_info *subprog, int end_idx)
 			kfree(bb);
 		}
 
+		list_for_each_entry_safe(callee, tmp_callee, callees, l) {
+			list_del(&callee->l);
+			kfree(callee);
+		}
+
 		if (subprog[i].dtree_avail)
 			kfree(subprog[i].dtree);
 	}
diff --git a/kernel/bpf/cfg.h b/kernel/bpf/cfg.h
index 57eab9b..577c22c 100644
--- a/kernel/bpf/cfg.h
+++ b/kernel/bpf/cfg.h
@@ -9,9 +9,13 @@
 #define __BPF_CFG_H__
 
 int add_subprog(struct bpf_verifier_env *env, int off);
+int cgraph_check_recursive_unreachable(struct bpf_verifier_env *env,
+				       struct bpf_subprog_info *subprog);
 int find_subprog(struct bpf_verifier_env *env, int off);
 int subprog_add_bb_edges(struct bpf_insn *insns, struct list_head *bb_list);
 int subprog_append_bb(struct list_head *bb_list, int head);
+int subprog_append_callee(struct bpf_verifier_env *env,
+			  struct list_head *callees_list, int caller_idx, int off);
 int subprog_build_dom_info(struct bpf_verifier_env *env,
 			   struct bpf_subprog_info *subprog);
 int subprog_fini_bb(struct list_head *bb_list, int subprog_end);
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 782dd17..ddba8ad 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -764,9 +764,9 @@ static int check_subprogs(struct bpf_verifier_env *env)
 {
 	int i, ret, subprog_start, subprog_end, off, cur_subprog = 0;
 	struct bpf_subprog_info *subprog = env->subprog_info;
+	struct list_head *cur_bb_list, *cur_callee_list;
 	struct bpf_insn *insn = env->prog->insnsi;
 	int insn_cnt = env->prog->len;
-	struct list_head *cur_bb_list;
 
 	/* Add entry function. */
 	ret = add_subprog(env, 0);
@@ -797,16 +797,23 @@ static int check_subprogs(struct bpf_verifier_env *env)
 	 */
 	subprog[env->subprog_cnt].start = insn_cnt;
 
-	if (env->log.level > 1)
-		for (i = 0; i < env->subprog_cnt; i++)
+	for (i = 0; i < env->subprog_cnt; i++) {
+		if (env->log.level > 1)
 			verbose(env, "func#%d @%d\n", i, subprog[i].start);
 
+		/* Don't init callees inside add_subprog: it sorts the subprog
+		 * array, which would break the embedded list links.
+		 */
+		INIT_LIST_HEAD(&subprog[i].callees);
+	}
+
 	subprog_start = subprog[cur_subprog].start;
 	subprog_end = subprog[cur_subprog + 1].start;
 	cur_bb_list = &subprog[cur_subprog].bbs;
 	ret = subprog_init_bb(cur_bb_list, subprog_start);
 	if (ret < 0)
 		goto free_nodes;
+	cur_callee_list = &env->subprog_info[cur_subprog].callees;
 	/* now check that all jumps are within the same subprog */
 	for (i = 0; i < insn_cnt; i++) {
 		u8 code = insn[i].code;
@@ -823,8 +830,20 @@ static int check_subprogs(struct bpf_verifier_env *env)
 			goto next;
 		}
 
-		if (BPF_OP(code) == BPF_CALL)
+		if (BPF_OP(code) == BPF_CALL) {
+			if (insn[i].src_reg == BPF_PSEUDO_CALL) {
+				int target = i + insn[i].imm + 1;
+
+				ret = subprog_append_callee(env,
+							    cur_callee_list,
+							    cur_subprog,
+							    target);
+				if (ret < 0)
+					goto free_nodes;
+			}
+
 			goto next;
+		}
 
 		off = i + insn[i].off + 1;
 		if (off < subprog_start || off >= subprog_end) {
@@ -880,10 +899,15 @@ static int check_subprogs(struct bpf_verifier_env *env)
 						      subprog_start);
 				if (ret < 0)
 					goto free_nodes;
+				cur_callee_list = &subprog[cur_subprog].callees;
 			}
 		}
 	}
 
+	ret = cgraph_check_recursive_unreachable(env, env->subprog_info);
+	if (ret < 0)
+		goto free_nodes;
+
 	ret = 0;
 
 free_nodes:
