Message-ID: <4c0e3b9f-7bc7-6c43-97e4-ed9e56056e37@solarflare.com>
Date: Tue, 26 Jun 2018 19:22:03 +0100
From: Edward Cree <ecree@...arflare.com>
To: <linux-net-drivers@...arflare.com>, <netdev@...r.kernel.org>
CC: <davem@...emloft.net>
Subject: [RFC PATCH v2 net-next 11/12] net: listify Generic XDP processing, part 2
Add listified versions of the eBPF interpreter functions, and use them when
the single-packet function is not JITed.  If the single-packet function is
JITed (and the list function is not, which currently it never is), use the
single-packet function instead, since the cost of interpreting is probably
much worse than the cost of the extra indirect calls.
Signed-off-by: Edward Cree <ecree@...arflare.com>
---
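Not part of the patch: a minimal, standalone userspace sketch of the dispatch
rule added in do_xdp_list_generic() below.  struct fake_prog and
use_list_func() are made-up stand-ins for the relevant bpf_prog fields and
for the new condition, just to show when the list function would be chosen.

#include <stdbool.h>
#include <stdio.h>

struct fake_prog {
        bool has_list_func;     /* stands in for list_func != NULL */
        bool jited;             /* single-packet func is JITed */
        bool jited_list;        /* list func is JITed (currently never) */
};

/* Mirrors the condition added in net/core/dev.c: use the list function only
 * when it exists and either it is JITed or the single-packet function is
 * itself interpreted.
 */
static bool use_list_func(const struct fake_prog *p)
{
        return p->has_list_func && (p->jited_list || !p->jited);
}

int main(void)
{
        struct fake_prog cases[] = {
                { true, false, false }, /* interpreted: batch via list_func */
                { true, true,  false }, /* JITed single func wins, per packet */
                { true, true,  true },  /* hypothetical JITed list func */
        };
        unsigned int i;

        for (i = 0; i < sizeof(cases) / sizeof(cases[0]); i++)
                printf("case %u: %s\n", i,
                       use_list_func(&cases[i]) ? "list_func" : "bpf_func per packet");
        return 0;
}

The second case is the interesting one: once the per-packet function is
JITed, batching through the interpreter-based list_func would trade a fast
JITed run for a slow interpreted one just to save some indirect calls, which
is almost certainly a loss.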
include/linux/filter.h | 38 +++++++++++++++++++++++++++++---------
kernel/bpf/core.c      | 26 ++++++++++++++++++++++++++
net/core/dev.c         | 19 ++++++++-----------
3 files changed, 63 insertions(+), 20 deletions(-)
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 75db6cbf78a3..7d813034e286 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -477,6 +477,21 @@ struct bpf_binary_header {
        u8 image[] __aligned(4);
};
+struct redirect_info {
+        u32 ifindex;
+        u32 flags;
+        struct bpf_map *map;
+        struct bpf_map *map_to_flush;
+        unsigned long map_owner;
+};
+
+struct bpf_work {
+        struct list_head list;
+        void *ctx;
+        struct redirect_info ri;
+        unsigned long ret;
+};
+
struct bpf_prog {
        u16 pages; /* Number of allocated pages */
        u16 jited:1, /* Is our filter JIT'ed? */
@@ -488,7 +503,9 @@ struct bpf_prog {
                blinded:1, /* Was blinded */
                is_func:1, /* program is a bpf function */
                kprobe_override:1, /* Do we override a kprobe? */
-               has_callchain_buf:1; /* callchain buffer allocated? */
+               has_callchain_buf:1, /* callchain buffer allocated? */
+               jited_list:1; /* Is list func JIT'ed? */
+               /* 5 bits left */
        enum bpf_prog_type type; /* Type of BPF program */
        enum bpf_attach_type expected_attach_type; /* For some prog types */
        u32 len; /* Number of filter blocks */
@@ -498,6 +515,9 @@ struct bpf_prog {
        struct sock_fprog_kern *orig_prog; /* Original BPF program */
        unsigned int (*bpf_func)(const void *ctx,
                                 const struct bpf_insn *insn);
+       /* Takes a list of struct bpf_work */
+       void (*list_func)(struct list_head *list,
+                         const struct bpf_insn *insn);
        /* Instructions for interpreter */
        union {
                struct sock_filter insns[0];
@@ -512,6 +532,7 @@ struct sk_filter {
};
#define BPF_PROG_RUN(filter, ctx) (*(filter)->bpf_func)(ctx, (filter)->insnsi)
+#define BPF_LIST_PROG_RUN(filter, list) (*(filter)->list_func)(list, (filter)->insnsi)
#define BPF_SKB_CB_LEN QDISC_CB_PRIV_LEN
@@ -616,6 +637,13 @@ static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
        return BPF_PROG_RUN(prog, xdp);
}
+static __always_inline void bpf_list_prog_run_xdp(const struct bpf_prog *prog,
+                                                  struct list_head *list)
+{
+       /* Caller must hold rcu_read_lock(), as per bpf_prog_run_xdp(). */
+       BPF_LIST_PROG_RUN(prog, list);
+}
+
static inline u32 bpf_prog_insn_size(const struct bpf_prog *prog)
{
        return prog->len * sizeof(struct bpf_insn);
@@ -820,14 +848,6 @@ static inline int __xdp_generic_ok_fwd_dev(struct sk_buff *skb,
        return 0;
}
-struct redirect_info {
-       u32 ifindex;
-       u32 flags;
-       struct bpf_map *map;
-       struct bpf_map *map_to_flush;
-       unsigned long map_owner;
-};
-
DECLARE_PER_CPU(struct redirect_info, redirect_info);
/* The pair of xdp_do_redirect and xdp_do_flush_map MUST be called in the
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index a9e6c04d0f4a..c35da826cc3b 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -1356,6 +1356,18 @@ static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
        return ___bpf_prog_run(regs, insn, stack); \
}
+#define LIST_PROG_NAME(stack_size) __bpf_list_prog_run##stack_size
+#define DEFINE_BPF_LIST_PROG_RUN(stack_size) \
+static void LIST_PROG_NAME(stack_size)(struct list_head *list, const struct bpf_insn *insn) \
+{ \
+       struct bpf_work *work; \
+\
+       list_for_each_entry(work, list, list) { \
+               work->ret = PROG_NAME(stack_size)(work->ctx, insn); \
+               work->ri = *this_cpu_ptr(&redirect_info); \
+       } \
+}
+
#define EVAL1(FN, X) FN(X)
#define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
#define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
@@ -1367,6 +1379,10 @@ EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);
+EVAL6(DEFINE_BPF_LIST_PROG_RUN, 32, 64, 96, 128, 160, 192);
+EVAL6(DEFINE_BPF_LIST_PROG_RUN, 224, 256, 288, 320, 352, 384);
+EVAL4(DEFINE_BPF_LIST_PROG_RUN, 416, 448, 480, 512);
+
EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);
@@ -1380,6 +1396,14 @@ EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
};
#undef PROG_NAME_LIST
+#define PROG_NAME_LIST(stack_size) LIST_PROG_NAME(stack_size),
+static void (*list_interpreters[])(struct list_head *list,
+                                   const struct bpf_insn *insn) = {
+EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
+EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
+EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
+};
+#undef PROG_NAME_LIST
#define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
static u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
                                  const struct bpf_insn *insn) = {
@@ -1472,8 +1496,10 @@ static void bpf_prog_select_func(struct bpf_prog *fp)
        u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
        fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
+       fp->list_func = list_interpreters[(round_up(stack_depth, 32) / 32) - 1];
#else
        fp->bpf_func = __bpf_prog_ret0_warn;
+       fp->list_func = NULL;
#endif
}
diff --git a/net/core/dev.c b/net/core/dev.c
index 22cbd5314d56..746112c22afd 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4198,13 +4198,6 @@ int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb)
}
EXPORT_SYMBOL_GPL(do_xdp_generic);
-struct bpf_work {
-       struct list_head list;
-       void *ctx;
-       struct redirect_info ri;
-       unsigned long ret;
-};
-
struct xdp_work {
        struct bpf_work w;
        struct xdp_buff xdp;
@@ -4254,10 +4247,14 @@ static void do_xdp_list_generic(struct bpf_prog *xdp_prog,
                list_add_tail(&xw->w.list, &xdp_list);
        }
-       list_for_each_entry(bw, &xdp_list, list) {
-               bw->ret = bpf_prog_run_xdp(xdp_prog, bw->ctx);
-               bw->ri = *this_cpu_ptr(&redirect_info);
-       }
+       if (xdp_prog->list_func && (xdp_prog->jited_list ||
+                                   !xdp_prog->jited))
+               bpf_list_prog_run_xdp(xdp_prog, &xdp_list);
+       else
+               list_for_each_entry(bw, &xdp_list, list) {
+                       bw->ret = bpf_prog_run_xdp(xdp_prog, bw->ctx);
+                       bw->ri = *this_cpu_ptr(&redirect_info);
+               }
        for (i = 0; i < n; i++) {
                xw = (*xwa) + i;
Powered by blists - more mailing lists