Message-Id: <20180726144032.2116-8-toshiaki.makita1@gmail.com>
Date: Thu, 26 Jul 2018 23:40:30 +0900
From: Toshiaki Makita <toshiaki.makita1@...il.com>
To: netdev@...r.kernel.org, Alexei Starovoitov <ast@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>
Cc: Toshiaki Makita <makita.toshiaki@....ntt.co.jp>,
Jesper Dangaard Brouer <brouer@...hat.com>,
Jakub Kicinski <jakub.kicinski@...ronome.com>
Subject: [PATCH v5 bpf-next 7/9] xdp: Helpers for disabling napi_direct of xdp_return_frame
From: Toshiaki Makita <makita.toshiaki@....ntt.co.jp>
We need a mechanism to disable napi_direct when
xdp_return_frame_rx_napi() is called from certain contexts.

When veth gains XDP_REDIRECT support, it will redirect packets which
were themselves redirected from other devices. On such a redirection
veth reuses the xdp_mem_info of the original source device so that
return_frame works. But in this case the .ndo_xdp_xmit() called from
veth's redirection uses an xdp_mem_info which is not guarded by NAPI,
because the .ndo_xdp_xmit() is not called directly from the rxq which
owns the xdp_mem_info.

This patch introduces a flag in bpf_redirect_info to indicate that
napi_direct should be disabled even when the _rx_napi variant is used,
as well as helper functions to set, clear and test it.

A NAPI handler which wants to use this flag needs to call
xdp_set_return_frame_no_direct() before processing packets, and call
xdp_clear_return_frame_no_direct() after xdp_do_flush_map(), before
exiting NAPI, as in the sketch below.
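For illustration only (not part of this patch), a minimal sketch of how
a driver's poll function could use the helpers; veth_poll() and
veth_xdp_rcv() are hypothetical names here:

static int veth_poll(struct napi_struct *napi, int budget)
{
	int done;

	/* Frames returned from here on must not take the napi_direct
	 * fast path of the page_pool, since they may come from another
	 * device's rxq via redirection.
	 */
	xdp_set_return_frame_no_direct();

	done = veth_xdp_rcv(napi, budget);	/* may call xdp_do_redirect() */

	xdp_do_flush_map();			/* flush pending redirects */
	xdp_clear_return_frame_no_direct();

	if (done < budget)
		napi_complete_done(napi, done);

	return done;
}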
v4:
- Use bpf_redirect_info for storing the flag instead of xdp_mem_info to
avoid per-frame copy cost.
Signed-off-by: Toshiaki Makita <makita.toshiaki@....ntt.co.jp>
---
 include/linux/filter.h | 25 +++++++++++++++++++++++++
 net/core/xdp.c         |  6 ++++--
 2 files changed, 29 insertions(+), 2 deletions(-)
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 4717af8b95e6..2b072dab32c0 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -543,10 +543,14 @@ struct bpf_redirect_info {
 	struct bpf_map *map;
 	struct bpf_map *map_to_flush;
 	unsigned long map_owner;
+	u32 kern_flags;
 };

 DECLARE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info);

+/* flags for bpf_redirect_info kern_flags */
+#define BPF_RI_F_RF_NO_DIRECT	BIT(0)	/* no napi_direct on return_frame */
+
 /* Compute the linear packet data range [data, data_end) which
  * will be accessed by various program types (cls_bpf, act_bpf,
  * lwt, ...). Subsystems allowing direct data access must (!)
@@ -775,6 +779,27 @@ static inline bool bpf_dump_raw_ok(void)
 struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
 				       const struct bpf_insn *patch, u32 len);

+static inline bool xdp_return_frame_no_direct(void)
+{
+	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+
+	return ri->kern_flags & BPF_RI_F_RF_NO_DIRECT;
+}
+
+static inline void xdp_set_return_frame_no_direct(void)
+{
+	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+
+	ri->kern_flags |= BPF_RI_F_RF_NO_DIRECT;
+}
+
+static inline void xdp_clear_return_frame_no_direct(void)
+{
+	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+
+	ri->kern_flags &= ~BPF_RI_F_RF_NO_DIRECT;
+}
+
 static inline int xdp_ok_fwd_dev(const struct net_device *fwd,
 				 unsigned int pktlen)
 {
diff --git a/net/core/xdp.c b/net/core/xdp.c
index 57285383ed00..3dd99e1c04f5 100644
--- a/net/core/xdp.c
+++ b/net/core/xdp.c
@@ -330,10 +330,12 @@ static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
 		/* mem->id is valid, checked in xdp_rxq_info_reg_mem_model() */
 		xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
 		page = virt_to_head_page(data);
-		if (xa)
+		if (xa) {
+			napi_direct &= !xdp_return_frame_no_direct();
 			page_pool_put_page(xa->page_pool, page, napi_direct);
-		else
+		} else {
 			put_page(page);
+		}
 		rcu_read_unlock();
 		break;
 	case MEM_TYPE_PAGE_SHARED:
--
2.14.3