Message-ID: <72aa14d2c15a4367d59ac232772d3bf08852bc30.1706451150.git.lorenzo@kernel.org>
Date: Sun, 28 Jan 2024 15:20:41 +0100
From: Lorenzo Bianconi <lorenzo@...nel.org>
To: netdev@...r.kernel.org
Cc: lorenzo.bianconi@...hat.com,
davem@...emloft.net,
kuba@...nel.org,
edumazet@...gle.com,
pabeni@...hat.com,
bpf@...r.kernel.org,
toke@...hat.com,
willemdebruijn.kernel@...il.com,
jasowang@...hat.com,
sdf@...gle.com,
hawk@...nel.org,
ilias.apalodimas@...aro.org
Subject: [PATCH v6 net-next 5/5] veth: rely on netif_skb_segment_for_xdp utility routine
Rely on the netif_skb_segment_for_xdp utility routine and remove the
duplicated code.
Signed-off-by: Lorenzo Bianconi <lorenzo@...nel.org>
---
drivers/net/veth.c | 79 +++------------------------------------
include/linux/netdevice.h | 4 ++
net/core/dev.c | 6 +--
3 files changed, 12 insertions(+), 77 deletions(-)
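A minimal usage sketch of the newly exported helper, mirroring the veth
call site in the hunk below. The my_prepare_skb_for_xdp() wrapper name and
the -ENOMEM return value are illustrative only and not part of this series;
error handling is reduced to a single failure path the caller turns into a
drop:

/*
 * Illustrative sketch: convert an skb that is shared/cloned, carries
 * frags, or lacks XDP headroom into a private, XDP-suitable copy using
 * netif_skb_segment_for_xdp(). On success *pskb points to the new copy
 * (the original skb has been consumed, as in the removed veth code).
 */
#include <linux/bpf.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int my_prepare_skb_for_xdp(struct page_pool *pool,
				  struct sk_buff **pskb,
				  struct bpf_prog *prog)
{
	struct sk_buff *skb = *pskb;

	if (skb_shared(skb) || skb_head_is_locked(skb) ||
	    skb_shinfo(skb)->nr_frags ||
	    skb_headroom(skb) < XDP_PACKET_HEADROOM) {
		/* Segment the skb into page_pool backed order-0 pages. */
		if (netif_skb_segment_for_xdp(pool, pskb, prog))
			return -ENOMEM;	/* caller drops the packet */
	}

	return 0;
}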
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 578e36ea1589..ddb163f134ea 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -721,7 +721,8 @@ static void veth_xdp_get(struct xdp_buff *xdp)
static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq,
struct xdp_buff *xdp,
- struct sk_buff **pskb)
+ struct sk_buff **pskb,
+ struct bpf_prog *prog)
{
struct sk_buff *skb = *pskb;
u32 frame_sz;
@@ -729,80 +730,10 @@ static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq,
if (skb_shared(skb) || skb_head_is_locked(skb) ||
skb_shinfo(skb)->nr_frags ||
skb_headroom(skb) < XDP_PACKET_HEADROOM) {
- u32 size, len, max_head_size, off, truesize, page_offset;
- struct sk_buff *nskb;
- struct page *page;
- int i, head_off;
- void *va;
-
- /* We need a private copy of the skb and data buffers since
- * the ebpf program can modify it. We segment the original skb
- * into order-0 pages without linearize it.
- *
- * Make sure we have enough space for linear and paged area
- */
- max_head_size = SKB_WITH_OVERHEAD(PAGE_SIZE -
- VETH_XDP_HEADROOM);
- if (skb->len > PAGE_SIZE * MAX_SKB_FRAGS + max_head_size)
- goto drop;
-
- size = min_t(u32, skb->len, max_head_size);
- truesize = SKB_HEAD_ALIGN(size) + VETH_XDP_HEADROOM;
-
- /* Allocate skb head */
- va = page_pool_dev_alloc_va(rq->page_pool, &truesize);
- if (!va)
- goto drop;
-
- nskb = napi_build_skb(va, truesize);
- if (!nskb) {
- page_pool_free_va(rq->page_pool, va, true);
+ if (netif_skb_segment_for_xdp(rq->page_pool, pskb, prog))
goto drop;
- }
-
- skb_reserve(nskb, VETH_XDP_HEADROOM);
- skb_copy_header(nskb, skb);
- skb_mark_for_recycle(nskb);
-
- if (skb_copy_bits(skb, 0, nskb->data, size)) {
- consume_skb(nskb);
- goto drop;
- }
- skb_put(nskb, size);
- head_off = skb_headroom(nskb) - skb_headroom(skb);
- skb_headers_offset_update(nskb, head_off);
-
- /* Allocate paged area of new skb */
- off = size;
- len = skb->len - off;
-
- for (i = 0; i < MAX_SKB_FRAGS && off < skb->len; i++) {
- size = min_t(u32, len, PAGE_SIZE);
- truesize = size;
-
- page = page_pool_dev_alloc(rq->page_pool, &page_offset,
- &truesize);
- if (!page) {
- consume_skb(nskb);
- goto drop;
- }
-
- skb_add_rx_frag(nskb, i, page, page_offset, size,
- truesize);
- if (skb_copy_bits(skb, off,
- page_address(page) + page_offset,
- size)) {
- consume_skb(nskb);
- goto drop;
- }
-
- len -= size;
- off += size;
- }
-
- consume_skb(skb);
- skb = nskb;
+ skb = *pskb;
}
/* SKB "head" area always have tailroom for skb_shared_info */
@@ -850,7 +781,7 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
}
__skb_push(skb, skb->data - skb_mac_header(skb));
- if (veth_convert_skb_to_xdp_buff(rq, xdp, &skb))
+ if (veth_convert_skb_to_xdp_buff(rq, xdp, &skb, xdp_prog))
goto drop;
vxbuf.skb = skb;
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 7eee99a58200..8c1f6954de47 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3955,6 +3955,10 @@ static inline void dev_consume_skb_any(struct sk_buff *skb)
dev_kfree_skb_any_reason(skb, SKB_CONSUMED);
}
+#if IS_ENABLED(CONFIG_PAGE_POOL)
+int netif_skb_segment_for_xdp(struct page_pool *pool, struct sk_buff **pskb,
+ struct bpf_prog *prog);
+#endif
u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
struct bpf_prog *xdp_prog);
void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog);
diff --git a/net/core/dev.c b/net/core/dev.c
index 19f92ba90e49..b2fc8f0683dd 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4939,9 +4939,8 @@ u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
}
#if IS_ENABLED(CONFIG_PAGE_POOL)
-static int
-netif_skb_segment_for_xdp(struct page_pool *pool, struct sk_buff **pskb,
- struct bpf_prog *prog)
+int netif_skb_segment_for_xdp(struct page_pool *pool, struct sk_buff **pskb,
+ struct bpf_prog *prog)
{
u32 size, truesize, len, max_head_size, off;
struct sk_buff *skb = *pskb, *nskb;
@@ -5016,6 +5015,7 @@ netif_skb_segment_for_xdp(struct page_pool *pool, struct sk_buff **pskb,
return 0;
}
+EXPORT_SYMBOL_GPL(netif_skb_segment_for_xdp);
#endif
static int
--
2.43.0