Message-Id: <863f4934d251f44ad85a6be08b3737fac74f9b5a.1623674025.git.lorenzo@kernel.org>
Date: Mon, 14 Jun 2021 14:49:46 +0200
From: Lorenzo Bianconi <lorenzo@...nel.org>
To: bpf@...r.kernel.org, netdev@...r.kernel.org
Cc: lorenzo.bianconi@...hat.com, davem@...emloft.net, kuba@...nel.org,
ast@...nel.org, daniel@...earbox.net, shayagr@...zon.com,
sameehj@...zon.com, john.fastabend@...il.com, dsahern@...nel.org,
brouer@...hat.com, echaudro@...hat.com, jasowang@...hat.com,
alexander.duyck@...il.com, saeed@...nel.org,
maciej.fijalkowski@...el.com, magnus.karlsson@...el.com,
tirthendu.sarkar@...el.com
Subject: [PATCH v9 bpf-next 08/14] bpf: add multi-buff support to the bpf_xdp_adjust_tail() API
From: Eelco Chaudron <echaudro@...hat.com>
This change adds tail growing and shrinking support for XDP multi-buff frames to the bpf_xdp_adjust_tail() helper.
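For context, a minimal sketch of how an XDP program might invoke the helper once multi-buff frames are accepted (the program, its section name and TRAILER_LEN are illustrative only, not part of this patch):

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define TRAILER_LEN 4	/* illustrative value, e.g. a hardware trailer */

SEC("xdp")
int xdp_trim_trailer(struct xdp_md *ctx)
{
	/* A negative offset shrinks the tail (possibly freeing trailing
	 * fragments); a positive offset grows into the tailroom of the
	 * last fragment.
	 */
	if (bpf_xdp_adjust_tail(ctx, -TRAILER_LEN))
		return XDP_DROP;

	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";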
Signed-off-by: Eelco Chaudron <echaudro@...hat.com>
Signed-off-by: Lorenzo Bianconi <lorenzo@...nel.org>
---
include/net/xdp.h | 7 ++++++
net/core/filter.c | 62 +++++++++++++++++++++++++++++++++++++++++++++++
net/core/xdp.c | 5 ++--
3 files changed, 72 insertions(+), 2 deletions(-)
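As a side note, the growth path below bounds the requested offset by the tailroom left in the last fragment (page size minus the fragment's offset and current length). A standalone sketch of that arithmetic, with frag_tailroom() and the sample values purely illustrative:

#include <stdio.h>

#define PAGE_SIZE 4096

/* Mirrors the tailroom computation added to xdp.h: a fragment occupying
 * [frag_off, frag_off + frag_size) of a PAGE_SIZE page leaves the rest of
 * the page as room a positive adjust_tail can grow into.
 */
static unsigned int frag_tailroom(unsigned int frag_off, unsigned int frag_size)
{
	return PAGE_SIZE - frag_size - frag_off;
}

int main(void)
{
	/* Example: fragment starts at offset 0 and holds 3000 bytes. */
	printf("tailroom: %u bytes\n", frag_tailroom(0, 3000)); /* 1096 */
	return 0;
}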
diff --git a/include/net/xdp.h b/include/net/xdp.h
index 935a6f83115f..3525801c6ed5 100644
--- a/include/net/xdp.h
+++ b/include/net/xdp.h
@@ -132,6 +132,11 @@ xdp_get_shared_info_from_buff(struct xdp_buff *xdp)
return (struct skb_shared_info *)xdp_data_hard_end(xdp);
}
+static inline unsigned int xdp_get_frag_tailroom(const skb_frag_t *frag)
+{
+ return PAGE_SIZE - skb_frag_size(frag) - skb_frag_off(frag);
+}
+
struct xdp_frame {
void *data;
u16 len;
@@ -259,6 +264,8 @@ struct xdp_frame *xdp_convert_buff_to_frame(struct xdp_buff *xdp)
return xdp_frame;
}
+void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
+ struct xdp_buff *xdp);
void xdp_return_frame(struct xdp_frame *xdpf);
void xdp_return_frame_rx_napi(struct xdp_frame *xdpf);
void xdp_return_buff(struct xdp_buff *xdp);
diff --git a/net/core/filter.c b/net/core/filter.c
index caa88955562e..05f574a3d690 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -3859,11 +3859,73 @@ static const struct bpf_func_proto bpf_xdp_adjust_head_proto = {
.arg2_type = ARG_ANYTHING,
};
+static int bpf_xdp_mb_adjust_tail(struct xdp_buff *xdp, int offset)
+{
+ struct skb_shared_info *sinfo;
+
+ if (unlikely(!xdp_buff_is_mb(xdp)))
+ return -EINVAL;
+
+ sinfo = xdp_get_shared_info_from_buff(xdp);
+ if (offset >= 0) {
+ skb_frag_t *frag = &sinfo->frags[sinfo->nr_frags - 1];
+ int size;
+
+ if (unlikely(offset > xdp_get_frag_tailroom(frag)))
+ return -EINVAL;
+
+ size = skb_frag_size(frag);
+ memset(skb_frag_address(frag) + size, 0, offset);
+ skb_frag_size_set(frag, size + offset);
+ sinfo->data_len += offset;
+ } else {
+ int i, n_frags_free = 0, len_free = 0;
+
+ offset = abs(offset);
+ if (unlikely(offset > ((int)(xdp->data_end - xdp->data) +
+ sinfo->data_len - ETH_HLEN)))
+ return -EINVAL;
+
+ for (i = sinfo->nr_frags - 1; i >= 0 && offset > 0; i--) {
+ skb_frag_t *frag = &sinfo->frags[i];
+ int size = skb_frag_size(frag);
+ int shrink = min_t(int, offset, size);
+
+ len_free += shrink;
+ offset -= shrink;
+
+ if (unlikely(size == shrink)) {
+ struct page *page = skb_frag_page(frag);
+
+ __xdp_return(page_address(page), &xdp->rxq->mem,
+ false, NULL);
+ n_frags_free++;
+ } else {
+ skb_frag_size_set(frag, size - shrink);
+ break;
+ }
+ }
+ sinfo->nr_frags -= n_frags_free;
+ sinfo->data_len -= len_free;
+
+ if (unlikely(!sinfo->nr_frags))
+ xdp_buff_clear_mb(xdp);
+
+ if (unlikely(offset > 0))
+ xdp->data_end -= offset;
+ }
+
+ return 0;
+}
+
BPF_CALL_2(bpf_xdp_adjust_tail, struct xdp_buff *, xdp, int, offset)
{
void *data_hard_end = xdp_data_hard_end(xdp); /* use xdp->frame_sz */
void *data_end = xdp->data_end + offset;
+ if (unlikely(xdp_buff_is_mb(xdp)))
+ return bpf_xdp_mb_adjust_tail(xdp, offset);
+
/* Notice that xdp_data_hard_end have reserved some tailroom */
if (unlikely(data_end > data_hard_end))
return -EINVAL;
diff --git a/net/core/xdp.c b/net/core/xdp.c
index 71bedf6049a1..ffd70d3e9e5d 100644
--- a/net/core/xdp.c
+++ b/net/core/xdp.c
@@ -338,8 +338,8 @@ EXPORT_SYMBOL_GPL(xdp_rxq_info_reg_mem_model);
* is used for those calls sites. Thus, allowing for faster recycling
* of xdp_frames/pages in those cases.
*/
-static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
- struct xdp_buff *xdp)
+void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
+ struct xdp_buff *xdp)
{
struct xdp_mem_allocator *xa;
struct page *page;
@@ -372,6 +372,7 @@ static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
break;
}
}
+EXPORT_SYMBOL_GPL(__xdp_return);
void xdp_return_frame(struct xdp_frame *xdpf)
{
--
2.31.1