[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <166256556130.1434226.10426110144984671774.stgit@firesoul>
Date: Wed, 07 Sep 2022 17:46:01 +0200
From: Jesper Dangaard Brouer <brouer@...hat.com>
To: bpf@...r.kernel.org
Cc: Jesper Dangaard Brouer <brouer@...hat.com>, netdev@...r.kernel.org,
xdp-hints@...-project.net, larysa.zaremba@...el.com,
memxor@...il.com, Lorenzo Bianconi <lorenzo@...nel.org>,
mtahhan@...hat.com,
Alexei Starovoitov <alexei.starovoitov@...il.com>,
Daniel Borkmann <borkmann@...earbox.net>,
Andrii Nakryiko <andrii.nakryiko@...il.com>,
dave@...cker.co.uk, Magnus Karlsson <magnus.karlsson@...el.com>,
bjorn@...nel.org
Subject: [PATCH RFCv2 bpf-next 12/18] net: use XDP-hints in xdp_frame to SKB
conversion
This patch makes the net/core/xdp function __xdp_build_skb_from_frame()
consume HW offloads provided via XDP-hints when creating an SKB based
on an xdp_frame. This is an initial step towards SKB-less drivers that
moves SKB handling to net/core.
Current users that already benefit from this are: Redirect into veth
and cpumap. XDP_PASS action in bpf_test_run_xdp_live and driver
ethernet/aquantia/atlantic/.
Signed-off-by: Jesper Dangaard Brouer <brouer@...hat.com>
---
include/net/xdp.h | 72 +++++++++++++++++++++++++++++++++++++++++++++++++++++
net/core/xdp.c | 17 ++++++++-----
2 files changed, 83 insertions(+), 6 deletions(-)
diff --git a/include/net/xdp.h b/include/net/xdp.h
index c7cdcef83fa5..bdb497c7b296 100644
--- a/include/net/xdp.h
+++ b/include/net/xdp.h
@@ -153,6 +153,68 @@ static __always_inline u32 xdp_hints_set_vlan(struct xdp_hints_common *hints,
return flags;
}
+/* XDP hints to SKB helper functions */
+/* Record the HW RX-queue index hint in the SKB, if the driver set one. */
+static inline void xdp_hint2skb_record_rx_queue(struct sk_buff *skb,
+ struct xdp_hints_common *hints)
+{
+ if (hints->xdp_hints_flags & HINT_FLAG_RX_QUEUE)
+ skb_record_rx_queue(skb, hints->rx_queue);
+}
+
+/* Transfer the HW RX-hash hint to the SKB. The hash type is stored
+ * shifted inside xdp_hints_flags, so it is shifted down before being
+ * handed to skb_set_hash(). A zero type means no hash hint was set.
+ */
+static inline void xdp_hint2skb_set_hash(struct sk_buff *skb,
+ struct xdp_hints_common *hints)
+{
+ u32 hash_type = hints->xdp_hints_flags & HINT_FLAG_RX_HASH_TYPE_MASK;
+
+ if (hash_type) {
+ hash_type = hash_type >> HINT_FLAG_RX_HASH_TYPE_SHIFT;
+ skb_set_hash(skb, hints->rx_hash32, hash_type);
+ }
+}
+
+/* Apply HW checksum hints to the SKB. Only CHECKSUM_UNNECESSARY (plus an
+ * optional csum_level for encapsulated packets) is handled here.
+ *
+ * NOTE(review): csum_type holds the *masked* flag bits, but unlike the
+ * hash type above it is compared against CHECKSUM_UNNECESSARY without
+ * first shifting by a HINT_FLAG_CSUM_TYPE_SHIFT. This only works if the
+ * csum-type field occupies the low bits of xdp_hints_flags — confirm
+ * against the HINT_FLAG_CSUM_TYPE_MASK definition, otherwise this
+ * comparison can never match.
+ */
+static inline void xdp_hint2skb_checksum(struct sk_buff *skb,
+ struct xdp_hints_common *hints)
+{
+ u32 csum_type = hints->xdp_hints_flags & HINT_FLAG_CSUM_TYPE_MASK;
+ u32 csum_level = hints->xdp_hints_flags & HINT_FLAG_CSUM_LEVEL_MASK;
+
+ if (csum_type == CHECKSUM_UNNECESSARY)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ if (csum_level)
+ skb->csum_level = csum_level >> HINT_FLAG_CSUM_LEVEL_SHIFT;
+
+ /* TODO: First driver implementing CHECKSUM_PARTIAL or CHECKSUM_COMPLETE
+ * need to implement handling here.
+ */
+}
+
+/* Transfer a HW-stripped VLAN tag hint into the SKB, open-coding
+ * __vlan_hwaccel_put_tag(). The TPID defaults to 802.1Q and is switched
+ * to 802.1AD when the corresponding flag is set; nothing is written
+ * unless HINT_FLAG_VLAN_PRESENT is set.
+ */
+static inline void xdp_hint2skb_vlan_hw_tag(struct sk_buff *skb,
+ struct xdp_hints_common *hints)
+{
+ u32 flags = hints->xdp_hints_flags;
+ __be16 proto = htons(ETH_P_8021Q);
+
+ if (flags & HINT_FLAG_VLAN_PROTO_ETH_P_8021AD)
+ proto = htons(ETH_P_8021AD);
+
+ if (flags & HINT_FLAG_VLAN_PRESENT) {
+ /* like: __vlan_hwaccel_put_tag */
+ skb->vlan_proto = proto;
+ skb->vlan_tci = hints->vlan_tci;
+ skb->vlan_present = 1;
+ }
+}
+
+/* Populate all supported SKB HW-offload fields from an XDP-hints common
+ * struct: RX queue, RX hash, checksum status and VLAN tag. The caller is
+ * responsible for validating that @hints is present and compatible (see
+ * xdp_frame_has_hints_compat()).
+ */
+static inline void xdp_hint2skb(struct sk_buff *skb,
+ struct xdp_hints_common *hints)
+{
+ xdp_hint2skb_record_rx_queue(skb, hints);
+ xdp_hint2skb_set_hash(skb, hints);
+ xdp_hint2skb_checksum(skb, hints);
+ xdp_hint2skb_vlan_hw_tag(skb, hints);
+}
+
/**
* DOC: XDP RX-queue information
*
@@ -364,6 +426,16 @@ static __always_inline bool xdp_frame_is_frag_pfmemalloc(struct xdp_frame *frame
return !!(frame->flags & XDP_FLAGS_FRAGS_PF_MEMALLOC);
}
+/* Return true when the frame carries XDP-hints laid out in the common
+ * compatible format: the compat flag must be set AND at least one hints
+ * flag must be present in the frame's flags word.
+ */
+static __always_inline bool xdp_frame_has_hints_compat(struct xdp_frame *xdpf)
+{
+ u32 flags = xdpf->flags;
+
+ if (!(flags & XDP_FLAGS_HINTS_COMPAT_COMMON))
+ return false;
+
+ return !!(flags & XDP_FLAGS_HINTS_MASK);
+}
+
#define XDP_BULK_QUEUE_SIZE 16
struct xdp_frame_bulk {
int count;
diff --git a/net/core/xdp.c b/net/core/xdp.c
index a57bd5278b47..ffa353367941 100644
--- a/net/core/xdp.c
+++ b/net/core/xdp.c
@@ -623,6 +623,7 @@ struct sk_buff *__xdp_build_skb_from_frame(struct xdp_frame *xdpf,
struct net_device *dev)
{
struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
+ struct xdp_hints_common *xdp_hints = NULL;
unsigned int headroom, frame_size;
void *hard_start;
u8 nr_frags;
@@ -640,14 +641,17 @@ struct sk_buff *__xdp_build_skb_from_frame(struct xdp_frame *xdpf,
frame_size = xdpf->frame_sz;
hard_start = xdpf->data - headroom;
+ prefetch(xdpf->data); /* cache-line for eth_type_trans */
skb = build_skb_around(skb, hard_start, frame_size);
if (unlikely(!skb))
return NULL;
skb_reserve(skb, headroom);
__skb_put(skb, xdpf->len);
- if (xdpf->metasize)
+ if (xdpf->metasize) {
skb_metadata_set(skb, xdpf->metasize);
+ prefetch(xdpf->data - sizeof(*xdp_hints));
+ }
if (unlikely(xdp_frame_has_frags(xdpf)))
xdp_update_skb_shared_info(skb, nr_frags,
@@ -658,11 +662,12 @@ struct sk_buff *__xdp_build_skb_from_frame(struct xdp_frame *xdpf,
/* Essential SKB info: protocol and skb->dev */
skb->protocol = eth_type_trans(skb, dev);
- /* Optional SKB info, currently missing:
- * - HW checksum info (skb->ip_summed)
- * - HW RX hash (skb_set_hash)
- * - RX ring dev queue index (skb_record_rx_queue)
- */
+ /* Populate (optional) HW offload hints in SKB via XDP-hints */
+ if (xdp_frame_has_hints_compat(xdpf)
+ && xdpf->metasize >= sizeof(*xdp_hints)) {
+ xdp_hints = xdpf->data - sizeof(*xdp_hints);
+ xdp_hint2skb(skb, xdp_hints);
+ }
/* Until page_pool get SKB return path, release DMA here */
xdp_release_frame(xdpf);
Powered by blists - more mailing lists