lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <174897275458.1677018.15827867832940584671.stgit@firesoul>
Date: Tue, 03 Jun 2025 19:45:54 +0200
From: Jesper Dangaard Brouer <hawk@...nel.org>
To: bpf@...r.kernel.org, netdev@...r.kernel.org,
 Jakub Kicinski <kuba@...nel.org>, lorenzo@...nel.org
Cc: Jesper Dangaard Brouer <hawk@...nel.org>,
 Alexei Starovoitov <ast@...nel.org>,
 Daniel Borkmann <borkmann@...earbox.net>,
 Eric Dumazet <eric.dumazet@...il.com>,
 "David S. Miller" <davem@...emloft.net>, Paolo Abeni <pabeni@...hat.com>,
 sdf@...ichev.me, kernel-team@...udflare.com, arthur@...hurfabre.com,
 jakub@...udflare.com
Subject: [PATCH bpf-next V1 1/7] net: xdp: Add xdp_rx_meta structure

From: Lorenzo Bianconi <lorenzo@...nel.org>

Introduce the `xdp_rx_meta` structure to serve as a container for XDP RX
hardware hints within XDP packet buffers. Initially, this structure will
accommodate `rx_hash` and `rx_vlan` metadata. (The `rx_timestamp` hint will
be stored in `skb_shared_info`).

A key design aspect is making this metadata accessible both during BPF
program execution (via `struct xdp_buff`) and later if a `struct
xdp_frame` is materialized (e.g., for XDP_REDIRECT).
To achieve this:
  - The `struct xdp_frame` embeds an `xdp_rx_meta` field directly for
    storage.
  - The `struct xdp_buff` includes an `xdp_rx_meta` pointer. This pointer
    is initialized (in `xdp_prepare_buff`) to point to the memory location
    within the packet buffer's headroom where the `xdp_frame`'s embedded
    `rx_meta` field would reside.

This setup allows BPF kfuncs, operating on `xdp_buff`, to populate the
metadata in the precise location where it will be found if an `xdp_frame`
is subsequently created.

The availability of this metadata storage area within the buffer is
indicated by the `XDP_FLAGS_META_AREA` flag in `xdp_buff->flags` (and
propagated to `xdp_frame->flags`). This flag is only set if sufficient
headroom (at least `XDP_MIN_HEADROOM`, currently 192 bytes) is present.
Specific hints like `XDP_FLAGS_META_RX_HASH` and `XDP_FLAGS_META_RX_VLAN`
will then denote which types of metadata have been populated into the
`xdp_rx_meta` structure.

This patch is a step toward enabling the preservation and use of XDP RX
hints across operations like XDP_REDIRECT.

Signed-off-by: Lorenzo Bianconi <lorenzo@...nel.org>
Signed-off-by: Jesper Dangaard Brouer <hawk@...nel.org>
---
 include/net/xdp.h       |   57 +++++++++++++++++++++++++++++++++++------------
 net/core/xdp.c          |    1 +
 net/xdp/xsk_buff_pool.c |    4 ++-
 3 files changed, 47 insertions(+), 15 deletions(-)

diff --git a/include/net/xdp.h b/include/net/xdp.h
index 48efacbaa35d..5dcdf634ae4a 100644
--- a/include/net/xdp.h
+++ b/include/net/xdp.h
@@ -71,11 +71,31 @@ struct xdp_txq_info {
 	struct net_device *dev;
 };
 
+struct xdp_rx_meta {
+	struct xdp_rx_meta_hash {
+		u32 val;
+		u32 type; /* enum xdp_rss_hash_type */
+	} hash;
+	struct xdp_rx_meta_vlan {
+		__be16 proto;
+		u16 tci;
+	} vlan;
+};
+
+/* Storage area for HW RX metadata only available with reasonable headroom
+ * available. Less than XDP_PACKET_HEADROOM due to Intel drivers.
+ */
+#define XDP_MIN_HEADROOM	192
+
 enum xdp_buff_flags {
 	XDP_FLAGS_HAS_FRAGS		= BIT(0), /* non-linear xdp buff */
 	XDP_FLAGS_FRAGS_PF_MEMALLOC	= BIT(1), /* xdp paged memory is under
 						   * pressure
 						   */
+	XDP_FLAGS_META_AREA		= BIT(2), /* storage area available */
+	XDP_FLAGS_META_RX_HASH		= BIT(3), /* hw rx hash */
+	XDP_FLAGS_META_RX_VLAN		= BIT(4), /* hw rx vlan */
+	XDP_FLAGS_META_RX_TS		= BIT(5), /* hw rx timestamp */
 };
 
 struct xdp_buff {
@@ -87,6 +107,24 @@ struct xdp_buff {
 	struct xdp_txq_info *txq;
 	u32 frame_sz; /* frame size to deduce data_hard_end/reserved tailroom*/
 	u32 flags; /* supported values defined in xdp_buff_flags */
+	struct xdp_rx_meta *rx_meta; /* rx hw metadata pointer in the
+				      * buffer headroom
+				      */
+};
+
+struct xdp_frame {
+	void *data;
+	u32 len;
+	u32 headroom;
+	u32 metasize; /* uses lower 8-bits */
+	/* Lifetime of xdp_rxq_info is limited to NAPI/enqueue time,
+	 * while mem_type is valid on remote CPU.
+	 */
+	enum xdp_mem_type mem_type:32;
+	struct net_device *dev_rx; /* used by cpumap */
+	u32 frame_sz;
+	u32 flags; /* supported values defined in xdp_buff_flags */
+	struct xdp_rx_meta rx_meta; /* rx hw metadata */
 };
 
 static __always_inline bool xdp_buff_has_frags(const struct xdp_buff *xdp)
@@ -133,6 +171,9 @@ xdp_prepare_buff(struct xdp_buff *xdp, unsigned char *hard_start,
 	xdp->data = data;
 	xdp->data_end = data + data_len;
 	xdp->data_meta = meta_valid ? data : data + 1;
+	xdp->flags = (headroom < XDP_MIN_HEADROOM) ? 0 : XDP_FLAGS_META_AREA;
+	xdp->rx_meta = (void *)(hard_start +
+				offsetof(struct xdp_frame, rx_meta));
 }
 
 /* Reserve memory area at end-of data area.
@@ -253,20 +294,6 @@ static inline bool xdp_buff_add_frag(struct xdp_buff *xdp, netmem_ref netmem,
 	return true;
 }
 
-struct xdp_frame {
-	void *data;
-	u32 len;
-	u32 headroom;
-	u32 metasize; /* uses lower 8-bits */
-	/* Lifetime of xdp_rxq_info is limited to NAPI/enqueue time,
-	 * while mem_type is valid on remote CPU.
-	 */
-	enum xdp_mem_type mem_type:32;
-	struct net_device *dev_rx; /* used by cpumap */
-	u32 frame_sz;
-	u32 flags; /* supported values defined in xdp_buff_flags */
-};
-
 static __always_inline bool xdp_frame_has_frags(const struct xdp_frame *frame)
 {
 	return !!(frame->flags & XDP_FLAGS_HAS_FRAGS);
@@ -355,6 +382,8 @@ void xdp_convert_frame_to_buff(const struct xdp_frame *frame,
 	xdp->data_meta = frame->data - frame->metasize;
 	xdp->frame_sz = frame->frame_sz;
 	xdp->flags = frame->flags;
+	xdp->rx_meta = xdp->data_hard_start +
+		       offsetof(struct xdp_frame, rx_meta);
 }
 
 static inline
diff --git a/net/core/xdp.c b/net/core/xdp.c
index f86eedad586a..61edbd424494 100644
--- a/net/core/xdp.c
+++ b/net/core/xdp.c
@@ -605,6 +605,7 @@ struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp)
 	xdpf->metasize = metasize;
 	xdpf->frame_sz = PAGE_SIZE;
 	xdpf->mem_type = MEM_TYPE_PAGE_ORDER0;
+	memcpy(&xdpf->rx_meta, xdp->rx_meta, sizeof(*xdp->rx_meta));
 
 	xsk_buff_free(xdp);
 	return xdpf;
diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
index 25a76c5ce0f1..bdeb6a4ef7dc 100644
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -569,7 +569,9 @@ struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool)
 
 	xskb->xdp.data = xskb->xdp.data_hard_start + XDP_PACKET_HEADROOM;
 	xskb->xdp.data_meta = xskb->xdp.data;
-	xskb->xdp.flags = 0;
+	xskb->xdp.flags = XDP_FLAGS_META_AREA;
+	xskb->xdp.rx_meta = (void *)(xskb->xdp.data_hard_start +
+				     offsetof(struct xdp_frame, rx_meta));
 
 	if (pool->dev)
 		xp_dma_sync_for_device(pool, xskb->dma, pool->frame_len);



Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ