Message-ID: <20210330231528.546284-3-alobakin@pm.me>
Date:   Tue, 30 Mar 2021 23:15:59 +0000
From:   Alexander Lobakin <alobakin@...me>
To:     Alexei Starovoitov <ast@...nel.org>,
        Daniel Borkmann <daniel@...earbox.net>
Cc:     Xuan Zhuo <xuanzhuo@...ux.alibaba.com>,
        Björn Töpel <bjorn@...nel.org>,
        Magnus Karlsson <magnus.karlsson@...el.com>,
        Jonathan Lemon <jonathan.lemon@...il.com>,
        "David S. Miller" <davem@...emloft.net>,
        Jakub Kicinski <kuba@...nel.org>,
        Jesper Dangaard Brouer <hawk@...nel.org>,
        John Fastabend <john.fastabend@...il.com>,
        Andrii Nakryiko <andrii@...nel.org>,
        Martin KaFai Lau <kafai@...com>,
        Song Liu <songliubraving@...com>, Yonghong Song <yhs@...com>,
        KP Singh <kpsingh@...nel.org>,
        Alexander Lobakin <alobakin@...me>, netdev@...r.kernel.org,
        bpf@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: [PATCH bpf-next 2/2] xsk: introduce generic almost-zerocopy xmit

The reasons behind IFF_TX_SKB_NO_LINEAR are:
 - most drivers expect an skb with linear space;
 - most drivers expect the hard header to reside in the linear space;
 - many drivers need some headroom to insert custom headers and/or
   pull headers from frags (pskb_may_pull() etc.).

With a small amount of overhead, we can satisfy all of these
requirements without copying the full buffer data.
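
To illustrate the points above, here is a sketch of a typical driver
Tx path relying on those assumptions (illustration only, not from this
patch; foo_hdr and foo_start_xmit() are hypothetical):

    /* Hypothetical driver: it expects the hard header in the linear
     * area and needs headroom to push its own descriptor in front.
     */
    struct foo_hdr {
            __le16 flags;
            __le16 len;
    };

    static netdev_tx_t foo_start_xmit(struct sk_buff *skb,
                                      struct net_device *dev)
    {
            struct foo_hdr *fh;

            /* hard header must be readable from the linear space */
            if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
                    goto drop;

            /* headroom is needed to insert a custom header */
            if (unlikely(skb_cow_head(skb, sizeof(*fh))))
                    goto drop;

            fh = skb_push(skb, sizeof(*fh));
            fh->flags = 0;
            fh->len = cpu_to_le16(skb->len);

            /* ...map and post to the hardware Tx ring here... */
            return NETDEV_TX_OK;

    drop:
            dev_kfree_skb_any(skb);
            return NETDEV_TX_OK;
    }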

Now frames no smaller than 128 bytes (a threshold chosen to mitigate
allocation overhead on small frames) are also built via the zerocopy
path, provided that the device and driver support S/G xmit, which is
almost always the case.
We allocate 256* additional bytes for the skb linear space and pull
the hard header there, so that its end is 16-byte aligned on
platforms with a non-zero NET_IP_ALIGN. The rest of the buffer data
is simply pinned as frags. At least 242 bytes (256 minus the 14-byte
Ethernet header) of linear room are left for any driver needs.
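
A rough picture of the resulting skb layout (illustrative only,
assuming NET_IP_ALIGN == 2 and a plain Ethernet hard header):

    skb->head
    |<------- hr ------->|<--- 14 --->|<-------- 242 -------->|
    | NET_SKB_PAD /      | Ethernet   | linear room for any   |
    | needed_headroom,   | hard       | driver needs (pulls,  |
    | plus NET_IP_ALIGN  | header     | pushes)               |
                         ^            ^
                    skb->data     end of hard header, 16-byte
                                  aligned (NET_IP_ALIGN + ETH_HLEN
                                  == 2 + 14 == 16)

    The remaining payload stays untouched in frags.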

We could instead pass the buffer to eth_get_headlen() to minimize
the amount of linear space allocated and copy all of the headers
into it, but the flow dissection it performs tends to be more
expensive than the fixed-size pull used here.
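
For comparison, that rejected variant would look roughly like this
(a sketch only, reusing the names from xsk_build_skb_zerocopy()
below; not part of the patch):

    /* Size the pull via flow dissection instead of a fixed ETH_HLEN.
     * eth_get_headlen() walks the L2-L4 headers, so this costs a
     * flow dissection per frame.
     */
    u32 headlen = eth_get_headlen(xs->dev, buffer, desc->len);

    if (unlikely(!__pskb_pull_tail(skb, headlen))) {
            kfree_skb(skb);
            return ERR_PTR(-ENOMEM);
    }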

The IFF_TX_SKB_NO_LINEAR path remains unchanged: it is still relevant
and generally faster.

* The value of 256 bytes is somewhat "magic": it can be found in many
  drivers and in places across the core networking code, and it is
  believed that 256 bytes are enough to hold the headers of any frame.

Cc: Xuan Zhuo <xuanzhuo@...ux.alibaba.com>
Signed-off-by: Alexander Lobakin <alobakin@...me>
---
 net/xdp/xsk.c | 26 ++++++++++++++++++++++----
 1 file changed, 22 insertions(+), 4 deletions(-)

diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 41f8f21b3348..090ff9c096a3 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -445,6 +445,9 @@ static void xsk_destruct_skb(struct sk_buff *skb)
 	sock_wfree(skb);
 }

+#define XSK_SKB_HEADLEN		256
+#define XSK_COPY_THRESHOLD	(XSK_SKB_HEADLEN / 2)
+
 static struct sk_buff *xsk_build_skb_zerocopy(struct xdp_sock *xs,
 					      struct xdp_desc *desc)
 {
@@ -452,13 +455,22 @@ static struct sk_buff *xsk_build_skb_zerocopy(struct xdp_sock *xs,
 	u32 hr, len, ts, offset, copy, copied;
 	struct sk_buff *skb;
 	struct page *page;
+	bool need_pull;
 	void *buffer;
 	int err, i;
 	u64 addr;

 	hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(xs->dev->needed_headroom));
+	len = hr;
+
+	need_pull = !(xs->dev->priv_flags & IFF_TX_SKB_NO_LINEAR);
+	if (need_pull) {
+		len += XSK_SKB_HEADLEN;
+		len += NET_IP_ALIGN;
+		hr += NET_IP_ALIGN;
+	}

-	skb = sock_alloc_send_skb(&xs->sk, hr, 1, &err);
+	skb = sock_alloc_send_skb(&xs->sk, len, 1, &err);
 	if (unlikely(!skb))
 		return ERR_PTR(err);

@@ -488,6 +500,11 @@ static struct sk_buff *xsk_build_skb_zerocopy(struct xdp_sock *xs,
 	skb->data_len += len;
 	skb->truesize += ts;

+	if (need_pull && unlikely(!__pskb_pull_tail(skb, ETH_HLEN))) {
+		kfree_skb(skb);
+		return ERR_PTR(-ENOMEM);
+	}
+
 	refcount_add(ts, &xs->sk.sk_wmem_alloc);

 	return skb;
@@ -498,19 +515,20 @@ static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
 {
 	struct net_device *dev = xs->dev;
 	struct sk_buff *skb;
+	u32 len = desc->len;

-	if (dev->priv_flags & IFF_TX_SKB_NO_LINEAR) {
+	if ((dev->priv_flags & IFF_TX_SKB_NO_LINEAR) ||
+	    (len >= XSK_COPY_THRESHOLD && likely(dev->features & NETIF_F_SG))) {
 		skb = xsk_build_skb_zerocopy(xs, desc);
 		if (IS_ERR(skb))
 			return skb;
 	} else {
-		u32 hr, tr, len;
 		void *buffer;
+		u32 hr, tr;
 		int err;

 		hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(dev->needed_headroom));
 		tr = dev->needed_tailroom;
-		len = desc->len;

 		skb = sock_alloc_send_skb(&xs->sk, hr + len + tr, 1, &err);
 		if (unlikely(!skb))
--
2.31.1

