Date:   Tue, 24 Apr 2018 23:39:22 +0900
From:   Toshiaki Makita <toshiaki.makita1@...il.com>
To:     netdev@...r.kernel.org
Cc:     Toshiaki Makita <makita.toshiaki@....ntt.co.jp>
Subject: [PATCH RFC 8/9] veth: Avoid per-packet spinlock of XDP napi ring on dequeueing

From: Toshiaki Makita <makita.toshiaki@....ntt.co.jp>

Dequeueing packets one by one with ptr_ring_consume() takes the ring's
consumer spinlock for every packet. Use ptr_ring_consume_batched() with
per-cpu temporary storage instead, so that a single lock acquisition
dequeues up to VETH_XDP_QUEUE_SIZE (NAPI_POLL_WEIGHT) packets.
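
For illustration only (this sketch is not part of the diff below), the
two dequeue patterns compare as follows. The demo_* names are invented
for this example; ptr_ring_consume(), ptr_ring_consume_batched(),
DEFINE_PER_CPU() and this_cpu_ptr() are the in-tree APIs from
<linux/ptr_ring.h> and <linux/percpu.h>.

#include <linux/kernel.h>	/* min() */
#include <linux/netdevice.h>	/* NAPI_POLL_WEIGHT */
#include <linux/percpu.h>
#include <linux/ptr_ring.h>

#define DEMO_QUEUE_SIZE	NAPI_POLL_WEIGHT

static DEFINE_PER_CPU(void *[DEMO_QUEUE_SIZE], demo_consume_q);

/* Before: ptr_ring_consume() takes r->consumer_lock once per entry. */
static int demo_drain_locked(struct ptr_ring *r, int budget)
{
	int done = 0;

	while (done < budget) {
		void *ptr = ptr_ring_consume(r); /* lock/unlock per packet */

		if (!ptr)
			break;
		/* ... process ptr ... */
		done++;
	}

	return done;
}

/* After: one lock round trip moves up to DEMO_QUEUE_SIZE entries into
 * per-cpu scratch storage, which is then walked without the lock.
 */
static int demo_drain_batched(struct ptr_ring *r, int budget)
{
	void **q = this_cpu_ptr(demo_consume_q);
	int num, lim, done = 0;

	do {
		int i;

		lim = min(budget - done, DEMO_QUEUE_SIZE);
		num = ptr_ring_consume_batched(r, q, lim);
		for (i = 0; i < num; i++) {
			/* ... process q[i] ... */
		}
		done += num;
	} while (num == lim && done < budget);

	return done;
}

The per-cpu scratch array relies on the consumer running from NAPI
poll in softirq context, so it cannot be re-entered on the same CPU.
With the usual NAPI budget of NAPI_POLL_WEIGHT (64) the do/while body
runs exactly once; the loop only iterates for larger budgets.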

Signed-off-by: Toshiaki Makita <makita.toshiaki@....ntt.co.jp>
---
 drivers/net/veth.c | 46 +++++++++++++++++++++++++++-------------------
 1 file changed, 27 insertions(+), 19 deletions(-)

diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 98fc91a64e29..1592119e3873 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -30,6 +30,7 @@
 #define VETH_XDP_FLAG		0x1UL
 #define VETH_RING_SIZE		256
 #define VETH_XDP_HEADROOM	(XDP_PACKET_HEADROOM + NET_IP_ALIGN)
+#define VETH_XDP_QUEUE_SIZE	NAPI_POLL_WEIGHT
 
 struct pcpu_vstats {
 	u64			packets;
@@ -50,6 +51,8 @@ struct veth_priv {
 	struct xdp_rxq_info	xdp_rxq;
 };
 
+static DEFINE_PER_CPU(void *[VETH_XDP_QUEUE_SIZE], xdp_consume_q);
+
 static bool veth_is_xdp_frame(void *ptr)
 {
 	return (unsigned long)ptr & VETH_XDP_FLAG;
@@ -563,27 +566,32 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_priv *priv,
 static int veth_xdp_rcv(struct veth_priv *priv, int budget, bool *xdp_xmit,
 			bool *xdp_redir)
 {
-	int i, done = 0;
-
-	for (i = 0; i < budget; i++) {
-		void *ptr = ptr_ring_consume(&priv->xdp_ring);
-		struct sk_buff *skb;
-
-		if (!ptr)
-			break;
+	void **q = this_cpu_ptr(xdp_consume_q);
+	int num, lim, done = 0;
+
+	do {
+		int i;
+
+		lim = min(budget - done, VETH_XDP_QUEUE_SIZE);
+		num = ptr_ring_consume_batched(&priv->xdp_ring, q, lim);
+		for (i = 0; i < num; i++) {
+			struct sk_buff *skb;
+			void *ptr = q[i];
+
+			if (veth_is_xdp_frame(ptr)) {
+				skb = veth_xdp_rcv_one(priv,
+						       veth_ptr_to_xdp(ptr),
+						       xdp_xmit, xdp_redir);
+			} else {
+				skb = veth_xdp_rcv_skb(priv, ptr, xdp_xmit,
+						       xdp_redir);
+			}
 
-		if (veth_is_xdp_frame(ptr)) {
-			skb = veth_xdp_rcv_one(priv, veth_ptr_to_xdp(ptr),
-					       xdp_xmit, xdp_redir);
-		} else {
-			skb = veth_xdp_rcv_skb(priv, ptr, xdp_xmit, xdp_redir);
+			if (skb)
+				napi_gro_receive(&priv->xdp_napi, skb);
 		}
-
-		if (skb)
-			napi_gro_receive(&priv->xdp_napi, skb);
-
-		done++;
-	}
+		done += num;
+	} while (unlikely(num == lim && done < budget));
 
 	return done;
 }
-- 
2.14.3
