Message-Id: <20180723143609.2242-4-toshiaki.makita1@gmail.com>
Date:   Mon, 23 Jul 2018 23:36:06 +0900
From:   Toshiaki Makita <toshiaki.makita1@...il.com>
To:     "Michael S. Tsirkin" <mst@...hat.com>,
        Jason Wang <jasowang@...hat.com>,
        "David S. Miller" <davem@...emloft.net>
Cc:     Toshiaki Makita <makita.toshiaki@....ntt.co.jp>,
        netdev@...r.kernel.org, virtualization@...ts.linux-foundation.org
Subject: [PATCH net-next 3/6] virtio_net: Make drop counter per-queue

From: Toshiaki Makita <makita.toshiaki@....ntt.co.jp>

Since XDP was introduced, the drop counter can be updated much more
frequently than before, because XDP_DROP increments it. A per-queue
drop counter is therefore useful for performance analysis.
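
For illustration only, here is a simplified sketch of the XDP verdict
handling in the RX path (assumed helper name, not the driver's exact
code) showing why XDP makes drops frequent: every XDP_DROP verdict
bumps the drop counter once per packet in the hot path:

#include <linux/bpf.h>
#include <linux/filter.h>

/* Hypothetical RX helper: run the attached XDP program on one buffer
 * and count a drop for anything that is not passed up the stack.
 */
static bool rx_xdp_pass_or_drop(struct bpf_prog *xdp_prog,
				struct xdp_buff *xdp, u64 *drops)
{
	u32 act = bpf_prog_run_xdp(xdp_prog, xdp);

	switch (act) {
	case XDP_PASS:
		return true;		/* build an skb and pass it up */
	case XDP_DROP:
	default:
		(*drops)++;		/* per-queue counter after this patch */
		return false;
	}
}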

This also avoids cache contention and races when updating the counter.
The counter is currently racy because NAPI handlers read-modify-write
it without any locking.
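
As a minimal sketch of the per-queue pattern this patch follows
(simplified, not the driver code verbatim): the NAPI handler is the
only writer of its queue's counters, so plain u64 fields protected by
u64_stats_sync are enough and no locks or atomics are needed:

#include <linux/u64_stats_sync.h>

/* Simplified per-queue stats: written only by the queue's NAPI handler,
 * read by ndo_get_stats64 on any CPU.
 */
struct rq_stats_sketch {
	struct u64_stats_sync syncp;
	u64 packets;
	u64 drops;
};

/* Writer side, NAPI context only: no lock, just the seqcount marker. */
static void rq_count_drop(struct rq_stats_sketch *stats)
{
	u64_stats_update_begin(&stats->syncp);
	stats->drops++;
	u64_stats_update_end(&stats->syncp);
}

/* Reader side: retry until a consistent snapshot is observed. */
static u64 rq_read_drops(struct rq_stats_sketch *stats)
{
	unsigned int start;
	u64 drops;

	do {
		start = u64_stats_fetch_begin_irq(&stats->syncp);
		drops = stats->drops;
	} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

	return drops;
}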

There are other counters in dev->stats that are also racy, but I left
them per-device because they are rarely updated and IMHO are not worth
making per-queue. Fixing them would require atomic ops or some kind of
locking.
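
Purely as a hypothetical sketch of that alternative (not part of this
patch), an atomic64_t would make the read-modify-write safe without
per-queue storage, at the cost of an atomic op on a shared cache line:

#include <linux/atomic.h>
#include <linux/types.h>

/* Hypothetical per-device error counters kept as atomics. */
struct dev_err_stats_sketch {
	atomic64_t rx_length_errors;
};

static void count_length_error(struct dev_err_stats_sketch *s)
{
	atomic64_inc(&s->rx_length_errors);	/* safe from any context */
}

static u64 read_length_errors(struct dev_err_stats_sketch *s)
{
	return (u64)atomic64_read(&s->rx_length_errors);
}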

Signed-off-by: Toshiaki Makita <makita.toshiaki@....ntt.co.jp>
---
 drivers/net/virtio_net.c | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index d03bfc4fce8e..7a47ce750a43 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -87,6 +87,7 @@ struct virtnet_sq_stats {
 struct virtnet_rq_stat_items {
 	u64 packets;
 	u64 bytes;
+	u64 drops;
 };
 
 struct virtnet_rq_stats {
@@ -109,6 +110,7 @@ static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = {
 static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
 	{ "packets",	VIRTNET_RQ_STAT(packets) },
 	{ "bytes",	VIRTNET_RQ_STAT(bytes) },
+	{ "drops",	VIRTNET_RQ_STAT(drops) },
 };
 
 #define VIRTNET_SQ_STATS_LEN	ARRAY_SIZE(virtnet_sq_stats_desc)
@@ -705,7 +707,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
 
 err_xdp:
 	rcu_read_unlock();
-	dev->stats.rx_dropped++;
+	stats->rx.drops++;
 	put_page(page);
 xdp_xmit:
 	return NULL;
@@ -728,7 +730,7 @@ static struct sk_buff *receive_big(struct net_device *dev,
 	return skb;
 
 err:
-	dev->stats.rx_dropped++;
+	stats->rx.drops++;
 	give_pages(rq, page);
 	return NULL;
 }
@@ -952,7 +954,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 		put_page(page);
 	}
 err_buf:
-	dev->stats.rx_dropped++;
+	stats->rx.drops++;
 	dev_kfree_skb(head_skb);
 xdp_xmit:
 	return NULL;
@@ -1632,7 +1634,7 @@ static void virtnet_stats(struct net_device *dev,
 	int i;
 
 	for (i = 0; i < vi->max_queue_pairs; i++) {
-		u64 tpackets, tbytes, rpackets, rbytes;
+		u64 tpackets, tbytes, rpackets, rbytes, rdrops;
 		struct receive_queue *rq = &vi->rq[i];
 		struct send_queue *sq = &vi->sq[i];
 
@@ -1646,17 +1648,18 @@ static void virtnet_stats(struct net_device *dev,
 			start = u64_stats_fetch_begin_irq(&rq->stats.syncp);
 			rpackets = rq->stats.items.packets;
 			rbytes   = rq->stats.items.bytes;
+			rdrops   = rq->stats.items.drops;
 		} while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start));
 
 		tot->rx_packets += rpackets;
 		tot->tx_packets += tpackets;
 		tot->rx_bytes   += rbytes;
 		tot->tx_bytes   += tbytes;
+		tot->rx_dropped += rdrops;
 	}
 
 	tot->tx_dropped = dev->stats.tx_dropped;
 	tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
-	tot->rx_dropped = dev->stats.rx_dropped;
 	tot->rx_length_errors = dev->stats.rx_length_errors;
 	tot->rx_frame_errors = dev->stats.rx_frame_errors;
 }
-- 
2.14.3
