Date:	Tue, 15 Jun 2010 12:14:16 +0200
From:	Eric Dumazet <eric.dumazet@...il.com>
To:	David Miller <davem@...emloft.net>
Cc:	netdev@...r.kernel.org, bhutchings@...arflare.com,
	Nick Piggin <npiggin@...e.de>
Subject: [PATCH net-next-2.6] net: Introduce u64_stats_sync infrastructure

On Monday, June 14, 2010 at 23:14 -0700, David Miller wrote:
> From: Eric Dumazet <eric.dumazet@...il.com>
> Date: Mon, 14 Jun 2010 17:59:22 +0200
> 
> > Uses a seqcount_t to synchronize stat producer and consumer, for the
> > packets and bytes counters, which are now u64 types.
> > 
> > (the dropped counter, being rarely used, stays a native "unsigned long" type)
> > 
> > No noticeable performance impact on x86, as it only adds two increments
> > per frame. It might be more expensive on arches where smp_wmb() is not
> > free.
> > 
> > Signed-off-by: Eric Dumazet <eric.dumazet@...il.com>
> 
> Applied, but I suspect we might end up eventually needing to
> abstract this kind of technique in a common place so other
> spots can use it.

Here is the followup patch to abstract things a bit, before upcoming
conversions.

Thanks!

[PATCH net-next-2.6] net: Introduce u64_stats_sync infrastructure

To properly implement 64bit network statistics on 32bit or 64bit hosts,
we provide one new type and four helper methods to ease conversions.
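
For reference, here are the type and the four helpers (full definitions
are in the include/linux/netdevice.h hunk below; on 64bit hosts, or on
32bit without SMP, they all compile away to nothing):

struct u64_stats_sync;	/* wraps a seqcount_t on 32bit SMP, empty otherwise */

void u64_stats_update_begin(struct u64_stats_sync *syncp);
void u64_stats_update_end(struct u64_stats_sync *syncp);
unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp);
bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
			   unsigned int start);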

A stats producer should use the following template, provided it already
has exclusive access to the counters (a lock is already held, or the
counters are per-cpu data used in a non-preemptible context).

Let me repeat: stats producers must be serialized by other means before
using this template, and preemption must be disabled.

u64_stats_update_begin(&stats->syncp);
stats->bytes += len;
stats->packets++;
u64_stats_update_end(&stats->syncp);

A consumer, on the other hand, should use the following template to get
a consistent snapshot:

u64 tbytes, tpackets;
unsigned int start;

do {
	start = u64_stats_fetch_begin(&stats->syncp);
	tbytes = stats->bytes;
	tpackets = stats->packets;
} while (u64_stats_fetch_retry(&stats->syncp, start));

This patch uses this infrastructure in the net loopback driver, replacing
the driver-specific version added in commit 6b10de38f0ef ("loopback:
Implement 64bit stats on 32bit arches").
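
For a driver being converted, the change boils down to embedding the new
type in its per-cpu stats structure and wrapping counter updates and
reads with the four helpers. A minimal sketch, using a hypothetical
pcpu_xstats structure (the pcpu_lstats hunks below are the real example):

struct pcpu_xstats {
	u64			packets;
	u64			bytes;
	struct u64_stats_sync	syncp;	/* empty on 64bit or !SMP builds */
	unsigned long		drops;	/* rarely used: stays a native long */
};

Any open-coded seqcount_t and BITS_PER_LONG==32 conditionals can then be
removed, as the loopback diff shows.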

Suggested by David Miller

Signed-off-by: Eric Dumazet <eric.dumazet@...il.com>
CC: Nick Piggin <npiggin@...e.de>
---
 drivers/net/loopback.c    |   61 ++++++++----------------------------
 include/linux/netdevice.h |   50 +++++++++++++++++++++++++++++
 2 files changed, 65 insertions(+), 46 deletions(-)

diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 09334f8..f20b156 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -60,51 +60,12 @@
 #include <net/net_namespace.h>
 
 struct pcpu_lstats {
-	u64 packets;
-	u64 bytes;
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
-	seqcount_t seq;
-#endif
-	unsigned long drops;
+	u64			packets;
+	u64			bytes;
+	struct u64_stats_sync	syncp;
+	unsigned long		drops;
 };
 
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
-static void inline lstats_update_begin(struct pcpu_lstats *lstats)
-{
-	write_seqcount_begin(&lstats->seq);
-}
-static void inline lstats_update_end(struct pcpu_lstats *lstats)
-{
-	write_seqcount_end(&lstats->seq);
-}
-static void inline lstats_fetch_and_add(u64 *packets, u64 *bytes, const struct pcpu_lstats *lstats)
-{
-	u64 tpackets, tbytes;
-	unsigned int seq;
-
-	do {
-		seq = read_seqcount_begin(&lstats->seq);
-		tpackets = lstats->packets;
-		tbytes = lstats->bytes;
-	} while (read_seqcount_retry(&lstats->seq, seq));
-
-	*packets += tpackets;
-	*bytes += tbytes;
-}
-#else
-static void inline lstats_update_begin(struct pcpu_lstats *lstats)
-{
-}
-static void inline lstats_update_end(struct pcpu_lstats *lstats)
-{
-}
-static void inline lstats_fetch_and_add(u64 *packets, u64 *bytes, const struct pcpu_lstats *lstats)
-{
-	*packets += lstats->packets;
-	*bytes += lstats->bytes;
-}
-#endif
-
 /*
  * The higher levels take care of making this non-reentrant (it's
  * called with bh's disabled).
@@ -126,10 +87,10 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,
 
 	len = skb->len;
 	if (likely(netif_rx(skb) == NET_RX_SUCCESS)) {
-		lstats_update_begin(lb_stats);
+		u64_stats_update_begin(&lb_stats->syncp);
 		lb_stats->bytes += len;
 		lb_stats->packets++;
-		lstats_update_end(lb_stats);
+		u64_stats_update_end(&lb_stats->syncp);
 	} else
 		lb_stats->drops++;
 
@@ -148,10 +109,18 @@ static struct rtnl_link_stats64 *loopback_get_stats64(struct net_device *dev)
 	pcpu_lstats = (void __percpu __force *)dev->ml_priv;
 	for_each_possible_cpu(i) {
 		const struct pcpu_lstats *lb_stats;
+		u64 tbytes, tpackets;
+		unsigned int start;
 
 		lb_stats = per_cpu_ptr(pcpu_lstats, i);
-		lstats_fetch_and_add(&packets, &bytes, lb_stats);
+		do {
+			start = u64_stats_fetch_begin(&lb_stats->syncp);
+			tbytes = lb_stats->bytes;
+			tpackets = lb_stats->packets;
+		} while (u64_stats_fetch_retry(&lb_stats->syncp, start));
 		drops   += lb_stats->drops;
+		bytes   += tbytes;
+		packets += tpackets;
 	}
 	stats->rx_packets = packets;
 	stats->tx_packets = packets;
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 4fbccc5..dd1d93d 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -174,6 +174,56 @@ static inline bool dev_xmit_complete(int rc)
 #define NET_DEVICE_STATS_DEFINE(name)	unsigned long pad_ ## name, name
 #endif
 
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+struct u64_stats_sync {
+	seqcount_t	seq;
+};
+
+static void inline u64_stats_update_begin(struct u64_stats_sync *syncp)
+{
+	write_seqcount_begin(&syncp->seq);
+}
+
+static void inline u64_stats_update_end(struct u64_stats_sync *syncp)
+{
+	write_seqcount_end(&syncp->seq);
+}
+
+static unsigned int inline u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
+{
+	return read_seqcount_begin(&syncp->seq);
+}
+
+static bool inline u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
+					 unsigned int start)
+{
+	return read_seqcount_retry(&syncp->seq, start);
+}
+
+#else
+struct u64_stats_sync {
+};
+
+static void inline u64_stats_update_begin(struct u64_stats_sync *syncp)
+{
+}
+
+static void inline u64_stats_update_end(struct u64_stats_sync *syncp)
+{
+}
+
+static unsigned int inline u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
+{
+	return 0;
+}
+
+static bool inline u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
+					 unsigned int start)
+{
+	return false;
+}
+#endif
+
 struct net_device_stats {
 	NET_DEVICE_STATS_DEFINE(rx_packets);
 	NET_DEVICE_STATS_DEFINE(tx_packets);

