Message-Id: <20191108002722.129055-8-edumazet@google.com>
Date: Thu, 7 Nov 2019 16:27:20 -0800
From: Eric Dumazet <edumazet@...gle.com>
To: "David S . Miller" <davem@...emloft.net>
Cc: netdev <netdev@...r.kernel.org>,
Eric Dumazet <edumazet@...gle.com>,
Eric Dumazet <eric.dumazet@...il.com>
Subject: [PATCH net-next 7/9] u64_stats: provide u64_stats_t type

On 64bit arches, struct u64_stats_sync is empty and provides
no help against load/store tearing.

Using READ_ONCE()/WRITE_ONCE() would be needed, but this would
make the update side slightly more expensive.

local64_t was defined so that we can use regular adds in a manner
which is atomic wrt IRQs; it therefore backs u64_stats_t on 64bit
arches.

On 32bit arches, the u64_stats infra means we do not have to use
local64_t, since the syncp already provides the needed protection;
a plain u64 is enough there.
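
For illustration, an update path using the new type and helpers
could look like the sketch below (mystats and mystats_update are
hypothetical names, not part of this patch):

	struct mystats {
		u64_stats_t		bytes64;
		u64_stats_t		packets64;
		struct u64_stats_sync	syncp;
	};

	/* Writer side; caller must already have exclusive access,
	 * e.g. a lock or per-cpu data with BH disabled.
	 */
	static void mystats_update(struct mystats *stats, unsigned int len)
	{
		u64_stats_update_begin(&stats->syncp);
		/* Plain add on 32bit (the syncp protects readers),
		 * local64_add() on 64bit.
		 */
		u64_stats_add(&stats->bytes64, len);
		u64_stats_inc(&stats->packets64);
		u64_stats_update_end(&stats->syncp);
	}
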
Signed-off-by: Eric Dumazet <edumazet@...gle.com>
---
include/linux/u64_stats_sync.h | 51 +++++++++++++++++++++++++++++++---
1 file changed, 47 insertions(+), 4 deletions(-)

diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
index a27604f99ed044d3492395c0328f41538234d7dc..9de5c10293f593fac6f64458d78f413c3c9fe26c 100644
--- a/include/linux/u64_stats_sync.h
+++ b/include/linux/u64_stats_sync.h
@@ -40,8 +40,8 @@
* spin_lock_bh(...) or other synchronization to get exclusive access
* ...
* u64_stats_update_begin(&stats->syncp);
- * stats->bytes64 += len; // non atomic operation
- * stats->packets64++; // non atomic operation
+ * u64_stats_add(&stats->bytes64, len); // non atomic operation
+ * u64_stats_inc(&stats->packets64); // non atomic operation
* u64_stats_update_end(&stats->syncp);
*
* While a consumer (reader) should use following template to get consistent
@@ -52,8 +52,8 @@
*
* do {
* start = u64_stats_fetch_begin(&stats->syncp);
- * tbytes = stats->bytes64; // non atomic operation
- * tpackets = stats->packets64; // non atomic operation
+ * tbytes = u64_stats_read(&stats->bytes64); // non atomic operation
+ * tpackets = u64_stats_read(&stats->packets64); // non atomic operation
* } while (u64_stats_fetch_retry(&stats->syncp, start));
*
*
@@ -68,6 +68,49 @@ struct u64_stats_sync {
#endif
};

+#if BITS_PER_LONG == 64
+#include <asm/local64.h>
+
+typedef struct {
+	local64_t	v;
+} u64_stats_t;
+
+static inline u64 u64_stats_read(const u64_stats_t *p)
+{
+	return local64_read(&p->v);
+}
+
+static inline void u64_stats_add(u64_stats_t *p, unsigned long val)
+{
+	local64_add(val, &p->v);
+}
+
+static inline void u64_stats_inc(u64_stats_t *p)
+{
+	local64_inc(&p->v);
+}
+
+#else
+
+typedef struct {
+	u64		v;
+} u64_stats_t;
+
+static inline u64 u64_stats_read(const u64_stats_t *p)
+{
+	return p->v;
+}
+
+static inline void u64_stats_add(u64_stats_t *p, unsigned long val)
+{
+	p->v += val;
+}
+
+static inline void u64_stats_inc(u64_stats_t *p)
+{
+	p->v++;
+}
+#endif

static inline void u64_stats_init(struct u64_stats_sync *syncp)
{
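
For completeness, a reader takes a consistent snapshot with the
usual fetch/retry loop; on 64bit the syncp is empty and
u64_stats_read() boils down to local64_read(). Sketch only,
reusing the hypothetical mystats struct from above:

	static void mystats_snapshot(const struct mystats *stats,
				     u64 *tbytes, u64 *tpackets)
	{
		unsigned int start;

		do {
			start = u64_stats_fetch_begin(&stats->syncp);
			*tbytes = u64_stats_read(&stats->bytes64);
			*tpackets = u64_stats_read(&stats->packets64);
		} while (u64_stats_fetch_retry(&stats->syncp, start));
	}
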
--
2.24.0.432.g9d3f5f5b63-goog