Message-Id: <20220913192538.3023708-2-jiebin.sun@intel.com>
Date: Wed, 14 Sep 2022 03:25:37 +0800
From: Jiebin Sun <jiebin.sun@...el.com>
To: akpm@...ux-foundation.org, vasily.averin@...ux.dev,
shakeelb@...gle.com, dennis@...nel.org, tj@...nel.org,
cl@...ux.com, ebiederm@...ssion.com, legion@...nel.org,
manfred@...orfullife.com, alexander.mikhalitsyn@...tuozzo.com,
linux-mm@...ck.org, linux-kernel@...r.kernel.org
Cc: tim.c.chen@...el.com, feng.tang@...el.com, ying.huang@...el.com,
tianyou.li@...el.com, wangyang.guo@...el.com, jiebin.sun@...el.com,
Tim Chen <tim.c.chen@...ux.intel.com>,
kernel test robot <lkp@...el.com>
Subject: [PATCH v6 1/2] percpu: Add percpu_counter_add_local and percpu_counter_sub_local

The batch size passed to percpu_counter_add_batch() should be very
large in the write-heavy, rarely-read case. Add "_local" variants
that mostly do local adds, reducing global updates and mitigating
lock contention on the write path.
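
A minimal usage sketch of the intended pattern (illustration only, not
part of this patch; the counter and helper names are hypothetical):
update on the hot path with the _local variants, and fold the per-cpu
counts with percpu_counter_sum() on the rare read side:

	/* hypothetical example; assumes percpu_counter_init(&nr_msgs,
	 * 0, GFP_KERNEL) was called during setup */
	static struct percpu_counter nr_msgs;

	/* hot path: frequent updates stay in the local per-cpu count */
	static void msg_queued(void)
	{
		percpu_counter_add_local(&nr_msgs, 1);
	}

	static void msg_dequeued(void)
	{
		percpu_counter_sub_local(&nr_msgs, 1);
	}

	/*
	 * rare read side: use percpu_counter_sum(), not
	 * percpu_counter_read(), so the local counts are included
	 */
	static s64 msgs_in_flight(void)
	{
		return percpu_counter_sum(&nr_msgs);
	}
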
Signed-off-by: Jiebin Sun <jiebin.sun@...el.com>
Reviewed-by: Tim Chen <tim.c.chen@...ux.intel.com>
Reported-by: kernel test robot <lkp@...el.com>
---
include/linux/percpu_counter.h | 32 ++++++++++++++++++++++++++++++++
1 file changed, 32 insertions(+)
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
index 01861eebed79..8ed5fba6d156 100644
--- a/include/linux/percpu_counter.h
+++ b/include/linux/percpu_counter.h
@@ -15,6 +15,9 @@
 #include <linux/types.h>
 #include <linux/gfp.h>
 
+/* percpu_counter batch for local add or sub */
+#define PERCPU_COUNTER_LOCAL_BATCH INT_MAX
+
 #ifdef CONFIG_SMP
 
 struct percpu_counter {
@@ -56,6 +59,22 @@ static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
 	percpu_counter_add_batch(fbc, amount, percpu_counter_batch);
 }
 
+/*
+ * With percpu_counter_add_local() and percpu_counter_sub_local(), counts
+ * are accumulated in the local per-cpu counter and not in fbc->count until
+ * the local count overflows PERCPU_COUNTER_LOCAL_BATCH. This makes counter
+ * writes efficient.
+ * But percpu_counter_sum(), instead of percpu_counter_read(), must then be
+ * used to add up the counts from each CPU to account for all the local
+ * counts. So percpu_counter_add_local() and percpu_counter_sub_local()
+ * should be used when a counter is updated frequently and read rarely.
+ */
+static inline void
+percpu_counter_add_local(struct percpu_counter *fbc, s64 amount)
+{
+	percpu_counter_add_batch(fbc, amount, PERCPU_COUNTER_LOCAL_BATCH);
+}
+
 static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
 {
 	s64 ret = __percpu_counter_sum(fbc);
@@ -138,6 +157,13 @@ percpu_counter_add(struct percpu_counter *fbc, s64 amount)
 	preempt_enable();
 }
 
+/* non-SMP percpu_counter_add_local is the same as percpu_counter_add */
+static inline void
+percpu_counter_add_local(struct percpu_counter *fbc, s64 amount)
+{
+	percpu_counter_add(fbc, amount);
+}
+
 static inline void
 percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
 {
@@ -193,4 +219,10 @@ static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
 	percpu_counter_add(fbc, -amount);
 }
 
+static inline void
+percpu_counter_sub_local(struct percpu_counter *fbc, s64 amount)
+{
+	percpu_counter_add_local(fbc, -amount);
+}
+
 #endif /* _LINUX_PERCPU_COUNTER_H */
--
2.31.1