Message-ID: <8f86083b-c452-95d4-365b-f16a2e4ebcd4@google.com>
Date: Wed, 11 Oct 2023 21:40:09 -0700 (PDT)
From: Hugh Dickins <hughd@...gle.com>
To: Andrew Morton <akpm@...ux-foundation.org>
cc: Dave Chinner <david@...morbit.com>,
Tim Chen <tim.c.chen@...el.com>,
Dave Chinner <dchinner@...hat.com>,
"Darrick J. Wong" <djwong@...nel.org>,
Christian Brauner <brauner@...nel.org>,
Carlos Maiolino <cem@...nel.org>,
Chuck Lever <chuck.lever@...cle.com>, Jan Kara <jack@...e.cz>,
Matthew Wilcox <willy@...radead.org>,
Johannes Weiner <hannes@...xchg.org>,
Axel Rasmussen <axelrasmussen@...gle.com>,
Dennis Zhou <dennisszhou@...il.com>,
linux-fsdevel@...r.kernel.org, linux-kernel@...r.kernel.org,
linux-mm@...ck.org
Subject: [PATCH 9/8] percpu_counter: extend _limited_add() to negative
amounts

Though tmpfs does not need it, percpu_counter_limited_add() can be twice
as useful if it works sensibly with negative amounts (subs) - typically
decrements towards a limit of 0 or nearby: as suggested by Dave Chinner.

And in the course of that reworking, skip the percpu counter sum if it is
already obvious that the limit would be passed: as suggested by Tim Chen.

Extend the comment above __percpu_counter_limited_add(), defining the
behaviour with positive and negative amounts, allowing negative limits,
but not bothering about overflow beyond S64_MAX.
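
For illustration only, here is a minimal userspace sketch (hypothetical
names, not the kernel API) of the same accept/reject rules on a plain s64,
mirroring the non-SMP inline below: amount 0 always succeeds, a positive
amount must not take the total above the limit, and a negative amount must
not take it below the limit.

	/* Standalone sketch of the UP semantics, illustration only. */
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	typedef int64_t s64;

	static bool limited_add(s64 *count, s64 limit, s64 amount)
	{
		s64 sum = *count + amount;

		if (amount == 0)
			return true;
		if ((amount > 0 && sum <= limit) ||
		    (amount < 0 && sum >= limit)) {
			*count = sum;
			return true;
		}
		return false;
	}

	int main(void)
	{
		s64 count = 0;

		printf("%d\n", limited_add(&count, 4, 3));	/* 1: 0+3 <= 4 */
		printf("%d\n", limited_add(&count, 4, 3));	/* 0: 3+3 >  4 */
		printf("%d\n", limited_add(&count, 0, -3));	/* 1: 3-3 >= 0 */
		printf("%d\n", limited_add(&count, 0, -3));	/* 0: 0-3 <  0 */
		return 0;
	}
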
Signed-off-by: Hugh Dickins <hughd@...gle.com>
---
 include/linux/percpu_counter.h | 11 +++++--
 lib/percpu_counter.c           | 54 +++++++++++++++++++++++++---------
 2 files changed, 49 insertions(+), 16 deletions(-)
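
As an aside on the sum-skipping in the SMP path below: each online CPU's
unfolded per-cpu delta is assumed to stay within the batch, so the true
total lies within fbc->count +/- batch * num_online_cpus().  A standalone
sketch of that decision (illustration only, names hypothetical, "count"
being the provisional fbc->count + amount as in the patch):

	#include <stdint.h>
	#include <stdio.h>

	typedef int64_t s64;

	enum verdict { MUST_FAIL, MUST_SUCCEED, NEED_SUM };

	static enum verdict quick_check(s64 count, s64 unknown, s64 limit, s64 amount)
	{
		if (amount > 0) {
			if (count - unknown > limit)
				return MUST_FAIL;	/* over the limit even at the low bound */
			if (count + unknown <= limit)
				return MUST_SUCCEED;	/* within the limit even at the high bound */
		} else {
			if (count + unknown < limit)
				return MUST_FAIL;	/* below the limit even at the high bound */
			if (count - unknown >= limit)
				return MUST_SUCCEED;	/* above the limit even at the low bound */
		}
		return NEED_SUM;			/* inconclusive: sum the per-cpu counts */
	}

	int main(void)
	{
		/* e.g. limit 100, fbc->count 10, amount 5, 4 CPUs: batch 32 vs batch 8 */
		printf("%d\n", quick_check(10 + 5, 32 * 4, 100, 5));	/* 2: NEED_SUM */
		printf("%d\n", quick_check(10 + 5, 8 * 4, 100, 5));	/* 1: MUST_SUCCEED */
		return 0;
	}
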
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
index 8cb7c071bd5c..3a44dd1e33d2 100644
--- a/include/linux/percpu_counter.h
+++ b/include/linux/percpu_counter.h
@@ -198,14 +198,21 @@ static inline bool
percpu_counter_limited_add(struct percpu_counter *fbc, s64 limit, s64 amount)
{
unsigned long flags;
+ bool good = false;
s64 count;
+ if (amount == 0)
+ return true;
+
local_irq_save(flags);
count = fbc->count + amount;
- if (count <= limit)
+ if ((amount > 0 && count <= limit) ||
+ (amount < 0 && count >= limit)) {
fbc->count = count;
+ good = true;
+ }
local_irq_restore(flags);
- return count <= limit;
+ return good;
}
/* non-SMP percpu_counter_add_local is the same with percpu_counter_add */
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 58a3392f471b..44dd133594d4 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -279,8 +279,16 @@ int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
EXPORT_SYMBOL(__percpu_counter_compare);
/*
- * Compare counter, and add amount if the total is within limit.
- * Return true if amount was added, false if it would exceed limit.
+ * Compare counter, and add amount if total is: less than or equal to limit if
+ * amount is positive, or greater than or equal to limit if amount is negative.
+ * Return true if amount is added, or false if total would be beyond the limit.
+ *
+ * Negative limit is allowed, but unusual.
+ * When negative amounts (subs) are given to percpu_counter_limited_add(),
+ * the limit would most naturally be 0 - but other limits are also allowed.
+ *
+ * Overflow beyond S64_MAX is not allowed for: counter, limit and amount
+ * are all assumed to be sane (far from S64_MIN and S64_MAX).
*/
bool __percpu_counter_limited_add(struct percpu_counter *fbc,
s64 limit, s64 amount, s32 batch)
@@ -288,10 +296,10 @@ bool __percpu_counter_limited_add(struct percpu_counter *fbc,
s64 count;
s64 unknown;
unsigned long flags;
- bool good;
+ bool good = false;
- if (amount > limit)
- return false;
+ if (amount == 0)
+ return true;
local_irq_save(flags);
unknown = batch * num_online_cpus();
@@ -299,7 +307,8 @@ bool __percpu_counter_limited_add(struct percpu_counter *fbc,
/* Skip taking the lock when safe */
if (abs(count + amount) <= batch &&
- fbc->count + unknown <= limit) {
+ ((amount > 0 && fbc->count + unknown <= limit) ||
+ (amount < 0 && fbc->count - unknown >= limit))) {
this_cpu_add(*fbc->counters, amount);
local_irq_restore(flags);
return true;
@@ -309,7 +318,19 @@ bool __percpu_counter_limited_add(struct percpu_counter *fbc,
count = fbc->count + amount;
/* Skip percpu_counter_sum() when safe */
- if (count + unknown > limit) {
+ if (amount > 0) {
+ if (count - unknown > limit)
+ goto out;
+ if (count + unknown <= limit)
+ good = true;
+ } else {
+ if (count + unknown < limit)
+ goto out;
+ if (count - unknown >= limit)
+ good = true;
+ }
+
+ if (!good) {
s32 *pcount;
int cpu;
@@ -317,15 +338,20 @@ bool __percpu_counter_limited_add(struct percpu_counter *fbc,
pcount = per_cpu_ptr(fbc->counters, cpu);
count += *pcount;
}
+ if (amount > 0) {
+ if (count > limit)
+ goto out;
+ } else {
+ if (count < limit)
+ goto out;
+ }
+ good = true;
}
- good = count <= limit;
- if (good) {
- count = __this_cpu_read(*fbc->counters);
- fbc->count += count + amount;
- __this_cpu_sub(*fbc->counters, count);
- }
-
+ count = __this_cpu_read(*fbc->counters);
+ fbc->count += count + amount;
+ __this_cpu_sub(*fbc->counters, count);
+out:
raw_spin_unlock(&fbc->lock);
local_irq_restore(flags);
return good;
--
2.35.3