Message-ID: <DM6PR11MB4107F132CC1203486A91A4DEDCCAA@DM6PR11MB4107.namprd11.prod.outlook.com>
Date:   Thu, 5 Oct 2023 16:50:26 +0000
From:   "Chen, Tim C" <tim.c.chen@...el.com>
To:     Hugh Dickins <hughd@...gle.com>,
        Andrew Morton <akpm@...ux-foundation.org>
CC:     Dave Chinner <dchinner@...hat.com>,
        "Darrick J. Wong" <djwong@...nel.org>,
        Christian Brauner <brauner@...nel.org>,
        Carlos Maiolino <cem@...nel.org>,
        Chuck Lever <chuck.lever@...cle.com>, Jan Kara <jack@...e.cz>,
        Matthew Wilcox <willy@...radead.org>,
        Johannes Weiner <hannes@...xchg.org>,
        Axel Rasmussen <axelrasmussen@...gle.com>,
        "linux-fsdevel@...r.kernel.org" <linux-fsdevel@...r.kernel.org>,
        "linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
        "linux-mm@...ck.org" <linux-mm@...ck.org>
Subject: RE: [PATCH 8/8] shmem,percpu_counter: add _limited_add(fbc, limit,
 amount)

>Signed-off-by: Hugh Dickins <hughd@...gle.com>
>Cc: Tim Chen <tim.c.chen@...el.com>
>Cc: Dave Chinner <dchinner@...hat.com>
>Cc: Darrick J. Wong <djwong@...nel.org>
>---
>Tim, Dave, Darrick: I didn't want to waste your time on patches 1-7, which are
>just internal to shmem, and do not affect this patch (which applies to v6.6-rc
>and linux-next as is): but want to run this by you.
>
> include/linux/percpu_counter.h | 23 +++++++++++++++
> lib/percpu_counter.c           | 53 ++++++++++++++++++++++++++++++++++
> mm/shmem.c                     | 10 +++----
> 3 files changed, 81 insertions(+), 5 deletions(-)
>
>diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
>index d01351b1526f..8cb7c071bd5c 100644
>--- a/include/linux/percpu_counter.h
>+++ b/include/linux/percpu_counter.h
>@@ -57,6 +57,8 @@ void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount,
> 			      s32 batch);
> s64 __percpu_counter_sum(struct percpu_counter *fbc);
> int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);
>+bool __percpu_counter_limited_add(struct percpu_counter *fbc, s64 limit,
>+				  s64 amount, s32 batch);
> void percpu_counter_sync(struct percpu_counter *fbc);
>
> static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
>@@ -69,6 +71,13 @@ static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
> 	percpu_counter_add_batch(fbc, amount, percpu_counter_batch);
> }
>
>+static inline bool
>+percpu_counter_limited_add(struct percpu_counter *fbc, s64 limit, s64 amount)
>+{
>+	return __percpu_counter_limited_add(fbc, limit, amount,
>+					    percpu_counter_batch);
>+}
>+
> /*
>  * With percpu_counter_add_local() and percpu_counter_sub_local(), counts
>  * are accumulated in local per cpu counter and not in fbc->count until
>@@ -185,6 +194,20 @@ percpu_counter_add(struct percpu_counter *fbc, s64 amount)
> 	local_irq_restore(flags);
> }
>
>+static inline bool
>+percpu_counter_limited_add(struct percpu_counter *fbc, s64 limit, s64 amount)
>+{
>+	unsigned long flags;
>+	s64 count;
>+
>+	local_irq_save(flags);
>+	count = fbc->count + amount;
>+	if (count <= limit)
>+		fbc->count = count;
>+	local_irq_restore(flags);
>+	return count <= limit;
>+}
>+
> /* non-SMP percpu_counter_add_local is the same with percpu_counter_add */
> static inline void
> percpu_counter_add_local(struct percpu_counter *fbc, s64 amount)
>diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
>index 9073430dc865..58a3392f471b 100644
>--- a/lib/percpu_counter.c
>+++ b/lib/percpu_counter.c
>@@ -278,6 +278,59 @@ int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
> }
> EXPORT_SYMBOL(__percpu_counter_compare);
>
>+/*
>+ * Compare counter, and add amount if the total is within limit.
>+ * Return true if amount was added, false if it would exceed limit.
>+ */
>+bool __percpu_counter_limited_add(struct percpu_counter *fbc,
>+				  s64 limit, s64 amount, s32 batch)
>+{
>+	s64 count;
>+	s64 unknown;
>+	unsigned long flags;
>+	bool good;
>+
>+	if (amount > limit)
>+		return false;
>+
>+	local_irq_save(flags);
>+	unknown = batch * num_online_cpus();
>+	count = __this_cpu_read(*fbc->counters);
>+
>+	/* Skip taking the lock when safe */
>+	if (abs(count + amount) <= batch &&
>+	    fbc->count + unknown <= limit) {
>+		this_cpu_add(*fbc->counters, amount);
>+		local_irq_restore(flags);
>+		return true;
>+	}
>+
>+	raw_spin_lock(&fbc->lock);
>+	count = fbc->count + amount;
>+

Perhaps we can fast-path the case where we are certain
to exceed the limit?

if (fbc->count + amount - unknown > limit)
	return false;
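
Something along these lines perhaps (untested sketch; the unlock and
irq-restore would still have to run before bailing out, and the "out"
label below is only illustrative):

	raw_spin_lock(&fbc->lock);
	count = fbc->count + amount;

	/* Certain to exceed limit, even if every other cpu is in deficit */
	if (count - unknown > limit) {
		good = false;
		goto out;
	}
	...
out:
	raw_spin_unlock(&fbc->lock);
	local_irq_restore(flags);
	return good;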
 
Tim

>+	/* Skip percpu_counter_sum() when safe */
>+	if (count + unknown > limit) {
>+		s32 *pcount;
>+		int cpu;
>+
>+		for_each_cpu_or(cpu, cpu_online_mask, cpu_dying_mask) {
>+			pcount = per_cpu_ptr(fbc->counters, cpu);
>+			count += *pcount;
>+		}
>+	}
>+
>+	good = count <= limit;
>+	if (good) {
>+		count = __this_cpu_read(*fbc->counters);
>+		fbc->count += count + amount;
>+		__this_cpu_sub(*fbc->counters, count);
>+	}
>+
>+	raw_spin_unlock(&fbc->lock);
>+	local_irq_restore(flags);
>+	return good;
>+}
>+
> static int __init percpu_counter_startup(void)
> {
> 	int ret;
>diff --git a/mm/shmem.c b/mm/shmem.c
>index 4f4ab26bc58a..7cb72c747954 100644
>--- a/mm/shmem.c
>+++ b/mm/shmem.c
>@@ -217,15 +217,15 @@ static int shmem_inode_acct_blocks(struct inode *inode, long pages)
>
> 	might_sleep();	/* when quotas */
> 	if (sbinfo->max_blocks) {
>-		if (percpu_counter_compare(&sbinfo->used_blocks,
>-					   sbinfo->max_blocks - pages) > 0)
>+		if (!percpu_counter_limited_add(&sbinfo->used_blocks,
>+						sbinfo->max_blocks, pages))
> 			goto unacct;
>
> 		err = dquot_alloc_block_nodirty(inode, pages);
>-		if (err)
>+		if (err) {
>+			percpu_counter_sub(&sbinfo->used_blocks, pages);
> 			goto unacct;
>-
>-		percpu_counter_add(&sbinfo->used_blocks, pages);
>+		}
> 	} else {
> 		err = dquot_alloc_block_nodirty(inode, pages);
> 		if (err)
>--
>2.35.3
