Message-Id: <20210414121226.2650-2-urezki@gmail.com>
Date: Wed, 14 Apr 2021 14:12:22 +0200
From: "Uladzislau Rezki (Sony)" <urezki@...il.com>
To: LKML <linux-kernel@...r.kernel.org>, RCU <rcu@...r.kernel.org>,
"Paul E . McKenney" <paulmck@...nel.org>
Cc: Michal Hocko <mhocko@...e.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Daniel Axtens <dja@...ens.net>,
Frederic Weisbecker <frederic@...nel.org>,
Neeraj Upadhyay <neeraju@...eaurora.org>,
Joel Fernandes <joel@...lfernandes.org>,
Peter Zijlstra <peterz@...radead.org>,
Thomas Gleixner <tglx@...utronix.de>,
"Theodore Y . Ts'o" <tytso@....edu>,
Sebastian Andrzej Siewior <bigeasy@...utronix.de>,
Uladzislau Rezki <urezki@...il.com>,
Oleksiy Avramchenko <oleksiy.avramchenko@...ymobile.com>
Subject: [PATCH 2/6] kvfree_rcu: use [READ/WRITE]_ONCE() macros to access nr_bkv_objs

nr_bkv_objs is the counter of objects in the page cache, and accessing
it currently requires holding the lock. Switch to the READ_ONCE() and
WRITE_ONCE() macros so that the counter can also be read locklessly
without tearing. The shrinker is one such lockless user.
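
Below is a minimal, self-contained sketch of the pattern (not the
tree.c code itself; the struct and function names are hypothetical and
chosen only for illustration): updates stay serialized by the lock but
are performed via WRITE_ONCE(), so a lockless reader such as a
shrinker's count callback can sample the counter with READ_ONCE():

	/*
	 * Illustrative sketch only -- struct obj_cache, cache_add_one()
	 * and cache_count() are made-up names, not kernel/rcu/tree.c code.
	 */
	#include <linux/spinlock.h>
	#include <linux/compiler.h>	/* READ_ONCE()/WRITE_ONCE() */

	struct obj_cache {
		raw_spinlock_t lock;
		unsigned long nr_objs;	/* plays the role of nr_bkv_objs */
	};

	/* Writer side: still serialized by ->lock, stores via WRITE_ONCE(). */
	static void cache_add_one(struct obj_cache *c)
	{
		unsigned long flags;

		raw_spin_lock_irqsave(&c->lock, flags);
		WRITE_ONCE(c->nr_objs, c->nr_objs + 1);
		raw_spin_unlock_irqrestore(&c->lock, flags);
	}

	/* Lockless reader, e.g. a shrinker's ->count_objects() callback. */
	static unsigned long cache_count(struct obj_cache *c)
	{
		return READ_ONCE(c->nr_objs);
	}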
Signed-off-by: Uladzislau Rezki (Sony) <urezki@...il.com>
---
kernel/rcu/tree.c | 18 ++++++------------
1 file changed, 6 insertions(+), 12 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 8b74edcd11d4..1b0289fa1cdd 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3220,10 +3220,10 @@ krc_this_cpu_unlock(struct kfree_rcu_cpu *krcp, unsigned long flags)
static inline struct kvfree_rcu_bulk_data *
get_cached_bnode(struct kfree_rcu_cpu *krcp)
{
- if (!krcp->nr_bkv_objs)
+ if (!READ_ONCE(krcp->nr_bkv_objs))
return NULL;
- krcp->nr_bkv_objs--;
+ WRITE_ONCE(krcp->nr_bkv_objs, krcp->nr_bkv_objs - 1);
return (struct kvfree_rcu_bulk_data *)
llist_del_first(&krcp->bkvcache);
}
@@ -3233,13 +3233,12 @@ put_cached_bnode(struct kfree_rcu_cpu *krcp,
struct kvfree_rcu_bulk_data *bnode)
{
// Check the limit.
- if (krcp->nr_bkv_objs >= rcu_min_cached_objs)
+ if (READ_ONCE(krcp->nr_bkv_objs) >= rcu_min_cached_objs)
return false;
llist_add((struct llist_node *) bnode, &krcp->bkvcache);
- krcp->nr_bkv_objs++;
+ WRITE_ONCE(krcp->nr_bkv_objs, krcp->nr_bkv_objs + 1);
return true;
-
}
static int
@@ -3251,7 +3250,7 @@ drain_page_cache(struct kfree_rcu_cpu *krcp)
raw_spin_lock_irqsave(&krcp->lock, flags);
page_list = llist_del_all(&krcp->bkvcache);
- krcp->nr_bkv_objs = 0;
+ WRITE_ONCE(krcp->nr_bkv_objs, 0);
raw_spin_unlock_irqrestore(&krcp->lock, flags);
llist_for_each_safe(pos, n, page_list) {
@@ -3655,18 +3654,13 @@ kfree_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
int cpu;
unsigned long count = 0;
- unsigned long flags;
/* Snapshot count of all CPUs */
for_each_possible_cpu(cpu) {
struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
count += READ_ONCE(krcp->count);
-
- raw_spin_lock_irqsave(&krcp->lock, flags);
- count += krcp->nr_bkv_objs;
- raw_spin_unlock_irqrestore(&krcp->lock, flags);
-
+ count += READ_ONCE(krcp->nr_bkv_objs);
atomic_set(&krcp->backoff_page_cache_fill, 1);
}
--
2.20.1