Message-Id: <20210414121226.2650-3-urezki@gmail.com>
Date: Wed, 14 Apr 2021 14:12:23 +0200
From: "Uladzislau Rezki (Sony)" <urezki@...il.com>
To: LKML <linux-kernel@...r.kernel.org>, RCU <rcu@...r.kernel.org>,
"Paul E . McKenney" <paulmck@...nel.org>
Cc: Michal Hocko <mhocko@...e.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Daniel Axtens <dja@...ens.net>,
Frederic Weisbecker <frederic@...nel.org>,
Neeraj Upadhyay <neeraju@...eaurora.org>,
Joel Fernandes <joel@...lfernandes.org>,
Peter Zijlstra <peterz@...radead.org>,
Thomas Gleixner <tglx@...utronix.de>,
"Theodore Y . Ts'o" <tytso@....edu>,
Sebastian Andrzej Siewior <bigeasy@...utronix.de>,
Uladzislau Rezki <urezki@...il.com>,
Oleksiy Avramchenko <oleksiy.avramchenko@...ymobile.com>
Subject: [PATCH 3/6] kvfree_rcu: introduce "flags" variable
We have a few extra variables within the kfree_rcu_cpu structure
that are control variables and behave as plain booleans. Pack them
instead into a single "flags" variable and define bit positions,
each of which represents one boolean state.

This reduces the size of the per-cpu kfree_rcu_cpu structure. The
"flags" variable is accessed with atomic bit operations.
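
For illustration only, the idea can be sketched as a standalone C
program: several boolean states are packed into one word and flipped
with atomic read-modify-write operations. The sketch below uses C11
stdatomic and made-up demo_* names; the patch itself uses the kernel's
set_bit()/clear_bit()/test_bit()/test_and_set_bit() helpers on an
unsigned long instead.

/*
 * Userspace sketch of packing boolean state into a single atomically
 * updated "flags" word. Plain C11, not kernel code.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define DEMO_INITIALIZED	(1UL << 0)
#define DEMO_CACHE_WORK_RUN	(1UL << 1)
#define DEMO_DELAY_CACHE_FILL	(1UL << 2)
#define DEMO_MONITOR_TODO	(1UL << 3)

struct demo_cpu {
	atomic_ulong flags;	/* replaces several separate booleans */
};

/* Atomically set a flag; return true if it was already set. */
static bool demo_test_and_set(struct demo_cpu *d, unsigned long mask)
{
	return atomic_fetch_or(&d->flags, mask) & mask;
}

/* Atomically clear a flag. */
static void demo_clear(struct demo_cpu *d, unsigned long mask)
{
	atomic_fetch_and(&d->flags, ~mask);
}

/* Read a flag without modifying it. */
static bool demo_test(struct demo_cpu *d, unsigned long mask)
{
	return atomic_load(&d->flags) & mask;
}

int main(void)
{
	struct demo_cpu d = { .flags = 0 };

	/* Only the first caller to set the bit schedules the work. */
	if (!demo_test_and_set(&d, DEMO_MONITOR_TODO))
		printf("monitor work would be scheduled\n");

	demo_clear(&d, DEMO_MONITOR_TODO);
	printf("monitor todo is now: %d\n", demo_test(&d, DEMO_MONITOR_TODO));
	return 0;
}

The kernel bitops provide the same semantics on an unsigned long field,
which is why one "flags" member can replace the separate booleans and
atomics.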
Signed-off-by: Uladzislau Rezki (Sony) <urezki@...il.com>
---
kernel/rcu/tree.c | 61 ++++++++++++++++++++++++++++-------------------
1 file changed, 36 insertions(+), 25 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 1b0289fa1cdd..31ee820c3d9e 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3139,6 +3139,24 @@ struct kfree_rcu_cpu_work {
struct kfree_rcu_cpu *krcp;
};
+// The per-cpu kfree_rcu_cpu structure has been initialized.
+// Set only once, when the system is up and running.
+#define KRC_INITIALIZED 0
+
+// Indicates that page_cache_work has been queued or is
+// running. The bit is cleared when the worker function
+// exits.
+#define KRC_CACHE_WORK_RUN 1
+
+// The page shrinker can ask to free extra cached pages so
+// they become available for other needs in the system. That
+// usually happens under low-memory conditions, in which case
+// hold off on filling the page cache for a while.
+#define KRC_DELAY_CACHE_FILL 2
+
+// Tracks whether a "monitor_work" delayed work is pending.
+#define KRC_MONITOR_TODO 3
+
/**
* struct kfree_rcu_cpu - batch up kfree_rcu() requests for RCU grace period
* @head: List of kfree_rcu() objects not yet waiting for a grace period
@@ -3146,17 +3164,14 @@ struct kfree_rcu_cpu_work {
* @krw_arr: Array of batches of kfree_rcu() objects waiting for a grace period
* @lock: Synchronize access to this structure
* @monitor_work: Promote @head to @head_free after KFREE_DRAIN_JIFFIES
- * @monitor_todo: Tracks whether a @monitor_work delayed work is pending
- * @initialized: The @rcu_work fields have been initialized
* @count: Number of objects for which GP not started
+ * @flags: KRC_* state bits, manipulated with atomic bit operations
* @bkvcache:
* A simple cache list that contains objects for reuse purpose.
* In order to save some per-cpu space the list is singular.
* Even though it is lockless an access has to be protected by the
* per-cpu lock.
* @page_cache_work: A work to refill the cache when it is empty
- * @backoff_page_cache_fill: Delay a cache filling
- * @work_in_progress: Indicates that page_cache_work is running
* @hrtimer: A hrtimer for scheduling a page_cache_work
* @nr_bkv_objs: number of allocated objects at @bkvcache.
*
@@ -3171,13 +3186,10 @@ struct kfree_rcu_cpu {
struct kfree_rcu_cpu_work krw_arr[KFREE_N_BATCHES];
raw_spinlock_t lock;
struct delayed_work monitor_work;
- bool monitor_todo;
- bool initialized;
int count;
+ unsigned long flags;
struct delayed_work page_cache_work;
- atomic_t backoff_page_cache_fill;
- atomic_t work_in_progress;
struct hrtimer hrtimer;
struct llist_head bkvcache;
@@ -3415,7 +3427,8 @@ static inline void kfree_rcu_drain_unlock(struct kfree_rcu_cpu *krcp,
unsigned long flags)
{
// Attempt to start a new batch.
- krcp->monitor_todo = false;
+ clear_bit(KRC_MONITOR_TODO, &krcp->flags);
+
if (queue_kfree_rcu_work(krcp)) {
// Success! Our job is done here.
raw_spin_unlock_irqrestore(&krcp->lock, flags);
@@ -3423,7 +3436,7 @@ static inline void kfree_rcu_drain_unlock(struct kfree_rcu_cpu *krcp,
}
// Previous RCU batch still in progress, try again later.
- krcp->monitor_todo = true;
+ set_bit(KRC_MONITOR_TODO, &krcp->flags);
schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
raw_spin_unlock_irqrestore(&krcp->lock, flags);
}
@@ -3439,7 +3452,7 @@ static void kfree_rcu_monitor(struct work_struct *work)
monitor_work.work);
raw_spin_lock_irqsave(&krcp->lock, flags);
- if (krcp->monitor_todo)
+ if (test_bit(KRC_MONITOR_TODO, &krcp->flags))
kfree_rcu_drain_unlock(krcp, flags);
else
raw_spin_unlock_irqrestore(&krcp->lock, flags);
@@ -3466,7 +3479,7 @@ static void fill_page_cache_func(struct work_struct *work)
bool pushed;
int i;
- nr_pages = atomic_read(&krcp->backoff_page_cache_fill) ?
+ nr_pages = test_bit(KRC_DELAY_CACHE_FILL, &krcp->flags) ?
1 : rcu_min_cached_objs;
for (i = 0; i < nr_pages; i++) {
@@ -3485,16 +3498,16 @@ static void fill_page_cache_func(struct work_struct *work)
}
}
- atomic_set(&krcp->work_in_progress, 0);
- atomic_set(&krcp->backoff_page_cache_fill, 0);
+ clear_bit(KRC_CACHE_WORK_RUN, &krcp->flags);
+ clear_bit(KRC_DELAY_CACHE_FILL, &krcp->flags);
}
static void
run_page_cache_worker(struct kfree_rcu_cpu *krcp)
{
if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
- !atomic_xchg(&krcp->work_in_progress, 1)) {
- if (atomic_read(&krcp->backoff_page_cache_fill)) {
+ !test_and_set_bit(KRC_CACHE_WORK_RUN, &krcp->flags)) {
+ if (test_bit(KRC_DELAY_CACHE_FILL, &krcp->flags)) {
queue_delayed_work(system_wq,
&krcp->page_cache_work,
msecs_to_jiffies(rcu_delay_page_cache_fill_msec));
@@ -3520,7 +3533,7 @@ add_ptr_to_bulk_krc_lock(struct kfree_rcu_cpu **krcp,
int idx;
*krcp = krc_this_cpu_lock(flags);
- if (unlikely(!(*krcp)->initialized))
+ if (unlikely(!test_bit(KRC_INITIALIZED, &(*krcp)->flags)))
return false;
idx = !!is_vmalloc_addr(ptr);
@@ -3628,10 +3641,8 @@ void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
// Set timer to drain after KFREE_DRAIN_JIFFIES.
if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
- !krcp->monitor_todo) {
- krcp->monitor_todo = true;
+ !test_and_set_bit(KRC_MONITOR_TODO, &krcp->flags))
schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
- }
unlock_return:
krc_this_cpu_unlock(krcp, flags);
@@ -3661,7 +3672,7 @@ kfree_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
count += READ_ONCE(krcp->count);
count += READ_ONCE(krcp->nr_bkv_objs);
- atomic_set(&krcp->backoff_page_cache_fill, 1);
+ set_bit(KRC_DELAY_CACHE_FILL, &krcp->flags);
}
return count;
@@ -3681,7 +3692,7 @@ kfree_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
count += drain_page_cache(krcp);
raw_spin_lock_irqsave(&krcp->lock, flags);
- if (krcp->monitor_todo)
+ if (test_bit(KRC_MONITOR_TODO, &krcp->flags))
kfree_rcu_drain_unlock(krcp, flags);
else
raw_spin_unlock_irqrestore(&krcp->lock, flags);
@@ -3712,11 +3723,11 @@ void __init kfree_rcu_scheduler_running(void)
struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
raw_spin_lock_irqsave(&krcp->lock, flags);
- if (!krcp->head || krcp->monitor_todo) {
+ if (!krcp->head || test_and_set_bit(KRC_MONITOR_TODO, &krcp->flags)) {
raw_spin_unlock_irqrestore(&krcp->lock, flags);
continue;
}
- krcp->monitor_todo = true;
+
schedule_delayed_work_on(cpu, &krcp->monitor_work,
KFREE_DRAIN_JIFFIES);
raw_spin_unlock_irqrestore(&krcp->lock, flags);
@@ -4655,7 +4666,7 @@ static void __init kfree_rcu_batch_init(void)
INIT_DELAYED_WORK(&krcp->monitor_work, kfree_rcu_monitor);
INIT_DELAYED_WORK(&krcp->page_cache_work, fill_page_cache_func);
- krcp->initialized = true;
+ set_bit(KRC_INITIALIZED, &krcp->flags);
}
if (register_shrinker(&kfree_rcu_shrinker))
pr_err("Failed to register kfree_rcu() shrinker!\n");
--
2.20.1