Message-Id: <20210207124432.10143-1-qiang.zhang@windriver.com>
Date: Sun, 7 Feb 2021 20:44:32 +0800
From: qiang.zhang@...driver.com
To: urezki@...il.com, paulmck@...nel.org, joel@...lfernandes.org
Cc: rcu@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: [PATCH v4] kvfree_rcu: Release page cache under memory pressure
From: Zqiang <qiang.zhang@...driver.com>
Add an operation that frees each per-CPU krcp's existing page cache
when the system is under memory pressure.
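
The freeing path detaches the whole cache under krcp->lock and
releases the pages only after the lock has been dropped, keeping the
lock hold time short. A condensed sketch of the helper this patch
adds (free_krc_page_cache() in the diff below), with the design
choice called out in comments:

	static int free_krc_page_cache(struct kfree_rcu_cpu *krcp)
	{
		struct llist_node *page_list, *pos, *n;
		unsigned long flags;
		int freed = 0;

		/* Detach the whole cache in O(1) under the lock. */
		raw_spin_lock_irqsave(&krcp->lock, flags);
		page_list = llist_del_all(&krcp->bkvcache);
		krcp->nr_bkv_objs = 0;
		raw_spin_unlock_irqrestore(&krcp->lock, flags);

		/* Release the pages with the lock already dropped. */
		llist_for_each_safe(pos, n, page_list) {
			free_page((unsigned long)pos);
			freed++;
		}

		return freed;
	}
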
Signed-off-by: Zqiang <qiang.zhang@...driver.com>
Co-developed-by: Uladzislau Rezki (Sony) <urezki@...il.com>
---
v1->v2->v3->v4:

During testing under a low-memory condition, the page shrinker is
pretty active: its callback drains the page cache, whereas the
kvfree_rcu() path refills it right away, making a kind of vicious
circle. Following Uladzislau Rezki's suggestion, avoid this by
backing off: once the shrinker has signalled memory pressure, the
next page-cache fill is scheduled as a delayed work with a HZ delay,
which is easy to do now that page_cache_work is a delayed_work.
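
The resulting flow, condensed (names as in the diff below; this is a
sketch of the intent, not the complete change):

	/*
	 * kfree_rcu_shrink_count() sets a one-shot hint:
	 *	WRITE_ONCE(backoff_page_cache_fill, true);
	 * and the next cache refill then defers itself by ~1s:
	 */
	static void run_page_cache_worker(struct kfree_rcu_cpu *krcp)
	{
		if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
		    !atomic_xchg(&krcp->work_in_progress, 1)) {
			if (READ_ONCE(backoff_page_cache_fill)) {
				/* Pressure was seen: refill in HZ jiffies. */
				queue_delayed_work(system_highpri_wq,
						   &krcp->page_cache_work, HZ);
				WRITE_ONCE(backoff_page_cache_fill, false);
			} else {
				/* No pressure: kick the refill immediately. */
				hrtimer_init(&krcp->hrtimer, CLOCK_MONOTONIC,
					     HRTIMER_MODE_REL);
				krcp->hrtimer.function = schedule_page_work_fn;
				hrtimer_start(&krcp->hrtimer, 0,
					      HRTIMER_MODE_REL);
			}
		}
	}
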
kernel/rcu/tree.c | 50 +++++++++++++++++++++++++++++++++++++++--------
1 file changed, 42 insertions(+), 8 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index c1ae1e52f638..f3b772eef468 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3139,7 +3139,7 @@ struct kfree_rcu_cpu {
bool initialized;
int count;
- struct work_struct page_cache_work;
+ struct delayed_work page_cache_work;
atomic_t work_in_progress;
struct hrtimer hrtimer;
@@ -3395,7 +3395,7 @@ schedule_page_work_fn(struct hrtimer *t)
struct kfree_rcu_cpu *krcp =
container_of(t, struct kfree_rcu_cpu, hrtimer);
- queue_work(system_highpri_wq, &krcp->page_cache_work);
+ queue_delayed_work(system_highpri_wq, &krcp->page_cache_work, 0);
return HRTIMER_NORESTART;
}
@@ -3404,7 +3404,7 @@ static void fill_page_cache_func(struct work_struct *work)
struct kvfree_rcu_bulk_data *bnode;
struct kfree_rcu_cpu *krcp =
container_of(work, struct kfree_rcu_cpu,
- page_cache_work);
+ page_cache_work.work);
unsigned long flags;
bool pushed;
int i;
@@ -3428,15 +3428,22 @@ static void fill_page_cache_func(struct work_struct *work)
atomic_set(&krcp->work_in_progress, 0);
}
+static bool backoff_page_cache_fill;
+
static void
run_page_cache_worker(struct kfree_rcu_cpu *krcp)
{
if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
!atomic_xchg(&krcp->work_in_progress, 1)) {
- hrtimer_init(&krcp->hrtimer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- krcp->hrtimer.function = schedule_page_work_fn;
- hrtimer_start(&krcp->hrtimer, 0, HRTIMER_MODE_REL);
+ if (READ_ONCE(backoff_page_cache_fill)) {
+ queue_delayed_work(system_highpri_wq, &krcp->page_cache_work, HZ);
+ WRITE_ONCE(backoff_page_cache_fill, false);
+ } else {
+ hrtimer_init(&krcp->hrtimer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ krcp->hrtimer.function = schedule_page_work_fn;
+ hrtimer_start(&krcp->hrtimer, 0, HRTIMER_MODE_REL);
+ }
}
}
@@ -3571,19 +3578,44 @@ void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
}
EXPORT_SYMBOL_GPL(kvfree_call_rcu);
+static int free_krc_page_cache(struct kfree_rcu_cpu *krcp)
+{
+ unsigned long flags;
+ struct llist_node *page_list, *pos, *n;
+ int freed = 0;
+
+ raw_spin_lock_irqsave(&krcp->lock, flags);
+ page_list = llist_del_all(&krcp->bkvcache);
+ krcp->nr_bkv_objs = 0;
+ raw_spin_unlock_irqrestore(&krcp->lock, flags);
+
+ llist_for_each_safe(pos, n, page_list) {
+ free_page((unsigned long)pos);
+ freed++;
+ }
+
+ return freed;
+}
+
static unsigned long
kfree_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
int cpu;
unsigned long count = 0;
+ unsigned long flags;
/* Snapshot count of all CPUs */
for_each_possible_cpu(cpu) {
struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
count += READ_ONCE(krcp->count);
+
+ raw_spin_lock_irqsave(&krcp->lock, flags);
+ count += krcp->nr_bkv_objs;
+ raw_spin_unlock_irqrestore(&krcp->lock, flags);
}
+ WRITE_ONCE(backoff_page_cache_fill, true);
return count;
}
@@ -3598,6 +3630,8 @@ kfree_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
count = krcp->count;
+ count += free_krc_page_cache(krcp);
+
raw_spin_lock_irqsave(&krcp->lock, flags);
if (krcp->monitor_todo)
kfree_rcu_drain_unlock(krcp, flags);
@@ -4574,7 +4608,7 @@ static void __init kfree_rcu_batch_init(void)
}
INIT_DELAYED_WORK(&krcp->monitor_work, kfree_rcu_monitor);
- INIT_WORK(&krcp->page_cache_work, fill_page_cache_func);
+ INIT_DELAYED_WORK(&krcp->page_cache_work, fill_page_cache_func);
krcp->initialized = true;
}
if (register_shrinker(&kfree_rcu_shrinker))
--
2.17.1
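
For reference, the shrinker contract this patch relies on: the MM
core calls ->count_objects() to estimate how much a shrinker could
reclaim, then ->scan_objects() to actually reclaim, which is why the
patch adds nr_bkv_objs to the count path and frees the page cache in
the scan path. A hypothetical minimal shrinker is sketched below; the
demo_* names are illustrative only and not part of the patch:

	static bool demo_free_one_object(void);	/* hypothetical helper */
	static unsigned long demo_nr_cached;	/* illustrative counter */

	static unsigned long
	demo_count_objects(struct shrinker *shrink, struct shrink_control *sc)
	{
		/* Estimate only; 0 tells the MM core there is nothing to do. */
		return READ_ONCE(demo_nr_cached);
	}

	static unsigned long
	demo_scan_objects(struct shrinker *shrink, struct shrink_control *sc)
	{
		unsigned long freed = 0;

		/* Free up to sc->nr_to_scan objects, return the number freed. */
		while (freed < sc->nr_to_scan && demo_free_one_object())
			freed++;

		return freed;
	}

	static struct shrinker demo_shrinker = {
		.count_objects	= demo_count_objects,
		.scan_objects	= demo_scan_objects,
		.seeks		= DEFAULT_SEEKS,
	};

	/* register_shrinker(&demo_shrinker) at init time, as the last
	 * hunk's context shows for kfree_rcu_shrinker. */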