Date:	Tue, 31 May 2016 14:50:15 -0600
From:	Keith Busch <keith.busch@...el.com>
To:	Tejun Heo <tj@...nel.org>, linux-mm@...ck.org,
	LKML <linux-kernel@...r.kernel.org>
Cc:	Keith Busch <keith.busch@...el.com>
Subject: [PATCH] mm/swap: lru drain on memory reclaim workqueue

This creates a system memory reclaim workqueue and has lru_add_drain_all()
use this new workqueue. This allows memory reclaim work that invalidates
block devices to flush all LRU add caches without triggering the
check_flush_dependency() warning.
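
A minimal sketch (not part of the patch) of the situation described above;
demo_reclaim_wq, demo_drain_work and demo_drain_fn are hypothetical names.
A work item running on a WQ_MEM_RECLAIM workqueue ends up in
lru_add_drain_all() (for example via invalidate_bdev()); lru_add_drain_all()
queues per-CPU drain work and waits for it, and if that drain work sits on
a workqueue without WQ_MEM_RECLAIM, check_flush_dependency() warns:

#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_reclaim_wq;	/* hypothetical */
static struct work_struct demo_drain_work;

static void demo_drain_fn(struct work_struct *work)
{
	/*
	 * Runs on a WQ_MEM_RECLAIM workqueue.  Before this patch, the
	 * lru_add_drain_per_cpu items this waits on were queued on the
	 * plain system_wq, which lacks WQ_MEM_RECLAIM.
	 */
	lru_add_drain_all();
}

static int demo_setup(void)
{
	demo_reclaim_wq = alloc_workqueue("demo_reclaim",
					  WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
	if (!demo_reclaim_wq)
		return -ENOMEM;

	INIT_WORK(&demo_drain_work, demo_drain_fn);
	queue_work(demo_reclaim_wq, &demo_drain_work);
	return 0;
}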

Signed-off-by: Keith Busch <keith.busch@...el.com>
---
This is similar to a proposal from a few months ago:

  https://patchwork.ozlabs.org/patch/574623/

The difference from that patch is that this one adds a shared system
workqueue, so other users can queue memory reclaim work without having
to allocate a workqueue of their own.
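
A rough sketch of that contrast, assuming this patch is applied (my_wq,
my_reclaim_fn, my_reclaim_work and the example functions are hypothetical):

#include <linux/errno.h>
#include <linux/workqueue.h>

static void my_reclaim_fn(struct work_struct *work)
{
	/* reclaim-path work, e.g. invalidating caches */
}

static DECLARE_WORK(my_reclaim_work, my_reclaim_fn);	/* hypothetical */

/* Without a shared queue, each user allocates its own reclaim workqueue. */
static struct workqueue_struct *my_wq;

static int private_queue_example(void)
{
	my_wq = alloc_workqueue("my_reclaim", WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
	if (!my_wq)
		return -ENOMEM;
	queue_work(my_wq, &my_reclaim_work);
	return 0;
}

/* With this patch, the work can go straight onto the shared system_mem_wq. */
static void shared_queue_example(void)
{
	queue_work(system_mem_wq, &my_reclaim_work);
}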

I didn't see any follow-up on linux-mm about whether lru_add_drain_per_cpu
should be using a workqueue with WQ_MEM_RECLAIM set, so I'm sending a
similar patch, since the warning is frequently being triggered.
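
For reference, a simplified paraphrase (not the verbatim upstream code) of
the condition that check_flush_dependency() in kernel/workqueue.c warns
about; current_wq_worker() and struct worker are workqueue internals:

static void check_flush_dependency_sketch(struct workqueue_struct *target_wq)
{
	struct worker *worker = current_wq_worker();

	/* A WQ_MEM_RECLAIM target can always make forward progress. */
	if (target_wq->flags & WQ_MEM_RECLAIM)
		return;

	/*
	 * A reclaim-path worker waiting on a non-reclaim workqueue may
	 * block forever under memory pressure, so warn about it.
	 */
	WARN_ONCE(worker && (worker->current_pwq->wq->flags & WQ_MEM_RECLAIM),
		  "workqueue: WQ_MEM_RECLAIM %s is flushing !WQ_MEM_RECLAIM %s\n",
		  worker->current_pwq->wq->name, target_wq->name);
}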

 include/linux/workqueue.h | 1 +
 kernel/workqueue.c        | 5 ++++-
 mm/swap.c                 | 2 +-
 3 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index ca73c50..8c79e82 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -357,6 +357,7 @@ extern struct workqueue_struct *system_unbound_wq;
 extern struct workqueue_struct *system_freezable_wq;
 extern struct workqueue_struct *system_power_efficient_wq;
 extern struct workqueue_struct *system_freezable_power_efficient_wq;
+extern struct workqueue_struct *system_mem_wq;
 
 extern struct workqueue_struct *
 __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 5f5068e..7e9050a 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -341,6 +341,8 @@ struct workqueue_struct *system_long_wq __read_mostly;
 EXPORT_SYMBOL_GPL(system_long_wq);
 struct workqueue_struct *system_unbound_wq __read_mostly;
 EXPORT_SYMBOL_GPL(system_unbound_wq);
+struct workqueue_struct *system_mem_wq __read_mostly;
+EXPORT_SYMBOL(system_mem_wq);
 struct workqueue_struct *system_freezable_wq __read_mostly;
 EXPORT_SYMBOL_GPL(system_freezable_wq);
 struct workqueue_struct *system_power_efficient_wq __read_mostly;
@@ -5574,6 +5576,7 @@ static int __init init_workqueues(void)
 	system_long_wq = alloc_workqueue("events_long", 0, 0);
 	system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
 					    WQ_UNBOUND_MAX_ACTIVE);
+	system_mem_wq = alloc_workqueue("events_mem_unbound", WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
 	system_freezable_wq = alloc_workqueue("events_freezable",
 					      WQ_FREEZABLE, 0);
 	system_power_efficient_wq = alloc_workqueue("events_power_efficient",
@@ -5582,7 +5585,7 @@ static int __init init_workqueues(void)
 					      WQ_FREEZABLE | WQ_POWER_EFFICIENT,
 					      0);
 	BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq ||
-	       !system_unbound_wq || !system_freezable_wq ||
+	       !system_mem_wq || !system_unbound_wq || !system_freezable_wq ||
 	       !system_power_efficient_wq ||
 	       !system_freezable_power_efficient_wq);
 
diff --git a/mm/swap.c b/mm/swap.c
index 03aacbc..ade23de 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -685,7 +685,7 @@ void lru_add_drain_all(void)
 		    pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) ||
 		    need_activate_page_drain(cpu)) {
 			INIT_WORK(work, lru_add_drain_per_cpu);
-			schedule_work_on(cpu, work);
+			queue_work_on(cpu, system_mem_wq, work);
 			cpumask_set_cpu(cpu, &has_work);
 		}
 	}
-- 
2.7.2
