Date:	Tue, 25 Sep 2012 16:06:08 +0530
From:	Viresh Kumar <viresh.kumar@...aro.org>
To:	linux-kernel@...r.kernel.org
Cc:	pjt@...gle.com, paul.mckenney@...aro.org, tglx@...utronix.de,
	tj@...nel.org, suresh.b.siddha@...el.com, venki@...gle.com,
	mingo@...hat.com, peterz@...radead.org, robin.randhawa@....com,
	Steve.Bannister@....com, Arvind.Chauhan@....com,
	amit.kucheria@...aro.org, vincent.guittot@...aro.org,
	linaro-dev@...ts.linaro.org, patches@...aro.org,
	Viresh Kumar <viresh.kumar@...aro.org>
Subject: [PATCH 3/3] workqueue: Schedule work on non-idle cpu instead of current one

The workqueue layer queues work on the current CPU if the caller hasn't
passed a preferred CPU. This may wake up an idle CPU, which is not actually
required.

Such work can be processed by any CPU, so we should select a non-idle CPU
instead. This patch adds support in the workqueue framework for getting a
preferred CPU from the scheduler rather than using the current CPU.

Signed-off-by: Viresh Kumar <viresh.kumar@...aro.org>
---
 arch/arm/Kconfig   | 11 +++++++++++
 kernel/workqueue.c | 25 ++++++++++++++++++-------
 2 files changed, 29 insertions(+), 7 deletions(-)
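
For illustration (not part of the patch): the caller-visible difference in
queue_work(), assuming the sched_select_cpu() helper introduced earlier in
this series. Its exact semantics -- returning a non-idle CPU for the given
sched-domain flags, falling back to the local CPU when every CPU is busy --
are an assumption inferred from how it is used below.

	/* Hypothetical sketch; wq and work stand for a caller's
	 * workqueue and work item.
	 */

	/* Before: the work is queued on whichever CPU the caller
	 * happens to be running on.
	 */
	ret = queue_work_on(get_cpu(), wq, work);
	put_cpu();

	/* After: the scheduler proposes a non-idle CPU, so an idle CPU
	 * need not be woken just to run this work.  Preemption is
	 * disabled across the selection and the enqueue so the chosen
	 * CPU number remains valid while the work is queued.
	 */
	preempt_disable();
	ret = queue_work_on(sched_select_cpu(SD_NUMA, -1), wq, work);
	preempt_enable();

The diff below applies the same substitution everywhere the workqueue core
previously used smp_processor_id()/raw_smp_processor_id() to pick a CPU.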

diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 5944511..da17bd0 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1594,6 +1594,17 @@ config HMP_SLOW_CPU_MASK
 	  Specify the cpuids of the slow CPUs in the system as a list string,
 	  e.g. cpuid 0+1 should be specified as 0-1.
 
+config MIGRATE_WQ
+	bool "(EXPERIMENTAL) Migrate Workqueues to non-idle cpu"
+	depends on SMP && EXPERIMENTAL
+	help
+	  The workqueue layer queues work on the current CPU if the caller
+	  hasn't passed a preferred CPU. This may wake up an idle CPU, which
+	  is not actually required, since the work can be processed by any
+	  CPU. With this option enabled, the workqueue framework asks the
+	  scheduler for a preferred non-idle CPU instead of using the
+	  current one.
+
 config HAVE_ARM_SCU
 	bool
 	help
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 692a55b..fd8df4a 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -456,6 +456,16 @@ static inline void debug_work_activate(struct work_struct *work) { }
 static inline void debug_work_deactivate(struct work_struct *work) { }
 #endif
 
+/* Pick a non-idle CPU to run a work item on, instead of the current CPU */
+#ifdef CONFIG_MIGRATE_WQ
+static int wq_select_cpu(void)
+{
+	return sched_select_cpu(SD_NUMA, -1);
+}
+#else
+#define wq_select_cpu()		smp_processor_id()
+#endif
+
 /* Serializes the accesses to the list of workqueues. */
 static DEFINE_SPINLOCK(workqueue_lock);
 static LIST_HEAD(workqueues);
@@ -995,7 +1005,7 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 		struct global_cwq *last_gcwq;
 
 		if (unlikely(cpu == WORK_CPU_UNBOUND))
-			cpu = raw_smp_processor_id();
+			cpu = wq_select_cpu();
 
 		/*
 		 * It's multi cpu.  If @wq is non-reentrant and @work
@@ -1066,8 +1076,9 @@ int queue_work(struct workqueue_struct *wq, struct work_struct *work)
 {
 	int ret;
 
-	ret = queue_work_on(get_cpu(), wq, work);
-	put_cpu();
+	preempt_disable();
+	ret = queue_work_on(wq_select_cpu(), wq, work);
+	preempt_enable();
 
 	return ret;
 }
@@ -1102,7 +1113,7 @@ static void delayed_work_timer_fn(unsigned long __data)
 	struct delayed_work *dwork = (struct delayed_work *)__data;
 	struct cpu_workqueue_struct *cwq = get_work_cwq(&dwork->work);
 
-	__queue_work(smp_processor_id(), cwq->wq, &dwork->work);
+	__queue_work(wq_select_cpu(), cwq->wq, &dwork->work);
 }
 
 /**
@@ -1158,7 +1169,7 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 			if (gcwq && gcwq->cpu != WORK_CPU_UNBOUND)
 				lcpu = gcwq->cpu;
 			else
-				lcpu = raw_smp_processor_id();
+				lcpu = wq_select_cpu();
 		} else
 			lcpu = WORK_CPU_UNBOUND;
 
@@ -2823,8 +2834,8 @@ EXPORT_SYMBOL_GPL(cancel_work_sync);
 static inline void __flush_delayed_work(struct delayed_work *dwork)
 {
 	if (del_timer_sync(&dwork->timer))
-		__queue_work(raw_smp_processor_id(),
-			     get_work_cwq(&dwork->work)->wq, &dwork->work);
+		__queue_work(wq_select_cpu(), get_work_cwq(&dwork->work)->wq,
+			     &dwork->work);
 }
 
 /**
-- 
1.7.12.rc2.18.g61b472e

