Date:   Thu,  9 Mar 2017 17:15:14 +0530
From:   Viresh Kumar <viresh.kumar@...aro.org>
To:     Rafael Wysocki <rjw@...ysocki.net>, Ingo Molnar <mingo@...hat.com>,
        Peter Zijlstra <peterz@...radead.org>
Cc:     linaro-kernel@...ts.linaro.org, linux-pm@...r.kernel.org,
        linux-kernel@...r.kernel.org,
        Vincent Guittot <vincent.guittot@...aro.org>,
        smuckle.linux@...il.com, juri.lelli@....com,
        Morten.Rasmussen@....com, patrick.bellasi@....com,
        eas-dev@...ts.linaro.org, Viresh Kumar <viresh.kumar@...aro.org>
Subject: [RFC 4/9] sched: cpufreq: extend irq work to support fast switches

From: Steve Muckle <smuckle.linux@...il.com>

In preparation for schedutil receiving sched cpufreq callbacks for
remote CPUs, extend the irq work in schedutil to support policies with
fast switching enabled in addition to policies using the slow path.

Signed-off-by: Steve Muckle <smuckle.linux@...il.com>
[ vk: minor code updates ]
Signed-off-by: Viresh Kumar <viresh.kumar@...aro.org>
---
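[ Editor's note: the snippet below is a minimal, compilable userspace model
  of the dispatch this patch adds to sugov_irq_work(). The names
  driver_fast_switch() and queue_slow_path_work() are illustrative stand-ins
  for cpufreq_driver_fast_switch() and the kthread wakeup, not the real
  kernel signatures, and the tracepoint is omitted. ]

#include <stdbool.h>
#include <stdio.h>

#define FREQ_ENTRY_INVALID 0	/* stand-in for CPUFREQ_ENTRY_INVALID */

struct policy {
	bool fast_switch_enabled;
	unsigned int cur;	/* current frequency in kHz */
};

struct sg_policy {
	struct policy *policy;
	unsigned int next_freq;
	bool work_in_progress;
};

/* Stand-in for cpufreq_driver_fast_switch(): returns the frequency
 * actually set, or FREQ_ENTRY_INVALID if the request was rejected. */
static unsigned int driver_fast_switch(struct policy *p, unsigned int freq)
{
	(void)p;
	return freq;		/* assume the driver accepts the request */
}

static void fast_switch(struct policy *p, unsigned int next_freq)
{
	next_freq = driver_fast_switch(p, next_freq);
	if (next_freq == FREQ_ENTRY_INVALID)
		return;
	p->cur = next_freq;
}

/* Stand-in for queuing sugov_work() on the slow-path kthread. */
static void queue_slow_path_work(struct sg_policy *sg)
{
	printf("deferred %u kHz to the kthread\n", sg->next_freq);
}

/* The shape of sugov_irq_work() after this patch: fast-switch
 * policies complete right here; everything else still defers to
 * the kthread. */
static void irq_work_handler(struct sg_policy *sg)
{
	if (sg->policy->fast_switch_enabled) {
		fast_switch(sg->policy, sg->next_freq);
		sg->work_in_progress = false;
		return;
	}
	queue_slow_path_work(sg);
}

int main(void)
{
	struct policy p = { .fast_switch_enabled = true, .cur = 800000 };
	struct sg_policy sg = {
		.policy = &p, .next_freq = 1200000, .work_in_progress = true,
	};

	irq_work_handler(&sg);
	printf("cur=%u kHz work_in_progress=%d\n", p.cur,
	       sg.work_in_progress);
	return 0;
}

[ The point of the early return is that fast-switch policies finish
  entirely inside the irq work handler, so work_in_progress must be
  cleared there rather than at the end of sugov_work(). ]
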
 kernel/sched/cpufreq_schedutil.c | 28 ++++++++++++++++++++--------
 1 file changed, 20 insertions(+), 8 deletions(-)

diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index f5ffe241812e..a418544c51b1 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -88,6 +88,17 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
 	return delta_ns >= sg_policy->freq_update_delay_ns;
 }
 
+static void sugov_fast_switch(struct cpufreq_policy *policy,
+			      unsigned int next_freq)
+{
+	next_freq = cpufreq_driver_fast_switch(policy, next_freq);
+	if (next_freq == CPUFREQ_ENTRY_INVALID)
+		return;
+
+	policy->cur = next_freq;
+	trace_cpu_frequency(next_freq, smp_processor_id());
+}
+
 static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
 				unsigned int next_freq)
 {
@@ -100,12 +111,7 @@ static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
 		}
 		sg_policy->next_freq = next_freq;
 		sg_policy->last_freq_update_time = time;
-		next_freq = cpufreq_driver_fast_switch(policy, next_freq);
-		if (next_freq == CPUFREQ_ENTRY_INVALID)
-			return;
-
-		policy->cur = next_freq;
-		trace_cpu_frequency(next_freq, smp_processor_id());
+		sugov_fast_switch(policy, next_freq);
 	} else if (sg_policy->next_freq != next_freq) {
 		sg_policy->next_freq = next_freq;
 		sg_policy->last_freq_update_time = time;
@@ -303,9 +309,15 @@ static void sugov_work(struct kthread_work *work)
 
 static void sugov_irq_work(struct irq_work *irq_work)
 {
-	struct sugov_policy *sg_policy;
-	struct sugov_policy *sg_policy;
+	struct sugov_policy *sg_policy =
+		container_of(irq_work, struct sugov_policy, irq_work);
+	struct cpufreq_policy *policy = sg_policy->policy;
 
-	sg_policy = container_of(irq_work, struct sugov_policy, irq_work);
+	if (policy->fast_switch_enabled) {
+		sugov_fast_switch(policy, sg_policy->next_freq);
+		sg_policy->work_in_progress = false;
+		return;
+	}
 
 	/*
 	 * For RT and deadline tasks, the schedutil governor shoots the
-- 
2.7.1.410.g6faf27b
