Message-Id: <1518109302-8239-1-git-send-email-claudio@evidence.eu.com>
Date: Thu, 8 Feb 2018 18:01:42 +0100
From: Claudio Scordino <claudio@...dence.eu.com>
To: Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...hat.com>
Cc: Claudio Scordino <claudio@...dence.eu.com>,
"Rafael J . Wysocki" <rafael.j.wysocki@...el.com>,
Patrick Bellasi <patrick.bellasi@....com>,
Dietmar Eggemann <dietmar.eggemann@....com>,
Morten Rasmussen <morten.rasmussen@....com>,
Juri Lelli <juri.lelli@...hat.com>,
Viresh Kumar <viresh.kumar@...aro.org>,
Vincent Guittot <vincent.guittot@...aro.org>,
Todd Kjos <tkjos@...roid.com>,
Joel Fernandes <joelaf@...gle.com>, linux-pm@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: [PATCH] cpufreq: schedutil: rate limits for SCHED_DEADLINE
When the SCHED_DEADLINE scheduling class increases the CPU utilization,
we should not wait for the rate limit, otherwise we may miss some deadlines.

Tests using rt-app on an Exynos5422 board have shown a reduction of about 10%
in deadline misses for tasks with short RT periods.
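
In essence, the patch takes a snapshot of the sugov_cpu state before the
update and lets sugov_should_update_freq() bypass the rate limit whenever
the DL utilization has grown since that snapshot. A minimal user-space
sketch of the decision logic follows (the simplified struct and helper name
are illustrative assumptions, not part of the patch; the actual check is in
the diff below):

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative stand-in for the DL utilization tracked per CPU. */
    struct dl_util_snap {
            uint64_t util_dl;
    };

    /*
     * Return true when SCHED_DEADLINE raised its utilization between the
     * snapshot and the current update, i.e. the rate limit should be
     * ignored and the frequency re-evaluated immediately.
     */
    static bool dl_util_increased(const struct dl_util_snap *before,
                                  const struct dl_util_snap *after)
    {
            return after->util_dl > before->util_dl;
    }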
The patch applies on top of the one recently proposed by Peter to drop the
SCHED_CPUFREQ_* flags.
Signed-off-by: Claudio Scordino <claudio@...dence.eu.com>
CC: Rafael J. Wysocki <rafael.j.wysocki@...el.com>
CC: Patrick Bellasi <patrick.bellasi@....com>
CC: Dietmar Eggemann <dietmar.eggemann@....com>
CC: Morten Rasmussen <morten.rasmussen@....com>
CC: Juri Lelli <juri.lelli@...hat.com>
CC: Viresh Kumar <viresh.kumar@...aro.org>
CC: Vincent Guittot <vincent.guittot@...aro.org>
CC: Todd Kjos <tkjos@...roid.com>
CC: Joel Fernandes <joelaf@...gle.com>
CC: linux-pm@...r.kernel.org
CC: linux-kernel@...r.kernel.org
---
kernel/sched/cpufreq_schedutil.c | 15 ++++++++++++---
1 file changed, 12 insertions(+), 3 deletions(-)
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index b0bd77d..d8dcba2 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -74,7 +74,10 @@ static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu);
/************************ Governor internals ***********************/
-static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
+static bool sugov_should_update_freq(struct sugov_policy *sg_policy,
+ u64 time,
+ struct sugov_cpu *sg_cpu_old,
+ struct sugov_cpu *sg_cpu_new)
{
s64 delta_ns;
@@ -111,6 +114,10 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
return true;
}
+ /* Ignore rate limit when DL increased utilization. */
+ if (sg_cpu_new->util_dl > sg_cpu_old->util_dl)
+ return true;
+
delta_ns = time - sg_policy->last_freq_update_time;
return delta_ns >= sg_policy->freq_update_delay_ns;
}
@@ -271,6 +278,7 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
unsigned int flags)
{
struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
+ struct sugov_cpu sg_cpu_old = *sg_cpu;
struct sugov_policy *sg_policy = sg_cpu->sg_policy;
unsigned long util, max;
unsigned int next_f;
@@ -279,7 +287,7 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
sugov_set_iowait_boost(sg_cpu, time, flags);
sg_cpu->last_update = time;
- if (!sugov_should_update_freq(sg_policy, time))
+ if (!sugov_should_update_freq(sg_policy, time, &sg_cpu_old, sg_cpu))
return;
busy = sugov_cpu_is_busy(sg_cpu);
@@ -350,6 +358,7 @@ static void sugov_update_shared(struct update_util_data *hook, u64 time,
unsigned int flags)
{
struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
+ struct sugov_cpu sg_cpu_old = *sg_cpu;
struct sugov_policy *sg_policy = sg_cpu->sg_policy;
unsigned int next_f;
@@ -359,7 +368,7 @@ static void sugov_update_shared(struct update_util_data *hook, u64 time,
sugov_set_iowait_boost(sg_cpu, time, flags);
sg_cpu->last_update = time;
- if (sugov_should_update_freq(sg_policy, time)) {
+ if (sugov_should_update_freq(sg_policy, time, &sg_cpu_old, sg_cpu)) {
next_f = sugov_next_freq_shared(sg_cpu, time);
sugov_update_commit(sg_policy, time, next_f);
}
--
2.7.4