lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite for Android: free password hash cracker in your pocket
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <1435362824-26734-5-git-send-email-mturquette@linaro.org>
Date:	Fri, 26 Jun 2015 16:53:44 -0700
From:	Michael Turquette <mturquette@...libre.com>
To:	peterz@...radead.org, mingo@...nel.org
Cc:	linux-kernel@...r.kernel.org, preeti@...ux.vnet.ibm.com,
	Morten.Rasmussen@....com, riel@...hat.com, efault@....de,
	nicolas.pitre@...aro.org, daniel.lezcano@...aro.org,
	dietmar.eggemann@....com, vincent.guittot@...aro.org,
	amit.kucheria@...aro.org, juri.lelli@....com, rjw@...ysocki.net,
	viresh.kumar@...aro.org, ashwin.chaugule@...aro.org,
	alex.shi@...aro.org, linux-pm@...r.kernel.org, abelvesa@...il.com,
	pebolle@...cali.nl, Michael Turquette <mturquette@...libre.com>
Subject: [PATCH v3 4/4] [RFC] sched: cfs: cpu frequency scaling policy

From: Michael Turquette <mturquette@...libre.com>

Implements a very simple policy to scale cpu frequency as a function of
cfs utilization. This policy is a placeholder until something better
comes along. Its purpose is to illustrate how to use the
cpufreq_sched_set_cap api and allow interested parties to hack on
this stuff.

Signed-off-by: Michael Turquette <mturquette@...libre.com>
---
Changes in v3:
Split out into separate patch
Capacity calculation moved from cpufreq governor to cfs
Removed use of static key. Replaced with Kconfig option

 kernel/sched/fair.c | 41 +++++++++++++++++++++++++++++++++++++++++
 1 file changed, 41 insertions(+)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 46855d0..5ccc384 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4217,6 +4217,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 {
 	struct cfs_rq *cfs_rq;
 	struct sched_entity *se = &p->se;
+	unsigned long utilization, capacity;
 
 	for_each_sched_entity(se) {
 		if (se->on_rq)
@@ -4252,6 +4253,19 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 		update_rq_runnable_avg(rq, rq->nr_running);
 		add_nr_running(rq, 1);
 	}
+
+#ifdef CONFIG_CPU_FREQ_GOV_SCHED
+	/* add 25% margin to current utilization */
+	utilization = rq->cfs.utilization_load_avg;
+	capacity = utilization + (utilization >> 2);
+
+	/* handle rounding errors */
+	capacity = (capacity > SCHED_LOAD_SCALE) ? SCHED_LOAD_SCALE :
+		capacity;
+
+	cpufreq_sched_set_cap(cpu_of(rq), capacity);
+#endif
+
 	hrtick_update(rq);
 }
 
@@ -4267,6 +4281,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	struct cfs_rq *cfs_rq;
 	struct sched_entity *se = &p->se;
 	int task_sleep = flags & DEQUEUE_SLEEP;
+	unsigned long utilization, capacity;
 
 	for_each_sched_entity(se) {
 		cfs_rq = cfs_rq_of(se);
@@ -4313,6 +4328,19 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 		sub_nr_running(rq, 1);
 		update_rq_runnable_avg(rq, 1);
 	}
+
+#ifdef CONFIG_CPU_FREQ_GOV_SCHED
+	/* add 25% margin to current utilization */
+	utilization = rq->cfs.utilization_load_avg;
+	capacity = utilization + (utilization >> 2);
+
+	/* handle rounding errors */
+	capacity = (capacity > SCHED_LOAD_SCALE) ? SCHED_LOAD_SCALE :
+		capacity;
+
+	cpufreq_sched_set_cap(cpu_of(rq), capacity);
+#endif
+
 	hrtick_update(rq);
 }
 
@@ -7806,6 +7834,7 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
 {
 	struct cfs_rq *cfs_rq;
 	struct sched_entity *se = &curr->se;
+	unsigned long utilization, capacity;
 
 	for_each_sched_entity(se) {
 		cfs_rq = cfs_rq_of(se);
@@ -7816,6 +7845,18 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
 		task_tick_numa(rq, curr);
 
 	update_rq_runnable_avg(rq, 1);
+
+#ifdef CONFIG_CPU_FREQ_GOV_SCHED
+	/* add 25% margin to current utilization */
+	utilization = rq->cfs.utilization_load_avg;
+	capacity = utilization + (utilization >> 2);
+
+	/* handle rounding errors */
+	capacity = (capacity > SCHED_LOAD_SCALE) ? SCHED_LOAD_SCALE :
+		capacity;
+
+	cpufreq_sched_set_cap(cpu_of(rq), capacity);
+#endif
 }
 
 /*
-- 
1.9.1

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ