lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <1427391983-31961-1-git-send-email-morten.rasmussen@arm.com>
Date:	Thu, 26 Mar 2015 17:46:22 +0000
From:	Morten Rasmussen <morten.rasmussen@....com>
To:	Vincent Guittot <vincent.guittot@...aro.org>
Cc:	Peter Zijlstra <peterz@...radead.org>,
	Ingo Molnar <mingo@...nel.org>, linux-kernel@...r.kernel.org,
	Preeti U Murthy <preeti@...ux.vnet.ibm.com>,
	Kamalesh Babulal <kamalesh@...ux.vnet.ibm.com>,
	Rik van Riel <riel@...hat.com>,
	Mike Galbraith <efault@....de>, nicolas.pitre@...aro.org,
	Dietmar Eggemann <Dietmar.Eggemann@....com>,
	Linaro Kernel Mailman List <linaro-kernel@...ts.linaro.org>,
	Paul Turner <pjt@...gle.com>, Ben Segall <bsegall@...gle.com>
Subject: [PATCH 1/2] sched: Change arch_scale_*() functions to scale input factor

The arch_scale_{freq, cpu}_capacity() functions currently return a
scaling factor that needs to be multiplied and shifted by the caller. The
default weak functions don't result in any scaling, but the
multiplication and shift is still done. By moving the multiplication and
shift into the arch_scale*() functions instead, the weak implementation
can just return the input value and avoid the unnecessary multiplication
and shift.

While we are at it, we can remove the sched_domain parameter by moving
the SD_SHARE_CPUCAPACITY handling outside the weak
arch_scale_cpu_capacity() function.

cc: Ingo Molnar <mingo@...hat.com>
cc: Peter Zijlstra <peterz@...radead.org>

Signed-off-by: Morten Rasmussen <morten.rasmussen@....com>
---
 arch/arm/kernel/topology.c |  6 +++---
 kernel/sched/fair.c        | 34 ++++++++++++++--------------------
 2 files changed, 17 insertions(+), 23 deletions(-)

diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index 08b7847..5328f79 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -42,9 +42,9 @@
  */
 static DEFINE_PER_CPU(unsigned long, cpu_scale);
 
-unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
+unsigned long arch_scale_cpu_capacity(int cpu, unsigned long factor)
 {
-	return per_cpu(cpu_scale, cpu);
+	return (factor * per_cpu(cpu_scale, cpu)) >> SCHED_CAPACITY_SHIFT;
 }
 
 static void set_capacity_scale(unsigned int cpu, unsigned long capacity)
@@ -166,7 +166,7 @@ static void update_cpu_capacity(unsigned int cpu)
 	set_capacity_scale(cpu, cpu_capacity(cpu) / middle_capacity);
 
 	pr_info("CPU%u: update cpu_capacity %lu\n",
-		cpu, arch_scale_cpu_capacity(NULL, cpu));
+		cpu, arch_scale_cpu_capacity(cpu, SCHED_CAPACITY_SCALE));
 }
 
 #else
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 5080c0d..60c3172 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5958,27 +5958,19 @@ static inline int get_sd_load_idx(struct sched_domain *sd,
 	return load_idx;
 }
 
-static unsigned long default_scale_capacity(struct sched_domain *sd, int cpu)
+static unsigned long default_scale_capacity(int cpu, unsigned long factor)
 {
-	return SCHED_CAPACITY_SCALE;
-}
-
-unsigned long __weak arch_scale_freq_capacity(struct sched_domain *sd, int cpu)
-{
-	return default_scale_capacity(sd, cpu);
+	return factor;
 }
 
-static unsigned long default_scale_cpu_capacity(struct sched_domain *sd, int cpu)
+unsigned long __weak arch_scale_freq_capacity(int cpu, unsigned long factor)
 {
-	if ((sd->flags & SD_SHARE_CPUCAPACITY) && (sd->span_weight > 1))
-		return sd->smt_gain / sd->span_weight;
-
-	return SCHED_CAPACITY_SCALE;
+	return default_scale_capacity(cpu, factor);
 }
 
-unsigned long __weak arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
+unsigned long __weak arch_scale_cpu_capacity(int cpu, unsigned long factor)
 {
-	return default_scale_cpu_capacity(sd, cpu);
+	return default_scale_capacity(cpu, factor);
 }
 
 static unsigned long scale_rt_capacity(int cpu)
@@ -6020,12 +6012,14 @@ static void update_cpu_capacity(struct sched_domain *sd, int cpu)
 	unsigned long capacity = SCHED_CAPACITY_SCALE;
 	struct sched_group *sdg = sd->groups;
 
-	if (sched_feat(ARCH_CAPACITY))
-		capacity *= arch_scale_cpu_capacity(sd, cpu);
-	else
-		capacity *= default_scale_cpu_capacity(sd, cpu);
-
-	capacity >>= SCHED_CAPACITY_SHIFT;
+	if (sched_feat(ARCH_CAPACITY)) {
+		capacity = arch_scale_cpu_capacity(cpu, capacity);
+	} else {
+		if ((sd->flags & SD_SHARE_CPUCAPACITY) && (sd->span_weight > 1))
+			capacity = sd->smt_gain / sd->span_weight;
+		else
+			capacity = default_scale_capacity(cpu, capacity);
+	}
 
 	sdg->sgc->capacity_orig = capacity;
 
-- 
1.9.1

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ