Date:	Wed, 2 Sep 2009 16:54:32 +0530
From:	Gautham R Shenoy <ego@...ibm.com>
To:	Peter Zijlstra <a.p.zijlstra@...llo.nl>
Cc:	Ingo Molnar <mingo@...e.hu>, linux-kernel@...r.kernel.org,
	Andreas Herrmann <andreas.herrmann3@....com>,
	Balbir Singh <balbir@...ibm.com>
Subject: Re: [RFC][PATCH 5/8] sched: dynamic cpu_power

On Tue, Sep 01, 2009 at 10:34:36AM +0200, Peter Zijlstra wrote:
> Recompute the cpu_power for each cpu during load-balance

This patch rocks! At least in theory, it does :-)

> 
> Signed-off-by: Peter Zijlstra <a.p.zijlstra@...llo.nl>
> ---
>  kernel/sched.c |   38 +++++++++++++++++++++++++++++++++++---
>  1 file changed, 35 insertions(+), 3 deletions(-)
> 
> Index: linux-2.6/kernel/sched.c
> ===================================================================
> --- linux-2.6.orig/kernel/sched.c
> +++ linux-2.6/kernel/sched.c
> @@ -3691,14 +3691,46 @@ static inline int check_power_save_busie
>  }
>  #endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
> 
> -static void update_sched_power(struct sched_domain *sd)
> +unsigned long __weak arch_smt_gain(struct sched_domain *sd, int cpu)
> +{
> +	unsigned long weight = cpumask_weight(sched_domain_span(sd));
> +	unsigned long smt_gain = sd->smt_gain;
> +
> +	smt_gain /= weight;
> +
> +	return smt_gain;
> +}
> +
> +static void update_cpu_power(struct sched_domain *sd, int cpu)
> +{
> +	unsigned long weight = cpumask_weight(sched_domain_span(sd));
> +	unsigned long power = SCHED_LOAD_SCALE;
> +	struct sched_group *sdg = sd->groups;
> +	unsigned long old = sdg->__cpu_power;
> +
> +	/* here we could scale based on cpufreq */
> +
> +	if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
> +		power *= arch_smt_gain(sd, cpu);
> +		power >>= SCHED_LOAD_SHIFT;
> +	}
> +
> +	/* here we could scale based on RT time */
> +
> +	if (power != old) {
> +		sdg->__cpu_power = power;
> +		sdg->reciprocal_cpu_power = reciprocal_value(power);
> +	}
> +}
> +
> +static void update_group_power(struct sched_domain *sd, int cpu)
>  {
>  	struct sched_domain *child = sd->child;
>  	struct sched_group *group, *sdg = sd->groups;
>  	unsigned long power = sdg->__cpu_power;
> 
>  	if (!child) {
> -		/* compute cpu power for this cpu */
> +		update_cpu_power(sd, cpu);
>  		return;
>  	}
> 
> @@ -3743,7 +3775,7 @@ static inline void update_sg_lb_stats(st
>  	if (local_group) {
>  		balance_cpu = group_first_cpu(group);
>  		if (balance_cpu == this_cpu)
> -			update_sched_power(sd);
> +			update_group_power(sd, this_cpu);
>  	}
> 
>  	/* Tally up the load of all CPUs in the group */
> 
> -- 
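
Just to convince myself of the arithmetic above: if I'm reading the
defaults right, smt_gain starts out at 1178 (~1.15 * SCHED_LOAD_SCALE),
so arch_smt_gain() divides that across the siblings and
update_cpu_power() scales SCHED_LOAD_SCALE by the result. A standalone
userspace sketch of just that math (not kernel code; the 1178 default
and the loop over sibling counts are my assumptions for illustration):

#include <stdio.h>

#define SCHED_LOAD_SHIFT 10
#define SCHED_LOAD_SCALE (1UL << SCHED_LOAD_SHIFT)

int main(void)
{
	unsigned long smt_gain = 1178;	/* assumed default: ~1.15 * SCHED_LOAD_SCALE */
	unsigned long weight;		/* number of SMT siblings in the domain */

	for (weight = 1; weight <= 4; weight++) {
		unsigned long power = SCHED_LOAD_SCALE;

		if (weight > 1) {	/* mirrors the SD_SHARE_CPUPOWER branch */
			/* per-sibling gain, as in arch_smt_gain() */
			unsigned long gain = smt_gain / weight;

			/* scale power, as in update_cpu_power() */
			power *= gain;
			power >>= SCHED_LOAD_SHIFT;
		}

		printf("%lu sibling(s): cpu_power = %lu (SCHED_LOAD_SCALE = %lu)\n",
		       weight, power, SCHED_LOAD_SCALE);
	}
	return 0;
}

With those assumptions a 2-thread package gets 589 per sibling, i.e.
2 * 589 = 1178 total, which is the ~15% SMT throughput gain we expect
rather than pretending each sibling is a full 1024-unit cpu.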

-- 
Thanks and Regards
gautham
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/
