From 1be4a1e126cdb1b47b4d8a5dd156f8bca04715c0 Mon Sep 17 00:00:00 2001
From: Vincent Guittot <vincent.guittot@linaro.org>
Date: Fri, 3 Jul 2015 10:02:53 +0200
Subject: [PATCH] arm64: Frequency invariant scheduler load-tracking support

Implements arch-specific function to provide the scheduler with a
frequency scaling correction factor for more accurate load-tracking.

This patch is based on the implementation that has been done for arm
arch by Morten Rasmussen <morten.rasmussen@arm.com>

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
---
 arch/arm64/include/asm/topology.h | 11 ++++++
 arch/arm64/kernel/smp.c           | 80 +++++++++++++++++++++++++++++++++++++++
 arch/arm64/kernel/topology.c      | 19 ++++++++++
 3 files changed, 110 insertions(+)

diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h
index 7ebcd31..887835a 100644
--- a/arch/arm64/include/asm/topology.h
+++ b/arch/arm64/include/asm/topology.h
@@ -24,6 +24,17 @@ void init_cpu_topology(void);
 void store_cpu_topology(unsigned int cpuid);
 const struct cpumask *cpu_coregroup_mask(int cpu);
 
+#ifdef CONFIG_CPU_FREQ
+
+#define arch_scale_freq_capacity arm64_arch_scale_freq_capacity
+struct sched_domain;
+extern unsigned long arm64_arch_scale_freq_capacity(struct sched_domain *sd,
+						    int cpu);
+
+DECLARE_PER_CPU(atomic_long_t, cpu_freq_capacity);
+
+#endif /* CONFIG_CPU_FREQ */
+
 #else
 
 static inline void init_cpu_topology(void) { }
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 328b8ce..45b88cb 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -34,6 +34,7 @@
 #include <linux/completion.h>
 #include <linux/of.h>
 #include <linux/irq_work.h>
+#include <linux/cpufreq.h>
 
 #include <asm/alternative.h>
 #include <asm/atomic.h>
@@ -656,3 +657,82 @@ int setup_profiling_timer(unsigned int multiplier)
 {
 	return -EINVAL;
 }
+
+#ifdef CONFIG_CPU_FREQ
+
+static DEFINE_PER_CPU(atomic_long_t, cpu_max_freq);
+DEFINE_PER_CPU(atomic_long_t, cpu_freq_capacity);
+
+/*
+ * Scheduler load-tracking scale-invariance
+ *
+ * Provides the scheduler with a scale-invariance correction factor that
+ * compensates for frequency scaling through arch_scale_freq_capacity()
+ * (implemented in topology.c).
+ */
+static inline
+void scale_freq_capacity(int cpu, unsigned long curr, unsigned long max)
+{
+	unsigned long capacity;
+
+	if (!max)
+		return;
+
+	capacity = (curr << SCHED_CAPACITY_SHIFT) / max;
+	atomic_long_set(&per_cpu(cpu_freq_capacity, cpu), capacity);
+}
+
+static int cpufreq_callback(struct notifier_block *nb,
+			    unsigned long val, void *data)
+{
+	struct cpufreq_freqs *freq = data;
+	int cpu = freq->cpu;
+	unsigned long max = atomic_long_read(&per_cpu(cpu_max_freq, cpu));
+
+	if (freq->flags & CPUFREQ_CONST_LOOPS)
+		return NOTIFY_OK;
+
+	scale_freq_capacity(cpu, freq->new, max);
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block cpufreq_notifier = {
+	.notifier_call = cpufreq_callback,
+};
+
+static int cpufreq_policy_callback(struct notifier_block *nb,
+				   unsigned long val, void *data)
+{
+	struct cpufreq_policy *policy = data;
+	int i;
+
+	if (val != CPUFREQ_NOTIFY)
+		return NOTIFY_OK;
+
+	for_each_cpu(i, policy->cpus) {
+		scale_freq_capacity(i, policy->cur, policy->max);
+		atomic_long_set(&per_cpu(cpu_max_freq, i), policy->max);
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block cpufreq_policy_notifier = {
+	.notifier_call = cpufreq_policy_callback,
+};
+
+static int __init register_cpufreq_notifier(void)
+{
+	int ret;
+
+	ret = cpufreq_register_notifier(&cpufreq_notifier,
+					CPUFREQ_TRANSITION_NOTIFIER);
+	if (ret)
+		return ret;
+
+	return cpufreq_register_notifier(&cpufreq_policy_notifier,
+					 CPUFREQ_POLICY_NOTIFIER);
+}
+core_initcall(register_cpufreq_notifier);
+#endif
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index fcb8f7b..6387f65 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -200,6 +200,25 @@ static int __init parse_dt_topology(void)
 	return ret;
 }
 
+#ifdef CONFIG_CPU_FREQ
+/*
+ * Scheduler load-tracking scale-invariance
+ *
+ * Provides the scheduler with a scale-invariance correction factor that
+ * compensates for frequency scaling (arch_scale_freq_capacity()). The scaling
+ * factor is updated in smp.c
+ */
+unsigned long arm64_arch_scale_freq_capacity(struct sched_domain *sd, int cpu)
+{
+	unsigned long curr = atomic_long_read(&per_cpu(cpu_freq_capacity, cpu));
+
+	if (!curr)
+		return SCHED_CAPACITY_SCALE;
+
+	return curr;
+}
+#endif
+
 /*
  * cpu topology table
  */
-- 
1.9.1