Message-Id: <20200408095012.3819-2-dietmar.eggemann@arm.com>
Date: Wed, 8 Apr 2020 11:50:09 +0200
From: Dietmar Eggemann <dietmar.eggemann@....com>
To: Ingo Molnar <mingo@...hat.com>,
Peter Zijlstra <peterz@...radead.org>,
Juri Lelli <juri.lelli@...hat.com>
Cc: Vincent Guittot <vincent.guittot@...aro.org>,
Steven Rostedt <rostedt@...dmis.org>,
Luca Abeni <luca.abeni@...tannapisa.it>,
Daniel Bristot de Oliveira <bristot@...hat.com>,
Wei Wang <wvw@...gle.com>, Quentin Perret <qperret@...gle.com>,
Alessio Balsini <balsini@...gle.com>,
Pavan Kondeti <pkondeti@...eaurora.org>,
Patrick Bellasi <patrick.bellasi@...bug.net>,
Morten Rasmussen <morten.rasmussen@....com>,
Valentin Schneider <valentin.schneider@....com>,
Qais Yousef <qais.yousef@....com>, linux-kernel@...r.kernel.org
Subject: [PATCH 1/4] sched/topology: Store root domain CPU capacity sum

Store the sum of the (original) CPU capacities of all member CPUs in the
root domain.

This is needed for capacity-aware SCHED_DEADLINE admission control.

Signed-off-by: Dietmar Eggemann <dietmar.eggemann@....com>
---
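Note: for context, here is a minimal sketch of how the new rd_capacity()
helper could feed a capacity-aware admission test. It is illustrative
only; the actual SCHED_DEADLINE change comes in a later patch of this
series, and dl_overflow_cap() and its scaling are assumptions, not code
from this patch.

	/*
	 * Sketch, not part of this patch: a capacity-aware version of the
	 * DL overflow check. Instead of multiplying the per-CPU bandwidth
	 * limit by the number of CPUs, scale it by the root domain's
	 * capacity sum (in SCHED_CAPACITY_SCALE units), i.e. callers would
	 * pass cap = rd_capacity(cpu).
	 */
	static inline bool
	dl_overflow_cap(struct dl_bw *dl_b, unsigned long cap, u64 old_bw,
			u64 new_bw)
	{
		u64 limit = (dl_b->bw * cap) >> SCHED_CAPACITY_SHIFT;

		return dl_b->bw != -1 && limit < dl_b->total_bw - old_bw + new_bw;
	}

On a symmetric system this reduces to the old check (cap == nr_cpus *
SCHED_CAPACITY_SCALE), while on an asymmetric system it stops the root
domain from being over-admitted as if every CPU were a big CPU.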
kernel/sched/sched.h | 11 +++++++++++
kernel/sched/topology.c | 14 ++++++++++----
2 files changed, 21 insertions(+), 4 deletions(-)

diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 1e72d1b3d3ce..91bd0cb0c529 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -797,6 +797,7 @@ struct root_domain {
cpumask_var_t rto_mask;
struct cpupri cpupri;

+ unsigned long sum_cpu_capacity;
unsigned long max_cpu_capacity;

/*
@@ -2393,6 +2394,16 @@ static inline unsigned long capacity_orig_of(int cpu)
{
return cpu_rq(cpu)->cpu_capacity_orig;
}
+
+static inline unsigned long rd_capacity(int cpu)
+{
+ return cpu_rq(cpu)->rd->sum_cpu_capacity;
+}
+#else
+static inline unsigned long rd_capacity(int cpu)
+{
+ return SCHED_CAPACITY_SCALE;
+}
#endif

/**
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 8344757bba6e..74b0c0fa4b1b 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -2052,12 +2052,17 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att
/* Attach the domains */
rcu_read_lock();
for_each_cpu(i, cpu_map) {
+ unsigned long cap = arch_scale_cpu_capacity(i);
+
rq = cpu_rq(i);
sd = *per_cpu_ptr(d.sd, i);

/* Use READ_ONCE()/WRITE_ONCE() to avoid load/store tearing: */
- if (rq->cpu_capacity_orig > READ_ONCE(d.rd->max_cpu_capacity))
- WRITE_ONCE(d.rd->max_cpu_capacity, rq->cpu_capacity_orig);
+ if (cap > READ_ONCE(d.rd->max_cpu_capacity))
+ WRITE_ONCE(d.rd->max_cpu_capacity, cap);
+
+ WRITE_ONCE(d.rd->sum_cpu_capacity,
+ READ_ONCE(d.rd->sum_cpu_capacity) + cap);

cpu_attach_domain(sd, d.rd, i);
}
@@ -2067,8 +2072,9 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att
static_branch_inc_cpuslocked(&sched_asym_cpucapacity);

if (rq && sched_debug_enabled) {
- pr_info("root domain span: %*pbl (max cpu_capacity = %lu)\n",
- cpumask_pr_args(cpu_map), rq->rd->max_cpu_capacity);
+ pr_info("root domain span: %*pbl (capacity = %lu max cpu_capacity = %lu)\n",
+ cpumask_pr_args(cpu_map), rq->rd->sum_cpu_capacity,
+ rq->rd->max_cpu_capacity);
}

ret = 0;
--
2.17.1