Message-Id: <20210819175034.4577-3-mkoutny@suse.com>
Date: Thu, 19 Aug 2021 19:50:31 +0200
From: Michal Koutný <mkoutny@...e.com>
To: linux-kernel@...r.kernel.org
Cc: Vincent Guittot <vincent.guittot@...aro.org>,
Phil Auld <pauld@...hat.com>, Ingo Molnar <mingo@...hat.com>,
Peter Zijlstra <peterz@...radead.org>,
Juri Lelli <juri.lelli@...hat.com>,
Dietmar Eggemann <dietmar.eggemann@....com>,
Steven Rostedt <rostedt@...dmis.org>,
Ben Segall <bsegall@...gle.com>, Mel Gorman <mgorman@...e.de>,
Daniel Bristot de Oliveira <bristot@...hat.com>,
Odin Ugedal <odin@...d.al>, Rik van Riel <riel@...riel.com>,
Giovanni Gherdovich <ggherdovich@...e.cz>
Subject: [RFC PATCH v2 2/5] sched: Add group_se() helper

No functional change. Unify the cfs_rq to sched_entity conversion in a
single helper and move the conversion closer to its use where possible.
The helper is used only by CONFIG_FAIR_GROUP_SCHED code, i.e. no dummy
variant is defined.
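
For illustration, every call site open-codes the same lookup, which the
helper replaces (a sketch, not part of the patch; note that the NULL
returned for the root cfs_rq is harmless at these call sites, since
for_each_sched_entity() treats a NULL starting point as an empty walk):

	/* Before: open-coded cfs_rq to sched_entity conversion */
	se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];

	/* After: one helper; returns NULL for the root cfs_rq */
	se = group_se(cfs_rq);
	for_each_sched_entity(se) {
		...
	}
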
Signed-off-by: Michal Koutný <mkoutny@...e.com>
---
 kernel/sched/fair.c  | 9 +++------
 kernel/sched/sched.h | 8 ++++++++
 2 files changed, 11 insertions(+), 6 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 2c41a9007928..905f95b91a7a 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4824,8 +4824,6 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
 	if (!dequeue)
 		return false; /* Throttle no longer required. */
 
-	se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
-
 	/* freeze hierarchy runnable averages while throttled */
 	rcu_read_lock();
 	walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
@@ -4833,6 +4831,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
 
 	task_delta = cfs_rq->h_nr_running;
 	idle_task_delta = cfs_rq->idle_h_nr_running;
+	se = group_se(cfs_rq);
 	for_each_sched_entity(se) {
 		struct cfs_rq *qcfs_rq = cfs_rq_of(se);
 		/* throttled entity or throttle-on-deactivate */
@@ -4884,8 +4883,6 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 	struct sched_entity *se;
 	long task_delta, idle_task_delta;
 
-	se = cfs_rq->tg->se[cpu_of(rq)];
-
 	cfs_rq->throttled = 0;
 
 	update_rq_clock(rq);
@@ -4898,6 +4895,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 	/* update hierarchical throttle state */
 	walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
 
+	se = group_se(cfs_rq);
 	if (!cfs_rq->load.weight) {
 		/* Nothing to run but something to decay? Complete the branch */
 		if (cfs_rq->on_list)
@@ -8163,8 +8161,7 @@ static bool __update_blocked_fair(struct rq *rq, bool *done)
  */
 static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
 {
-	struct rq *rq = rq_of(cfs_rq);
-	struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
+	struct sched_entity *se = group_se(cfs_rq);
 	unsigned long now = jiffies;
 	unsigned long load;
 
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 14a41a243f7b..219ee463fe64 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1383,6 +1383,14 @@ static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
 	return grp->my_q;
 }
 
+/* sched entity representing the cfs_rq, NULL for root */
+static inline struct sched_entity *group_se(struct cfs_rq *cfs_rq)
+{
+	int cpu = cpu_of(rq_of(cfs_rq));
+
+	return cfs_rq->tg->se[cpu];
+}
+
 #else
 
 static inline struct task_struct *task_of(struct sched_entity *se)
--
2.32.0