Message-Id: <20180907214047.26914-48-jschoenh@amazon.de>
Date: Fri, 7 Sep 2018 23:40:34 +0200
From: Jan H. Schönherr <jschoenh@...zon.de>
To: Ingo Molnar <mingo@...hat.com>,
Peter Zijlstra <peterz@...radead.org>
Cc: Jan H. Schönherr <jschoenh@...zon.de>,
linux-kernel@...r.kernel.org
Subject: [RFC 47/60] cosched: Adjust SE traversal and locking for common leader activities
Modify some of the core scheduler paths, which serve as entry points
into the CFS scheduling class and which are activities where the leader
operates on behalf of the group.

These are: (a) handling the tick, (b) picking the next task from the
runqueue, (c) setting a task to be current, and (d) putting the current
task back.

Signed-off-by: Jan H. Schönherr <jschoenh@...zon.de>
---
kernel/sched/fair.c | 28 +++++++++++++++++++++++-----
1 file changed, 23 insertions(+), 5 deletions(-)
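
For reference (not part of the patch), the pattern applied at each of
these entry points is roughly the sketch below. rq_lock_owned(),
rq_unlock_owned(), and for_each_owned_sched_entity() are the helpers
used in the diff (introduced earlier in this series);
leader_activity_fair() is only a placeholder name for the surrounding
function.

	/*
	 * Sketch of the common leader pattern: lock the runqueues this
	 * CPU currently leads, walk only the owned part of the SE
	 * hierarchy, then unlock again.
	 */
	static void leader_activity_fair(struct rq *rq, struct sched_entity *se)
	{
		struct rq_owner_flags orf;

		rq_lock_owned(rq, &orf);

		for_each_owned_sched_entity(se) {
			struct cfs_rq *cfs_rq = cfs_rq_of(se);

			/* per-entity work; put_prev_entity() as in the hunk below */
			put_prev_entity(cfs_rq, se);
		}

		rq_unlock_owned(rq, &orf);
	}
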
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 2aa3a60dfca5..2227e4840355 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6664,12 +6664,14 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
 static struct task_struct *
 pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
-	struct cfs_rq *cfs_rq = &rq->cfs;
+	struct cfs_rq *cfs_rq, *top_cfs_rq;
+	struct rq_owner_flags orf;
 	struct sched_entity *se;
 	struct task_struct *p;
 	int new_tasks;
 
 again:
+	top_cfs_rq = cfs_rq = &rq_lock_owned(rq, &orf)->cfs;
 	if (!cfs_rq->nr_running)
 		goto idle;
 
@@ -6707,7 +6709,7 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf
 			 * be correct.
 			 */
 			if (unlikely(check_cfs_rq_runtime(cfs_rq))) {
-				cfs_rq = &rq->cfs;
+				cfs_rq = top_cfs_rq;
 
 				if (!cfs_rq->nr_running)
 					goto idle;
@@ -6775,9 +6777,13 @@ done: __maybe_unused;
 	if (hrtick_enabled(rq))
 		hrtick_start_fair(rq, p);
 
+	rq_unlock_owned(rq, &orf);
+
 	return p;
 
 idle:
+	rq_unlock_owned(rq, &orf);
+
 	new_tasks = idle_balance(rq, rf);
 
 	/*
@@ -6796,12 +6802,15 @@ done: __maybe_unused;
 
 void put_prev_entity_fair(struct rq *rq, struct sched_entity *se)
 {
+	struct rq_owner_flags orf;
 	struct cfs_rq *cfs_rq;
 
-	for_each_sched_entity(se) {
+	rq_lock_owned(rq, &orf);
+	for_each_owned_sched_entity(se) {
 		cfs_rq = cfs_rq_of(se);
 		put_prev_entity(cfs_rq, se);
 	}
+	rq_unlock_owned(rq, &orf);
 }
 
 /*
@@ -9712,11 +9721,14 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
 {
 	struct cfs_rq *cfs_rq;
 	struct sched_entity *se = &curr->se;
+	struct rq_owner_flags orf;
 
-	for_each_sched_entity(se) {
+	rq_lock_owned(rq, &orf);
+	for_each_owned_sched_entity(se) {
 		cfs_rq = cfs_rq_of(se);
 		entity_tick(cfs_rq, se, queued);
 	}
+	rq_unlock_owned(rq, &orf);
 
 	if (static_branch_unlikely(&sched_numa_balancing))
 		task_tick_numa(rq, curr);
@@ -9906,13 +9918,19 @@ static void switched_to_fair(struct rq *rq, struct task_struct *p)
 
 void set_curr_entity_fair(struct rq *rq, struct sched_entity *se)
 {
-	for_each_sched_entity(se) {
+	struct rq_owner_flags orf;
+
+	rq_lock_owned(rq, &orf);
+
+	for_each_owned_sched_entity(se) {
 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
 		set_next_entity(cfs_rq, se);
 		/* ensure bandwidth has been allocated on our new cfs_rq */
 		account_cfs_rq_runtime(cfs_rq, 0);
 	}
+
+	rq_unlock_owned(rq, &orf);
 }
 
 /*
--
2.9.3.1.gcba166c.dirty