[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <4aaad8379b0b54eca2df9a91cbc0eda47a7a8faf.1572437285.git.vpillai@digitalocean.com>
Date: Wed, 30 Oct 2019 18:33:32 +0000
From: Vineeth Remanan Pillai <vpillai@...italocean.com>
To: Nishanth Aravamudan <naravamudan@...italocean.com>,
Julien Desfossez <jdesfossez@...italocean.com>,
Peter Zijlstra <peterz@...radead.org>,
Tim Chen <tim.c.chen@...ux.intel.com>, mingo@...nel.org,
tglx@...utronix.de, pjt@...gle.com, torvalds@...ux-foundation.org
Cc: Aaron Lu <aaron.lu@...ux.alibaba.com>,
linux-kernel@...r.kernel.org, Dario Faggioli <dfaggioli@...e.com>,
fweisbec@...il.com, keescook@...omium.org, kerrnel@...gle.com,
Phil Auld <pauld@...hat.com>, Aaron Lu <aaron.lwe@...il.com>,
Aubrey Li <aubrey.intel@...il.com>,
Valentin Schneider <valentin.schneider@....com>,
Mel Gorman <mgorman@...hsingularity.net>,
Pawan Gupta <pawan.kumar.gupta@...ux.intel.com>,
Paolo Bonzini <pbonzini@...hat.com>,
Vineeth Remanan Pillai <vpillai@...italocean.com>
Subject: [RFC PATCH v4 19/19] sched/fair : Wake up forced idle siblings if needed
From: Aaron Lu <aaron.lu@...ux.alibaba.com>
If the sibling of a forced idle cpu has only one task and if it has
used up its timeslice, then we should try to wake up the forced idle
cpu to give the starving task on it a chance.
Signed-off-by: Vineeth Remanan Pillai <vpillai@...italocean.com>
Signed-off-by: Julien Desfossez <jdesfossez@...italocean.com>
---
kernel/sched/fair.c | 40 ++++++++++++++++++++++++++++++++++++++++
1 file changed, 40 insertions(+)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e8dd78a8c54d..9d4cc97d4dd8 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4165,6 +4165,13 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
update_min_vruntime(cfs_rq);
}
+static inline bool
+__entity_slice_used(struct sched_entity *se)
+{
+ return (se->sum_exec_runtime - se->prev_sum_exec_runtime) >
+ sched_slice(cfs_rq_of(se), se);
+}
+
/*
* Preempt the current task with a newly woken task if needed:
*/
@@ -10052,6 +10059,34 @@ static void rq_offline_fair(struct rq *rq)
#endif /* CONFIG_SMP */
#ifdef CONFIG_SCHED_CORE
/*
 * If this runqueue has only one task left and that task has used up
 * its timeslice, kick any forced-idle SMT sibling so the starving
 * task parked there gets a chance to be scheduled.
 *
 * Called from the tick with @se being the currently running entity
 * on @rq.
 */
static void resched_forceidle_sibling(struct rq *rq, struct sched_entity *se)
{
	int cpu = cpu_of(rq), sibling_cpu;

	/* Only act when we are alone here with an expired slice. */
	if (rq->cfs.nr_running > 1 || !__entity_slice_used(se))
		return;

	for_each_cpu(sibling_cpu, cpu_smt_mask(cpu)) {
		struct rq *sibling_rq;

		if (sibling_cpu == cpu)
			continue;
		if (cpu_is_offline(sibling_cpu))
			continue;

		sibling_rq = cpu_rq(sibling_cpu);
		if (sibling_rq->core_forceidle)
			resched_curr(sibling_rq);
	}
}
#endif
+
+
/*
* scheduler tick hitting a task of our scheduling class.
*
@@ -10075,6 +10110,11 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
update_misfit_status(curr, rq);
update_overutilized_status(task_rq(curr));
+
+#ifdef CONFIG_SCHED_CORE
+ if (sched_core_enabled(rq))
+ resched_forceidle_sibling(rq, &curr->se);
+#endif
}
/*
--
2.17.1
Powered by blists - more mailing lists