Message-Id: <20180907214047.26914-20-jschoenh@amazon.de>
Date: Fri, 7 Sep 2018 23:40:06 +0200
From: Jan H. Schönherr <jschoenh@...zon.de>
To: Ingo Molnar <mingo@...hat.com>,
Peter Zijlstra <peterz@...radead.org>
Cc: Jan H. Schönherr <jschoenh@...zon.de>,
linux-kernel@...r.kernel.org
Subject: [RFC 19/60] sched: Add entity variants of enqueue_task_fair() and dequeue_task_fair()

There is a fair amount of overlap between enqueue_task_fair() and
unthrottle_cfs_rq(), as well as between dequeue_task_fair() and
throttle_cfs_rq(). This is the first step toward having each pair
use the same basic function.

Signed-off-by: Jan H. Schönherr <jschoenh@...zon.de>
---
kernel/sched/fair.c | 82 ++++++++++++++++++++++++++++++----------------------
kernel/sched/sched.h | 3 ++
2 files changed, 51 insertions(+), 34 deletions(-)
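
Not part of this patch, just to illustrate where this is heading: once the
helpers exist, the hand-rolled hierarchy walk in unthrottle_cfs_rq() could
collapse into a call to enqueue_entity_fair(). The sketch below assumes a
follow-up change (not in this patch) that adds a "count" argument so the
helper can account more than one task per level; that parameter and the
simplified body are hypothetical.

	/*
	 * Sketch only -- assumes a hypothetical later variant of
	 * enqueue_entity_fair() taking a task count, so it can account
	 * cfs_rq->h_nr_running tasks per level instead of just one.
	 * Bandwidth/runtime bookkeeping is elided.
	 */
	void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
	{
		struct rq *rq = rq_of(cfs_rq);
		struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
		long task_delta = cfs_rq->h_nr_running;

		cfs_rq->throttled = 0;
		/* ... runtime accounting stays as it is today ... */

		if (!cfs_rq->load.weight)
			return;

		/*
		 * The helper returns true iff it stopped early on a
		 * throttled cfs_rq; only account the tasks otherwise.
		 */
		if (!enqueue_entity_fair(rq, se, ENQUEUE_WAKEUP, task_delta))
			add_nr_running(rq, task_delta);

		/* Determine whether we need to wake up a potentially idle CPU: */
		if (rq->curr == rq->idle && rq->cfs.nr_running)
			resched_curr(rq);
	}
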
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 9f63ac37f5ef..a96328c5a864 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4979,32 +4979,9 @@ static inline void hrtick_update(struct rq *rq)
}
#endif
-/*
- * The enqueue_task method is called before nr_running is
- * increased. Here we update the fair scheduling stats and
- * then put the task into the rbtree:
- */
-static void
-enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+bool enqueue_entity_fair(struct rq *rq, struct sched_entity *se, int flags)
{
struct cfs_rq *cfs_rq;
- struct sched_entity *se = &p->se;
-
- /*
- * The code below (indirectly) updates schedutil which looks at
- * the cfs_rq utilization to select a frequency.
- * Let's add the task's estimated utilization to the cfs_rq's
- * estimated utilization, before we update schedutil.
- */
- util_est_enqueue(&rq->cfs, p);
-
- /*
- * If in_iowait is set, the code below may not trigger any cpufreq
- * utilization updates, so do it here explicitly with the IOWAIT flag
- * passed.
- */
- if (p->in_iowait)
- cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT);
for_each_sched_entity(se) {
if (se->on_rq)
@@ -5036,7 +5013,38 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
update_cfs_group(se);
}
- if (!se)
+ return se != NULL;
+}
+
+/*
+ * The enqueue_task method is called before nr_running is
+ * increased. Here we update the fair scheduling stats and
+ * then put the task into the rbtree:
+ */
+static void
+enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+{
+ bool throttled;
+
+ /*
+ * The code below (indirectly) updates schedutil which looks at
+ * the cfs_rq utilization to select a frequency.
+ * Let's add the task's estimated utilization to the cfs_rq's
+ * estimated utilization, before we update schedutil.
+ */
+ util_est_enqueue(&rq->cfs, p);
+
+ /*
+ * If in_iowait is set, the code below may not trigger any cpufreq
+ * utilization updates, so do it here explicitly with the IOWAIT flag
+ * passed.
+ */
+ if (p->in_iowait)
+ cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT);
+
+ throttled = enqueue_entity_fair(rq, &p->se, flags);
+
+ if (!throttled)
add_nr_running(rq, 1);
hrtick_update(rq);
@@ -5044,15 +5052,9 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
static void set_next_buddy(struct sched_entity *se);
-/*
- * The dequeue_task method is called before nr_running is
- * decreased. We remove the task from the rbtree and
- * update the fair scheduling stats:
- */
-static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+bool dequeue_entity_fair(struct rq *rq, struct sched_entity *se, int flags)
{
struct cfs_rq *cfs_rq;
- struct sched_entity *se = &p->se;
int task_sleep = flags & DEQUEUE_SLEEP;
for_each_sched_entity(se) {
@@ -5095,10 +5097,22 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
update_cfs_group(se);
}
- if (!se)
+ return se != NULL;
+}
+
+/*
+ * The dequeue_task method is called before nr_running is
+ * decreased. We remove the task from the rbtree and
+ * update the fair scheduling stats:
+ */
+static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+{
+ bool throttled = dequeue_entity_fair(rq, &p->se, flags);
+
+ if (!throttled)
sub_nr_running(rq, 1);
- util_est_dequeue(&rq->cfs, p, task_sleep);
+ util_est_dequeue(&rq->cfs, p, flags & DEQUEUE_SLEEP);
hrtick_update(rq);
}
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 3e0ad36938fb..9016049f36c3 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1543,6 +1543,9 @@ extern const u32 sched_prio_to_wmult[40];
#define RETRY_TASK ((void *)-1UL)
+bool enqueue_entity_fair(struct rq *rq, struct sched_entity *se, int flags);
+bool dequeue_entity_fair(struct rq *rq, struct sched_entity *se, int flags);
+
struct sched_class {
const struct sched_class *next;
--
2.9.3.1.gcba166c.dirty