Message-ID: <20190408214539.2705660-2-songliubraving@fb.com>
Date: Mon, 8 Apr 2019 14:45:33 -0700
From: Song Liu <songliubraving@...com>
To: <linux-kernel@...r.kernel.org>, <cgroups@...r.kernel.org>
CC: <mingo@...hat.com>, <peterz@...radead.org>,
<vincent.guittot@...aro.org>, <tglx@...utronix.de>,
<morten.rasmussen@....com>, <kernel-team@...com>,
Song Liu <songliubraving@...com>
Subject: [PATCH 1/7] sched: refactor tg_set_cfs_bandwidth()
This patch factors tg_switch_cfs_runtime() out of tg_set_cfs_bandwidth(),
so that later patches in this series can extend tg_switch_cfs_runtime() to
support the new target_idle_pct value.

This patch does not introduce any functional changes.
Signed-off-by: Song Liu <songliubraving@...com>
---
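Reviewer note, not part of the patch: below is a rough sketch of how a
later setter in this series could reuse the new helper under the same
locking rules. The tg_set_target_idle_pct() name and the idle-percentage
to quota conversion are hypothetical placeholders; only __cfs_schedulable(),
tg_switch_cfs_runtime(), cfs_constraints_mutex and the get_online_cpus()
pairing come from the code touched here.

static int tg_set_target_idle_pct(struct task_group *tg, u64 pct)
{
	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
	u64 period, quota;
	int ret;

	/*
	 * A real implementation would also enforce the min/max period and
	 * quota bounds that tg_set_cfs_bandwidth() checks.
	 */
	if (tg == &root_task_group || pct >= 100)
		return -EINVAL;

	/* Same race-avoidance rules as tg_set_cfs_bandwidth(). */
	get_online_cpus();
	mutex_lock(&cfs_constraints_mutex);

	/*
	 * Hypothetical: keep the current period and derive a quota that
	 * leaves roughly pct percent of each period idle.
	 */
	period = ktime_to_ns(cfs_b->period);
	quota = div64_u64(period * (100 - pct), 100);

	ret = __cfs_schedulable(tg, period, quota);
	if (!ret)
		tg_switch_cfs_runtime(tg, period, quota);

	mutex_unlock(&cfs_constraints_mutex);
	put_online_cpus();

	return ret;
}

The point of the split is that any such setter only has to take the two
locks, validate with __cfs_schedulable(), and then call
tg_switch_cfs_runtime(); the per-cpu runtime_enabled switching stays in
one place.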
kernel/sched/core.c | 71 +++++++++++++++++++++++++--------------------
1 file changed, 39 insertions(+), 32 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index ead464a0f2e5..b8f220860dc7 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6578,39 +6578,12 @@ const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */
static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);
-static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
+/* Callers must call get_online_cpus() and hold cfs_constraints_mutex. */
+static void tg_switch_cfs_runtime(struct task_group *tg, u64 period, u64 quota)
{
- int i, ret = 0, runtime_enabled, runtime_was_enabled;
struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
-
- if (tg == &root_task_group)
- return -EINVAL;
-
- /*
- * Ensure we have at some amount of bandwidth every period. This is
- * to prevent reaching a state of large arrears when throttled via
- * entity_tick() resulting in prolonged exit starvation.
- */
- if (quota < min_cfs_quota_period || period < min_cfs_quota_period)
- return -EINVAL;
-
- /*
- * Likewise, bound things on the otherside by preventing insane quota
- * periods. This also allows us to normalize in computing quota
- * feasibility.
- */
- if (period > max_cfs_quota_period)
- return -EINVAL;
-
- /*
- * Prevent race between setting of cfs_rq->runtime_enabled and
- * unthrottle_offline_cfs_rqs().
- */
- get_online_cpus();
- mutex_lock(&cfs_constraints_mutex);
- ret = __cfs_schedulable(tg, period, quota);
- if (ret)
- goto out_unlock;
+ int runtime_enabled, runtime_was_enabled;
+ int i;
runtime_enabled = quota != RUNTIME_INF;
runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
@@ -6647,7 +6620,41 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
}
if (runtime_was_enabled && !runtime_enabled)
cfs_bandwidth_usage_dec();
-out_unlock:
+}
+
+static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
+{
+ int ret = 0;
+
+ if (tg == &root_task_group)
+ return -EINVAL;
+
+ /*
+ * Ensure we have at some amount of bandwidth every period. This is
+ * to prevent reaching a state of large arrears when throttled via
+ * entity_tick() resulting in prolonged exit starvation.
+ */
+ if (quota < min_cfs_quota_period || period < min_cfs_quota_period)
+ return -EINVAL;
+
+ /*
+ * Likewise, bound things on the otherside by preventing insane quota
+ * periods. This also allows us to normalize in computing quota
+ * feasibility.
+ */
+ if (period > max_cfs_quota_period)
+ return -EINVAL;
+
+ /*
+ * Prevent race between setting of cfs_rq->runtime_enabled and
+ * unthrottle_offline_cfs_rqs().
+ */
+ get_online_cpus();
+ mutex_lock(&cfs_constraints_mutex);
+ ret = __cfs_schedulable(tg, period, quota);
+ if (!ret)
+ tg_switch_cfs_runtime(tg, period, quota);
+
mutex_unlock(&cfs_constraints_mutex);
put_online_cpus();
--
2.17.1