Message-ID: <20230615201252.1009678-1-joshdon@google.com>
Date: Thu, 15 Jun 2023 13:12:52 -0700
From: Josh Don <joshdon@...gle.com>
To: Peter Zijlstra <peterz@...radead.org>
Cc: Ingo Molnar <mingo@...hat.com>, Juri Lelli <juri.lelli@...hat.com>,
Vincent Guittot <vincent.guittot@...aro.org>,
Dietmar Eggemann <dietmar.eggemann@....com>,
Steven Rostedt <rostedt@...dmis.org>,
Ben Segall <bsegall@...gle.com>, Mel Gorman <mgorman@...e.de>,
Daniel Bristot de Oliveira <bristot@...hat.com>,
Valentin Schneider <vschneid@...hat.com>,
linux-kernel@...r.kernel.org, Josh Don <joshdon@...gle.com>
Subject: [PATCH] sched: fix throttle accounting with nested bandwidth limits

This fixes two issues:
- throttled_clock should only be set on the group that is actually
  getting throttled, not on every group within a throttled hierarchy
- with nested bandwidth limits, self-throttled time should only be
  accounted on entry to and exit from the throttled state

Fixes: 88cb2868250c ("sched: add throttled time stat for throttled children")
Fixes: 3ab150d011da ("sched: don't account throttle time for empty groups")
Signed-off-by: Josh Don <joshdon@...gle.com>
---
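
A userspace sketch of the intended semantics for the second issue
(hypothetical model_* names and plain integer clocks, not kernel
code): the self-throttled clock is armed only on the 0 -> 1
throttle_count transition in tg_throttle_down(), and folded into the
accumulated time only on the 1 -> 0 transition in tg_unthrottle_up(),
so nested limits neither re-arm nor double-count it.

#include <stdint.h>
#include <stdio.h>

struct model_cfs_rq {
	int throttle_count;        /* number of throttled ancestors */
	unsigned int nr_running;   /* entities queued on this cfs_rq */
	uint64_t clock_self;       /* start of current self-throttled span */
	uint64_t clock_self_time;  /* accumulated self-throttled time */
};

/* Mirrors tg_throttle_down(): arm the clock only on 0 -> 1. */
static void model_throttle_down(struct model_cfs_rq *cfs_rq, uint64_t now)
{
	if (!cfs_rq->throttle_count && cfs_rq->nr_running)
		cfs_rq->clock_self = now;
	cfs_rq->throttle_count++;
}

/* Mirrors tg_unthrottle_up(): account the span only on 1 -> 0. */
static void model_unthrottle_up(struct model_cfs_rq *cfs_rq, uint64_t now)
{
	if (--cfs_rq->throttle_count)
		return;
	if (cfs_rq->clock_self) {
		cfs_rq->clock_self_time += now - cfs_rq->clock_self;
		cfs_rq->clock_self = 0;
	}
}

int main(void)
{
	struct model_cfs_rq cfs_rq = { .nr_running = 1 };

	model_throttle_down(&cfs_rq, 100);  /* outer limit throttles */
	model_throttle_down(&cfs_rq, 150);  /* nested limit: no re-arm */
	model_unthrottle_up(&cfs_rq, 200);  /* inner releases: still throttled */
	model_unthrottle_up(&cfs_rq, 300);  /* outer releases: accrue 300 - 100 */

	/* prints 200, not 50 + 150 or a double count */
	printf("self-throttled time: %llu\n",
	       (unsigned long long)cfs_rq.clock_self_time);
	return 0;
}

The enqueue_entity() hunk is the complementary rule for the first
issue: throttled_clock is armed only when the cfs_rq itself is
throttled, while throttled_clock_self still starts for any descendant
with queued entities in the throttled hierarchy.
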
 kernel/sched/fair.c | 25 +++++++++++++------------
 1 file changed, 13 insertions(+), 12 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 0219cf870cef..a5fc825a8d70 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4787,6 +4787,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 }
 
 static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
+static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq);
 static inline bool cfs_bandwidth_used(void);
 
 static void
@@ -4879,7 +4880,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 #ifdef CONFIG_CFS_BANDWIDTH
 			struct rq *rq = rq_of(cfs_rq);
 
-			if (!cfs_rq->throttled_clock)
+			if (cfs_rq_throttled(cfs_rq) && !cfs_rq->throttled_clock)
 				cfs_rq->throttled_clock = rq_clock(rq);
 			if (!cfs_rq->throttled_clock_self)
 				cfs_rq->throttled_clock_self = rq_clock(rq);
@@ -5387,17 +5388,17 @@ static int tg_unthrottle_up(struct task_group *tg, void *data)
 		/* Add cfs_rq with load or one or more already running entities to the list */
 		if (!cfs_rq_is_decayed(cfs_rq))
 			list_add_leaf_cfs_rq(cfs_rq);
-	}
 
-	if (cfs_rq->throttled_clock_self) {
-		u64 delta = rq_clock(rq) - cfs_rq->throttled_clock_self;
+		if (cfs_rq->throttled_clock_self) {
+			u64 delta = rq_clock(rq) - cfs_rq->throttled_clock_self;
 
-		cfs_rq->throttled_clock_self = 0;
+			cfs_rq->throttled_clock_self = 0;
 
-		if (SCHED_WARN_ON((s64)delta < 0))
-			delta = 0;
+			if (SCHED_WARN_ON((s64)delta < 0))
+				delta = 0;
 
-		cfs_rq->throttled_clock_self_time += delta;
+			cfs_rq->throttled_clock_self_time += delta;
+		}
 	}
 
 	return 0;
@@ -5412,13 +5413,13 @@ static int tg_throttle_down(struct task_group *tg, void *data)
 	if (!cfs_rq->throttle_count) {
 		cfs_rq->throttled_clock_pelt = rq_clock_pelt(rq);
 		list_del_leaf_cfs_rq(cfs_rq);
+
+		SCHED_WARN_ON(cfs_rq->throttled_clock_self);
+		if (cfs_rq->nr_running)
+			cfs_rq->throttled_clock_self = rq_clock(rq);
 	}
 
 	cfs_rq->throttle_count++;
 
-	SCHED_WARN_ON(cfs_rq->throttled_clock_self);
-	if (cfs_rq->nr_running)
-		cfs_rq->throttled_clock_self = rq_clock(rq);
-
 	return 0;
 }
--
2.41.0.162.gfafddb0af9-goog