Message-ID: <YwyOzgbbUbB+JmSH@hirez.programming.kicks-ass.net>
Date: Mon, 29 Aug 2022 12:02:54 +0200
From: Peter Zijlstra <peterz@...radead.org>
To: Dietmar Eggemann <dietmar.eggemann@....com>
Cc: Ingo Molnar <mingo@...nel.org>,
Vincent Guittot <vincent.guittot@...aro.org>,
Morten Rasmussen <morten.rasmussen@....com>,
Vincent Donnefort <vdonnefort@...gle.com>,
Quentin Perret <qperret@...gle.com>,
Patrick Bellasi <patrick.bellasi@...bug.net>,
Abhijeet Dharmapurikar <adharmap@...cinc.com>,
Jian-Min <Jian-Min.Liu@...iatek.com>,
Qais Yousef <qais.yousef@....com>, linux-kernel@...r.kernel.org
Subject: Re: [RFC PATCH 1/1] sched/pelt: Introduce PELT multiplier
On Mon, Aug 29, 2022 at 10:08:13AM +0200, Peter Zijlstra wrote:
> On Mon, Aug 29, 2022 at 07:54:50AM +0200, Dietmar Eggemann wrote:
> > From: Vincent Donnefort <vincent.donnefort@....com>
> >
> > The new sysctl sched_pelt_multiplier allows a user to set a clock
> > multiplier to x2 or x4 (x1 being the default). This clock multiplier
> > artificially speeds up PELT ramp up/down, similar to using a faster
> > half-life than the default 32ms.
> >
> > - x1: 32ms half-life
> > - x2: 16ms half-life
> > - x4: 8ms half-life
> >
> > Internally, a new clock is created: rq->clock_task_mult. It sits in the
> > clock hierarchy between rq->clock_task and rq->clock_pelt.
> >
> > Signed-off-by: Vincent Donnefort <vincent.donnefort@....com>
> > Signed-off-by: Dietmar Eggemann <dietmar.eggemann@....com>
>
> > +extern unsigned int sched_pelt_lshift;
> > +
> > +/*
> > + * absolute time    |1      |2      |3      |4      |5      |6      |
> > + * @ mult = 1       --------****************--------****************-
> > + * @ mult = 2       --------********----------------********---------
> > + * @ mult = 4       --------****--------------------****-------------
> > + * clock task mult
> > + * @ mult = 2       |   |   |2  |3  |   |   |   |   |5  |6  |   |   |
> > + * @ mult = 4       | | | | |2|3| | | | | | | | | | |5|6| | | | | | |
> > + *
> > + */
> > +static inline void update_rq_clock_task_mult(struct rq *rq, s64 delta)
> > +{
> > +	delta <<= READ_ONCE(sched_pelt_lshift);
> > +
> > +	rq->clock_task_mult += delta;
> > +
> > +	update_rq_clock_pelt(rq, delta);
> > +}
>
> Hurmph... I'd almost go write you something like
> static_call()/static_branch() but for immediates.
>
> That said; given there's only like 3 options, perhaps a few
> static_branch() instances work just fine ?
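For illustration, a minimal sketch of that static_branch() flavour
(hypothetical key names, not part of the patch below) could look like:

	DEFINE_STATIC_KEY_FALSE(pelt_m2_key);
	DEFINE_STATIC_KEY_FALSE(pelt_m4_key);

	static inline s64 pelt_scale_delta(s64 delta)
	{
		/* each branch is patched out to a NOP while its key is false */
		if (static_branch_unlikely(&pelt_m2_key))
			delta <<= 1;
		if (static_branch_unlikely(&pelt_m4_key))
			delta <<= 2;
		return delta;
	}

The sched_feat() variant below amounts to much the same thing, since
sched_feat() is itself backed by jump labels on the usual configs.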
Also, I'm not at all sure about exposing that as an official sysctl.
How about something like so?
---
Subject: sched/pelt: Introduce PELT multiplier
From: Vincent Donnefort <vincent.donnefort@....com>
Date: Mon, 29 Aug 2022 07:54:50 +0200
The new sysctl sched_pelt_multiplier allows a user to set a clock
multiplier to x2 or x4 (x1 being the default). This clock multiplier
artificially speeds up PELT ramp up/down, similar to using a faster
half-life than the default 32ms.
- x1: 32ms half-life
- x2: 16ms half-life
- x4: 8ms half-life
Internally, a new clock is created: rq->clock_task_mult. It sits in the
clock hierarchy between rq->clock_task and rq->clock_pelt.
[peterz: Use sched_feat()]
Signed-off-by: Vincent Donnefort <vincent.donnefort@....com>
Signed-off-by: Dietmar Eggemann <dietmar.eggemann@....com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
---
kernel/sched/core.c | 2 +-
kernel/sched/features.h | 3 +++
kernel/sched/pelt.h | 45 +++++++++++++++++++++++++++++++++++++++++----
kernel/sched/sched.h | 1 +
4 files changed, 46 insertions(+), 5 deletions(-)
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -727,7 +727,7 @@ static void update_rq_clock_task(struct
 	if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
 		update_irq_load_avg(rq, irq_delta + steal);
 #endif
-	update_rq_clock_pelt(rq, delta);
+	update_rq_clock_task_mult(rq, delta);
 }

 void update_rq_clock(struct rq *rq)
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -101,3 +101,6 @@ SCHED_FEAT(LATENCY_WARN, false)
 SCHED_FEAT(ALT_PERIOD, true)
 SCHED_FEAT(BASE_SLICE, true)
+
+SCHED_FEAT(PELT_M2, false)
+SCHED_FEAT(PELT_M4, false)
--- a/kernel/sched/pelt.h
+++ b/kernel/sched/pelt.h
@@ -61,6 +61,14 @@ static inline void cfs_se_util_change(st
 	WRITE_ONCE(avg->util_est.enqueued, enqueued);
 }

+static inline u64 rq_clock_task_mult(struct rq *rq)
+{
+	lockdep_assert_rq_held(rq);
+	assert_clock_updated(rq);
+
+	return rq->clock_task_mult;
+}
+
 static inline u64 rq_clock_pelt(struct rq *rq)
 {
 	lockdep_assert_rq_held(rq);
@@ -72,7 +80,7 @@ static inline u64 rq_clock_pelt(struct r
 /* The rq is idle, we can sync to clock_task */
 static inline void _update_idle_rq_clock_pelt(struct rq *rq)
 {
-	rq->clock_pelt = rq_clock_task(rq);
+	rq->clock_pelt = rq_clock_task_mult(rq);
 	u64_u32_store(rq->clock_idle, rq_clock(rq));

 	/* Paired with smp_rmb in migrate_se_pelt_lag() */
@@ -121,6 +129,30 @@ static inline void update_rq_clock_pelt(
 	rq->clock_pelt += delta;
 }
+extern unsigned int sched_pelt_lshift;
+
+/*
+ * absolute time    |1      |2      |3      |4      |5      |6      |
+ * @ mult = 1       --------****************--------****************-
+ * @ mult = 2       --------********----------------********---------
+ * @ mult = 4       --------****--------------------****-------------
+ * clock task mult
+ * @ mult = 2       |   |   |2  |3  |   |   |   |   |5  |6  |   |   |
+ * @ mult = 4       | | | | |2|3| | | | | | | | | | |5|6| | | | | | |
+ *
+ */
+static inline void update_rq_clock_task_mult(struct rq *rq, s64 delta)
+{
+	if (sched_feat(PELT_M2))
+		delta *= 2;
+	if (sched_feat(PELT_M4))
+		delta *= 4;
+
+	rq->clock_task_mult += delta;
+
+	update_rq_clock_pelt(rq, delta);
+}
+
 /*
  * When rq becomes idle, we have to check if it has lost idle time
  * because it was fully busy. A rq is fully used when the /Sum util_sum
@@ -147,7 +179,7 @@ static inline void update_idle_rq_clock_
  * rq's clock_task.
  */
 	if (util_sum >= divider)
-		rq->lost_idle_time += rq_clock_task(rq) - rq->clock_pelt;
+		rq->lost_idle_time += rq_clock_task_mult(rq) - rq->clock_pelt;

 	_update_idle_rq_clock_pelt(rq);
 }
@@ -218,13 +250,18 @@ update_irq_load_avg(struct rq *rq, u64 r
 	return 0;
 }

-static inline u64 rq_clock_pelt(struct rq *rq)
+static inline u64 rq_clock_task_mult(struct rq *rq)
 {
 	return rq_clock_task(rq);
 }

+static inline u64 rq_clock_pelt(struct rq *rq)
+{
+	return rq_clock_task_mult(rq);
+}
+
 static inline void
-update_rq_clock_pelt(struct rq *rq, s64 delta) { }
+update_rq_clock_task_mult(struct rq *rq, s64 delta) { }

 static inline void
 update_idle_rq_clock_pelt(struct rq *rq) { }
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1009,6 +1009,7 @@ struct rq {
 	u64			clock;
 	/* Ensure that all clocks are in the same cache line */
 	u64			clock_task ____cacheline_aligned;
+	u64			clock_task_mult;
 	u64			clock_pelt;
 	unsigned long		lost_idle_time;
 	u64			clock_pelt_idle;
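
The PELT_M2/PELT_M4 bits are flipped at runtime through the usual
sched_features debugfs toggle (assuming the post-v5.13 path; older
kernels expose /sys/kernel/debug/sched_features instead):

	# echo PELT_M2 > /sys/kernel/debug/sched/features
	# echo NO_PELT_M2 > /sys/kernel/debug/sched/features

Note that enabling both bits compounds to an effective x8; nothing in
update_rq_clock_task_mult() keeps them mutually exclusive.

The half-life arithmetic can also be sanity-checked in isolation with a
stand-alone userspace sketch (not kernel code): PELT decays by a factor
y per ms, with y^32 = 0.5, so scaling the clock by m gives an effective
half-life of 32/m ms:

	#include <math.h>
	#include <stdio.h>

	int main(void)
	{
		/* per-millisecond decay factor y, chosen so that y^32 == 0.5 */
		const double y = pow(0.5, 1.0 / 32.0);

		/* a clock multiplier m consumes m PELT-ms per wall-clock ms */
		for (int m = 1; m <= 4; m *= 2)
			printf("mult=%d: decay/ms=%.5f half-life=%gms\n",
			       m, pow(y, m), 32.0 / m);
		return 0;
	}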