[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <d7713049-5b44-4e86-8c0d-ca2c365c925f@arm.com>
Date: Thu, 13 Feb 2025 18:15:37 +0000
From: Christian Loehle <christian.loehle@....com>
To: Xuewen Yan <xuewen.yan@...soc.com>, mingo@...hat.com,
peterz@...radead.org, juri.lelli@...hat.com, vincent.guittot@...aro.org
Cc: dietmar.eggemann@....com, rostedt@...dmis.org, bsegall@...gle.com,
mgorman@...e.de, vschneid@...hat.com, qyousef@...alina.io,
ke.wang@...soc.com, di.shen@...soc.com, xuewen.yan94@...il.com,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH 1/2] sched/uclamp: Always use uclamp_is_used()
On 2/13/25 09:15, Xuewen Yan wrote:
> Now that we have the uclamp_is_used() helper to check whether uclamp is
> enabled, replace the open-coded static_branch_unlikely(&sched_uclamp_used) checks with it.
>
> Signed-off-by: Xuewen Yan <xuewen.yan@...soc.com>
> ---
> kernel/sched/core.c | 4 ++--
> kernel/sched/sched.h | 28 ++++++++++++++--------------
> 2 files changed, 16 insertions(+), 16 deletions(-)
>
> diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> index 165c90ba64ea..841147759ec7 100644
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -1756,7 +1756,7 @@ static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p)
> * The condition is constructed such that a NOP is generated when
> * sched_uclamp_used is disabled.
> */
> - if (!static_branch_unlikely(&sched_uclamp_used))
> + if (!uclamp_is_used())
> return;
>
> if (unlikely(!p->sched_class->uclamp_enabled))
> @@ -1783,7 +1783,7 @@ static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p)
> * The condition is constructed such that a NOP is generated when
> * sched_uclamp_used is disabled.
> */
> - if (!static_branch_unlikely(&sched_uclamp_used))
> + if (!uclamp_is_used())
> return;
>
> if (unlikely(!p->sched_class->uclamp_enabled))
> diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
> index 38e0e323dda2..f5de05354d80 100644
> --- a/kernel/sched/sched.h
> +++ b/kernel/sched/sched.h
> @@ -3394,6 +3394,19 @@ static inline bool update_other_load_avgs(struct rq *rq) { return false; }
>
> unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id);
>
> +/*
> + * When uclamp is compiled in, the aggregation at rq level is 'turned off'
> + * by default in the fast path and only gets turned on once userspace performs
> + * an operation that requires it.
> + *
> + * Returns true if userspace opted-in to use uclamp and aggregation at rq level
> + * hence is active.
> + */
> +static inline bool uclamp_is_used(void)
> +{
> + return static_branch_likely(&sched_uclamp_used);
> +}
> +
> static inline unsigned long uclamp_rq_get(struct rq *rq,
> enum uclamp_id clamp_id)
> {
> @@ -3417,7 +3430,7 @@ static inline bool uclamp_rq_is_capped(struct rq *rq)
> unsigned long rq_util;
> unsigned long max_util;
>
> - if (!static_branch_likely(&sched_uclamp_used))
> + if (!uclamp_is_used())
> return false;
>
> rq_util = cpu_util_cfs(cpu_of(rq)) + cpu_util_rt(rq);
> @@ -3426,19 +3439,6 @@ static inline bool uclamp_rq_is_capped(struct rq *rq)
> return max_util != SCHED_CAPACITY_SCALE && rq_util >= max_util;
> }
>
> -/*
> - * When uclamp is compiled in, the aggregation at rq level is 'turned off'
> - * by default in the fast path and only gets turned on once userspace performs
> - * an operation that requires it.
> - *
> - * Returns true if userspace opted-in to use uclamp and aggregation at rq level
> - * hence is active.
> - */
> -static inline bool uclamp_is_used(void)
> -{
> - return static_branch_likely(&sched_uclamp_used);
> -}
> -
> #define for_each_clamp_id(clamp_id) \
> for ((clamp_id) = 0; (clamp_id) < UCLAMP_CNT; (clamp_id)++)
>
Reviewed-by: Christian Loehle <christian.loehle@....com>
Powered by blists - more mailing lists