Message-ID: <Zpel7oyBNTpkLiPS@linux.ibm.com>
Date: Wed, 17 Jul 2024 16:37:26 +0530
From: Vishal Chourasia <vishalc@...ux.ibm.com>
To: Chuyi Zhou <zhouchuyi@...edance.com>
Cc: mingo@...hat.com, peterz@...radead.org, juri.lelli@...hat.com,
vincent.guittot@...aro.org, chengming.zhou@...ux.dev,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH] sched/fair: Remove cfs_rq::nr_spread_over and
cfs_rq::exec_clock

On Tue, Jul 16, 2024 at 11:06:34PM +0800, Chuyi Zhou wrote:
> cfs_rq::nr_spread_over and cfs_rq::exec_clock are not used anymore in
> eevdf. Remove them from struct cfs_rq.
>
nr_spread_over tracked the number of times the difference between a
scheduling entity's vruntime and the runqueue's min_vruntime exceeded
three times the scheduler latency, indicating a significant spread in
task scheduling. Its last user was removed by commit 5e963f2bd
("sched/fair: Commit to EEVDF").
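
For context, the old check_spread() helper that bumped this counter
(removed by the same commit) looked roughly like the sketch below;
this is paraphrased from the pre-EEVDF kernel/sched/fair.c, not a
verbatim quote:

  static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
  {
  #ifdef CONFIG_SCHED_DEBUG
          /* absolute distance of this entity's vruntime from min_vruntime */
          s64 d = se->vruntime - cfs_rq->min_vruntime;

          if (d < 0)
                  d = -d;

          /* count entities spread more than 3 * sysctl_sched_latency apart */
          if (d > 3 * sysctl_sched_latency)
                  schedstat_inc(cfs_rq->nr_spread_over);
  #endif
  }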

cfs_rq->exec_clock was used to account for the time spent executing
tasks on a cfs_rq. Its last user was removed by commit 5d69eca542ee1
("sched: Unify runtime accounting across classes").
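
The relevant lines in the old update_curr() in kernel/sched/fair.c
were roughly the following (again a sketch from memory, not a
verbatim quote of the pre-5d69eca542ee1 code):

          delta_exec = now - curr->exec_start;
          curr->exec_start = now;
          curr->sum_exec_runtime += delta_exec;
          /* per-cfs_rq time accounting; only the debug output read it */
          schedstat_add(cfs_rq->exec_clock, delta_exec);
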
> Signed-off-by: Chuyi Zhou <zhouchuyi@...edance.com>
Acked-by: Vishal Chourasia <vishalc@...ux.ibm.com>
> ---
> kernel/sched/debug.c | 4 ----
> kernel/sched/sched.h | 6 ------
> 2 files changed, 10 deletions(-)
>
> diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
> index c1eb9a1afd13..90c4a9998377 100644
> --- a/kernel/sched/debug.c
> +++ b/kernel/sched/debug.c
> @@ -641,8 +641,6 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
> SEQ_printf(m, "\n");
> SEQ_printf(m, "cfs_rq[%d]:\n", cpu);
> #endif
> - SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "exec_clock",
> - SPLIT_NS(cfs_rq->exec_clock));
>
> raw_spin_rq_lock_irqsave(rq, flags);
> root = __pick_root_entity(cfs_rq);
> @@ -669,8 +667,6 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
> SPLIT_NS(right_vruntime));
> spread = right_vruntime - left_vruntime;
> SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "spread", SPLIT_NS(spread));
> - SEQ_printf(m, " .%-30s: %d\n", "nr_spread_over",
> - cfs_rq->nr_spread_over);
> SEQ_printf(m, " .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
> SEQ_printf(m, " .%-30s: %d\n", "h_nr_running", cfs_rq->h_nr_running);
> SEQ_printf(m, " .%-30s: %d\n", "idle_nr_running",
> diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
> index 4c36cc680361..8a071022bdec 100644
> --- a/kernel/sched/sched.h
> +++ b/kernel/sched/sched.h
> @@ -599,7 +599,6 @@ struct cfs_rq {
> s64 avg_vruntime;
> u64 avg_load;
>
> - u64 exec_clock;
> u64 min_vruntime;
> #ifdef CONFIG_SCHED_CORE
> unsigned int forceidle_seq;
> @@ -619,10 +618,6 @@ struct cfs_rq {
> struct sched_entity *curr;
> struct sched_entity *next;
>
> -#ifdef CONFIG_SCHED_DEBUG
> - unsigned int nr_spread_over;
> -#endif
> -
> #ifdef CONFIG_SMP
> /*
> * CFS load tracking
> @@ -1158,7 +1153,6 @@ struct rq {
> /* latency stats */
> struct sched_info rq_sched_info;
> unsigned long long rq_cpu_time;
> - /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
>
> /* sys_sched_yield() stats */
> unsigned int yld_count;
> --
> 2.20.1
>