[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <xm26fsulpkcp.fsf@google.com>
Date: Fri, 03 Sep 2021 11:47:18 -0700
From: Benjamin Segall <bsegall@...gle.com>
To: Huaixin Chang <changhuaixin@...ux.alibaba.com>
Cc: linux-kernel@...r.kernel.org, peterz@...radead.org,
anderson@...unc.edu, baruah@...tl.edu, dietmar.eggemann@....com,
dtcccc@...ux.alibaba.com, juri.lelli@...hat.com,
khlebnikov@...dex-team.ru, luca.abeni@...tannapisa.it,
mgorman@...e.de, mingo@...hat.com, odin@...d.al, odin@...dal.com,
pauld@...head.com, pjt@...gle.com, rostedt@...dmis.org,
shanpeic@...ux.alibaba.com, tj@...nel.org,
tommaso.cucinotta@...tannapisa.it, vincent.guittot@...aro.org,
xiyou.wangcong@...il.com, daniel.m.jordan@...cle.com
Subject: Re: [PATCH 1/2] sched/fair: Add cfs bandwidth burst statistics
Huaixin Chang <changhuaixin@...ux.alibaba.com> writes:
> Two new statistics are introduced to show the internals of the burst
> feature and to explain why burst helps or not.
>
> nr_bursts: number of periods in which a bandwidth burst occurs
> burst_time: cumulative wall-time (in nanoseconds) that any CPUs have
> used above quota in their respective periods
>
> Co-developed-by: Shanpei Chen <shanpeic@...ux.alibaba.com>
> Signed-off-by: Shanpei Chen <shanpeic@...ux.alibaba.com>
> Co-developed-by: Tianchen Ding <dtcccc@...ux.alibaba.com>
> Signed-off-by: Tianchen Ding <dtcccc@...ux.alibaba.com>
> Signed-off-by: Huaixin Chang <changhuaixin@...ux.alibaba.com>
Reviewed-by: Ben Segall <bsegall@...gle.com>
I know there's some worry about the overhead of a constantly increasing
amount of statistics, but as far as the implementation of this goes, it
looks good to me.
> ---
> kernel/sched/core.c | 13 ++++++++++---
> kernel/sched/fair.c | 9 +++++++++
> kernel/sched/sched.h | 3 +++
> 3 files changed, 22 insertions(+), 3 deletions(-)
>
> diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> index 20ffcc044134..d00b92712253 100644
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -10068,6 +10068,9 @@ static int cpu_cfs_stat_show(struct seq_file *sf, void *v)
> seq_printf(sf, "wait_sum %llu\n", ws);
> }
>
> + seq_printf(sf, "nr_bursts %d\n", cfs_b->nr_burst);
> + seq_printf(sf, "burst_time %llu\n", cfs_b->burst_time);
> +
> return 0;
> }
> #endif /* CONFIG_CFS_BANDWIDTH */
> @@ -10164,16 +10167,20 @@ static int cpu_extra_stat_show(struct seq_file *sf,
> {
> struct task_group *tg = css_tg(css);
> struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
> - u64 throttled_usec;
> + u64 throttled_usec, burst_usec;
>
> throttled_usec = cfs_b->throttled_time;
> do_div(throttled_usec, NSEC_PER_USEC);
> + burst_usec = cfs_b->burst_time;
> + do_div(burst_usec, NSEC_PER_USEC);
>
> seq_printf(sf, "nr_periods %d\n"
> "nr_throttled %d\n"
> - "throttled_usec %llu\n",
> + "throttled_usec %llu\n"
> + "nr_bursts %d\n"
> + "burst_usec %llu\n",
> cfs_b->nr_periods, cfs_b->nr_throttled,
> - throttled_usec);
> + throttled_usec, cfs_b->nr_burst, burst_usec);
> }
> #endif
> return 0;
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 44c452072a1b..464371f364f1 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -4655,11 +4655,20 @@ static inline u64 sched_cfs_bandwidth_slice(void)
> */
> void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
> {
> + s64 runtime;
> +
> if (unlikely(cfs_b->quota == RUNTIME_INF))
> return;
>
> cfs_b->runtime += cfs_b->quota;
> + runtime = cfs_b->runtime_snap - cfs_b->runtime;
> + if (runtime > 0) {
> + cfs_b->burst_time += runtime;
> + cfs_b->nr_burst++;
> + }
> +
> cfs_b->runtime = min(cfs_b->runtime, cfs_b->quota + cfs_b->burst);
> + cfs_b->runtime_snap = cfs_b->runtime;
> }
>
> static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
> diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
> index 14a41a243f7b..80e4322727b4 100644
> --- a/kernel/sched/sched.h
> +++ b/kernel/sched/sched.h
> @@ -367,6 +367,7 @@ struct cfs_bandwidth {
> u64 quota;
> u64 runtime;
> u64 burst;
> + u64 runtime_snap;
> s64 hierarchical_quota;
>
> u8 idle;
> @@ -379,7 +380,9 @@ struct cfs_bandwidth {
> /* Statistics: */
> int nr_periods;
> int nr_throttled;
> + int nr_burst;
> u64 throttled_time;
> + u64 burst_time;
> #endif
> };
Powered by blists - more mailing lists