lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <6df6a8d3-6c5e-4ea7-8f55-08c2a56928f6@linux.dev>
Date: Wed, 24 Jul 2024 10:29:27 +0800
From: Chengming Zhou <chengming.zhou@...ux.dev>
To: Chuyi Zhou <zhouchuyi@...edance.com>, mingo@...hat.com,
 peterz@...radead.org, juri.lelli@...hat.com, vincent.guittot@...aro.org,
 dietmar.eggemann@....com, rostedt@...dmis.org, bsegall@...gle.com,
 mgorman@...e.de, vschneid@...hat.com
Cc: linux-kernel@...r.kernel.org, joshdon@...gle.com
Subject: Re: [PATCH v2 1/2] sched/fair: Decrease cfs bandwidth usage in
 task_group destruction

On 2024/7/23 20:20, Chuyi Zhou wrote:
> The static key __cfs_bandwidth_used is used to indicate whether bandwidth
> control is enabled in the system. Currently, it is only decreased when a
> task group disables bandwidth control. This is incorrect because if there
> was a task group in the past that enabled bandwidth control, the
> __cfs_bandwidth_used will never go to zero, even if there are no task_group
> using bandwidth control now.
> 
> This patch tries to fix this issue by decreasing bandwidth usage in
> destroy_cfs_bandwidth(). cfs_bandwidth_usage_dec() calls
> static_key_slow_dec_cpuslocked which needs to hold hotplug lock, but cfs
> bandwidth destroy may run in an RCU callback. Move the call to
> destroy_cfs_bandwidth() from unregister_fair_sched_group() to
> cpu_cgroup_css_free() which runs in process context.
> 
> Signed-off-by: Chuyi Zhou <zhouchuyi@...edance.com>

Yeah, autogroup can't have bandwidth set, so it's ok to just destroy 
bandwidth in .css_free().

Reviewed-by: Chengming Zhou <chengming.zhou@...ux.dev>

Just some nits below:

> ---
>   kernel/sched/core.c  |  2 ++
>   kernel/sched/fair.c  | 13 +++++++------
>   kernel/sched/sched.h |  2 ++
>   3 files changed, 11 insertions(+), 6 deletions(-)
> 
> diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> index 6d35c48239be..7720d34bd71b 100644
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -8816,6 +8816,8 @@ static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
>   {
>   	struct task_group *tg = css_tg(css);
>   
> +	destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));

Instead of exporting this tg_cfs_bandwidth(), how about just changing
the parameter of init_cfs_bandwidth()/destroy_cfs_bandwidth() to tg?
Which may be clearer? But this is your call.

Thanks.

> +
>   	/*
>   	 * Relies on the RCU grace period between css_released() and this.
>   	 */
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index da3cdd86ab2e..c56b6d5b8ed7 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -5615,7 +5615,7 @@ void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
>   	cfs_b->runtime_snap = cfs_b->runtime;
>   }
>   
> -static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
> +struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
>   {
>   	return &tg->cfs_bandwidth;
>   }
> @@ -6438,7 +6438,7 @@ void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
>   	hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED);
>   }
>   
> -static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
> +void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
>   {
>   	int __maybe_unused i;
>   
> @@ -6472,6 +6472,9 @@ static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
>   		local_irq_restore(flags);
>   	}
>   #endif
> +	guard(cpus_read_lock)();
> +	if (cfs_b->quota != RUNTIME_INF)
> +		cfs_bandwidth_usage_dec();
>   }
>   
>   /*
> @@ -6614,11 +6617,11 @@ void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b, struct cfs_bandwidth *paren
>   static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
>   #endif
>   
> -static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
> +struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
>   {
>   	return NULL;
>   }
> -static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
> +void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
>   static inline void update_runtime_enabled(struct rq *rq) {}
>   static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
>   #ifdef CONFIG_CGROUP_SCHED
> @@ -12992,8 +12995,6 @@ void unregister_fair_sched_group(struct task_group *tg)
>   	struct rq *rq;
>   	int cpu;
>   
> -	destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));
> -
>   	for_each_possible_cpu(cpu) {
>   		if (tg->se[cpu])
>   			remove_entity_load_avg(tg->se[cpu]);
> diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
> index 8a071022bdec..d251842867ce 100644
> --- a/kernel/sched/sched.h
> +++ b/kernel/sched/sched.h
> @@ -2938,6 +2938,8 @@ extern void init_dl_rq(struct dl_rq *dl_rq);
>   extern void cfs_bandwidth_usage_inc(void);
>   extern void cfs_bandwidth_usage_dec(void);
>   
> +extern struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg);
> +extern void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
>   #ifdef CONFIG_NO_HZ_COMMON
>   
>   #define NOHZ_BALANCE_KICK_BIT	0

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ