[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <5145be53-9bfc-42a9-b8e2-d035d151a115@redhat.com>
Date: Fri, 26 Jul 2024 15:58:30 -0400
From: Waiman Long <longman@...hat.com>
To: Chen Ridong <chenridong@...wei.com>, tj@...nel.org,
lizefan.x@...edance.com, hannes@...xchg.org, adityakali@...gle.com,
sergeh@...nel.org, mkoutny@...e.com
Cc: cgroups@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH v2 -next] cgroup/cpuset: add decrease attach_in_progress
helpers
On 7/25/24 21:05, Chen Ridong wrote:
> There are several functions that decrease attach_in_progress and
> wake up cpuset_attach_wq when attach_in_progress reaches zero. So,
> add helpers to make the code concise.
>
> Signed-off-by: Chen Ridong <chenridong@...wei.com>
> ---
> kernel/cgroup/cpuset.c | 39 ++++++++++++++++++++++++---------------
> 1 file changed, 24 insertions(+), 15 deletions(-)
>
> diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
> index d4322619e59a..fa0c2fc5d383 100644
> --- a/kernel/cgroup/cpuset.c
> +++ b/kernel/cgroup/cpuset.c
> @@ -490,6 +490,26 @@ static inline void check_insane_mems_config(nodemask_t *nodes)
> }
> }
>
> +/*
> + * decrease cs->attach_in_progress.
> + * wake_up cpuset_attach_wq if cs->attach_in_progress==0.
> + */
> +static inline void dec_attach_in_progress_locked(struct cpuset *cs)
> +{
> + lockdep_assert_held(&cpuset_mutex);
> +
> + cs->attach_in_progress--;
> + if (!cs->attach_in_progress)
> + wake_up(&cpuset_attach_wq);
> +}
> +
> +static inline void dec_attach_in_progress(struct cpuset *cs)
> +{
> + mutex_lock(&cpuset_mutex);
> + dec_attach_in_progress_locked(cs);
> + mutex_unlock(&cpuset_mutex);
> +}
> +
> /*
> * Cgroup v2 behavior is used on the "cpus" and "mems" control files when
> * on default hierarchy or when the cpuset_v2_mode flag is set by mounting
> @@ -3421,9 +3441,7 @@ static void cpuset_cancel_attach(struct cgroup_taskset *tset)
> cs = css_cs(css);
>
> mutex_lock(&cpuset_mutex);
> - cs->attach_in_progress--;
> - if (!cs->attach_in_progress)
> - wake_up(&cpuset_attach_wq);
> + dec_attach_in_progress_locked(cs);
>
> if (cs->nr_migrate_dl_tasks) {
> int cpu = cpumask_any(cs->effective_cpus);
> @@ -3538,9 +3556,7 @@ static void cpuset_attach(struct cgroup_taskset *tset)
> reset_migrate_dl_data(cs);
> }
>
> - cs->attach_in_progress--;
> - if (!cs->attach_in_progress)
> - wake_up(&cpuset_attach_wq);
> + dec_attach_in_progress_locked(cs);
>
> mutex_unlock(&cpuset_mutex);
> }
> @@ -4283,11 +4299,7 @@ static void cpuset_cancel_fork(struct task_struct *task, struct css_set *cset)
> if (same_cs)
> return;
>
> - mutex_lock(&cpuset_mutex);
> - cs->attach_in_progress--;
> - if (!cs->attach_in_progress)
> - wake_up(&cpuset_attach_wq);
> - mutex_unlock(&cpuset_mutex);
> + dec_attach_in_progress(cs);
> }
>
> /*
> @@ -4319,10 +4331,7 @@ static void cpuset_fork(struct task_struct *task)
> guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
> cpuset_attach_task(cs, task);
>
> - cs->attach_in_progress--;
> - if (!cs->attach_in_progress)
> - wake_up(&cpuset_attach_wq);
> -
> + dec_attach_in_progress_locked(cs);
> mutex_unlock(&cpuset_mutex);
> }
>
Reviewed-by: Waiman Long <longman@...hat.com>
Powered by blists - more mailing lists