Message-ID: <de6958aa-27f8-4baa-b76d-88266d009f81@huawei.com>
Date: Thu, 25 Jul 2024 19:40:15 +0800
From: chenridong <chenridong@...wei.com>
To: Kamalesh Babulal <kamalesh.babulal@...cle.com>, <tj@...nel.org>,
<lizefan.x@...edance.com>, <hannes@...xchg.org>, <longman@...hat.com>,
<adityakali@...gle.com>, <sergeh@...nel.org>, <mkoutny@...e.com>
CC: <cgroups@...r.kernel.org>, <linux-kernel@...r.kernel.org>
Subject: Re: [PATCH -next] cgroup/cpuset: add dec_attach_in_progress helper
On 2024/7/25 19:01, Kamalesh Babulal wrote:
>
>
> On 7/25/24 7:25 AM, Chen Ridong wrote:
>> Several functions decrease attach_in_progress and wake up
>> cpuset_attach_wq when it reaches zero. Add a helper to factor out
>> this repeated pattern.
>>
>> Signed-off-by: Chen Ridong <chenridong@...wei.com>
>> ---
>> kernel/cgroup/cpuset.c | 28 +++++++++++++++-------------
>> 1 file changed, 15 insertions(+), 13 deletions(-)
>>
>> diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
>> index d4322619e59a..c241845694ac 100644
>> --- a/kernel/cgroup/cpuset.c
>> +++ b/kernel/cgroup/cpuset.c
>> @@ -490,6 +490,17 @@ static inline void check_insane_mems_config(nodemask_t *nodes)
>> }
>> }
>>
>> +/*
>> + * decrease cs->attach_in_progress.
>> + * wake_up cpuset_attach_wq if cs->attach_in_progress==0.
>
> In the description, adding the locking constraint of cpuset_mutex would be
> helpful. Something like "cpuset_mutex must be held by the caller."
>
Thank you, I will do that.
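Something like this, perhaps (a sketch only; the lockdep_assert_held()
check is my own addition on top of your suggestion, not something you
asked for):

/*
 * Decrease cs->attach_in_progress and wake up cpuset_attach_wq when it
 * drops to zero. cpuset_mutex must be held by the caller.
 */
static inline void dec_attach_in_progress(struct cpuset *cs)
{
	/* Assert the documented locking constraint. */
	lockdep_assert_held(&cpuset_mutex);

	cs->attach_in_progress--;
	if (!cs->attach_in_progress)
		wake_up(&cpuset_attach_wq);
}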
>> + */
>> +static inline void dec_attach_in_progress(struct cpuset *cs)
>> +{
>> + cs->attach_in_progress--;
>> + if (!cs->attach_in_progress)
>> + wake_up(&cpuset_attach_wq);
>> +}
>> +
>> /*
>> * Cgroup v2 behavior is used on the "cpus" and "mems" control files when
>> * on default hierarchy or when the cpuset_v2_mode flag is set by mounting
>> @@ -3421,9 +3432,7 @@ static void cpuset_cancel_attach(struct cgroup_taskset *tset)
>> cs = css_cs(css);
>>
>> mutex_lock(&cpuset_mutex);
>> - cs->attach_in_progress--;
>> - if (!cs->attach_in_progress)
>> - wake_up(&cpuset_attach_wq);
>> + dec_attach_in_progress(cs);
>>
>> if (cs->nr_migrate_dl_tasks) {
>> int cpu = cpumask_any(cs->effective_cpus);
>> @@ -3538,9 +3547,7 @@ static void cpuset_attach(struct cgroup_taskset *tset)
>> reset_migrate_dl_data(cs);
>> }
>>
>> - cs->attach_in_progress--;
>> - if (!cs->attach_in_progress)
>> - wake_up(&cpuset_attach_wq);
>> + dec_attach_in_progress(cs);
>>
>> mutex_unlock(&cpuset_mutex);
>> }
>> @@ -4284,9 +4291,7 @@ static void cpuset_cancel_fork(struct task_struct *task, struct css_set *cset)
>> return;
>>
>> mutex_lock(&cpuset_mutex);
>> - cs->attach_in_progress--;
>> - if (!cs->attach_in_progress)
>> - wake_up(&cpuset_attach_wq);
>> + dec_attach_in_progress(cs);
>> mutex_unlock(&cpuset_mutex);
>> }
>>
>> @@ -4319,10 +4324,7 @@ static void cpuset_fork(struct task_struct *task)
>> guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
>> cpuset_attach_task(cs, task);
>>
>> - cs->attach_in_progress--;
>> - if (!cs->attach_in_progress)
>> - wake_up(&cpuset_attach_wq);
>> -
>> + dec_attach_in_progress(cs);
>> mutex_unlock(&cpuset_mutex);
>> }
>>
>
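For context, the waiter side of this handshake is the hotplug path; it
looks roughly like this in current cpuset.c (quoted from memory, so
treat it as a sketch rather than the exact code):

	/*
	 * e.g. in cpuset_hotplug_update_tasks(): wait for in-flight
	 * attach operations to drain before updating the cpumasks.
	 */
	wait_event(cpuset_attach_wq, cs->attach_in_progress == 0);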