Message-ID: <c6dfcbc9-c7e1-4221-b79c-b4c745e5b167@huaweicloud.com>
Date: Thu, 18 Dec 2025 09:39:43 +0800
From: Chen Ridong <chenridong@...weicloud.com>
To: Waiman Long <llong@...hat.com>, tj@...nel.org, hannes@...xchg.org,
 mkoutny@...e.com
Cc: cgroups@...r.kernel.org, linux-kernel@...r.kernel.org,
 lujialin4@...wei.com
Subject: Re: [PATCH -next 6/6] cpuset: remove v1-specific code from
 generate_sched_domains



On 2025/12/18 3:05, Waiman Long wrote:
> On 12/17/25 3:49 AM, Chen Ridong wrote:
>> From: Chen Ridong <chenridong@...wei.com>
>>
>> Following the introduction of cpuset1_generate_sched_domains() for v1
>> in the previous patch, v1-specific logic can now be removed from the
>> generic generate_sched_domains(). This patch cleans up the v1-only
>> code and ensures uf_node is only visible when CONFIG_CPUSETS_V1=y.
>>
>> Signed-off-by: Chen Ridong <chenridong@...wei.com>
>> ---
>>   kernel/cgroup/cpuset-internal.h |  10 +--
>>   kernel/cgroup/cpuset-v1.c       |   2 +-
>>   kernel/cgroup/cpuset.c          | 144 +++++---------------------------
>>   3 files changed, 27 insertions(+), 129 deletions(-)
>>
>> diff --git a/kernel/cgroup/cpuset-internal.h b/kernel/cgroup/cpuset-internal.h
>> index bd767f8cb0ed..ef7b7c5afd4c 100644
>> --- a/kernel/cgroup/cpuset-internal.h
>> +++ b/kernel/cgroup/cpuset-internal.h
>> @@ -175,14 +175,14 @@ struct cpuset {
>>       /* Handle for cpuset.cpus.partition */
>>       struct cgroup_file partition_file;
>>
>> -    /* Used to merge intersecting subsets for generate_sched_domains */
>> -    struct uf_node node;
>> -
>>   #ifdef CONFIG_CPUSETS_V1
>>       struct fmeter fmeter;        /* memory_pressure filter */
>>
>>       /* for custom sched domain */
>>       int relax_domain_level;
>> +
>> +    /* Used to merge intersecting subsets for generate_sched_domains */
>> +    struct uf_node node;
>>   #endif
>>   };
>>
>> @@ -315,8 +315,6 @@ void cpuset1_hotplug_update_tasks(struct cpuset *cs,
>>   int cpuset1_validate_change(struct cpuset *cur, struct cpuset *trial);
>>   void cpuset1_init(struct cpuset *cs);
>>   void cpuset1_online_css(struct cgroup_subsys_state *css);
>> -void update_domain_attr_tree(struct sched_domain_attr *dattr,
>> -                    struct cpuset *root_cs);
>>   int cpuset1_generate_sched_domains(cpumask_var_t **domains,
>>               struct sched_domain_attr **attributes);
>>
>> @@ -331,8 +329,6 @@ static inline int cpuset1_validate_change(struct cpuset *cur,
>>                   struct cpuset *trial) { return 0; }
>>   static inline void cpuset1_init(struct cpuset *cs) {}
>>   static inline void cpuset1_online_css(struct cgroup_subsys_state *css) {}
>> -static inline void update_domain_attr_tree(struct sched_domain_attr *dattr,
>> -                    struct cpuset *root_cs) {}
>>   static inline int cpuset1_generate_sched_domains(cpumask_var_t **domains,
>>               struct sched_domain_attr **attributes) { return 0; };
>>
>> diff --git a/kernel/cgroup/cpuset-v1.c b/kernel/cgroup/cpuset-v1.c
>> index 5c0bded46a7c..0226350e704f 100644
>> --- a/kernel/cgroup/cpuset-v1.c
>> +++ b/kernel/cgroup/cpuset-v1.c
>> @@ -560,7 +560,7 @@ update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
>>           dattr->relax_domain_level = c->relax_domain_level;
>>   }
>>
>> -void update_domain_attr_tree(struct sched_domain_attr *dattr,
>> +static void update_domain_attr_tree(struct sched_domain_attr *dattr,
>>                       struct cpuset *root_cs)
>>   {
>>       struct cpuset *cp;
>> diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
>> index 6bb0b201c34b..3e3468d928f3 100644
>> --- a/kernel/cgroup/cpuset.c
>> +++ b/kernel/cgroup/cpuset.c
>> @@ -789,18 +789,13 @@ static int generate_sched_domains(cpumask_var_t **domains,
>>   {
>>       struct cpuset *cp;    /* top-down scan of cpusets */
>>       struct cpuset **csa;    /* array of all cpuset ptrs */
>> -    int csn;        /* how many cpuset ptrs in csa so far */
>>       int i, j;        /* indices for partition finding loops */
>>       cpumask_var_t *doms;    /* resulting partition; i.e. sched domains */
>>       struct sched_domain_attr *dattr;  /* attributes for custom domains */
>>       int ndoms = 0;        /* number of sched domains in result */
>> -    int nslot;        /* next empty doms[] struct cpumask slot */
>>       struct cgroup_subsys_state *pos_css;
>> -    bool root_load_balance = is_sched_load_balance(&top_cpuset);
>> -    bool cgrpv2 = cpuset_v2();
>> -    int nslot_update;
>>
>> -    if (!cgrpv2)
>> +    if (!cpuset_v2())
>>           return cpuset1_generate_sched_domains(domains, attributes);
>>
>>       doms = NULL;
>> @@ -808,70 +803,25 @@ static int generate_sched_domains(cpumask_var_t **domains,
>>       csa = NULL;
>>
>>       /* Special case for the 99% of systems with one, full, sched domain */
>> -    if (root_load_balance && cpumask_empty(subpartitions_cpus)) {
>> -single_root_domain:
>> +    if (cpumask_empty(subpartitions_cpus)) {
>>           ndoms = 1;
>> -        doms = alloc_sched_domains(ndoms);
>> -        if (!doms)
>> -            goto done;
>> -
>> -        dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL);
>> -        if (dattr) {
>> -            *dattr = SD_ATTR_INIT;
>> -            update_domain_attr_tree(dattr, &top_cpuset);
>> -        }
>> -        cpumask_and(doms[0], top_cpuset.effective_cpus,
>> -                housekeeping_cpumask(HK_TYPE_DOMAIN));
>> -
>> -        goto done;
>> +        goto generate_doms;
> 
> That is not correct. The code under the generate_doms label will need to access csa[0] which is not
> allocated yet and may cause panic. You either need to keep the current code or move it after the csa
> allocation and assign top_cpuset to csa[0].
> 

Thank you, Longman.

Sorry, I should have noted that I made a small change: I added a !csa check. If csa is not
allocated, ndoms must be 1 and only top_cpuset is needed, so csa is indeed not required. I think
it is cleaner to avoid allocating csa when there is no valid partition.

```
+	for (i = 0; i < ndoms; i++) {
+		/*
+		 * The top cpuset may contain some boot time isolated
+		 * CPUs that need to be excluded from the sched domain.
+		 */
+		if (!csa || csa[i] == &top_cpuset)
+			cpumask_and(doms[i], top_cpuset.effective_cpus,
+				    housekeeping_cpumask(HK_TYPE_DOMAIN));
+		else
+			cpumask_copy(doms[i], csa[i]->effective_cpus);
+		if (dattr)
+			dattr[i] = SD_ATTR_INIT;
 	}
```

I tested the single-domain generation path; no panic or warning was observed.
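
To make the resulting control flow concrete, here is a rough sketch of how the fast path and the
generate_doms label would fit together with that check in place. The parts marked /* ... */ are
elided and only illustrative; they may differ from the actual patch:

```
static int generate_sched_domains(cpumask_var_t **domains,
				  struct sched_domain_attr **attributes)
{
	/* ... declarations; doms, dattr and csa all start out NULL ... */

	/* Special case for the 99% of systems with one, full, sched domain */
	if (cpumask_empty(subpartitions_cpus)) {
		ndoms = 1;
		goto generate_doms;	/* csa is still NULL here */
	}

	csa = kmalloc_array(nr_cpusets(), sizeof(cp), GFP_KERNEL);
	if (!csa)
		goto done;
	/* ... walk the hierarchy, cache partition roots in csa[], compute ndoms ... */

generate_doms:
	doms = alloc_sched_domains(ndoms);
	if (!doms)
		goto done;
	dattr = kmalloc_array(ndoms, sizeof(struct sched_domain_attr), GFP_KERNEL);

	for (i = 0; i < ndoms; i++) {
		/* !csa implies ndoms == 1 and the sole domain is top_cpuset */
		if (!csa || csa[i] == &top_cpuset)
			cpumask_and(doms[i], top_cpuset.effective_cpus,
				    housekeeping_cpumask(HK_TYPE_DOMAIN));
		else
			cpumask_copy(doms[i], csa[i]->effective_cpus);
		if (dattr)	/* a failed dattr allocation just drops the attrs */
			dattr[i] = SD_ATTR_INIT;
	}
	/* ... */
}
```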

>>       }
>>
>>       csa = kmalloc_array(nr_cpusets(), sizeof(cp), GFP_KERNEL);
>>       if (!csa)
>>           goto done;
>> -    csn = 0;
>>
>> +    /* Find how many partitions and cache them to csa[] */
>>       rcu_read_lock();
>> -    if (root_load_balance)
>> -        csa[csn++] = &top_cpuset;
>>       cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) {
> 
> The cpuset_for_each_descendant_pre() macro will visit the root (top_cpuset) first and so it should
> be OK to remove the above 2 lines of code.
> 
> Cheers,
> Longman
> 
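
Indeed. cpuset_for_each_descendant_pre() wraps css_for_each_descendant_pre(), whose pre-order
walk visits the @root it is given as the very first node, so top_cpuset is already covered by
the loop itself. A minimal sketch of the visit order (not the exact loop body):

```
rcu_read_lock();
cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) {
	/* first iteration: cp == &top_cpuset, since pre-order includes @root */
	/* ... cache partition roots in csa[] ... */
}
rcu_read_unlock();
```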

-- 
Best regards,
Ridong

