Message-ID: <e4bde91c-fc82-4c40-8f6c-7fc044ddf79b@huaweicloud.com>
Date: Mon, 3 Nov 2025 11:59:07 +0800
From: Chen Ridong <chenridong@...weicloud.com>
To: Waiman Long <longman@...hat.com>, Tejun Heo <tj@...nel.org>,
 Johannes Weiner <hannes@...xchg.org>, Michal Koutný
 <mkoutny@...e.com>
Cc: cgroups@...r.kernel.org, linux-kernel@...r.kernel.org,
 Chen Ridong <chenridong@...wei.com>, Gabriele Monaco <gmonaco@...hat.com>,
 Frederic Weisbecker <frederic@...nel.org>
Subject: Re: [cgroup/for-6.19 PATCH 2/3] cgroup/cpuset: Fail if isolated and
 nohz_full don't leave any housekeeping



On 2025/11/3 11:47, Chen Ridong wrote:
> 
> 
> On 2025/11/3 9:34, Waiman Long wrote:
>> From: Gabriele Monaco <gmonaco@...hat.com>
>>
>> Currently the user can set up isolated cpus via cpuset and nohz_full in
>> such a way that leaves no housekeeping CPU (i.e. no CPU that is neither
>> domain isolated nor nohz full). This can be a problem for other
>> subsystems (e.g. the timer wheel migration). This can be a problem for other
>>
>> Prevent this configuration by blocking any assignment that would cause
>> the union of domain-isolated CPUs and nohz_full to cover all CPUs.
>>
>> Acked-by: Frederic Weisbecker <frederic@...nel.org>
>> Reviewed-by: Waiman Long <longman@...hat.com>
>> Signed-off-by: Gabriele Monaco <gmonaco@...hat.com>
>> Signed-off-by: Waiman Long <longman@...hat.com>
>> ---
>>  kernel/cgroup/cpuset.c | 67 +++++++++++++++++++++++++++++++++++++++++-
>>  1 file changed, 66 insertions(+), 1 deletion(-)
>>
>> diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
>> index da770dac955e..d6d459c95d82 100644
>> --- a/kernel/cgroup/cpuset.c
>> +++ b/kernel/cgroup/cpuset.c
>> @@ -1329,6 +1329,19 @@ static void isolated_cpus_update(int old_prs, int new_prs, struct cpumask *xcpus
>>  		cpumask_andnot(isolated_cpus, isolated_cpus, xcpus);
>>  }
>>  
>> +/*
>> + * isolated_cpus_should_update - Returns if the isolated_cpus mask needs update
>> + * @prs: new or old partition_root_state
>> + * @parent: parent cpuset
>> + * Return: true if isolated_cpus needs modification, false otherwise
>> + */
>> +static bool isolated_cpus_should_update(int prs, struct cpuset *parent)
>> +{
>> +	if (!parent)
>> +		parent = &top_cpuset;
>> +	return prs != parent->partition_root_state;
>> +}
>> +
>>  /*
>>   * partition_xcpus_add - Add new exclusive CPUs to partition
>>   * @new_prs: new partition_root_state
>> @@ -1393,6 +1406,42 @@ static bool partition_xcpus_del(int old_prs, struct cpuset *parent,
>>  	return isolcpus_updated;
>>  }
>>  
>> +/*
>> + * isolated_cpus_can_update - check for isolated & nohz_full conflicts
>> + * @add_cpus: cpu mask for cpus that are going to be isolated
>> + * @del_cpus: cpu mask for cpus that are no longer isolated, can be NULL
>> + * Return: false if there is conflict, true otherwise
>> + *
>> + * If nohz_full is enabled and we have isolated CPUs, their combination must
>> + * still leave housekeeping CPUs.
>> + */
>> +static bool isolated_cpus_can_update(struct cpumask *add_cpus,
>> +				     struct cpumask *del_cpus)
>> +{
>> +	cpumask_var_t full_hk_cpus;
>> +	int res = true;
>> +
>> +	if (!housekeeping_enabled(HK_TYPE_KERNEL_NOISE))
>> +		return true;
>> +
>> +	if (del_cpus && cpumask_weight_and(del_cpus,
>> +			housekeeping_cpumask(HK_TYPE_KERNEL_NOISE)))
>> +		return true;
>> +
>> +	if (!alloc_cpumask_var(&full_hk_cpus, GFP_KERNEL))
>> +		return false;
>> +
>> +	cpumask_and(full_hk_cpus, housekeeping_cpumask(HK_TYPE_KERNEL_NOISE),
>> +		    housekeeping_cpumask(HK_TYPE_DOMAIN));
>> +	cpumask_andnot(full_hk_cpus, full_hk_cpus, isolated_cpus);
>> +	cpumask_and(full_hk_cpus, full_hk_cpus, cpu_active_mask);
>> +	if (!cpumask_weight_andnot(full_hk_cpus, add_cpus))
>> +		res = false;
>> +
>> +	free_cpumask_var(full_hk_cpus);
>> +	return res;
>> +}
>> +
>>  static void update_isolation_cpumasks(bool isolcpus_updated)
>>  {
>>  	int ret;
>> @@ -1551,6 +1600,9 @@ static int remote_partition_enable(struct cpuset *cs, int new_prs,
>>  	if (!cpumask_intersects(tmp->new_cpus, cpu_active_mask) ||
>>  	    cpumask_subset(top_cpuset.effective_cpus, tmp->new_cpus))
>>  		return PERR_INVCPUS;
>> +	if (isolated_cpus_should_update(new_prs, NULL) &&
>> +	    !isolated_cpus_can_update(tmp->new_cpus, NULL))
>> +		return PERR_HKEEPING;
>>  
>>  	spin_lock_irq(&callback_lock);
>>  	isolcpus_updated = partition_xcpus_add(new_prs, NULL, tmp->new_cpus);
>> @@ -1650,6 +1702,9 @@ static void remote_cpus_update(struct cpuset *cs, struct cpumask *xcpus,
>>  		else if (cpumask_intersects(tmp->addmask, subpartitions_cpus) ||
>>  			 cpumask_subset(top_cpuset.effective_cpus, tmp->addmask))
>>  			cs->prs_err = PERR_NOCPUS;
>> +		else if (isolated_cpus_should_update(prs, NULL) &&
>> +			 !isolated_cpus_can_update(tmp->addmask, tmp->delmask))
>> +			cs->prs_err = PERR_HKEEPING;
>>  		if (cs->prs_err)
>>  			goto invalidate;
>>  	}
>> @@ -1988,6 +2043,12 @@ static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
>>  			return err;
>>  	}
>>  
>> +	if (deleting && isolated_cpus_should_update(new_prs, parent) &&
>> +	    !isolated_cpus_can_update(tmp->delmask, tmp->addmask)) {
>> +		cs->prs_err = PERR_HKEEPING;
>> +		return PERR_HKEEPING;
>> +	}
>> +
>>  	/*
>>  	 * Change the parent's effective_cpus & effective_xcpus (top cpuset
>>  	 * only).
>> @@ -2994,7 +3055,11 @@ static int update_prstate(struct cpuset *cs, int new_prs)
>>  		 * A change in load balance state only, no change in cpumasks.
>>  		 * Need to update isolated_cpus.
>>  		 */
>> -		isolcpus_updated = true;
>> +		if ((new_prs == PRS_ISOLATED) &&
>> +		    !isolated_cpus_can_update(cs->effective_xcpus, NULL))
>> +			err = PERR_HKEEPING;
>> +		else
>> +			isolcpus_updated = true;
>>  	} else {
>>  		/*
>>  		 * Switching back to member is always allowed even if it
> 
> I'm considering whether I should introduce a new function that consolidates
> isolated_cpus_should_update, isolated_cpus_can_update, and prstate_housekeeping_conflict.
> 

Sorry, I meant: we should introduce a new ...

> Just like:
> 
> bool housekeeping_conflict(...)
> {
> 	if (isolated_cpus_should_update(...) && !isolated_cpus_can_update(...)) {
> 		return true;
> 	}
> 	return prstate_housekeeping_conflict(...);
> }
> 
> Since all of these are related to isolated CPUs, putting them into a centralized function would make
> the code much easier to maintain.
> 
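
A rough sketch of what such a consolidated helper could look like; the name
isolated_cpus_hk_conflict, its parameter list, and the exact way it reuses
prstate_housekeeping_conflict() are only placeholders for discussion, not the
actual patch:

/*
 * Sketch only: returns a PERR_* code on a housekeeping conflict, 0 otherwise.
 * Parameters and the prstate_housekeeping_conflict() call are assumptions.
 */
static int isolated_cpus_hk_conflict(int prs, struct cpuset *parent,
				     struct cpumask *add_cpus,
				     struct cpumask *del_cpus)
{
	/* Only worth checking when isolated_cpus would actually change. */
	if (isolated_cpus_should_update(prs, parent) &&
	    !isolated_cpus_can_update(add_cpus, del_cpus))
		return PERR_HKEEPING;

	/* Keep the existing HK_TYPE_DOMAIN check for the CPUs being added. */
	return prstate_housekeeping_conflict(prs, add_cpus);
}

Callers such as remote_partition_enable(), remote_cpus_update() and
update_parent_effective_cpumask() could then set cs->prs_err from this single
return value instead of open-coding the two checks.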

-- 
Best regards,
Ridong

