Message-ID: <6457dd87-95cb-4c4d-aaab-6c9b65414a75@redhat.com>
Date: Tue, 2 Sep 2025 11:44:00 -0400
From: Waiman Long <llong@...hat.com>
To: Frederic Weisbecker <frederic@...nel.org>,
LKML <linux-kernel@...r.kernel.org>
Cc: Gabriele Monaco <gmonaco@...hat.com>, Johannes Weiner
<hannes@...xchg.org>, Marco Crivellari <marco.crivellari@...e.com>,
Michal Hocko <mhocko@...e.com>, Michal Koutný
<mkoutny@...e.com>, Peter Zijlstra <peterz@...radead.org>,
Tejun Heo <tj@...nel.org>, Thomas Gleixner <tglx@...utronix.de>,
cgroups@...r.kernel.org
Subject: Re: [PATCH 26/33] cgroup/cpuset: Fail if isolated and nohz_full don't
leave any housekeeping
On 8/29/25 11:48 AM, Frederic Weisbecker wrote:
> From: Gabriele Monaco <gmonaco@...hat.com>
>
> Currently the user can set up isolated cpus via cpuset and nohz_full in
> such a way that leaves no housekeeping CPU (i.e. no CPU that is neither
> domain isolated nor nohz full). This can be a problem for other
> subsystems (e.g. the timer wheel migration).
>
> Prevent this configuration by blocking any assignment that would cause
> the union of domain isolated cpus and nohz_full to cover all CPUs.
>
> Acked-by: Frederic Weisbecker <frederic@...nel.org>
> Signed-off-by: Gabriele Monaco <gmonaco@...hat.com>
> Signed-off-by: Frederic Weisbecker <frederic@...nel.org>
> ---
> kernel/cgroup/cpuset.c | 57 ++++++++++++++++++++++++++++++++++++++++++
> 1 file changed, 57 insertions(+)
>
> diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
> index df1dfacf5f9d..8260dd699fd8 100644
> --- a/kernel/cgroup/cpuset.c
> +++ b/kernel/cgroup/cpuset.c
> @@ -1275,6 +1275,19 @@ static void isolated_cpus_update(int old_prs, int new_prs, struct cpumask *xcpus
> cpumask_andnot(isolated_cpus, isolated_cpus, xcpus);
> }
>
> +/*
> + * isolated_cpus_should_update - Returns if the isolated_cpus mask needs update
> + * @prs: new or old partition_root_state
> + * @parent: parent cpuset
> + * Return: true if isolated_cpus needs modification, false otherwise
> + */
> +static bool isolated_cpus_should_update(int prs, struct cpuset *parent)
> +{
> + if (!parent)
> + parent = &top_cpuset;
> + return prs != parent->partition_root_state;
> +}
> +
> /*
> * partition_xcpus_add - Add new exclusive CPUs to partition
> * @new_prs: new partition_root_state
> @@ -1339,6 +1352,36 @@ static bool partition_xcpus_del(int old_prs, struct cpuset *parent,
> return isolcpus_updated;
> }
>
> +/*
> + * isolcpus_nohz_conflict - check for isolated & nohz_full conflicts
> + * @new_cpus: cpu mask for cpus that are going to be isolated
> + * Return: true if there is conflict, false otherwise
> + *
> + * If nohz_full is enabled and we have isolated CPUs, their combination must
> + * still leave housekeeping CPUs.
> + */
> +static bool isolcpus_nohz_conflict(struct cpumask *new_cpus)
> +{
> + cpumask_var_t full_hk_cpus;
> + int res = false;
> +
> + if (!housekeeping_enabled(HK_TYPE_KERNEL_NOISE))
> + return false;
> +
> + if (!alloc_cpumask_var(&full_hk_cpus, GFP_KERNEL))
> + return true;
> +
> + cpumask_and(full_hk_cpus, housekeeping_cpumask(HK_TYPE_KERNEL_NOISE),
> + housekeeping_cpumask(HK_TYPE_DOMAIN));
> + cpumask_andnot(full_hk_cpus, full_hk_cpus, isolated_cpus);
> + cpumask_and(full_hk_cpus, full_hk_cpus, cpu_online_mask);
> + if (!cpumask_weight_andnot(full_hk_cpus, new_cpus))
> + res = true;
> +
> + free_cpumask_var(full_hk_cpus);
> + return res;
> +}
> +
> static void update_housekeeping_cpumask(bool isolcpus_updated)
> {
> int ret;
> @@ -1453,6 +1496,9 @@ static int remote_partition_enable(struct cpuset *cs, int new_prs,
> if (!cpumask_intersects(tmp->new_cpus, cpu_active_mask) ||
> cpumask_subset(top_cpuset.effective_cpus, tmp->new_cpus))
> return PERR_INVCPUS;
> + if (isolated_cpus_should_update(new_prs, NULL) &&
> + isolcpus_nohz_conflict(tmp->new_cpus))
> + return PERR_HKEEPING;
>
> spin_lock_irq(&callback_lock);
> isolcpus_updated = partition_xcpus_add(new_prs, NULL, tmp->new_cpus);
> @@ -1552,6 +1598,9 @@ static void remote_cpus_update(struct cpuset *cs, struct cpumask *xcpus,
> else if (cpumask_intersects(tmp->addmask, subpartitions_cpus) ||
> cpumask_subset(top_cpuset.effective_cpus, tmp->addmask))
> cs->prs_err = PERR_NOCPUS;
> + else if (isolated_cpus_should_update(prs, NULL) &&
> + isolcpus_nohz_conflict(tmp->addmask))
> + cs->prs_err = PERR_HKEEPING;
> if (cs->prs_err)
> goto invalidate;
> }
> @@ -1904,6 +1953,12 @@ static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
> return err;
> }
>
> + if (deleting && isolated_cpus_should_update(new_prs, parent) &&
> + isolcpus_nohz_conflict(tmp->delmask)) {
> + cs->prs_err = PERR_HKEEPING;
> + return PERR_HKEEPING;
> + }
> +
> /*
> * Change the parent's effective_cpus & effective_xcpus (top cpuset
> * only).
> @@ -2924,6 +2979,8 @@ static int update_prstate(struct cpuset *cs, int new_prs)
> * Need to update isolated_cpus.
> */
> isolcpus_updated = true;
> + if (isolcpus_nohz_conflict(cs->effective_xcpus))
> + err = PERR_HKEEPING;
> } else {
> /*
> * Switching back to member is always allowed even if it

In both remote_cpus_update() and update_parent_effective_cpumask(), some
new CPUs can be added to the isolation list while other CPUs can be
removed from it. So isolcpus_nohz_conflict() should include both sets in
its analysis to avoid false positives. Essentially, if the CPUs removed
from the isolated_cpus intersect with the nohz_full housekeeping mask,
there is no conflict.
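
Something along these lines, perhaps (a rough, untested sketch of the
idea only -- the two-mask signature and the callers would of course
have to be adjusted accordingly):

static bool isolcpus_nohz_conflict(struct cpumask *add_cpus,
				   struct cpumask *del_cpus)
{
	cpumask_var_t full_hk_cpus;
	int res = false;

	if (!housekeeping_enabled(HK_TYPE_KERNEL_NOISE))
		return false;

	/*
	 * CPUs leaving isolation that sit in the nohz_full housekeeping
	 * mask become full housekeeping CPUs again, so there can be no
	 * conflict in that case.
	 */
	if (del_cpus && cpumask_intersects(del_cpus,
			housekeeping_cpumask(HK_TYPE_KERNEL_NOISE)))
		return false;

	if (!alloc_cpumask_var(&full_hk_cpus, GFP_KERNEL))
		return true;

	cpumask_and(full_hk_cpus, housekeeping_cpumask(HK_TYPE_KERNEL_NOISE),
		    housekeeping_cpumask(HK_TYPE_DOMAIN));
	cpumask_andnot(full_hk_cpus, full_hk_cpus, isolated_cpus);
	cpumask_and(full_hk_cpus, full_hk_cpus, cpu_online_mask);

	/* The CPUs being isolated must still leave a housekeeping CPU. */
	if (add_cpus && !cpumask_weight_andnot(full_hk_cpus, add_cpus))
		res = true;

	free_cpumask_var(full_hk_cpus);
	return res;
}
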
Cheers,
Longman