[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20250829154814.47015-28-frederic@kernel.org>
Date: Fri, 29 Aug 2025 17:48:08 +0200
From: Frederic Weisbecker <frederic@...nel.org>
To: LKML <linux-kernel@...r.kernel.org>
Cc: Frederic Weisbecker <frederic@...nel.org>,
Catalin Marinas <catalin.marinas@....com>,
Marco Crivellari <marco.crivellari@...e.com>,
Michal Hocko <mhocko@...e.com>,
Peter Zijlstra <peterz@...radead.org>,
Tejun Heo <tj@...nel.org>,
Thomas Gleixner <tglx@...utronix.de>,
Waiman Long <longman@...hat.com>,
Will Deacon <will@...nel.org>,
linux-arm-kernel@...ts.infradead.org
Subject: [PATCH 27/33] sched/arm64: Move fallback task cpumask to HK_TYPE_DOMAIN
When none of the allowed CPUs of a task are online, it gets migrated
to the fallback cpumask which is all the non nohz_full CPUs.
However just like nohz_full CPUs, domain isolated CPUs don't want to be
disturbed by tasks that have lost their CPU affinities.
And since nohz_full relies on domain isolation to work correctly, the
housekeeping mask of domain isolated CPUs is always a subset of the
housekeeping mask of nohz_full CPUs (there can be CPUs that are domain
isolated but not nohz_full, OTOH there can't be nohz_full CPUs that are
not domain isolated):
HK_TYPE_DOMAIN & HK_TYPE_KERNEL_NOISE == HK_TYPE_DOMAIN
Therefore use HK_TYPE_DOMAIN as the appropriate fallback target for
tasks and since this cpumask can be modified at runtime, make sure
that 32-bit capable CPUs on mismatched ARM64 systems are not isolated
by cpusets.
CC: linux-arm-kernel@...ts.infradead.org
Signed-off-by: Frederic Weisbecker <frederic@...nel.org>
---
arch/arm64/kernel/cpufeature.c | 18 ++++++++++++---
include/linux/cpu.h | 4 ++++
kernel/cgroup/cpuset.c | 40 +++++++++++++++++++++++-----------
3 files changed, 46 insertions(+), 16 deletions(-)
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 9ad065f15f1d..38046489d2ea 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -1653,6 +1653,18 @@ has_cpuid_feature(const struct arm64_cpu_capabilities *entry, int scope)
return feature_matches(val, entry);
}
+/*
+ * 32-bit capable CPUs can't be isolated because tasks may be
+ * arbitrarily affine to them, defeating the purpose of isolation.
+ */
+bool arch_isolated_cpus_can_update(struct cpumask *new_cpus)
+{
+ if (static_branch_unlikely(&arm64_mismatched_32bit_el0))
+ return !cpumask_intersects(cpu_32bit_el0_mask, new_cpus);
+ else
+ return true;
+}
+
const struct cpumask *system_32bit_el0_cpumask(void)
{
if (!system_supports_32bit_el0())
@@ -1666,7 +1678,7 @@ const struct cpumask *system_32bit_el0_cpumask(void)
const struct cpumask *task_cpu_fallback_mask(struct task_struct *p)
{
- return __task_cpu_possible_mask(p, housekeeping_cpumask(HK_TYPE_TICK));
+ return __task_cpu_possible_mask(p, housekeeping_cpumask(HK_TYPE_DOMAIN));
}
static int __init parse_32bit_el0_param(char *str)
@@ -3963,8 +3975,8 @@ static int enable_mismatched_32bit_el0(unsigned int cpu)
bool cpu_32bit = false;
if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
- if (!housekeeping_cpu(cpu, HK_TYPE_TICK))
- pr_info("Treating adaptive-ticks CPU %u as 64-bit only\n", cpu);
+ if (!housekeeping_cpu(cpu, HK_TYPE_DOMAIN))
+ pr_info("Treating domain isolated CPU %u as 64-bit only\n", cpu);
else
cpu_32bit = true;
}
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index b91b993f58ee..8bb239080534 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -228,4 +228,8 @@ static inline bool cpu_attack_vector_mitigated(enum cpu_attack_vectors v)
#define smt_mitigations SMT_MITIGATIONS_OFF
#endif
+struct cpumask;
+
+bool arch_isolated_cpus_can_update(struct cpumask *new_cpus);
+
#endif /* _LINUX_CPU_H_ */
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 8260dd699fd8..cf99ea844c1d 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -1352,33 +1352,47 @@ static bool partition_xcpus_del(int old_prs, struct cpuset *parent,
return isolcpus_updated;
}
+bool __weak arch_isolated_cpus_can_update(struct cpumask *new_cpus)
+{
+ return true;
+}
+
/*
- * isolcpus_nohz_conflict - check for isolated & nohz_full conflicts
+ * isolated_cpus_can_update - check for conflicts against housekeeping and
+ * CPU capabilities.
* @new_cpus: cpu mask for cpus that are going to be isolated
- * Return: true if there is conflict, false otherwise
+ * Return: true if there is no conflict, false otherwise
*
- * If nohz_full is enabled and we have isolated CPUs, their combination must
- * still leave housekeeping CPUs.
+ * Check for conflicts:
+ * - If nohz_full is enabled and there are isolated CPUs, their combination must
+ * still leave housekeeping CPUs.
+ * - Architecture has CPU capabilities incompatible with being isolated
*/
-static bool isolcpus_nohz_conflict(struct cpumask *new_cpus)
+static bool isolated_cpus_can_update(struct cpumask *new_cpus)
{
cpumask_var_t full_hk_cpus;
- int res = false;
+ bool res;
+
+ if (!arch_isolated_cpus_can_update(new_cpus))
+ return false;
if (!housekeeping_enabled(HK_TYPE_KERNEL_NOISE))
- return false;
+ return true;
if (!alloc_cpumask_var(&full_hk_cpus, GFP_KERNEL))
- return true;
+ return false;
+
+ res = true;
cpumask_and(full_hk_cpus, housekeeping_cpumask(HK_TYPE_KERNEL_NOISE),
housekeeping_cpumask(HK_TYPE_DOMAIN));
cpumask_andnot(full_hk_cpus, full_hk_cpus, isolated_cpus);
cpumask_and(full_hk_cpus, full_hk_cpus, cpu_online_mask);
if (!cpumask_weight_andnot(full_hk_cpus, new_cpus))
- res = true;
+ res = false;
free_cpumask_var(full_hk_cpus);
+
return res;
}
@@ -1497,7 +1511,7 @@ static int remote_partition_enable(struct cpuset *cs, int new_prs,
cpumask_subset(top_cpuset.effective_cpus, tmp->new_cpus))
return PERR_INVCPUS;
if (isolated_cpus_should_update(new_prs, NULL) &&
- isolcpus_nohz_conflict(tmp->new_cpus))
+ !isolated_cpus_can_update(tmp->new_cpus))
return PERR_HKEEPING;
spin_lock_irq(&callback_lock);
@@ -1599,7 +1613,7 @@ static void remote_cpus_update(struct cpuset *cs, struct cpumask *xcpus,
cpumask_subset(top_cpuset.effective_cpus, tmp->addmask))
cs->prs_err = PERR_NOCPUS;
else if (isolated_cpus_should_update(prs, NULL) &&
- isolcpus_nohz_conflict(tmp->addmask))
+ !isolated_cpus_can_update(tmp->addmask))
cs->prs_err = PERR_HKEEPING;
if (cs->prs_err)
goto invalidate;
@@ -1954,7 +1968,7 @@ static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
}
if (deleting && isolated_cpus_should_update(new_prs, parent) &&
- isolcpus_nohz_conflict(tmp->delmask)) {
+ !isolated_cpus_can_update(tmp->delmask)) {
cs->prs_err = PERR_HKEEPING;
return PERR_HKEEPING;
}
@@ -2979,7 +2993,7 @@ static int update_prstate(struct cpuset *cs, int new_prs)
* Need to update isolated_cpus.
*/
isolcpus_updated = true;
- if (isolcpus_nohz_conflict(cs->effective_xcpus))
+ if (!isolated_cpus_can_update(cs->effective_xcpus))
err = PERR_HKEEPING;
} else {
/*
--
2.51.0
Powered by blists - more mailing lists