Message-ID: <aRS82LMOQZ-u92NZ@localhost.localdomain>
Date: Wed, 12 Nov 2025 17:59:04 +0100
From: Frederic Weisbecker <frederic@...nel.org>
To: Gabriele Monaco <gmonaco@...hat.com>
Cc: linux-kernel@...r.kernel.org,
Anna-Maria Behnsen <anna-maria@...utronix.de>,
Thomas Gleixner <tglx@...utronix.de>,
Waiman Long <llong@...hat.com>,
"John B . Wyatt IV" <jwyatt@...hat.com>,
"John B . Wyatt IV" <sageofredondo@...il.com>
Subject: Re: [PATCH v14 7/7] timers: Exclude isolated cpus from timer
migration
On Wed, Nov 12, 2025 at 05:18:26PM +0100, Gabriele Monaco wrote:
>
>
> On Wed, 2025-11-12 at 17:04 +0100, Frederic Weisbecker wrote:
> > On Tue, Nov 04, 2025 at 11:47:39AM +0100, Gabriele Monaco wrote:
> >
> >
> > This duplicates a lot of tmigr_isolated_exclude_cpumask().
> > Would this work?
> >
> > static int __init tmigr_init_isolation(void)
> > {
> > 	cpumask_var_t cpumask __free(free_cpumask_var) = CPUMASK_VAR_NULL;
> >
> > 	static_branch_enable(&tmigr_exclude_isolated);
> >
> > 	if (!housekeeping_enabled(HK_TYPE_DOMAIN))
> > 		return 0;
> > 	if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
> > 		return -ENOMEM;
> >
> > 	cpumask_andnot(cpumask, cpu_possible_mask,
> > 		       housekeeping_cpumask(HK_TYPE_DOMAIN));
> >
> > 	return tmigr_isolated_exclude_cpumask(cpumask);
> > }
> >
> > If so please add my Reviewed-by to the next version.
>
> Good point, it should work; I'm going to test it tomorrow.
>
> What comes to mind is that tmigr_isolated_exclude_cpumask() expects the hotplug
> lock to be held, so I should probably just take it before calling it:
>
> static int __init tmigr_init_isolation(void)
> {
> 	cpumask_var_t cpumask __free(free_cpumask_var) = CPUMASK_VAR_NULL;
>
> 	static_branch_enable(&tmigr_exclude_isolated);
>
> 	if (!housekeeping_enabled(HK_TYPE_DOMAIN))
> 		return 0;
> 	if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
> 		return -ENOMEM;
>
> 	cpumask_andnot(cpumask, cpu_possible_mask,
> 		       housekeeping_cpumask(HK_TYPE_DOMAIN));
> +	guard(cpus_read_lock)();
> 	return tmigr_isolated_exclude_cpumask(cpumask);
> }
Well, it doesn't make much sense to lock hotplug at this stage.
How about:
diff --git a/kernel/time/timer_migration.c b/kernel/time/timer_migration.c
index e2f9a4637d7b..6ca870f1b951 100644
--- a/kernel/time/timer_migration.c
+++ b/kernel/time/timer_migration.c
@@ -1539,17 +1539,7 @@ static void tmigr_cpu_unisolate(struct work_struct *ignored)
 	tmigr_set_cpu_available(smp_processor_id());
 }
 
-/**
- * tmigr_isolated_exclude_cpumask - Exclude given CPUs from hierarchy
- * @exclude_cpumask: the cpumask to be excluded from timer migration hierarchy
- *
- * This function can be called from cpuset code to provide the new set of
- * isolated CPUs that should be excluded from the hierarchy.
- * Online CPUs not present in exclude_cpumask but already excluded are brought
- * back to the hierarchy.
- * Functions to isolate/unisolate need to be called locally and can sleep.
- */
-int tmigr_isolated_exclude_cpumask(struct cpumask *exclude_cpumask)
+static int __tmigr_isolated_exclude_cpumask(struct cpumask *exclude_cpumask)
 {
 	struct work_struct __percpu *works __free(free_percpu) =
 		alloc_percpu(struct work_struct);
@@ -1557,8 +1547,6 @@ int tmigr_isolated_exclude_cpumask(struct cpumask *exclude_cpumask)
 	cpumask_var_t cpumask_isol __free(free_cpumask_var) = CPUMASK_VAR_NULL;
 	int cpu;
 
-	lockdep_assert_cpus_held();
-
 	if (!alloc_cpumask_var(&cpumask_isol, GFP_KERNEL))
 		return -ENOMEM;
 	if (!alloc_cpumask_var(&cpumask_unisol, GFP_KERNEL))
@@ -1604,40 +1592,36 @@ int tmigr_isolated_exclude_cpumask(struct cpumask *exclude_cpumask)
 	return 0;
 }
 
+/**
+ * tmigr_isolated_exclude_cpumask - Exclude given CPUs from hierarchy
+ * @exclude_cpumask: the cpumask to be excluded from timer migration hierarchy
+ *
+ * This function can be called from cpuset code to provide the new set of
+ * isolated CPUs that should be excluded from the hierarchy.
+ * Online CPUs not present in exclude_cpumask but already excluded are brought
+ * back to the hierarchy.
+ * Functions to isolate/unisolate need to be called locally and can sleep.
+ */
+int tmigr_isolated_exclude_cpumask(struct cpumask *exclude_cpumask)
+{
+	lockdep_assert_cpus_held();
+	return __tmigr_isolated_exclude_cpumask(exclude_cpumask);
+}
+
 static int __init tmigr_init_isolation(void)
 {
-	struct work_struct __percpu *works __free(free_percpu) =
-		alloc_percpu(struct work_struct);
 	cpumask_var_t cpumask __free(free_cpumask_var) = CPUMASK_VAR_NULL;
-	int cpu;
 
 	static_branch_enable(&tmigr_exclude_isolated);
+
 	if (!housekeeping_enabled(HK_TYPE_DOMAIN))
 		return 0;
 	if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
 		return -ENOMEM;
 
-	if (!works)
-		return -ENOMEM;
-	cpumask_andnot(cpumask, tmigr_available_cpumask,
-		       housekeeping_cpumask(HK_TYPE_DOMAIN));
-	cpumask_and(cpumask, cpumask, housekeeping_cpumask(HK_TYPE_KERNEL_NOISE));
-	/* Never disable the tick CPU, see tmigr_is_isolated for details */
-	for_each_cpu(cpu, cpumask) {
-		if (!tick_nohz_cpu_hotpluggable(cpu)) {
-			cpumask_clear_cpu(cpu, cpumask);
-			break;
-		}
-	}
-	for_each_cpu(cpu, cpumask) {
-		struct work_struct *work = per_cpu_ptr(works, cpu);
-		INIT_WORK(work, tmigr_cpu_isolate);
-		schedule_work_on(cpu, work);
-	}
-	for_each_cpu(cpu, cpumask)
-		flush_work(per_cpu_ptr(works, cpu));
+	cpumask_andnot(cpumask, cpu_possible_mask, housekeeping_cpumask(HK_TYPE_DOMAIN));
 
-	return 0;
+	return __tmigr_isolated_exclude_cpumask(cpumask);
 }
 
 static void tmigr_init_group(struct tmigr_group *group, unsigned int lvl,
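
For illustration only, not part of the patch above: with this split, an external caller such as the cpuset code would still go through the public wrapper with the CPU hotplug lock held, e.g. via the scope-based guard mentioned earlier in the thread, while early init calls the internal helper directly. A minimal, hypothetical caller sketch in kernel context (the function name below is made up):

	/* Hypothetical caller sketch, kernel context assumed; not taken from the patch. */
	static int example_exclude_isolated(struct cpumask *isolated)
	{
		/* Hold the CPU hotplug read lock for the rest of this scope... */
		guard(cpus_read_lock)();
		/* ...so the lockdep_assert_cpus_held() in the wrapper is satisfied. */
		return tmigr_isolated_exclude_cpumask(isolated);
	}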