Message-ID: <af440ae9-e6ee-4cc8-a2d6-2178d9c80a50@redhat.com>
Date: Fri, 26 Dec 2025 16:37:31 -0500
From: Waiman Long <llong@...hat.com>
To: Frederic Weisbecker <frederic@...nel.org>,
LKML <linux-kernel@...r.kernel.org>
Cc: Michal Koutný <mkoutny@...e.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Bjorn Helgaas <bhelgaas@...gle.com>,
Catalin Marinas <catalin.marinas@....com>,
Chen Ridong <chenridong@...wei.com>, Danilo Krummrich <dakr@...nel.org>,
"David S . Miller" <davem@...emloft.net>, Eric Dumazet
<edumazet@...gle.com>, Gabriele Monaco <gmonaco@...hat.com>,
Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
Ingo Molnar <mingo@...hat.com>, Jakub Kicinski <kuba@...nel.org>,
Jens Axboe <axboe@...nel.dk>, Johannes Weiner <hannes@...xchg.org>,
Lai Jiangshan <jiangshanlai@...il.com>,
Marco Crivellari <marco.crivellari@...e.com>,
Michal Hocko <mhocko@...e.com>, Muchun Song <muchun.song@...ux.dev>,
Paolo Abeni <pabeni@...hat.com>, Peter Zijlstra <peterz@...radead.org>,
Phil Auld <pauld@...hat.com>, "Rafael J . Wysocki" <rafael@...nel.org>,
Roman Gushchin <roman.gushchin@...ux.dev>,
Shakeel Butt <shakeel.butt@...ux.dev>, Simon Horman <horms@...nel.org>,
Tejun Heo <tj@...nel.org>, Thomas Gleixner <tglx@...utronix.de>,
Vlastimil Babka <vbabka@...e.cz>, Will Deacon <will@...nel.org>,
cgroups@...r.kernel.org, linux-arm-kernel@...ts.infradead.org,
linux-block@...r.kernel.org, linux-mm@...ck.org, linux-pci@...r.kernel.org,
netdev@...r.kernel.org
Subject: Re: [PATCH 24/33] kthread: Refine naming of affinity related fields
On 12/24/25 8:45 AM, Frederic Weisbecker wrote:
> The kthreads' preferred-affinity fields use "hotplug" as the base of
> their naming because affinity management was initially intended only
> to deal with CPU hotplug.
>
> The scope of this role is now broadening to also cover cpuset
> isolated partition updates.
>
> Switch the naming accordingly.
>
> Signed-off-by: Frederic Weisbecker <frederic@...nel.org>
> ---
> kernel/kthread.c | 38 +++++++++++++++++++-------------------
> 1 file changed, 19 insertions(+), 19 deletions(-)
>
> diff --git a/kernel/kthread.c b/kernel/kthread.c
> index 99a3808d086f..f1e4f1f35cae 100644
> --- a/kernel/kthread.c
> +++ b/kernel/kthread.c
> @@ -35,8 +35,8 @@ static DEFINE_SPINLOCK(kthread_create_lock);
> static LIST_HEAD(kthread_create_list);
> struct task_struct *kthreadd_task;
>
> -static LIST_HEAD(kthreads_hotplug);
> -static DEFINE_MUTEX(kthreads_hotplug_lock);
> +static LIST_HEAD(kthread_affinity_list);
> +static DEFINE_MUTEX(kthread_affinity_lock);
>
> struct kthread_create_info
> {
> @@ -69,7 +69,7 @@ struct kthread {
> /* To store the full name if task comm is truncated. */
> char *full_name;
> struct task_struct *task;
> - struct list_head hotplug_node;
> + struct list_head affinity_node;
> struct cpumask *preferred_affinity;
> };
>
> @@ -128,7 +128,7 @@ bool set_kthread_struct(struct task_struct *p)
>
> init_completion(&kthread->exited);
> init_completion(&kthread->parked);
> - INIT_LIST_HEAD(&kthread->hotplug_node);
> + INIT_LIST_HEAD(&kthread->affinity_node);
> p->vfork_done = &kthread->exited;
>
> kthread->task = p;
> @@ -323,10 +323,10 @@ void __noreturn kthread_exit(long result)
> {
> struct kthread *kthread = to_kthread(current);
> kthread->result = result;
> - if (!list_empty(&kthread->hotplug_node)) {
> - mutex_lock(&kthreads_hotplug_lock);
> - list_del(&kthread->hotplug_node);
> - mutex_unlock(&kthreads_hotplug_lock);
> + if (!list_empty(&kthread->affinity_node)) {
> + mutex_lock(&kthread_affinity_lock);
> + list_del(&kthread->affinity_node);
> + mutex_unlock(&kthread_affinity_lock);
>
> if (kthread->preferred_affinity) {
> kfree(kthread->preferred_affinity);
> @@ -390,9 +390,9 @@ static void kthread_affine_node(void)
> return;
> }
>
> - mutex_lock(&kthreads_hotplug_lock);
> - WARN_ON_ONCE(!list_empty(&kthread->hotplug_node));
> - list_add_tail(&kthread->hotplug_node, &kthreads_hotplug);
> + mutex_lock(&kthread_affinity_lock);
> + WARN_ON_ONCE(!list_empty(&kthread->affinity_node));
> + list_add_tail(&kthread->affinity_node, &kthread_affinity_list);
> /*
> * The node cpumask is racy when read from kthread() but:
> * - a racing CPU going down will either fail on the subsequent
> @@ -402,7 +402,7 @@ static void kthread_affine_node(void)
> */
> kthread_fetch_affinity(kthread, affinity);
> set_cpus_allowed_ptr(current, affinity);
> - mutex_unlock(&kthreads_hotplug_lock);
> + mutex_unlock(&kthread_affinity_lock);
>
> free_cpumask_var(affinity);
> }
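
(Aside for readers following along: pulled out of the hunks above, the
registration pattern that kthread_affine_node() ends up with after the
rename reads roughly like the sketch below. The helper name
kthread_affinity_register() is made up for illustration; the lock, the
list and the callees are the renamed/existing ones from the patch, with
cpumask allocation and error handling elided.)

	/* Illustrative sketch only, not part of the patch. */
	static void kthread_affinity_register(struct kthread *kthread,
					      struct cpumask *affinity)
	{
		mutex_lock(&kthread_affinity_lock);
		/* A kthread must not be registered twice. */
		WARN_ON_ONCE(!list_empty(&kthread->affinity_node));
		list_add_tail(&kthread->affinity_node, &kthread_affinity_list);
		/*
		 * Compute and apply the effective mask under the lock so
		 * a concurrent hotplug or cpuset update cannot observe a
		 * half-registered kthread.
		 */
		kthread_fetch_affinity(kthread, affinity);
		set_cpus_allowed_ptr(current, affinity);
		mutex_unlock(&kthread_affinity_lock);
	}
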
> @@ -873,16 +873,16 @@ int kthread_affine_preferred(struct task_struct *p, const struct cpumask *mask)
> goto out;
> }
>
> - mutex_lock(&kthreads_hotplug_lock);
> + mutex_lock(&kthread_affinity_lock);
> cpumask_copy(kthread->preferred_affinity, mask);
> - WARN_ON_ONCE(!list_empty(&kthread->hotplug_node));
> - list_add_tail(&kthread->hotplug_node, &kthreads_hotplug);
> + WARN_ON_ONCE(!list_empty(&kthread->affinity_node));
> + list_add_tail(&kthread->affinity_node, &kthread_affinity_list);
> kthread_fetch_affinity(kthread, affinity);
>
> scoped_guard (raw_spinlock_irqsave, &p->pi_lock)
> set_cpus_allowed_force(p, affinity);
>
> - mutex_unlock(&kthreads_hotplug_lock);
> + mutex_unlock(&kthread_affinity_lock);
> out:
> free_cpumask_var(affinity);
>
> @@ -903,9 +903,9 @@ static int kthreads_online_cpu(unsigned int cpu)
> struct kthread *k;
> int ret;
>
> - guard(mutex)(&kthreads_hotplug_lock);
> + guard(mutex)(&kthread_affinity_lock);
>
> - if (list_empty(&kthreads_hotplug))
> + if (list_empty(&kthread_affinity_list))
> return 0;
>
> if (!zalloc_cpumask_var(&affinity, GFP_KERNEL))
> @@ -913,7 +913,7 @@ static int kthreads_online_cpu(unsigned int cpu)
>
> ret = 0;
>
> - list_for_each_entry(k, &kthreads_hotplug, hotplug_node) {
> + list_for_each_entry(k, &kthread_affinity_list, affinity_node) {
> if (WARN_ON_ONCE((k->task->flags & PF_NO_SETAFFINITY) ||
> kthread_is_per_cpu(k->task))) {
> ret = -EINVAL;
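
As a usage note (illustrative, not from this series): a typical consumer
creates a kthread, sets its preferred mask before waking it, and then
relies on the kthread_affinity_list walk in kthreads_online_cpu() to
re-apply the affinity across hotplug. Roughly, assuming a hypothetical
my_thread_fn() and NUMA-node-0 placement:

	/*
	 * Sketch: my_thread_fn and the node-0 mask are examples only;
	 * kthread_affine_preferred() must be called before the kthread
	 * is first woken up.
	 */
	struct task_struct *t;

	t = kthread_create(my_thread_fn, NULL, "my-worker");
	if (!IS_ERR(t)) {
		if (kthread_affine_preferred(t, cpumask_of_node(0)))
			pr_warn("my-worker: using default affinity\n");
		wake_up_process(t);
	}
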
Acked-by: Waiman Long <longman@...hat.com>