Message-ID: <20251013203146.10162-31-frederic@kernel.org>
Date: Mon, 13 Oct 2025 22:31:43 +0200
From: Frederic Weisbecker <frederic@...nel.org>
To: LKML <linux-kernel@...r.kernel.org>
Cc: Frederic Weisbecker <frederic@...nel.org>,
Michal Koutný <mkoutny@...e.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Bjorn Helgaas <bhelgaas@...gle.com>,
Catalin Marinas <catalin.marinas@....com>,
Danilo Krummrich <dakr@...nel.org>,
"David S . Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>,
Gabriele Monaco <gmonaco@...hat.com>,
Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
Ingo Molnar <mingo@...hat.com>,
Jakub Kicinski <kuba@...nel.org>,
Jens Axboe <axboe@...nel.dk>,
Johannes Weiner <hannes@...xchg.org>,
Lai Jiangshan <jiangshanlai@...il.com>,
Marco Crivellari <marco.crivellari@...e.com>,
Michal Hocko <mhocko@...e.com>,
Muchun Song <muchun.song@...ux.dev>,
Paolo Abeni <pabeni@...hat.com>,
Peter Zijlstra <peterz@...radead.org>,
Phil Auld <pauld@...hat.com>,
"Rafael J . Wysocki" <rafael@...nel.org>,
Roman Gushchin <roman.gushchin@...ux.dev>,
Shakeel Butt <shakeel.butt@...ux.dev>,
Simon Horman <horms@...nel.org>,
Tejun Heo <tj@...nel.org>,
Thomas Gleixner <tglx@...utronix.de>,
Vlastimil Babka <vbabka@...e.cz>,
Waiman Long <longman@...hat.com>,
Will Deacon <will@...nel.org>,
cgroups@...r.kernel.org,
linux-arm-kernel@...ts.infradead.org,
linux-block@...r.kernel.org,
linux-mm@...ck.org,
linux-pci@...r.kernel.org,
netdev@...r.kernel.org
Subject: [PATCH 30/33] kthread: Add API to update preferred affinity on kthread runtime
Kthreads can request a preferred affinity upon creation, but they have
no means to update that preferred affinity after their first wake-up:
kthread_affine_preferred() is optimized on the assumption that the
kthread is still sleeping while the allowed cpumask is applied.

Therefore introduce a new API to update the preferred affinity at
runtime. It will be used by IRQ kthreads.
Signed-off-by: Frederic Weisbecker <frederic@...nel.org>
---
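For reviewers, a minimal usage sketch of the new API (not part of this
patch). my_irq_thread_fn(), start_and_retarget() and the cpumask
parameters are hypothetical placeholders; kthread_create(),
kthread_affine_preferred(), wake_up_process() and kthread_stop() are
existing interfaces, and kthread_affine_preferred_update() is the one
introduced here:

#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/printk.h>
#include <linux/sched.h>

/* Hypothetical thread function, for illustration only: idle loop. */
static int my_irq_thread_fn(void *data)
{
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

static int start_and_retarget(const struct cpumask *initial,
			      const struct cpumask *updated)
{
	struct task_struct *t;
	int err;

	t = kthread_create(my_irq_thread_fn, NULL, "my-irq-thread");
	if (IS_ERR(t))
		return PTR_ERR(t);

	/* The preferred affinity must be set before the first wake-up. */
	err = kthread_affine_preferred(t, initial);
	if (err) {
		kthread_stop(t);
		return err;
	}

	wake_up_process(t);

	/* With this patch, the preference can now be changed at runtime. */
	err = kthread_affine_preferred_update(t, updated);
	if (err)
		pr_warn("failed to update preferred affinity: %d\n", err);

	return err;
}
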
include/linux/kthread.h | 1 +
kernel/kthread.c | 55 +++++++++++++++++++++++++++++++++++------
2 files changed, 48 insertions(+), 8 deletions(-)
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index c92c1149ee6e..a06cae7f2c55 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -86,6 +86,7 @@ void free_kthread_struct(struct task_struct *k);
void kthread_bind(struct task_struct *k, unsigned int cpu);
void kthread_bind_mask(struct task_struct *k, const struct cpumask *mask);
int kthread_affine_preferred(struct task_struct *p, const struct cpumask *mask);
+int kthread_affine_preferred_update(struct task_struct *p, const struct cpumask *mask);
int kthread_stop(struct task_struct *k);
int kthread_stop_put(struct task_struct *k);
bool kthread_should_stop(void);
diff --git a/kernel/kthread.c b/kernel/kthread.c
index d36bdfbd004e..f3397cf7542a 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -322,17 +322,16 @@ EXPORT_SYMBOL_GPL(kthread_parkme);
void __noreturn kthread_exit(long result)
{
struct kthread *kthread = to_kthread(current);
+ struct cpumask *to_free = NULL;
kthread->result = result;
- if (!list_empty(&kthread->affinity_node)) {
- mutex_lock(&kthread_affinity_lock);
- list_del(&kthread->affinity_node);
- mutex_unlock(&kthread_affinity_lock);
- if (kthread->preferred_affinity) {
- kfree(kthread->preferred_affinity);
- kthread->preferred_affinity = NULL;
- }
+ scoped_guard(mutex, &kthread_affinity_lock) {
+ if (!list_empty(&kthread->affinity_node))
+ list_del_init(&kthread->affinity_node);
+ to_free = kthread->preferred_affinity;
+ kthread->preferred_affinity = NULL;
}
+ kfree(to_free);
do_exit(0);
}
EXPORT_SYMBOL(kthread_exit);
@@ -900,6 +899,46 @@ int kthread_affine_preferred(struct task_struct *p, const struct cpumask *mask)
}
EXPORT_SYMBOL_GPL(kthread_affine_preferred);
+/**
+ * kthread_affine_preferred_update - update a kthread's preferred affinity
+ * @p: thread created by kthread_create().
+ * @mask: new mask of CPUs (might not be online, must be possible) for @p
+ *        to run on.
+ *
+ * Update the preferred affinity of @p that was previously set by
+ * kthread_affine_preferred(). This can be called either before or after
+ * the first wakeup of the kthread.
+ *
+ * Returns 0 if the affinity has been applied.
+ */
+int kthread_affine_preferred_update(struct task_struct *p,
+ const struct cpumask *mask)
+{
+ struct kthread *kthread = to_kthread(p);
+ cpumask_var_t affinity;
+ int ret = 0;
+
+ if (!zalloc_cpumask_var(&affinity, GFP_KERNEL))
+ return -ENOMEM;
+
+ scoped_guard(mutex, &kthread_affinity_lock) {
+ if (WARN_ON_ONCE(!kthread->preferred_affinity ||
+ list_empty(&kthread->affinity_node))) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ cpumask_copy(kthread->preferred_affinity, mask);
+ kthread_fetch_affinity(kthread, affinity);
+ set_cpus_allowed_ptr(p, affinity);
+ }
+out:
+ free_cpumask_var(affinity);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(kthread_affine_preferred_update);
+
static int kthreads_update_affinity(bool force)
{
cpumask_var_t affinity;
--
2.51.0