Message-ID: <a8968902-7fc3-4dec-9a53-e9685a5705b9@redhat.com>
Date: Fri, 26 Dec 2025 18:59:34 -0500
From: Waiman Long <llong@...hat.com>
To: Frederic Weisbecker <frederic@...nel.org>,
 LKML <linux-kernel@...r.kernel.org>
Cc: Michal Koutný <mkoutny@...e.com>,
 Andrew Morton <akpm@...ux-foundation.org>,
 Bjorn Helgaas <bhelgaas@...gle.com>,
 Catalin Marinas <catalin.marinas@....com>,
 Chen Ridong <chenridong@...wei.com>, Danilo Krummrich <dakr@...nel.org>,
 "David S . Miller" <davem@...emloft.net>, Eric Dumazet
 <edumazet@...gle.com>, Gabriele Monaco <gmonaco@...hat.com>,
 Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
 Ingo Molnar <mingo@...hat.com>, Jakub Kicinski <kuba@...nel.org>,
 Jens Axboe <axboe@...nel.dk>, Johannes Weiner <hannes@...xchg.org>,
 Lai Jiangshan <jiangshanlai@...il.com>,
 Marco Crivellari <marco.crivellari@...e.com>, Michal Hocko
 <mhocko@...e.com>, Muchun Song <muchun.song@...ux.dev>,
 Paolo Abeni <pabeni@...hat.com>, Peter Zijlstra <peterz@...radead.org>,
 Phil Auld <pauld@...hat.com>, "Rafael J . Wysocki" <rafael@...nel.org>,
 Roman Gushchin <roman.gushchin@...ux.dev>,
 Shakeel Butt <shakeel.butt@...ux.dev>, Simon Horman <horms@...nel.org>,
 Tejun Heo <tj@...nel.org>, Thomas Gleixner <tglx@...utronix.de>,
 Vlastimil Babka <vbabka@...e.cz>, Will Deacon <will@...nel.org>,
 cgroups@...r.kernel.org, linux-arm-kernel@...ts.infradead.org,
 linux-block@...r.kernel.org, linux-mm@...ck.org, linux-pci@...r.kernel.org,
 netdev@...r.kernel.org
Subject: Re: [PATCH 30/33] kthread: Honour kthreads preferred affinity after
 cpuset changes

On 12/24/25 8:45 AM, Frederic Weisbecker wrote:
> When cpuset isolated partitions get updated, unbound kthreads get
> indiscriminately affined to all non-isolated CPUs, regardless of their
> individual affinity preferences.
>
> For example, kswapd is a per-node kthread that prefers to be affine to
> the node it serves. Whenever an isolated partition is created, updated
> or deleted, kswapd's node affinity gets broken if any CPU in that node
> is not isolated, because kswapd then gets affined globally.
>
> Fix this by letting the consolidated kthread-managed affinity code do
> the affinity update on behalf of cpuset.
>
> Signed-off-by: Frederic Weisbecker <frederic@...nel.org>
> ---
>   include/linux/kthread.h  |  1 +
>   kernel/cgroup/cpuset.c   |  5 ++---
>   kernel/kthread.c         | 41 ++++++++++++++++++++++++++++++----------
>   kernel/sched/isolation.c |  3 +++
>   4 files changed, 37 insertions(+), 13 deletions(-)
>
> diff --git a/include/linux/kthread.h b/include/linux/kthread.h
> index 8d27403888ce..c92c1149ee6e 100644
> --- a/include/linux/kthread.h
> +++ b/include/linux/kthread.h
> @@ -100,6 +100,7 @@ void kthread_unpark(struct task_struct *k);
>   void kthread_parkme(void);
>   void kthread_exit(long result) __noreturn;
>   void kthread_complete_and_exit(struct completion *, long) __noreturn;
> +int kthreads_update_housekeeping(void);
>   
>   int kthreadd(void *unused);
>   extern struct task_struct *kthreadd_task;
> diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
> index 1cc83a3c25f6..c8cfaf5cd4a1 100644
> --- a/kernel/cgroup/cpuset.c
> +++ b/kernel/cgroup/cpuset.c
> @@ -1208,11 +1208,10 @@ void cpuset_update_tasks_cpumask(struct cpuset *cs, struct cpumask *new_cpus)
>   
>   		if (top_cs) {
>   			/*
> +			 * PF_KTHREAD tasks are handled by housekeeping.
>   			 * PF_NO_SETAFFINITY tasks are ignored.
> -			 * All per cpu kthreads should have PF_NO_SETAFFINITY
> -			 * flag set, see kthread_set_per_cpu().
>   			 */
> -			if (task->flags & PF_NO_SETAFFINITY)
> +			if (task->flags & (PF_KTHREAD | PF_NO_SETAFFINITY))
>   				continue;
>   			cpumask_andnot(new_cpus, possible_mask, subpartitions_cpus);
>   		} else {
> diff --git a/kernel/kthread.c b/kernel/kthread.c
> index 968fa5868d21..03008154249c 100644
> --- a/kernel/kthread.c
> +++ b/kernel/kthread.c
> @@ -891,14 +891,7 @@ int kthread_affine_preferred(struct task_struct *p, const struct cpumask *mask)
>   }
>   EXPORT_SYMBOL_GPL(kthread_affine_preferred);
>   
> -/*
> - * Re-affine kthreads according to their preferences
> - * and the newly online CPU. The CPU down part is handled
> - * by select_fallback_rq() which default re-affines to
> - * housekeepers from other nodes in case the preferred
> - * affinity doesn't apply anymore.
> - */
> -static int kthreads_online_cpu(unsigned int cpu)
> +static int kthreads_update_affinity(bool force)
>   {
>   	cpumask_var_t affinity;
>   	struct kthread *k;
> @@ -924,7 +917,8 @@ static int kthreads_online_cpu(unsigned int cpu)
>   		/*
>   		 * Unbound kthreads without preferred affinity are already affine
>   		 * to housekeeping, whether those CPUs are online or not. So no need
> -		 * to handle newly online CPUs for them.
> +		 * to handle newly online CPUs for them. However housekeeping changes
> +		 * have to be applied.
>   		 *
>   		 * But kthreads with a preferred affinity or node are different:
>   		 * if none of their preferred CPUs are online and part of
> @@ -932,7 +926,7 @@ static int kthreads_online_cpu(unsigned int cpu)
>   		 * But as soon as one of their preferred CPU becomes online, they must
>   		 * be affine to them.
>   		 */
> -		if (k->preferred_affinity || k->node != NUMA_NO_NODE) {
> +		if (force || k->preferred_affinity || k->node != NUMA_NO_NODE) {
>   			kthread_fetch_affinity(k, affinity);
>   			set_cpus_allowed_ptr(k->task, affinity);
>   		}
> @@ -943,6 +937,33 @@ static int kthreads_online_cpu(unsigned int cpu)
>   	return ret;
>   }
>   
> +/**
> + * kthreads_update_housekeeping - Update kthreads affinity on cpuset change
> + *
> + * When cpuset changes a partition type to/from "isolated" or updates related
> + * cpumasks, propagate the housekeeping cpumask change to preferred kthreads
> + * affinity.
> + *
> + * Returns 0 if successful, -ENOMEM if temporary mask couldn't
> + * be allocated or -EINVAL in case of internal error.
> + */
> +int kthreads_update_housekeeping(void)
> +{
> +	return kthreads_update_affinity(true);
> +}
> +
> +/*
> + * Re-affine kthreads according to their preferences
> + * and the newly online CPU. The CPU down part is handled
> + * by select_fallback_rq() which default re-affines to
> + * housekeepers from other nodes in case the preferred
> + * affinity doesn't apply anymore.
> + */
> +static int kthreads_online_cpu(unsigned int cpu)
> +{
> +	return kthreads_update_affinity(false);
> +}
> +
>   static int kthreads_init(void)
>   {
>   	return cpuhp_setup_state(CPUHP_AP_KTHREADS_ONLINE, "kthreads:online",
> diff --git a/kernel/sched/isolation.c b/kernel/sched/isolation.c
> index 84a257d05918..c499474866b8 100644
> --- a/kernel/sched/isolation.c
> +++ b/kernel/sched/isolation.c
> @@ -157,6 +157,9 @@ int housekeeping_update(struct cpumask *isol_mask, enum hk_type type)
>   	err = tmigr_isolated_exclude_cpumask(isol_mask);
>   	WARN_ON_ONCE(err < 0);
>   
> +	err = kthreads_update_housekeeping();
> +	WARN_ON_ONCE(err < 0);
> +
>   	kfree(old);
>   
>   	return err;
Reviewed-by: Waiman Long <longman@...hat.com>
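
For reference, a minimal sketch (thread and helper names are hypothetical,
not from this series) of how a per-node kthread would declare its preferred
affinity via kthread_affine_preferred(), which is the consolidated path that
this patch now also drives on cpuset isolated partition updates:

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/cpumask.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include <linux/topology.h>

static int my_node_worker(void *data)
{
	/* Hypothetical per-node worker loop. */
	while (!kthread_should_stop())
		schedule_timeout_interruptible(HZ);
	return 0;
}

static struct task_struct *start_node_worker(int nid)
{
	struct task_struct *p;

	p = kthread_create_on_node(my_node_worker, NULL, nid,
				   "my_worker/%d", nid);
	if (IS_ERR(p))
		return p;

	/*
	 * Declare the preferred mask before the thread starts running.
	 * kthread_fetch_affinity() intersects it with the housekeeping
	 * CPUs on CPU hotplug and, with this patch, on cpuset isolated
	 * partition changes as well.
	 */
	kthread_affine_preferred(p, cpumask_of_node(nid));

	wake_up_process(p);
	return p;
}

With this in place, housekeeping_update() -> kthreads_update_housekeeping()
re-intersects the preferred mask with the housekeeping CPUs instead of
blindly affining the kthread to all non-isolated CPUs.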