[<prev] [next>] [<thread-prev] [day] [month] [year] [list]
Message-ID: <b0d222e0-3380-4014-8d9a-57e8be8b082c@redhat.com>
Date: Wed, 19 Nov 2025 15:51:18 -0500
From: Waiman Long <llong@...hat.com>
To: Pingfan Liu <piliu@...hat.com>, cgroups@...r.kernel.org
Cc: Chen Ridong <chenridong@...weicloud.com>,
Peter Zijlstra <peterz@...radead.org>, Juri Lelli <juri.lelli@...hat.com>,
Pierre Gondois <pierre.gondois@....com>, Ingo Molnar <mingo@...hat.com>,
Vincent Guittot <vincent.guittot@...aro.org>,
Dietmar Eggemann <dietmar.eggemann@....com>,
Steven Rostedt <rostedt@...dmis.org>, Ben Segall <bsegall@...gle.com>,
Mel Gorman <mgorman@...e.de>, Valentin Schneider <vschneid@...hat.com>,
Tejun Heo <tj@...nel.org>, Johannes Weiner <hannes@...xchg.org>,
mkoutny@...e.com, linux-kernel@...r.kernel.org
Subject: Re: [PATCHv7 1/2] cgroup/cpuset: Introduce
cpuset_cpus_allowed_locked()
On 11/19/25 4:55 AM, Pingfan Liu wrote:
> cpuset_cpus_allowed() uses a reader lock that is sleepable under RT,
> which means it cannot be called inside raw_spin_lock_t context.
>
> Introduce a new cpuset_cpus_allowed_locked() helper that performs the
> same function as cpuset_cpus_allowed() except that the caller must have
> acquired the cpuset_mutex so that no further locking will be needed.
>
> Suggested-by: Waiman Long <longman@...hat.com>
> Signed-off-by: Pingfan Liu <piliu@...hat.com>
> Cc: Waiman Long <longman@...hat.com>
> Cc: Tejun Heo <tj@...nel.org>
> Cc: Johannes Weiner <hannes@...xchg.org>
> Cc: "Michal Koutný" <mkoutny@...e.com>
> Cc: linux-kernel@...r.kernel.org
> To: cgroups@...r.kernel.org
> ---
> include/linux/cpuset.h | 9 +++++++-
> kernel/cgroup/cpuset.c | 51 +++++++++++++++++++++++++++++-------------
> 2 files changed, 44 insertions(+), 16 deletions(-)
>
> diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
> index 2ddb256187b51..a98d3330385c2 100644
> --- a/include/linux/cpuset.h
> +++ b/include/linux/cpuset.h
> @@ -74,6 +74,7 @@ extern void inc_dl_tasks_cs(struct task_struct *task);
> extern void dec_dl_tasks_cs(struct task_struct *task);
> extern void cpuset_lock(void);
> extern void cpuset_unlock(void);
> +extern void cpuset_cpus_allowed_locked(struct task_struct *p, struct cpumask *mask);
> extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
> extern bool cpuset_cpus_allowed_fallback(struct task_struct *p);
> extern bool cpuset_cpu_is_isolated(int cpu);
> @@ -195,10 +196,16 @@ static inline void dec_dl_tasks_cs(struct task_struct *task) { }
> static inline void cpuset_lock(void) { }
> static inline void cpuset_unlock(void) { }
>
> +static inline void cpuset_cpus_allowed_locked(struct task_struct *p,
> + struct cpumask *mask)
> +{
> + cpumask_copy(mask, task_cpu_possible_mask(p));
> +}
> +
> static inline void cpuset_cpus_allowed(struct task_struct *p,
> struct cpumask *mask)
> {
> - cpumask_copy(mask, task_cpu_possible_mask(p));
> + cpuset_cpus_allowed_locked(p, mask);
> }
>
> static inline bool cpuset_cpus_allowed_fallback(struct task_struct *p)
> diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
> index 52468d2c178a3..7a179a1a2e30a 100644
> --- a/kernel/cgroup/cpuset.c
> +++ b/kernel/cgroup/cpuset.c
> @@ -4116,24 +4116,13 @@ void __init cpuset_init_smp(void)
> BUG_ON(!cpuset_migrate_mm_wq);
> }
>
> -/**
> - * cpuset_cpus_allowed - return cpus_allowed mask from a tasks cpuset.
> - * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
> - * @pmask: pointer to struct cpumask variable to receive cpus_allowed set.
> - *
> - * Description: Returns the cpumask_var_t cpus_allowed of the cpuset
> - * attached to the specified @tsk. Guaranteed to return some non-empty
> - * subset of cpu_active_mask, even if this means going outside the
> - * tasks cpuset, except when the task is in the top cpuset.
> - **/
> -
> -void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
> +/*
> + * Return cpus_allowed mask from a task's cpuset.
> + */
> +static void __cpuset_cpus_allowed_locked(struct task_struct *tsk, struct cpumask *pmask)
> {
> - unsigned long flags;
> struct cpuset *cs;
>
> - spin_lock_irqsave(&callback_lock, flags);
> -
> cs = task_cs(tsk);
> if (cs != &top_cpuset)
> guarantee_active_cpus(tsk, pmask);
> @@ -4153,7 +4142,39 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
> if (!cpumask_intersects(pmask, cpu_active_mask))
> cpumask_copy(pmask, possible_mask);
> }
> +}
>
> +/**
> + * cpuset_cpus_allowed_locked - return cpus_allowed mask from a task's cpuset.
> + * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
> + * @pmask: pointer to struct cpumask variable to receive cpus_allowed set.
> + *
> + * Similar to cpuset_cpus_allowed() except that the caller must have acquired
> + * cpuset_mutex.
> + */
> +void cpuset_cpus_allowed_locked(struct task_struct *tsk, struct cpumask *pmask)
> +{
> + lockdep_assert_held(&cpuset_mutex);
> + __cpuset_cpus_allowed_locked(tsk, pmask);
> +}
> +
> +/**
> + * cpuset_cpus_allowed - return cpus_allowed mask from a task's cpuset.
> + * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
> + * @pmask: pointer to struct cpumask variable to receive cpus_allowed set.
> + *
> + * Description: Returns the cpumask_var_t cpus_allowed of the cpuset
> + * attached to the specified @tsk. Guaranteed to return some non-empty
> + * subset of cpu_active_mask, even if this means going outside the
> + * tasks cpuset, except when the task is in the top cpuset.
> + **/
> +
> +void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
> +{
> + unsigned long flags;
> +
> + spin_lock_irqsave(&callback_lock, flags);
> + __cpuset_cpus_allowed_locked(tsk, pmask);
> spin_unlock_irqrestore(&callback_lock, flags);
> }
>
Reviewed-by: Waiman Long <longman@...hat.com>
Powered by blists - more mailing lists