[<prev] [next>] [<thread-prev] [day] [month] [year] [list]
Message-ID: <81f06470-ad5b-2b92-86d6-dc2ca5d21d53@redhat.com>
Date: Thu, 18 Aug 2022 08:31:24 -0700
From: Waiman Long <longman@...hat.com>
To: Peter Zijlstra <peterz@...radead.org>
Cc: Ingo Molnar <mingo@...hat.com>, Juri Lelli <juri.lelli@...hat.com>,
Vincent Guittot <vincent.guittot@...aro.org>,
Dietmar Eggemann <dietmar.eggemann@....com>,
Steven Rostedt <rostedt@...dmis.org>,
Ben Segall <bsegall@...gle.com>, Mel Gorman <mgorman@...e.de>,
Daniel Bristot de Oliveira <bristot@...hat.com>,
Valentin Schneider <vschneid@...hat.com>,
Tejun Heo <tj@...nel.org>, Zefan Li <lizefan.x@...edance.com>,
Johannes Weiner <hannes@...xchg.org>,
Will Deacon <will@...nel.org>, cgroups@...r.kernel.org,
linux-kernel@...r.kernel.org,
Linus Torvalds <torvalds@...ux-foundation.org>
Subject: Re: [PATCH v5 1/3] sched: Use user_cpus_ptr for saving user provided
cpumask in sched_setaffinity()
On 8/17/22 04:41, Peter Zijlstra wrote:
> On Tue, Aug 16, 2022 at 03:27:32PM -0400, Waiman Long wrote:
>> @@ -2981,25 +2969,21 @@ static int restrict_cpus_allowed_ptr(struct task_struct *p,
>> goto err_unlock;
>> }
>>
>> - if (!cpumask_and(new_mask, &p->cpus_mask, subset_mask)) {
>> +
>> + if (p->user_cpus_ptr)
>> + not_empty = cpumask_and(new_mask, p->user_cpus_ptr, subset_mask);
>> + else
>> + not_empty = cpumask_and(new_mask, cpu_online_mask, subset_mask);
>> +
>> + if (!not_empty) {
>> err = -EINVAL;
>> goto err_unlock;
>> }
>>
>> - /*
>> - * We're about to butcher the task affinity, so keep track of what
>> - * the user asked for in case we're able to restore it later on.
>> - */
>> - if (user_mask) {
>> - cpumask_copy(user_mask, p->cpus_ptr);
>> - p->user_cpus_ptr = user_mask;
>> - }
>> -
>> return __set_cpus_allowed_ptr_locked(p, new_mask, 0, rq, &rf);
>>
>> err_unlock:
>> task_rq_unlock(rq, p, &rf);
>> - kfree(user_mask);
>> return err;
>> }
>>
>> @@ -3049,34 +3033,27 @@ void force_compatible_cpus_allowed_ptr(struct task_struct *p)
>> }
>>
>> static int
>> -__sched_setaffinity(struct task_struct *p, const struct cpumask *mask);
>> +__sched_setaffinity(struct task_struct *p, const struct cpumask *mask, bool save_mask);
>>
>> /*
>> * Restore the affinity of a task @p which was previously restricted by a
>> - * call to force_compatible_cpus_allowed_ptr(). This will clear (and free)
>> - * @p->user_cpus_ptr.
>> + * call to force_compatible_cpus_allowed_ptr().
>> *
>> * It is the caller's responsibility to serialise this with any calls to
>> * force_compatible_cpus_allowed_ptr(@p).
>> */
>> void relax_compatible_cpus_allowed_ptr(struct task_struct *p)
>> {
>> - struct cpumask *user_mask = p->user_cpus_ptr;
>> - unsigned long flags;
>> + const struct cpumask *user_mask = p->user_cpus_ptr;
>> +
>> + if (!user_mask)
>> + user_mask = cpu_online_mask;
>>
>> /*
>> - * Try to restore the old affinity mask. If this fails, then
>> - * we free the mask explicitly to avoid it being inherited across
>> - * a subsequent fork().
>> + * Try to restore the old affinity mask with __sched_setaffinity().
>> + * Cpuset masking will be done there too.
>> */
>> - if (!user_mask || !__sched_setaffinity(p, user_mask))
>> - return;
>> -
>> - raw_spin_lock_irqsave(&p->pi_lock, flags);
>> - user_mask = clear_user_cpus_ptr(p);
>> - raw_spin_unlock_irqrestore(&p->pi_lock, flags);
>> -
>> - kfree(user_mask);
>> + __sched_setaffinity(p, user_mask, false);
>> }
>>
>> void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
>
> Would it not be simpler to write it something like so?
>
> ---
> diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> index 03053eebb22e..cdae4d50a588 100644
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -2955,7 +2955,6 @@ static int restrict_cpus_allowed_ptr(struct task_struct *p,
> struct rq_flags rf;
> struct rq *rq;
> int err;
> - bool not_empty;
>
> rq = task_rq_lock(p, &rf);
>
> @@ -2969,13 +2968,7 @@ static int restrict_cpus_allowed_ptr(struct task_struct *p,
> goto err_unlock;
> }
>
> -
> - if (p->user_cpus_ptr)
> - not_empty = cpumask_and(new_mask, p->user_cpus_ptr, subset_mask);
> - else
> - not_empty = cpumask_and(new_mask, cpu_online_mask, subset_mask);
> -
> - if (!not_empty) {
> + if (!cpumask_and(new_mask, task_user_cpus(p), subset_mask)) {
> err = -EINVAL;
> goto err_unlock;
> }
> @@ -3044,16 +3037,11 @@ __sched_setaffinity(struct task_struct *p, const struct cpumask *mask, bool save
> */
> void relax_compatible_cpus_allowed_ptr(struct task_struct *p)
> {
> - const struct cpumask *user_mask = p->user_cpus_ptr;
> -
> - if (!user_mask)
> - user_mask = cpu_online_mask;
> -
> /*
> * Try to restore the old affinity mask with __sched_setaffinity().
> * Cpuset masking will be done there too.
> */
> - __sched_setaffinity(p, user_mask, false);
> + __sched_setaffinity(p, task_user_cpus(p), false);
> }
>
> void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
> diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
> index 15eefcd65faa..426e9b64b587 100644
> --- a/kernel/sched/sched.h
> +++ b/kernel/sched/sched.h
> @@ -1881,6 +1881,13 @@ static inline void dirty_sched_domain_sysctl(int cpu)
> #endif
>
> extern int sched_update_scaling(void);
> +
> +static inline const struct cpumask *task_user_cpus(struct task_struct *p)
> +{
> + if (!p->user_cpus_ptr)
> + return cpu_possible_mask; /* &init_task.cpus_mask */
> + return p->user_cpus_ptr;
> +}
> #endif /* CONFIG_SMP */
>
> #include "stats.h"
>
Thanks for the good suggestions, will make the changes.
Cheers,
Longman
Powered by blists - more mailing lists