Message-ID: <jhj5z83mlvu.mognet@arm.com>
Date: Thu, 24 Sep 2020 12:53:25 +0100
From: Valentin Schneider <valentin.schneider@....com>
To: Peter Zijlstra <peterz@...radead.org>
Cc: tglx@...utronix.de, mingo@...nel.org, linux-kernel@...r.kernel.org,
bigeasy@...utronix.de, qais.yousef@....com, swood@...hat.com,
juri.lelli@...hat.com, vincent.guittot@...aro.org,
dietmar.eggemann@....com, rostedt@...dmis.org, bsegall@...gle.com,
mgorman@...e.de, bristot@...hat.com, vincent.donnefort@....com
Subject: Re: [PATCH 7/9] sched: Add migrate_disable()

Still trying to digest 8/9, but have some comments before I next get
preempted :)

On 21/09/20 17:36, Peter Zijlstra wrote:
[...]
> +void migrate_enable(void)
> +{
> +        if (--current->migration_disabled)
> +                return;
> +
> +        barrier();
> +
> +        if (p->cpus_ptr == &p->cpus_mask)
> +                return;

If we get here, it means we're in the migrate_enable() invocation that
marks the end of the migration-disabled region. How can cpus_ptr already
point back to current's cpus_mask at this point?

Also, this is fixed in 8/9, but this hunk wants an s/p/current/ (there is
no 'p' declared in this function).
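
To spell out my reading of the counter (hypothetical nesting trace,
assuming migrate_disable() does the matching increment):

  migrate_disable();    // current->migration_disabled: 0 -> 1
  migrate_disable();    // 1 -> 2 (nested)
  migrate_enable();     // 2 -> 1, eaten by the decrement above
  migrate_enable();     // 1 -> 0, only this one gets past it

IOW only the outermost migrate_enable() makes it down here, and at that
point I'd expect cpus_ptr to still point away from &p->cpus_mask for the
whole region.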
> +
> +        __set_cpus_allowed_ptr(p, &p->cpus_mask, SCA_MIGRATE_ENABLE);
> +}
[...]
> @@ -1830,8 +1892,19 @@ static int migration_cpu_stop(void *data
>   */
>  void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask, u32 flags)
>  {
> -        cpumask_copy(&p->cpus_mask, new_mask);
> -        p->nr_cpus_allowed = cpumask_weight(new_mask);
> +        if (flags & SCA_MIGRATE_DISABLE) {
> +                p->cpus_ptr = new_mask;
> +                p->nr_cpus_allowed = 1;
> +                return;
> +        }
> +
> +        if (flags & SCA_MIGRATE_ENABLE)
> +                p->cpus_ptr = &p->cpus_mask;

There's that check in __set_cpus_allowed_ptr() that new_mask *must* be
&p->cpus_mask when SCA_MIGRATE_ENABLE is set; that means we could mayhaps
shuffle that to:

---
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 7cb13df48366..e0e4e42c5e32 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1892,18 +1892,14 @@ static int migration_cpu_stop(void *data)
  */
 void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask, u32 flags)
 {
-        if (flags & SCA_MIGRATE_DISABLE) {
+        if (flags & (SCA_MIGRATE_DISABLE | SCA_MIGRATE_ENABLE))
                 p->cpus_ptr = new_mask;
-                p->nr_cpus_allowed = 1;
-                return;
-        }
-
-        if (flags & SCA_MIGRATE_ENABLE)
-                p->cpus_ptr = &p->cpus_mask;
         else
                 cpumask_copy(&p->cpus_mask, new_mask);

-        if (p->cpus_ptr == &p->cpus_mask)
+        if (flags & SCA_MIGRATE_DISABLE)
+                p->nr_cpus_allowed = 1;
+        else if (p->cpus_ptr == &p->cpus_mask)
                 p->nr_cpus_allowed = cpumask_weight(p->cpus_ptr);
 }
---
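
Which, if I apply that right, boils down to (sketch, untested):

  void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask, u32 flags)
  {
          /* SCA_MIGRATE_ENABLE callers hand us new_mask == &p->cpus_mask */
          if (flags & (SCA_MIGRATE_DISABLE | SCA_MIGRATE_ENABLE))
                  p->cpus_ptr = new_mask;
          else
                  cpumask_copy(&p->cpus_mask, new_mask);

          /* Migration-disabled tasks pretend to have a single allowed CPU */
          if (flags & SCA_MIGRATE_DISABLE)
                  p->nr_cpus_allowed = 1;
          else if (p->cpus_ptr == &p->cpus_mask)
                  p->nr_cpus_allowed = cpumask_weight(p->cpus_ptr);
  }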
> +        else
> +                cpumask_copy(&p->cpus_mask, new_mask);
> +
> +        if (p->cpus_ptr == &p->cpus_mask)
> +                p->nr_cpus_allowed = cpumask_weight(p->cpus_ptr);
>  }
>
>  static void
[...]
> @@ -1891,9 +1979,14 @@ static int __set_cpus_allowed_ptr(struct
>          rq = task_rq_lock(p, &rf);
>          update_rq_clock(rq);
>
> -        if (p->flags & PF_KTHREAD) {
> +        if (p->flags & PF_KTHREAD || is_migration_disabled(p)) {
>                  /*
> -                 * Kernel threads are allowed on online && !active CPUs
> +                 * Kernel threads are allowed on online && !active CPUs.
> +                 *
> +                 * Specifically, migration_disabled() tasks must not fail the
> +                 * cpumask_and_and_distribute() pick below, esp. so on

s/and_and/any_and/

> +                 * SCA_MIGRATE_ENABLE, otherwise we'll not call
> +                 * set_cpus_allowed_common() and actually reset p->cpus_ptr.
>                  */
>                  cpu_valid_mask = cpu_online_mask;
>          }
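
For reference, the pick that comment refers to, AFAICT (paraphrasing the
surrounding __set_cpus_allowed_ptr() code, not part of this hunk):

  const struct cpumask *cpu_valid_mask = cpu_active_mask;
  ...
  if (p->flags & PF_KTHREAD || is_migration_disabled(p))
          cpu_valid_mask = cpu_online_mask;
  ...
  dest_cpu = cpumask_any_and_distribute(cpu_valid_mask, new_mask);
  if (dest_cpu >= nr_cpu_ids) {
          ret = -EINVAL;
          goto out; /* i.e. we'd bail before do_set_cpus_allowed() */
  }

i.e. if the pick fails we bail with -EINVAL before do_set_cpus_allowed(),
which on SCA_MIGRATE_ENABLE would mean never resetting p->cpus_ptr; hence
the wider cpu_online_mask for migration-disabled tasks.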