Message-ID: <20180706105857.5tgi5irdxdryet64@linutronix.de>
Date: Fri, 6 Jul 2018 12:58:57 +0200
From: Sebastian Andrzej Siewior <bigeasy@...utronix.de>
To: Joe Korty <joe.korty@...current-rt.com>
Cc: Julia Cartwright <julia@...com>, tglx@...utronix.de,
rostedt@...dmis.org, linux-rt-users@...r.kernel.org,
linux-kernel@...r.kernel.org, Peter Zijlstra <peterz@...radead.org>
Subject: [PATCH RT v2] sched/migrate_disable: fallback to preempt_disable() instead of barrier()

On SMP + !RT, migrate_disable() is still around. It is no longer part of
spin_lock(), so it has almost no users. However, the futex code has a
workaround for the !in_atomic() part of migrate_disable() which fails
because the matching migrate_disable() is no longer part of spin_lock().
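
For illustration, a minimal sketch of the resulting failure mode (the lock
and call site are invented, not the actual futex code): one half of the
migrate_disable()/migrate_enable() pair was expected to come from the
spinlock operations and no longer does, so the per-task counter underflows
and trips the WARN_ON_ONCE(p->migrate_disable <= 0) check visible in the
debug path of migrate_enable() in the diff below:

	#include <linux/spinlock.h>
	#include <linux/preempt.h>

	/* Hypothetical names; only the imbalance mechanism matters. */
	static DEFINE_SPINLOCK(hypo_lock);

	static void hypo_broken_pairing(void)
	{
		spin_lock(&hypo_lock);	/* no longer implies migrate_disable() */
		/* ... */
		spin_unlock(&hypo_lock);
		migrate_enable();	/* nothing to pair with: counter underflows */
	}
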
On !SMP + !RT, migrate_disable() is reduced to barrier(). This is not
optimal because we have a few spots where a preempt_disable() statement
was replaced with migrate_disable().
We also used the migrate_disable counter to figure out whether a sleeping
lock is acquired, so that RCU does not complain about schedule() during
rcu_read_lock() while a sleeping lock is held. This has changed: we no
longer use it for that and instead have a sleeping_lock counter for the
RCU purpose.
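
Roughly, the replacement looks like this in the -rt tree of this period
(helper names as used there, not part of this diff; exact call sites may
differ by release): the sleeping-lock depth is counted explicitly in the
sleeping-spinlock paths, e.g. rt_spin_lock(), and RCU's
schedule()-in-read-side check treats a nonzero count as "sleeping lock
held" instead of consulting the migrate_disable counter:

	#include <linux/sched.h>

	void sleeping_lock_inc(void)
	{
		current->sleeping_lock++;
	}

	void sleeping_lock_dec(void)
	{
		current->sleeping_lock--;
	}
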
This means we can now:

- for SMP + RT_BASE
  keep the full migration-disable machinery; nothing changes here.

- for !SMP + RT_BASE
  the migration counting is no longer required. It used to ensure that the
  task is not migrated to another CPU and that this CPU remains online;
  !SMP already guarantees both. Move the counter under CONFIG_SCHED_DEBUG
  so it is maintained for debugging purposes only.

- for all other cases, including !RT
  fall back to preempt_disable(). The only remaining users of
  migrate_disable() are those which were converted from preempt_disable()
  and the futex workaround, which already sits in a preempt-disabled
  section due to the spin_lock that is held (see the sketch after this
  list).
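
A minimal sketch of why the fallback is safe in that last case (invented
lock name, not the actual futex code): on !RT, spin_lock() already
disables preemption, so the mapped preempt_disable() merely nests and the
counters stay balanced:

	#include <linux/spinlock.h>
	#include <linux/preempt.h>

	static DEFINE_SPINLOCK(hypo_lock);

	static void hypo_locked_section(void)
	{
		spin_lock(&hypo_lock);	/* !RT: disables preemption */
		migrate_disable();	/* now a nested preempt_disable() */
		/* ... */
		migrate_enable();	/* nested preempt_enable() */
		spin_unlock(&hypo_lock);/* preemption re-enabled here */
	}
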
Cc: stable-rt@...r.kernel.org
Reported-by: joe.korty@...current-rt.com
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@...utronix.de>
---
v1…v2: Limit migrate_disable() to RT only. Fall back to preempt_disable()
on !RT where migrate_disable() is used.
include/linux/preempt.h | 6 +++---
include/linux/sched.h | 4 ++--
kernel/sched/core.c | 23 +++++++++++------------
kernel/sched/debug.c | 2 +-
4 files changed, 17 insertions(+), 18 deletions(-)
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -204,7 +204,7 @@ do { \
#define preemptible() (preempt_count() == 0 && !irqs_disabled())
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
extern void migrate_disable(void);
extern void migrate_enable(void);
@@ -221,8 +221,8 @@ static inline int __migrate_disabled(str
}
#else
-#define migrate_disable() barrier()
-#define migrate_enable() barrier()
+#define migrate_disable() preempt_disable()
+#define migrate_enable() preempt_enable()
static inline int __migrate_disabled(struct task_struct *p)
{
return 0;
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -645,7 +645,7 @@ struct task_struct {
int nr_cpus_allowed;
const cpumask_t *cpus_ptr;
cpumask_t cpus_mask;
-#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
+#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
int migrate_disable;
int migrate_disable_update;
# ifdef CONFIG_SCHED_DEBUG
@@ -653,8 +653,8 @@ struct task_struct {
# endif
#elif !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
- int migrate_disable;
# ifdef CONFIG_SCHED_DEBUG
+ int migrate_disable;
int migrate_disable_atomic;
# endif
#endif
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1059,7 +1059,7 @@ void set_cpus_allowed_common(struct task
p->nr_cpus_allowed = cpumask_weight(new_mask);
}
-#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
+#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
int __migrate_disabled(struct task_struct *p)
{
return p->migrate_disable;
@@ -1098,7 +1098,7 @@ static void __do_set_cpus_allowed_tail(s
void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
-#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
+#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
if (__migrate_disabled(p)) {
lockdep_assert_held(&p->pi_lock);
@@ -1171,7 +1171,7 @@ static int __set_cpus_allowed_ptr(struct
if (cpumask_test_cpu(task_cpu(p), new_mask) || __migrate_disabled(p))
goto out;
-#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
+#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
if (__migrate_disabled(p)) {
p->migrate_disable_update = 1;
goto out;
@@ -7134,7 +7134,7 @@ const u32 sched_prio_to_wmult[40] = {
/* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
};
-#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
+#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
static inline void
update_nr_migratory(struct task_struct *p, long delta)
@@ -7282,45 +7282,44 @@ EXPORT_SYMBOL(migrate_enable);
#elif !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
void migrate_disable(void)
{
+#ifdef CONFIG_SCHED_DEBUG
struct task_struct *p = current;
if (in_atomic() || irqs_disabled()) {
-#ifdef CONFIG_SCHED_DEBUG
p->migrate_disable_atomic++;
-#endif
return;
}
-#ifdef CONFIG_SCHED_DEBUG
+
if (unlikely(p->migrate_disable_atomic)) {
tracing_off();
WARN_ON_ONCE(1);
}
-#endif
p->migrate_disable++;
+#endif
+ barrier();
}
EXPORT_SYMBOL(migrate_disable);
void migrate_enable(void)
{
+#ifdef CONFIG_SCHED_DEBUG
struct task_struct *p = current;
if (in_atomic() || irqs_disabled()) {
-#ifdef CONFIG_SCHED_DEBUG
p->migrate_disable_atomic--;
-#endif
return;
}
-#ifdef CONFIG_SCHED_DEBUG
if (unlikely(p->migrate_disable_atomic)) {
tracing_off();
WARN_ON_ONCE(1);
}
-#endif
WARN_ON_ONCE(p->migrate_disable <= 0);
p->migrate_disable--;
+#endif
+ barrier();
}
EXPORT_SYMBOL(migrate_enable);
#endif
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -1030,7 +1030,7 @@ void proc_sched_show_task(struct task_st
P(dl.runtime);
P(dl.deadline);
}
-#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
+#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
P(migrate_disable);
#endif
P(nr_cpus_allowed);