Date:   Sat,  9 Jan 2021 03:05:34 +0100
From:   Frederic Weisbecker <frederic@...nel.org>
To:     Peter Zijlstra <peterz@...radead.org>,
        "Paul E . McKenney" <paulmck@...nel.org>
Cc:     LKML <linux-kernel@...r.kernel.org>,
        Frederic Weisbecker <frederic@...nel.org>,
        "Rafael J . Wysocki" <rafael.j.wysocki@...el.com>,
        Ingo Molnar <mingo@...nel.org>,
        Thomas Gleixner <tglx@...utronix.de>, stable@...r.kernel.org
Subject: [RFC PATCH 6/8] sched: Report local wakeup in resched blind zone within idle loop

The idle loop has several need_resched() checks that make sure we don't
miss a rescheduling request. This means that any wakeup performed on
the local runqueue after the last generic need_resched() check has its
rescheduling silently ignored. This has happened in the past with RCU
kthreads woken from rcu_idle_enter(), for example.

Perform sanity checks to report these situations.
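
To illustrate the blind zone, here is a condensed, hypothetical sketch
(not code from this patch; cpu_pause() stands in for the architecture's
idle instruction):

	static void idle_loop_sketch(void)
	{
		while (!need_resched()) {	/* last generic check */
			/*
			 * Anything from here on that wakes a task on this
			 * runqueue (e.g. an RCU kthread woken from
			 * rcu_idle_enter()) sets TIF_NEED_RESCHED too late:
			 * the check above has already passed, so the CPU
			 * may pause anyway.
			 */
			rcu_idle_enter();
			cpu_pause();		/* sleep until an interrupt */
			rcu_idle_exit();
		}
	}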

Signed-off-by: Frederic Weisbecker <frederic@...nel.org>
Cc: Peter Zijlstra <peterz@...radead.org>
Cc: Thomas Gleixner <tglx@...utronix.de>
Cc: Ingo Molnar <mingo@...nel.org>
Cc: Paul E. McKenney <paulmck@...nel.org>
Cc: Rafael J. Wysocki <rafael.j.wysocki@...el.com>
---
 include/linux/sched.h | 11 +++++++++++
 kernel/sched/core.c   | 42 ++++++++++++++++++++++++++++++++++++++++++
 kernel/sched/idle.c   |  3 +++
 kernel/sched/sched.h  |  3 +++
 4 files changed, 59 insertions(+)
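
A usage sketch (reviewer note, not part of the patch): any other fragile
section sitting past its last need_resched() check could bracket itself
the way the idle.c hunk below does, for instance:

	sched_resched_local_forbid();
	/*
	 * A wakeup targeting this CPU in here reaches resched_curr(),
	 * which under CONFIG_SCHED_DEBUG fires the WARN_ONCE() added
	 * below, reporting the potentially lost rescheduling.
	 */
	fragile_idle_entry();	/* hypothetical placeholder */
	sched_resched_local_allow();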

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 6e3a5eeec509..83fedda54943 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1917,6 +1917,17 @@ static __always_inline bool need_resched(void)
 	return unlikely(tif_need_resched());
 }
 
+#ifdef CONFIG_SCHED_DEBUG
+extern void sched_resched_local_allow(void);
+extern void sched_resched_local_forbid(void);
+extern void sched_resched_local_assert_allowed(void);
+#else
+static inline void sched_resched_local_allow(void) { }
+static inline void sched_resched_local_forbid(void) { }
+static inline void sched_resched_local_assert_allowed(void) { }
+#endif
+
+
 /*
  * Wrappers for p->thread_info->cpu access. No-op on UP.
  */
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 15d2562118d1..6056f0374674 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -591,6 +591,44 @@ void wake_up_q(struct wake_q_head *head)
 	}
 }
 
+#ifdef CONFIG_SCHED_DEBUG
+void noinstr sched_resched_local_allow(void)
+{
+	this_rq()->resched_local_allow = 1;
+}
+
+void noinstr sched_resched_local_forbid(void)
+{
+	this_rq()->resched_local_allow = 0;
+}
+
+void noinstr sched_resched_local_assert_allowed(void)
+{
+	if (this_rq()->resched_local_allow)
+		return;
+
+	/*
+	 * Interrupts break the CPU out of its idle pause and
+	 * rescheduling happens on idle loop exit.
+	 */
+	if (in_hardirq())
+		return;
+
+	/*
+	 * What applies to hardirq also applies to softirq, as
+	 * we assume softirqs execute on the hardirq tail.
+	 * Ksoftirqd shouldn't run with resched_local_allow == 0.
+	 * We also assume that no local_bh_enable() call may
+	 * execute softirqs inline on the fragile idle/entry
+	 * path.
+	 */
+	if (in_serving_softirq())
+		return;
+
+	WARN_ONCE(1, "Late current task rescheduling may be lost\n");
+}
+#endif
+
 /*
  * resched_curr - mark rq's current task 'to be rescheduled now'.
  *
@@ -613,6 +651,7 @@ void resched_curr(struct rq *rq)
 	if (cpu == smp_processor_id()) {
 		set_tsk_need_resched(curr);
 		set_preempt_need_resched();
+		sched_resched_local_assert_allowed();
 		return;
 	}
 
@@ -7796,6 +7835,9 @@ void __init sched_init(void)
 #endif /* CONFIG_SMP */
 		hrtick_rq_init(rq);
 		atomic_set(&rq->nr_iowait, 0);
+#ifdef CONFIG_SCHED_DEBUG
+		rq->resched_local_allow = 1;
+#endif
 	}
 
 	set_load_weight(&init_task, false);
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index b601a3aa2152..cdffd32812bd 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -185,6 +185,8 @@ static void cpuidle_idle_call(void)
 		return;
 	}
 
+	sched_resched_local_forbid();
+
 	/*
 	 * The RCU framework needs to be told that we are entering an idle
 	 * section, so no more rcu read side critical sections and one more
@@ -247,6 +249,7 @@ static void cpuidle_idle_call(void)
 	}
 
 exit_idle:
+	sched_resched_local_allow();
 	__current_set_polling();
 
 	/*
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 12ada79d40f3..a9416c383451 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1060,6 +1060,9 @@ struct rq {
 #endif
 	unsigned int		push_busy;
 	struct cpu_stop_work	push_work;
+#ifdef CONFIG_SCHED_DEBUG
+	unsigned int		resched_local_allow;
+#endif
 };
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-- 
2.25.1
