Message-ID: <172874255804.1442.16221172414886128944.tip-bot2@tip-bot2>
Date: Sat, 12 Oct 2024 14:15:58 -0000
From: "tip-bot2 for Peter Zijlstra" <tip-bot2@...utronix.de>
To: linux-tip-commits@...r.kernel.org
Cc: Sean Christopherson <seanjc@...gle.com>,
"Peter Zijlstra (Intel)" <peterz@...radead.org>, x86@...nel.org,
linux-kernel@...r.kernel.org
Subject: [tip: sched/urgent] sched: Fix external p->on_rq users

The following commit has been merged into the sched/urgent branch of tip:

Commit-ID: 1cc2f68c016ad3ac8b3a0495797dd61e19a10025
Gitweb: https://git.kernel.org/tip/1cc2f68c016ad3ac8b3a0495797dd61e19a10025
Author: Peter Zijlstra <peterz@...radead.org>
AuthorDate: Thu, 10 Oct 2024 11:38:10 +02:00
Committer: Peter Zijlstra <peterz@...radead.org>
CommitterDate: Fri, 11 Oct 2024 10:49:33 +02:00

sched: Fix external p->on_rq users

Sean noted that ever since commit 152e11f6df29 ("sched/fair: Implement
delayed dequeue") KVM's preemption notifiers have started
mis-classifying preemption vs blocking.

Notably, p->on_rq is no longer sufficient to determine if a task is
runnable or blocked -- the aforementioned commit introduces tasks that
remain on the runqueue even though they will not run again, and
should be considered blocked in many cases.

Add the task_is_runnable() helper to classify things and audit all
external users of the p->on_rq state. Also add a few comments.

Fixes: 152e11f6df29 ("sched/fair: Implement delayed dequeue")
Reported-by: Sean Christopherson <seanjc@...gle.com>
Tested-by: Sean Christopherson <seanjc@...gle.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
Link: https://lkml.kernel.org/r/20241010091843.GK33184@noisy.programming.kicks-ass.net
---
include/linux/sched.h | 5 +++++
kernel/events/core.c | 2 +-
kernel/freezer.c | 7 ++++++-
kernel/rcu/tasks.h | 9 +++++++++
kernel/sched/core.c | 12 +++++++++---
kernel/time/tick-sched.c | 5 +++++
kernel/trace/trace_selftest.c | 2 +-
virt/kvm/kvm_main.c | 2 +-
8 files changed, 37 insertions(+), 7 deletions(-)
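
(Editorial aside, not part of the patch.) For readers following along
without a kernel tree, the snippet below is a minimal, self-contained
userspace model of the new classification. The struct and its fields only
mirror the kernel's p->on_rq / p->se.sched_delayed pair; nothing here is
kernel code, and the three example states are invented for illustration.

#include <stdbool.h>
#include <stdio.h>

/*
 * Userspace stand-in for the two scheduler fields involved; the real
 * task_struct obviously looks nothing like this.
 */
struct task {
	bool on_rq;         /* still enqueued on a runqueue */
	bool sched_delayed; /* delayed dequeue: enqueued, but will not run again */
};

/* Mirrors the new include/linux/sched.h helper: enqueued AND not delayed. */
static bool task_is_runnable(const struct task *p)
{
	return p->on_rq && !p->sched_delayed;
}

int main(void)
{
	struct task preempted = { .on_rq = true,  .sched_delayed = false };
	struct task delayed   = { .on_rq = true,  .sched_delayed = true  };
	struct task blocked   = { .on_rq = false, .sched_delayed = false };

	/* Checking ->on_rq alone would have called 'delayed' runnable too. */
	printf("preempted: runnable=%d\n", task_is_runnable(&preempted)); /* 1 */
	printf("delayed:   runnable=%d\n", task_is_runnable(&delayed));   /* 0 */
	printf("blocked:   runnable=%d\n", task_is_runnable(&blocked));   /* 0 */

	return 0;
}

Compiled with any C compiler, it reports runnable=1 only for the genuinely
preempted case; the delay-dequeued task is treated as blocked.
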
diff --git a/include/linux/sched.h b/include/linux/sched.h
index e6ee425..8a9517e 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2133,6 +2133,11 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
#endif /* CONFIG_SMP */
+static inline bool task_is_runnable(struct task_struct *p)
+{
+ return p->on_rq && !p->se.sched_delayed;
+}
+
extern bool sched_task_on_rq(struct task_struct *p);
extern unsigned long get_wchan(struct task_struct *p);
extern struct task_struct *cpu_curr_snapshot(int cpu);
diff --git a/kernel/events/core.c b/kernel/events/core.c
index e3589c4..cdd0976 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -9251,7 +9251,7 @@ static void perf_event_switch(struct task_struct *task,
},
};
- if (!sched_in && task->on_rq) {
+ if (!sched_in && task_is_runnable(task)) {
switch_event.event_id.header.misc |=
PERF_RECORD_MISC_SWITCH_OUT_PREEMPT;
}
diff --git a/kernel/freezer.c b/kernel/freezer.c
index 44bbd7d..8d530d0 100644
--- a/kernel/freezer.c
+++ b/kernel/freezer.c
@@ -109,7 +109,12 @@ static int __set_task_frozen(struct task_struct *p, void *arg)
{
unsigned int state = READ_ONCE(p->__state);
- if (p->on_rq)
+ /*
+ * Allow freezing the sched_delayed tasks; they will not execute until
+ * ttwu() fixes them up, so it is safe to swap their state now, instead
+ * of waiting for them to get fully dequeued.
+ */
+ if (task_is_runnable(p))
return 0;
if (p != current && task_curr(p))
diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
index 6333f4c..4d7ee95 100644
--- a/kernel/rcu/tasks.h
+++ b/kernel/rcu/tasks.h
@@ -986,6 +986,15 @@ static bool rcu_tasks_is_holdout(struct task_struct *t)
return false;
/*
+ * t->on_rq && !t->se.sched_delayed *could* be considered sleeping but
+ * since it is a spurious state (it will transition into the
+ * traditional blocked state or get woken up without outside
+ * dependencies), not considering it such should only affect timing.
+ *
+ * Be conservative for now and not include it.
+ */
+
+ /*
* Idle tasks (or idle injection) within the idle loop are RCU-tasks
* quiescent states. But CPU boot code performed by the idle task
* isn't a quiescent state.
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 71232f8..7db711b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -548,6 +548,11 @@ sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags) { }
* ON_RQ_MIGRATING state is used for migration without holding both
* rq->locks. It indicates task_cpu() is not stable, see task_rq_lock().
*
+ * Additionally it is possible to be ->on_rq but still be considered not
+ * runnable when p->se.sched_delayed is true. These tasks are on the runqueue
+ * but will be dequeued as soon as they get picked again. See the
+ * task_is_runnable() helper.
+ *
* p->on_cpu <- { 0, 1 }:
*
* is set by prepare_task() and cleared by finish_task() such that it will be
@@ -4317,9 +4322,10 @@ static bool __task_needs_rq_lock(struct task_struct *p)
* @arg: Argument to function.
*
* Fix the task in it's current state by avoiding wakeups and or rq operations
- * and call @func(@arg) on it. This function can use ->on_rq and task_curr()
- * to work out what the state is, if required. Given that @func can be invoked
- * with a runqueue lock held, it had better be quite lightweight.
+ * and call @func(@arg) on it. This function can use task_is_runnable() and
+ * task_curr() to work out what the state is, if required. Given that @func
+ * can be invoked with a runqueue lock held, it had better be quite
+ * lightweight.
*
* Returns:
* Whatever @func returns
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 753a184..59efa14 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -435,6 +435,11 @@ static void tick_nohz_kick_task(struct task_struct *tsk)
* tick_nohz_task_switch()
* LOAD p->tick_dep_mask
*/
+ // XXX given a task picks up the dependency on schedule(), should we
+ // only care about tasks that are currently on the CPU instead of all
+ // that are on the runqueue?
+ //
+ // That is, does this want to be: task_on_cpu() / task_curr()?
if (!sched_task_on_rq(tsk))
return;
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index c4ad7cd..1469dd8 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -1485,7 +1485,7 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
/* reset the max latency */
tr->max_latency = 0;
- while (p->on_rq) {
+ while (task_is_runnable(p)) {
/*
* Sleep to make sure the -deadline thread is asleep too.
* On virtual machines we can't rely on timings,
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 05cbb25..0c666f1 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -6387,7 +6387,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
WRITE_ONCE(vcpu->scheduled_out, true);
- if (current->on_rq && vcpu->wants_to_run) {
+ if (task_is_runnable(current) && vcpu->wants_to_run) {
WRITE_ONCE(vcpu->preempted, true);
WRITE_ONCE(vcpu->ready, true);
}
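
(Second editorial aside, also not part of the patch.) To make the
mis-classification Sean reported concrete, this sketch contrasts the old
->on_rq test with the new check at the two switch-out sites touched above,
perf_event_switch() and kvm_sched_out(). The old_was_preempted() /
new_was_preempted() helpers are invented names that only mirror the
before/after conditions.

#include <stdbool.h>
#include <stdio.h>

struct task {
	bool on_rq;
	bool sched_delayed;
};

/* Old test: what the switch-out paths checked before this patch. */
static bool old_was_preempted(const struct task *prev)
{
	return prev->on_rq;
}

/* New test: the same sites after the patch, via task_is_runnable(). */
static bool new_was_preempted(const struct task *prev)
{
	return prev->on_rq && !prev->sched_delayed;
}

int main(void)
{
	/* A task that blocked, but was left enqueued by delayed dequeue. */
	struct task delayed = { .on_rq = true, .sched_delayed = true };

	printf("old check: %s\n", old_was_preempted(&delayed) ? "preempted" : "blocked");
	printf("new check: %s\n", new_was_preempted(&delayed) ? "preempted" : "blocked");
	return 0;
}

For the delay-dequeued task the old check reports "preempted" while the new
one reports "blocked", which is the classification the commit message
describes for KVM's preemption notifiers.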