Message-ID: <166721363659.7716.10522729624614709628.tip-bot2@tip-bot2>
Date: Mon, 31 Oct 2022 10:53:56 -0000
From: "tip-bot2 for Chengming Zhou" <tip-bot2@...utronix.de>
To: linux-tip-commits@...r.kernel.org
Cc: Chengming Zhou <zhouchengming@...edance.com>,
"Peter Zijlstra (Intel)" <peterz@...radead.org>,
Johannes Weiner <hannes@...xchg.org>, x86@...nel.org,
linux-kernel@...r.kernel.org
Subject: [tip: sched/core] sched/psi: Use task->psi_flags to clear in CPU migration
The following commit has been merged into the sched/core branch of tip:
Commit-ID: 52b33d87b9197c51e8ffdc61873739d90dd0a16f
Gitweb: https://git.kernel.org/tip/52b33d87b9197c51e8ffdc61873739d90dd0a16f
Author: Chengming Zhou <zhouchengming@...edance.com>
AuthorDate: Mon, 26 Sep 2022 16:19:31 +08:00
Committer: Peter Zijlstra <peterz@...radead.org>
CommitterDate: Sun, 30 Oct 2022 10:12:15 +01:00
sched/psi: Use task->psi_flags to clear in CPU migration
The commit d583d360a620 ("psi: Fix psi state corruption when schedule()
races with cgroup move") fixed a race problem by making cgroup_move_task()
use task->psi_flags instead of looking at the scheduler state.
We can extend task->psi_flags usage to CPU migration, which should be
a minor optimization for performance and code simplicity.
Signed-off-by: Chengming Zhou <zhouchengming@...edance.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
Acked-by: Johannes Weiner <hannes@...xchg.org>
Link: https://lore.kernel.org/r/20220926081931.45420-1-zhouchengming@bytedance.com
---
include/linux/sched.h | 3 ---
kernel/sched/core.c | 2 +-
kernel/sched/stats.h | 22 ++++------------------
3 files changed, 5 insertions(+), 22 deletions(-)
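For orientation before the diff: psi_task_change() mirrors every clear/set into
p->psi_flags, so at any point p->psi_flags is the union of the TSK_* states
currently accounted for the task. That is why passing p->psi_flags as the clear
mask is equivalent to the hand-built masks the hunks below delete. A minimal
user-space model of that bookkeeping (stand-in flag values and a stripped-down
helper, not the kernel implementation):

/*
 * Illustrative model only: stand-in TSK_* values and a stripped-down
 * psi_task_change(); not kernel code.
 */
#include <assert.h>
#include <stdio.h>

#define TSK_IOWAIT	(1 << 0)
#define TSK_MEMSTALL	(1 << 1)

struct task {
	unsigned int psi_flags;		/* union of currently accounted states */
	unsigned int in_iowait:1;
	unsigned int in_memstall:1;
};

static void model_psi_task_change(struct task *p, unsigned int clear, unsigned int set)
{
	/*
	 * The kernel also updates per-cgroup counters here; the part that
	 * matters for this patch is that psi_flags tracks clear/set.
	 */
	p->psi_flags &= ~clear;
	p->psi_flags |= set;
}

int main(void)
{
	struct task p = { .in_iowait = 1, .in_memstall = 1 };
	unsigned int clear = 0;

	/* Sleep-persistent states accounted when the task went to sleep. */
	model_psi_task_change(&p, 0, TSK_IOWAIT | TSK_MEMSTALL);

	/* Old psi_ttwu_dequeue(): rebuild the clear mask from task fields. */
	if (p.in_iowait)
		clear |= TSK_IOWAIT;
	if (p.in_memstall)
		clear |= TSK_MEMSTALL;

	/* New psi_ttwu_dequeue(): p->psi_flags already is that mask. */
	assert(clear == p.psi_flags);
	printf("hand-built mask %#x == psi_flags %#x\n", clear, p.psi_flags);
	return 0;
}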
diff --git a/include/linux/sched.h b/include/linux/sched.h
index ffb6eb5..23de7fe 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -888,9 +888,6 @@ struct task_struct {
unsigned sched_reset_on_fork:1;
unsigned sched_contributes_to_load:1;
unsigned sched_migrated:1;
-#ifdef CONFIG_PSI
- unsigned sched_psi_wake_requeue:1;
-#endif
/* Force alignment to the next boundary: */
unsigned :0;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 87c9cdf..07ac08c 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2053,7 +2053,7 @@ static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
if (!(flags & ENQUEUE_RESTORE)) {
sched_info_enqueue(rq, p);
- psi_enqueue(p, flags & ENQUEUE_WAKEUP);
+ psi_enqueue(p, (flags & ENQUEUE_WAKEUP) && !(flags & ENQUEUE_MIGRATED));
}
uclamp_rq_inc(rq, p);
diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h
index 84a1889..38f3698 100644
--- a/kernel/sched/stats.h
+++ b/kernel/sched/stats.h
@@ -128,11 +128,9 @@ static inline void psi_enqueue(struct task_struct *p, bool wakeup)
if (p->in_memstall)
set |= TSK_MEMSTALL_RUNNING;
- if (!wakeup || p->sched_psi_wake_requeue) {
+ if (!wakeup) {
if (p->in_memstall)
set |= TSK_MEMSTALL;
- if (p->sched_psi_wake_requeue)
- p->sched_psi_wake_requeue = 0;
} else {
if (p->in_iowait)
clear |= TSK_IOWAIT;
@@ -143,8 +141,6 @@ static inline void psi_enqueue(struct task_struct *p, bool wakeup)
static inline void psi_dequeue(struct task_struct *p, bool sleep)
{
- int clear = TSK_RUNNING;
-
if (static_branch_likely(&psi_disabled))
return;
@@ -157,10 +153,7 @@ static inline void psi_dequeue(struct task_struct *p, bool sleep)
if (sleep)
return;
- if (p->in_memstall)
- clear |= (TSK_MEMSTALL | TSK_MEMSTALL_RUNNING);
-
- psi_task_change(p, clear, 0);
+ psi_task_change(p, p->psi_flags, 0);
}
static inline void psi_ttwu_dequeue(struct task_struct *p)
@@ -172,19 +165,12 @@ static inline void psi_ttwu_dequeue(struct task_struct *p)
* deregister its sleep-persistent psi states from the old
* queue, and let psi_enqueue() know it has to requeue.
*/
- if (unlikely(p->in_iowait || p->in_memstall)) {
+ if (unlikely(p->psi_flags)) {
struct rq_flags rf;
struct rq *rq;
- int clear = 0;
-
- if (p->in_iowait)
- clear |= TSK_IOWAIT;
- if (p->in_memstall)
- clear |= TSK_MEMSTALL;
rq = __task_rq_lock(p, &rf);
- psi_task_change(p, clear, 0);
- p->sched_psi_wake_requeue = 1;
+ psi_task_change(p, p->psi_flags, 0);
__task_rq_unlock(rq, &rf);
}
}
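To close the loop on the enqueue_task() change above, a hedged sketch of the
migrated-wakeup round trip (stand-in helpers and flag values that follow the
shape of the post-patch hunks, not the kernel code itself): psi_ttwu_dequeue()
drops every accounted state on the old runqueue, and because enqueue_task()
now reports a migrated wakeup as wakeup == false, psi_enqueue() re-accounts
the sleep-persistent TSK_MEMSTALL on the new CPU, which is the job the removed
sched_psi_wake_requeue bit used to do.

/* Hedged sketch of the migrated-wakeup path after this patch; not kernel code. */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define TSK_IOWAIT		(1 << 0)
#define TSK_MEMSTALL		(1 << 1)
#define TSK_RUNNING		(1 << 2)
#define TSK_MEMSTALL_RUNNING	(1 << 3)

struct task {
	unsigned int psi_flags;
	unsigned int in_iowait:1;
	unsigned int in_memstall:1;
};

static void model_psi_task_change(struct task *p, unsigned int clear, unsigned int set)
{
	p->psi_flags &= ~clear;
	p->psi_flags |= set;
}

/* Shape of psi_ttwu_dequeue() after the patch: drop all accounted states at once. */
static void model_psi_ttwu_dequeue(struct task *p)
{
	if (p->psi_flags)
		model_psi_task_change(p, p->psi_flags, 0);
}

/* Shape of psi_enqueue() after the patch; "wakeup" now excludes migrated wakeups. */
static void model_psi_enqueue(struct task *p, bool wakeup)
{
	unsigned int clear = 0, set = TSK_RUNNING;

	if (p->in_memstall)
		set |= TSK_MEMSTALL_RUNNING;

	if (!wakeup) {
		/* Fresh enqueue or migrated wakeup: re-account memstall. */
		if (p->in_memstall)
			set |= TSK_MEMSTALL;
	} else {
		if (p->in_iowait)
			clear |= TSK_IOWAIT;
	}

	model_psi_task_change(p, clear, set);
}

int main(void)
{
	struct task p = { .in_memstall = 1 };

	/* Task slept while in memstall: TSK_MEMSTALL stays accounted. */
	model_psi_task_change(&p, 0, TSK_MEMSTALL);

	/* The wakeup migrates the task: the old runqueue drops everything... */
	model_psi_ttwu_dequeue(&p);
	assert(p.psi_flags == 0);

	/*
	 * ...and the new runqueue enqueues with wakeup == false because
	 * ENQUEUE_MIGRATED was set, so TSK_MEMSTALL is re-accounted without
	 * the removed sched_psi_wake_requeue bit.
	 */
	model_psi_enqueue(&p, false);
	assert(p.psi_flags == (TSK_MEMSTALL | TSK_RUNNING | TSK_MEMSTALL_RUNNING));

	printf("psi_flags after migrated wakeup: %#x\n", p.psi_flags);
	return 0;
}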