Message-ID: <172923526255.1442.5036039111433625204.tip-bot2@tip-bot2>
Date: Fri, 18 Oct 2024 07:07:42 -0000
From: "tip-bot2 for Connor O'Brien" <tip-bot2@...utronix.de>
To: linux-tip-commits@...r.kernel.org
Cc: "Connor O'Brien" <connoro@...gle.com>, John Stultz <jstultz@...gle.com>,
"Peter Zijlstra (Intel)" <peterz@...radead.org>,
Metin Kaya <metin.kaya@....com>, Valentin Schneider <vschneid@...hat.com>,
Qais Yousef <qyousef@...alina.io>, K Prateek Nayak <kprateek.nayak@....com>,
x86@...nel.org, linux-kernel@...r.kernel.org
Subject: [tip: sched/core] sched: Add move_queued_task_locked helper

The following commit has been merged into the sched/core branch of tip:

Commit-ID:     2b05a0b4c08ffd6dedfbd27af8708742cde39b95
Gitweb:        https://git.kernel.org/tip/2b05a0b4c08ffd6dedfbd27af8708742cde39b95
Author:        Connor O'Brien <connoro@...gle.com>
AuthorDate:    Wed, 09 Oct 2024 16:53:37 -07:00
Committer:     Peter Zijlstra <peterz@...radead.org>
CommitterDate: Mon, 14 Oct 2024 12:52:41 +02:00

sched: Add move_queued_task_locked helper

Switch the logic that deactivates a task, sets its CPU, and
reactivates it on a different rq to use a helper that will later
be extended to push entire blocked task chains.

This patch was broken out from a larger chain migration patch
originally by Connor O'Brien.

[jstultz: split out from larger chain migration patch]
Signed-off-by: Connor O'Brien <connoro@...gle.com>
Signed-off-by: John Stultz <jstultz@...gle.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
Reviewed-by: Metin Kaya <metin.kaya@....com>
Reviewed-by: Valentin Schneider <vschneid@...hat.com>
Reviewed-by: Qais Yousef <qyousef@...alina.io>
Tested-by: K Prateek Nayak <kprateek.nayak@....com>
Tested-by: Metin Kaya <metin.kaya@....com>
Link: https://lore.kernel.org/r/20241009235352.1614323-5-jstultz@google.com
---
 kernel/sched/core.c     | 13 +++----------
 kernel/sched/deadline.c |  8 ++------
 kernel/sched/rt.c       |  8 ++------
 kernel/sched/sched.h    | 12 ++++++++++++
 4 files changed, 19 insertions(+), 22 deletions(-)
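
Every hunk below makes the same substitution: the open-coded three-step
migration sequence is replaced by one call to the new helper. A minimal
before/after sketch of the transformation (illustrative only; src_rq,
dst_rq and p stand in for the per-site variable names):

	/* Before: the sequence open-coded at each migration site. */
	deactivate_task(src_rq, p, 0);	/* dequeue from the source rq */
	set_task_cpu(p, dst_rq->cpu);	/* retarget the task at the new CPU */
	activate_task(dst_rq, p, 0);	/* enqueue on the destination rq */

	/* After: one call; both rq locks must already be held. */
	move_queued_task_locked(src_rq, dst_rq, p);
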
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f5ec452..ab0b775 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2620,9 +2620,7 @@ int push_cpu_stop(void *arg)
 
 	// XXX validate p is still the highest prio task
 	if (task_rq(p) == rq) {
-		deactivate_task(rq, p, 0);
-		set_task_cpu(p, lowest_rq->cpu);
-		activate_task(lowest_rq, p, 0);
+		move_queued_task_locked(rq, lowest_rq, p);
 		resched_curr(lowest_rq);
 	}
 
@@ -3309,9 +3307,7 @@ static void __migrate_swap_task(struct task_struct *p, int cpu)
 		rq_pin_lock(src_rq, &srf);
 		rq_pin_lock(dst_rq, &drf);
 
-		deactivate_task(src_rq, p, 0);
-		set_task_cpu(p, cpu);
-		activate_task(dst_rq, p, 0);
+		move_queued_task_locked(src_rq, dst_rq, p);
 		wakeup_preempt(dst_rq, p, 0);
 
 		rq_unpin_lock(dst_rq, &drf);
@@ -6300,10 +6296,7 @@ static bool try_steal_cookie(int this, int that)
 		if (sched_task_is_throttled(p, this))
 			goto next;
 
-		deactivate_task(src, p, 0);
-		set_task_cpu(p, this);
-		activate_task(dst, p, 0);
-
+		move_queued_task_locked(src, dst, p);
 		resched_curr(dst);
 
 		success = true;
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index be1b917..4acf5e3 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -2751,9 +2751,7 @@ retry:
 		goto retry;
 	}
 
-	deactivate_task(rq, next_task, 0);
-	set_task_cpu(next_task, later_rq->cpu);
-	activate_task(later_rq, next_task, 0);
+	move_queued_task_locked(rq, later_rq, next_task);
 	ret = 1;
 
 	resched_curr(later_rq);
@@ -2839,9 +2837,7 @@ static void pull_dl_task(struct rq *this_rq)
 			if (is_migration_disabled(p)) {
 				push_task = get_push_task(src_rq);
 			} else {
-				deactivate_task(src_rq, p, 0);
-				set_task_cpu(p, this_cpu);
-				activate_task(this_rq, p, 0);
+				move_queued_task_locked(src_rq, this_rq, p);
 				dmin = p->dl.deadline;
 				resched = true;
 			}
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 172c588..e2506ab 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -2088,9 +2088,7 @@ retry:
 		goto retry;
 	}
 
-	deactivate_task(rq, next_task, 0);
-	set_task_cpu(next_task, lowest_rq->cpu);
-	activate_task(lowest_rq, next_task, 0);
+	move_queued_task_locked(rq, lowest_rq, next_task);
 	resched_curr(lowest_rq);
 	ret = 1;
 
@@ -2361,9 +2359,7 @@ static void pull_rt_task(struct rq *this_rq)
 			if (is_migration_disabled(p)) {
 				push_task = get_push_task(src_rq);
 			} else {
-				deactivate_task(src_rq, p, 0);
-				set_task_cpu(p, this_cpu);
-				activate_task(this_rq, p, 0);
+				move_queued_task_locked(src_rq, this_rq, p);
 				resched = true;
 			}
 			/*
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 20b6e75..71ce1b0 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -3788,6 +3788,18 @@ static inline void init_sched_mm_cid(struct task_struct *t) { }
 extern u64 avg_vruntime(struct cfs_rq *cfs_rq);
 extern int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se);
 
+#ifdef CONFIG_SMP
+static inline
+void move_queued_task_locked(struct rq *src_rq, struct rq *dst_rq, struct task_struct *task)
+{
+	lockdep_assert_rq_held(src_rq);
+	lockdep_assert_rq_held(dst_rq);
+
+	deactivate_task(src_rq, task, 0);
+	set_task_cpu(task, dst_rq->cpu);
+	activate_task(dst_rq, task, 0);
+}
+#endif
+
 #ifdef CONFIG_RT_MUTEXES
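
The helper asserts with lockdep that both runqueue locks are held, so
every caller must acquire them first. A simplified sketch of the
caller-side pattern, condensed from the RT push path (push_rt_task()
in kernel/sched/rt.c; the retry loop and error handling are omitted):

	/* find_lock_lowest_rq() returns with both rq locks held on success. */
	lowest_rq = find_lock_lowest_rq(next_task, rq);
	if (lowest_rq) {
		move_queued_task_locked(rq, lowest_rq, next_task);
		resched_curr(lowest_rq);
		double_unlock_balance(rq, lowest_rq);	/* drop both locks */
	}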