Message-Id: <1dba1ec6f0846402eb80755b5f7b44efb6237e6d.1598643276.git.jdesfossez@digitalocean.com>
Date: Fri, 28 Aug 2020 15:51:24 -0400
From: Julien Desfossez <jdesfossez@...italocean.com>
To: Peter Zijlstra <peterz@...radead.org>,
Vineeth Pillai <viremana@...ux.microsoft.com>,
Joel Fernandes <joelaf@...gle.com>,
Tim Chen <tim.c.chen@...ux.intel.com>,
Aaron Lu <aaron.lwe@...il.com>,
Aubrey Li <aubrey.intel@...il.com>,
Dhaval Giani <dhaval.giani@...cle.com>,
Chris Hyser <chris.hyser@...cle.com>,
Nishanth Aravamudan <naravamudan@...italocean.com>
Cc: mingo@...nel.org, tglx@...utronix.de, pjt@...gle.com,
torvalds@...ux-foundation.org, linux-kernel@...r.kernel.org,
fweisbec@...il.com, keescook@...omium.org, kerrnel@...gle.com,
Phil Auld <pauld@...hat.com>,
Valentin Schneider <valentin.schneider@....com>,
Mel Gorman <mgorman@...hsingularity.net>,
Pawan Gupta <pawan.kumar.gupta@...ux.intel.com>,
Paolo Bonzini <pbonzini@...hat.com>, joel@...lfernandes.org,
vineeth@...byteword.org, Chen Yu <yu.c.chen@...el.com>,
Christian Brauner <christian.brauner@...ntu.com>,
Agata Gruza <agata.gruza@...el.com>,
Antonio Gomez Iglesias <antonio.gomez.iglesias@...el.com>,
graf@...zon.com, konrad.wilk@...cle.com, dfaggioli@...e.com,
rostedt@...dmis.org, derkling@...gle.com, benbjiang@...cent.com
Subject: [RFC PATCH v7 23/23] sched: Debug bits...
From: Peter Zijlstra <peterz@...radead.org>
Not-Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
---
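Not for merging, and not part of the patch proper: as a side note for anyone
replaying the output, the prio_less() lines added below can be pulled out of
the ftrace buffer and post-processed. Here is a minimal, hypothetical
user-space sketch (helper and struct names are made up for illustration),
assuming input is piped from /sys/kernel/debug/tracing/trace and that the
task comm contains neither '/' nor '(':

/*
 * Hypothetical helper, not part of this patch: parse the prio_less()
 * debug lines emitted by the trace_printk() below, e.g.
 *   (bash/1234;101,123456789,0) ?< (sysbench/5678;101,987654321,0)
 * Build: gcc -O2 -o prio_less_parse prio_less_parse.c
 * Use:   cat /sys/kernel/debug/tracing/trace | ./prio_less_parse
 */
#include <stdio.h>
#include <string.h>

struct pick_side {
	char comm[16];			/* TASK_COMM_LEN */
	int pid;
	int prio;			/* __task_prio() result */
	unsigned long long vruntime;
	unsigned long long deadline;
};

static int parse_prio_less_line(const char *line, struct pick_side *a,
				struct pick_side *b)
{
	/* Skip the trace header (tracer comm-pid, cpu, flags, timestamp). */
	const char *msg = strchr(line, '(');

	if (!msg)
		return -1;

	/* Mirrors the "(%s/%d;%d,%Lu,%Lu) ?< (%s/%d;%d,%Lu,%Lu)" format. */
	if (sscanf(msg, "(%15[^/]/%d;%d,%llu,%llu) ?< (%15[^/]/%d;%d,%llu,%llu)",
		   a->comm, &a->pid, &a->prio, &a->vruntime, &a->deadline,
		   b->comm, &b->pid, &b->prio, &b->vruntime, &b->deadline) != 10)
		return -1;

	return 0;
}

int main(void)
{
	char line[512];
	struct pick_side a, b;

	while (fgets(line, sizeof(line), stdin)) {
		if (parse_prio_less_line(line, &a, &b))
			continue;
		printf("%s/%d (prio %d) vs %s/%d (prio %d)\n",
		       a.comm, a.pid, a.prio, b.comm, b.pid, b.prio);
	}
	return 0;
}
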
kernel/sched/core.c | 40 +++++++++++++++++++++++++++++++++++++++-
1 file changed, 39 insertions(+), 1 deletion(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5f77e575bbac..def25fe5e0d4 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -123,6 +123,10 @@ static inline bool prio_less(struct task_struct *a, struct task_struct *b)
int pa = __task_prio(a), pb = __task_prio(b);
+ trace_printk("(%s/%d;%d,%Lu,%Lu) ?< (%s/%d;%d,%Lu,%Lu)\n",
+ a->comm, a->pid, pa, a->se.vruntime, a->dl.deadline,
+ b->comm, b->pid, pb, b->se.vruntime, b->dl.deadline);
+
if (-pa < -pb)
return true;
@@ -320,12 +324,16 @@ static void __sched_core_enable(void)
static_branch_enable(&__sched_core_enabled);
stop_machine(__sched_core_stopper, (void *)true, NULL);
+
+ printk("core sched enabled\n");
}
static void __sched_core_disable(void)
{
stop_machine(__sched_core_stopper, (void *)false, NULL);
static_branch_disable(&__sched_core_enabled);
+
+ printk("core sched disabled\n");
}
void sched_core_get(void)
@@ -4977,6 +4985,14 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
put_prev_task(rq, prev);
set_next_task(rq, next);
}
+
+ trace_printk("pick pre selected (%u %u %u): %s/%d %lx\n",
+ rq->core->core_task_seq,
+ rq->core->core_pick_seq,
+ rq->core_sched_seq,
+ next->comm, next->pid,
+ next->core_cookie);
+
return next;
}
@@ -5062,6 +5078,8 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
*/
if (i == cpu && !need_sync && !p->core_cookie) {
next = p;
+ trace_printk("unconstrained pick: %s/%d %lx\n",
+ next->comm, next->pid, next->core_cookie);
goto done;
}
@@ -5070,6 +5088,9 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
rq_i->core_pick = p;
+ trace_printk("cpu(%d): selected: %s/%d %lx\n",
+ i, p->comm, p->pid, p->core_cookie);
+
/*
* If this new candidate is of higher priority than the
* previous; and they're incompatible; we need to wipe
@@ -5086,6 +5107,8 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
rq->core->core_cookie = p->core_cookie;
max = p;
+ trace_printk("max: %s/%d %lx\n", max->comm, max->pid, max->core_cookie);
+
if (old_max) {
for_each_cpu(j, smt_mask) {
if (j == i)
@@ -5114,6 +5137,7 @@ next_class:;
/* Something should have been selected for current CPU */
WARN_ON_ONCE(!next);
+ trace_printk("picked: %s/%d %lx\n", next->comm, next->pid, next->core_cookie);
/*
* Reschedule siblings
@@ -5145,12 +5169,20 @@ next_class:;
continue;
if (rq_i->curr != rq_i->core_pick) {
+ trace_printk("IPI(%d)\n", i);
WRITE_ONCE(rq_i->core_pick_seq, rq->core->core_task_seq);
resched_curr(rq_i);
}
/* Did we break L1TF mitigation requirements? */
- WARN_ON_ONCE(!cookie_match(next, rq_i->core_pick));
+ if (unlikely(!cookie_match(next, rq_i->core_pick))) {
+ trace_printk("[%d]: cookie mismatch. %s/%d/0x%lx/0x%lx\n",
+ rq_i->cpu, rq_i->core_pick->comm,
+ rq_i->core_pick->pid,
+ rq_i->core_pick->core_cookie,
+ rq_i->core->core_cookie);
+ WARN_ON_ONCE(1);
+ }
}
done:
@@ -5189,6 +5221,10 @@ static bool try_steal_cookie(int this, int that)
if (p->core_occupation > dst->idle->core_occupation)
goto next;
+ trace_printk("core fill: %s/%d (%d->%d) %d %d %lx\n",
+ p->comm, p->pid, that, this,
+ p->core_occupation, dst->idle->core_occupation, cookie);
+
p->on_rq = TASK_ON_RQ_MIGRATING;
deactivate_task(src, p, 0);
set_task_cpu(p, this);
@@ -7900,6 +7936,8 @@ int sched_cpu_starting(unsigned int cpu)
rq->core = core_rq;
}
}
+
+ printk("core: %d -> %d\n", cpu, cpu_of(core_rq));
#endif /* CONFIG_SCHED_CORE */
sched_rq_cpu_starting(cpu);
--
2.17.1