Message-ID: <223b16a5-8532-4f5b-b34a-c7a0448f2454@amd.com>
Date: Thu, 12 Jun 2025 16:11:52 +0530
From: K Prateek Nayak <kprateek.nayak@....com>
To: Leon Romanovsky <leon@...nel.org>
Cc: Steve Wahl <steve.wahl@....com>, Ingo Molnar <mingo@...hat.com>,
Peter Zijlstra <peterz@...radead.org>, Juri Lelli <juri.lelli@...hat.com>,
Vincent Guittot <vincent.guittot@...aro.org>,
Dietmar Eggemann <dietmar.eggemann@....com>,
Steven Rostedt <rostedt@...dmis.org>, Ben Segall <bsegall@...gle.com>,
Mel Gorman <mgorman@...e.de>, Valentin Schneider <vschneid@...hat.com>,
linux-kernel@...r.kernel.org, Vishal Chourasia <vishalc@...ux.ibm.com>,
samir <samir@...ux.ibm.com>, Naman Jain <namjain@...ux.microsoft.com>,
Saurabh Singh Sengar <ssengar@...ux.microsoft.com>, srivatsa@...il.mit.edu,
Michael Kelley <mhklinux@...look.com>, Russ Anderson <rja@....com>,
Dimitri Sivanich <sivanich@....com>
Subject: Re: [PATCH v4 1/2] sched/topology: improve topology_span_sane speed
On 6/12/2025 3:00 PM, K Prateek Nayak wrote:
> Ah! Since this happens so early, the topology isn't created yet for
> the debug prints to hit! Is it possible to get a dmesg with
> "ignore_loglevel" and "sched_verbose" on an older kernel that
> did not throw this error on the same host?
One better would be running with the following diff on top of v6.16-rc1,
if possible:
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 9026d325d0fd..811c8d0f5b9a 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -2398,7 +2398,7 @@ static bool topology_span_sane(const struct cpumask *cpu_map)
 {
 	struct sched_domain_topology_level *tl;
 	struct cpumask *covered, *id_seen;
-	int cpu;
+	int cpu, id;
 
 	lockdep_assert_held(&sched_domains_mutex);
 	covered = sched_domains_tmpmask;
@@ -2421,19 +2421,21 @@ static bool topology_span_sane(const struct cpumask *cpu_map)
 		 */
 		for_each_cpu(cpu, cpu_map) {
 			const struct cpumask *tl_cpu_mask = tl->mask(cpu);
-			int id;
 
 			/* lowest bit set in this mask is used as a unique id */
 			id = cpumask_first(tl_cpu_mask);
 
+			pr_warn("tl(%s) CPU(%d) ID(%d) CPU_TL_SPAN(%*pbl) ID_TL_SPAN(%*pbl)\n",
+				tl->name, cpu, id, cpumask_pr_args(tl->mask(cpu)), cpumask_pr_args(tl->mask(id)));
+
 			if (cpumask_test_cpu(id, id_seen)) {
 				/* First CPU has already been seen, ensure identical spans */
 				if (!cpumask_equal(tl->mask(id), tl_cpu_mask))
-					return false;
+					goto fail;
 			} else {
 				/* First CPU hasn't been seen before, ensure it's a completely new span */
 				if (cpumask_intersects(tl_cpu_mask, covered))
-					return false;
+					goto fail;
 
 				cpumask_or(covered, covered, tl_cpu_mask);
 				cpumask_set_cpu(id, id_seen);
@@ -2441,6 +2443,16 @@ static bool topology_span_sane(const struct cpumask *cpu_map)
 		}
 	}
 	return true;
+
+fail:
+	pr_warn("Failed tl: %s\n", tl->name);
+	pr_warn("Failed for CPU: %d\n", cpu);
+	pr_warn("ID CPU at tl: %d\n", id);
+	pr_warn("Failed CPU span at tl: %*pbl\n", cpumask_pr_args(tl->mask(cpu)));
+	pr_warn("ID CPU span: %*pbl\n", cpumask_pr_args(tl->mask(id)));
+	pr_warn("ID CPUs seen: %*pbl\n", cpumask_pr_args(id_seen));
+	pr_warn("CPUs covered: %*pbl\n", cpumask_pr_args(covered));
+	return false;
 }
 
 /*
--
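
For anyone following along, here is a minimal userspace sketch of the
invariant the diff above instruments. Everything in it (the 8-CPU
topology, the mask values, the helper names) is made up for
illustration, with plain unsigned long bitmaps standing in for
struct cpumask:

/*
 * Userspace sketch of the topology_span_sane() invariant:
 * made-up 8-CPU topology, plain bitmaps instead of struct cpumask.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS	8

/* tl_mask[cpu]: CPUs a topology level spans for @cpu (made-up data) */
static const unsigned long tl_mask[NR_CPUS] = {
	0x03, 0x03, 0x0c, 0x0c,		/* CPUs 0-1 and 2-3 pair up */
	0x30, 0x30, 0xc0, 0xc0,		/* CPUs 4-5 and 6-7 likewise */
};

static bool span_sane(const unsigned long *mask)
{
	unsigned long covered = 0, id_seen = 0;

	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		unsigned long span = mask[cpu];
		/* lowest set bit of the span is its unique id,
		 * mirroring cpumask_first() in the kernel check */
		int id = __builtin_ctzl(span);

		if (id_seen & (1UL << id)) {
			/* id already seen: spans must be identical */
			if (mask[id] != span)
				return false;
		} else {
			/* new id: span must not overlap covered CPUs */
			if (span & covered)
				return false;
			covered |= span;
			id_seen |= 1UL << id;
		}
	}
	return true;
}

int main(void)
{
	printf("span sane: %s\n", span_sane(tl_mask) ? "yes" : "no");
	return 0;
}

Built with gcc, this prints "span sane: yes" for the masks above, the
same two-legged check (identical span for a seen id, disjoint span for
a new one) that the pr_warn()s in the diff trace per CPU.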
In my case, it logs the following (no failures seen yet):
tl(SMT) CPU(0) ID(0) CPU_TL_SPAN(0) ID_TL_SPAN(0)
tl(SMT) CPU(1) ID(1) CPU_TL_SPAN(1) ID_TL_SPAN(1)
tl(SMT) CPU(2) ID(2) CPU_TL_SPAN(2) ID_TL_SPAN(2)
tl(SMT) CPU(3) ID(3) CPU_TL_SPAN(3) ID_TL_SPAN(3)
tl(SMT) CPU(4) ID(4) CPU_TL_SPAN(4) ID_TL_SPAN(4)
tl(SMT) CPU(5) ID(5) CPU_TL_SPAN(5) ID_TL_SPAN(5)
tl(SMT) CPU(6) ID(6) CPU_TL_SPAN(6) ID_TL_SPAN(6)
tl(SMT) CPU(7) ID(7) CPU_TL_SPAN(7) ID_TL_SPAN(7)
tl(SMT) CPU(8) ID(8) CPU_TL_SPAN(8) ID_TL_SPAN(8)
tl(SMT) CPU(9) ID(9) CPU_TL_SPAN(9) ID_TL_SPAN(9)
tl(CLS) CPU(0) ID(0) CPU_TL_SPAN(0) ID_TL_SPAN(0)
tl(CLS) CPU(1) ID(1) CPU_TL_SPAN(1) ID_TL_SPAN(1)
tl(CLS) CPU(2) ID(2) CPU_TL_SPAN(2) ID_TL_SPAN(2)
tl(CLS) CPU(3) ID(3) CPU_TL_SPAN(3) ID_TL_SPAN(3)
tl(CLS) CPU(4) ID(4) CPU_TL_SPAN(4) ID_TL_SPAN(4)
tl(CLS) CPU(5) ID(5) CPU_TL_SPAN(5) ID_TL_SPAN(5)
tl(CLS) CPU(6) ID(6) CPU_TL_SPAN(6) ID_TL_SPAN(6)
tl(CLS) CPU(7) ID(7) CPU_TL_SPAN(7) ID_TL_SPAN(7)
tl(CLS) CPU(8) ID(8) CPU_TL_SPAN(8) ID_TL_SPAN(8)
tl(CLS) CPU(9) ID(9) CPU_TL_SPAN(9) ID_TL_SPAN(9)
tl(MC) CPU(0) ID(0) CPU_TL_SPAN(0) ID_TL_SPAN(0)
tl(MC) CPU(1) ID(1) CPU_TL_SPAN(1) ID_TL_SPAN(1)
tl(MC) CPU(2) ID(2) CPU_TL_SPAN(2) ID_TL_SPAN(2)
tl(MC) CPU(3) ID(3) CPU_TL_SPAN(3) ID_TL_SPAN(3)
tl(MC) CPU(4) ID(4) CPU_TL_SPAN(4) ID_TL_SPAN(4)
tl(MC) CPU(5) ID(5) CPU_TL_SPAN(5) ID_TL_SPAN(5)
tl(MC) CPU(6) ID(6) CPU_TL_SPAN(6) ID_TL_SPAN(6)
tl(MC) CPU(7) ID(7) CPU_TL_SPAN(7) ID_TL_SPAN(7)
tl(MC) CPU(8) ID(8) CPU_TL_SPAN(8) ID_TL_SPAN(8)
tl(MC) CPU(9) ID(9) CPU_TL_SPAN(9) ID_TL_SPAN(9)
tl(PKG) CPU(0) ID(0) CPU_TL_SPAN(0-1) ID_TL_SPAN(0-1)
tl(PKG) CPU(1) ID(0) CPU_TL_SPAN(0-1) ID_TL_SPAN(0-1)
tl(PKG) CPU(2) ID(2) CPU_TL_SPAN(2-3) ID_TL_SPAN(2-3)
tl(PKG) CPU(3) ID(2) CPU_TL_SPAN(2-3) ID_TL_SPAN(2-3)
tl(PKG) CPU(4) ID(4) CPU_TL_SPAN(4-5) ID_TL_SPAN(4-5)
tl(PKG) CPU(5) ID(4) CPU_TL_SPAN(4-5) ID_TL_SPAN(4-5)
tl(PKG) CPU(6) ID(6) CPU_TL_SPAN(6-7) ID_TL_SPAN(6-7)
tl(PKG) CPU(7) ID(6) CPU_TL_SPAN(6-7) ID_TL_SPAN(6-7)
tl(PKG) CPU(8) ID(8) CPU_TL_SPAN(8-9) ID_TL_SPAN(8-9)
tl(PKG) CPU(9) ID(8) CPU_TL_SPAN(8-9) ID_TL_SPAN(8-9)
tl(NODE) CPU(0) ID(0) CPU_TL_SPAN(0-9) ID_TL_SPAN(0-9)
tl(NODE) CPU(1) ID(0) CPU_TL_SPAN(0-9) ID_TL_SPAN(0-9)
tl(NODE) CPU(2) ID(0) CPU_TL_SPAN(0-9) ID_TL_SPAN(0-9)
tl(NODE) CPU(3) ID(0) CPU_TL_SPAN(0-9) ID_TL_SPAN(0-9)
tl(NODE) CPU(4) ID(0) CPU_TL_SPAN(0-9) ID_TL_SPAN(0-9)
tl(NODE) CPU(5) ID(0) CPU_TL_SPAN(0-9) ID_TL_SPAN(0-9)
tl(NODE) CPU(6) ID(0) CPU_TL_SPAN(0-9) ID_TL_SPAN(0-9)
tl(NODE) CPU(7) ID(0) CPU_TL_SPAN(0-9) ID_TL_SPAN(0-9)
tl(NODE) CPU(8) ID(0) CPU_TL_SPAN(0-9) ID_TL_SPAN(0-9)
tl(NODE) CPU(9) ID(0) CPU_TL_SPAN(0-9) ID_TL_SPAN(0-9)
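
For contrast, a made-up set of masks that would trip the
cpumask_intersects() leg of the check: CPU 0 claims CPUs 0-2 while
CPU 2 reports CPUs 1-3, so CPU 2's "new" span intersects what CPU 0
already covered. Dropping this in place of tl_mask in the sketch above
makes span_sane() return false at cpu == 2:

/* Asymmetric spans: feeding this to the sketch fails at cpu == 2 */
static const unsigned long bad_tl_mask[NR_CPUS] = {
	0x07, 0x07, 0x0e, 0x0e,		/* 0-2 vs 1-3: overlap on CPUs 1-2 */
	0x30, 0x30, 0xc0, 0xc0,
};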
--
Thanks and Regards,
Prateek