Message-ID: <174980023684.406.471722394508861936.tip-bot2@tip-bot2>
Date: Fri, 13 Jun 2025 07:37:16 -0000
From: "tip-bot2 for Ingo Molnar" <tip-bot2@...utronix.de>
To: linux-tip-commits@...r.kernel.org
Cc: Ingo Molnar <mingo@...nel.org>, Peter Zijlstra <peterz@...radead.org>,
Dietmar Eggemann <dietmar.eggemann@....com>,
Juri Lelli <juri.lelli@...hat.com>,
Linus Torvalds <torvalds@...ux-foundation.org>, Mel Gorman <mgorman@...e.de>,
Sebastian Andrzej Siewior <bigeasy@...utronix.de>,
Shrikanth Hegde <sshegde@...ux.ibm.com>, Steven Rostedt <rostedt@...dmis.org>,
Valentin Schneider <vschneid@...hat.com>,
Vincent Guittot <vincent.guittot@...aro.org>, x86@...nel.org,
linux-kernel@...r.kernel.org
Subject: [tip: sched/core] sched/smp: Use the SMP version of scheduler debugging data

The following commit has been merged into the sched/core branch of tip:

Commit-ID: 02fb885ebdc4ad029aabff4b85dcbc540d7cdb32
Gitweb: https://git.kernel.org/tip/02fb885ebdc4ad029aabff4b85dcbc540d7cdb32
Author: Ingo Molnar <mingo@...nel.org>
AuthorDate: Wed, 28 May 2025 10:09:11 +02:00
Committer: Ingo Molnar <mingo@...nel.org>
CommitterDate: Fri, 13 Jun 2025 08:47:20 +02:00

sched/smp: Use the SMP version of scheduler debugging data

Simplify the scheduler by making CONFIG_SMP=y debug output
unconditional.

Signed-off-by: Ingo Molnar <mingo@...nel.org>
Acked-by: Peter Zijlstra <peterz@...radead.org>
Cc: Dietmar Eggemann <dietmar.eggemann@....com>
Cc: Juri Lelli <juri.lelli@...hat.com>
Cc: Linus Torvalds <torvalds@...ux-foundation.org>
Cc: Mel Gorman <mgorman@...e.de>
Cc: Sebastian Andrzej Siewior <bigeasy@...utronix.de>
Cc: Shrikanth Hegde <sshegde@...ux.ibm.com>
Cc: Steven Rostedt <rostedt@...dmis.org>
Cc: Valentin Schneider <vschneid@...hat.com>
Cc: Vincent Guittot <vincent.guittot@...aro.org>
Link: https://lore.kernel.org/r/20250528080924.2273858-31-mingo@kernel.org
---
kernel/sched/debug.c | 14 --------------
1 file changed, 14 deletions(-)

diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 04c0354..f16d539 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -169,8 +169,6 @@ static const struct file_operations sched_feat_fops = {
.release = single_release,
};

-#ifdef CONFIG_SMP
-
static ssize_t sched_scaling_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
@@ -217,8 +215,6 @@ static const struct file_operations sched_scaling_fops = {
.release = single_release,
};

-#endif /* CONFIG_SMP */
-
#ifdef CONFIG_PREEMPT_DYNAMIC

static ssize_t sched_dynamic_write(struct file *filp, const char __user *ubuf,
@@ -511,7 +507,6 @@ static __init int sched_init_debug(void)
debugfs_create_u32("latency_warn_ms", 0644, debugfs_sched, &sysctl_resched_latency_warn_ms);
debugfs_create_u32("latency_warn_once", 0644, debugfs_sched, &sysctl_resched_latency_warn_once);
-#ifdef CONFIG_SMP
debugfs_create_file("tunable_scaling", 0644, debugfs_sched, NULL, &sched_scaling_fops);
debugfs_create_u32("migration_cost_ns", 0644, debugfs_sched, &sysctl_sched_migration_cost);
debugfs_create_u32("nr_migrate", 0644, debugfs_sched, &sysctl_sched_nr_migrate);
@@ -519,7 +514,6 @@ static __init int sched_init_debug(void)
sched_domains_mutex_lock();
update_sched_domain_debugfs();
sched_domains_mutex_unlock();
-#endif /* CONFIG_SMP */

#ifdef CONFIG_NUMA_BALANCING
numa = debugfs_create_dir("numa_balancing", debugfs_sched);
@@ -685,11 +679,9 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group
}

P(se->load.weight);
-#ifdef CONFIG_SMP
P(se->avg.load_avg);
P(se->avg.util_avg);
P(se->avg.runnable_avg);
-#endif /* CONFIG_SMP */

#undef PN_SCHEDSTAT
#undef PN
@@ -849,7 +841,6 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
SEQ_printf(m, " .%-30s: %d\n", "h_nr_queued", cfs_rq->h_nr_queued);
SEQ_printf(m, " .%-30s: %d\n", "h_nr_idle", cfs_rq->h_nr_idle);
SEQ_printf(m, " .%-30s: %ld\n", "load", cfs_rq->load.weight);
-#ifdef CONFIG_SMP
SEQ_printf(m, " .%-30s: %lu\n", "load_avg",
cfs_rq->avg.load_avg);
SEQ_printf(m, " .%-30s: %lu\n", "runnable_avg",
@@ -870,7 +861,6 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
SEQ_printf(m, " .%-30s: %ld\n", "tg_load_avg",
atomic_long_read(&cfs_rq->tg->load_avg));
#endif /* CONFIG_FAIR_GROUP_SCHED */
-#endif /* CONFIG_SMP */
#ifdef CONFIG_CFS_BANDWIDTH
SEQ_printf(m, " .%-30s: %d\n", "throttled",
cfs_rq->throttled);
@@ -967,12 +957,10 @@ do { \
#undef P
#undef PN

-#ifdef CONFIG_SMP
#define P64(n) SEQ_printf(m, " .%-30s: %Ld\n", #n, rq->n);
P64(avg_idle);
P64(max_idle_balance_cost);
#undef P64
-#endif /* CONFIG_SMP */

#define P(n) SEQ_printf(m, " .%-30s: %d\n", #n, schedstat_val(rq->n));
if (schedstat_enabled()) {
@@ -1242,7 +1230,6 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
__PS("nr_involuntary_switches", p->nivcsw);

P(se.load.weight);
-#ifdef CONFIG_SMP
P(se.avg.load_sum);
P(se.avg.runnable_sum);
P(se.avg.util_sum);
@@ -1251,7 +1238,6 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
P(se.avg.util_avg);
P(se.avg.last_update_time);
PM(se.avg.util_est, ~UTIL_AVG_UNCHANGED);
-#endif /* CONFIG_SMP */
#ifdef CONFIG_UCLAMP_TASK
__PS("uclamp.min", p->uclamp_req[UCLAMP_MIN].value);
__PS("uclamp.max", p->uclamp_req[UCLAMP_MAX].value);