Message-ID: <aEzHEFgNfQFiDafq@slm.duckdns.org>
Date: Fri, 13 Jun 2025 14:49:20 -1000
From: Tejun Heo <tj@...nel.org>
To: Cheng-Yang Chou <yphbchou0911@...il.com>
Cc: linux-kernel@...r.kernel.org, void@...ifault.com, arighi@...dia.com,
changwoo@...lia.com, jserv@...s.ncku.edu.tw
Subject: Re: [PATCH 3/4] sched_ext: Clean up and standardize #if/#else/#endif
markers in sched/ext_idle.c
The following is the updated version that got applied to sched_ext/for-6.17.
Thanks.
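For anyone skimming, the pattern applied throughout ext_idle.c is sketched
below with a made-up helper (scx_example_pick_idle() and its body are
hypothetical stand-ins, not functions from the file): drop the !CONFIG_SMP
stub together with its #ifdef/#else/#endif markers and keep the SMP body as
the only implementation.

	/* Before: SMP body plus a !CONFIG_SMP stub behind preprocessor guards */
	#ifdef CONFIG_SMP
	static s32 scx_example_pick_idle(int node)
	{
		return 0;	/* stand-in for the real idle-cpumask scan */
	}
	#else	/* !CONFIG_SMP */
	static s32 scx_example_pick_idle(int node)
	{
		return -EBUSY;
	}
	#endif	/* CONFIG_SMP */

	/* After: the SMP body stands alone; the #else stub and markers are gone */
	static s32 scx_example_pick_idle(int node)
	{
		return 0;	/* stand-in for the real idle-cpumask scan */
	}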
------ 8< ------
From 8834ace4a86db0a85cb003c2efd98e6a4389243c Mon Sep 17 00:00:00 2001
From: Cheng-Yang Chou <yphbchou0911@...il.com>
Date: Wed, 11 Jun 2025 21:54:03 +0800
Subject: [PATCH] sched_ext: Always use SMP versions in kernel/sched/ext_idle.c
Simplify the scheduler by making formerly SMP-only primitives and data
structures unconditional.
tj: Updated subject for clarity. Fixed a stray #else block that wasn't
removed, which caused a build failure.
Signed-off-by: Cheng-Yang Chou <yphbchou0911@...il.com>
Signed-off-by: Tejun Heo <tj@...nel.org>
---
kernel/sched/ext_idle.c | 35 +----------------------------------
1 file changed, 1 insertion(+), 34 deletions(-)
diff --git a/kernel/sched/ext_idle.c b/kernel/sched/ext_idle.c
index 17802693e304..b79cbdb7999a 100644
--- a/kernel/sched/ext_idle.c
+++ b/kernel/sched/ext_idle.c
@@ -17,7 +17,6 @@ static DEFINE_STATIC_KEY_FALSE(scx_builtin_idle_enabled);
/* Enable/disable per-node idle cpumasks */
static DEFINE_STATIC_KEY_FALSE(scx_builtin_idle_per_node);
-#ifdef CONFIG_SMP
/* Enable/disable LLC aware optimizations */
static DEFINE_STATIC_KEY_FALSE(scx_selcpu_topo_llc);
@@ -794,17 +793,6 @@ static void reset_idle_masks(struct sched_ext_ops *ops)
cpumask_and(idle_cpumask(node)->smt, cpu_online_mask, node_mask);
}
}
-#else /* !CONFIG_SMP */
-static bool scx_idle_test_and_clear_cpu(int cpu)
-{
- return -EBUSY;
-}
-
-static s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, int node, u64 flags)
-{
- return -EBUSY;
-}
-#endif /* CONFIG_SMP */
void scx_idle_enable(struct sched_ext_ops *ops)
{
@@ -818,9 +806,7 @@ void scx_idle_enable(struct sched_ext_ops *ops)
else
static_branch_disable_cpuslocked(&scx_builtin_idle_per_node);
-#ifdef CONFIG_SMP
reset_idle_masks(ops);
-#endif
}
void scx_idle_disable(void)
@@ -906,7 +892,6 @@ static s32 select_cpu_from_kfunc(struct task_struct *p, s32 prev_cpu, u64 wake_f
if (!rq)
lockdep_assert_held(&p->pi_lock);
-#ifdef CONFIG_SMP
/*
* This may also be called from ops.enqueue(), so we need to handle
* per-CPU tasks as well. For these tasks, we can skip all idle CPU
@@ -923,9 +908,7 @@ static s32 select_cpu_from_kfunc(struct task_struct *p, s32 prev_cpu, u64 wake_f
cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags,
allowed ?: p->cpus_ptr, flags);
}
-#else
- cpu = -EBUSY;
-#endif
+
if (scx_kf_allowed_if_unlocked())
task_rq_unlock(rq, p, &rf);
@@ -1016,11 +999,7 @@ __bpf_kfunc const struct cpumask *scx_bpf_get_idle_cpumask_node(int node)
if (node < 0)
return cpu_none_mask;
-#ifdef CONFIG_SMP
return idle_cpumask(node)->cpu;
-#else
- return cpu_none_mask;
-#endif
}
/**
@@ -1040,11 +1019,7 @@ __bpf_kfunc const struct cpumask *scx_bpf_get_idle_cpumask(void)
if (!check_builtin_idle_enabled())
return cpu_none_mask;
-#ifdef CONFIG_SMP
return idle_cpumask(NUMA_NO_NODE)->cpu;
-#else
- return cpu_none_mask;
-#endif
}
/**
@@ -1063,14 +1038,10 @@ __bpf_kfunc const struct cpumask *scx_bpf_get_idle_smtmask_node(int node)
if (node < 0)
return cpu_none_mask;
-#ifdef CONFIG_SMP
if (sched_smt_active())
return idle_cpumask(node)->smt;
else
return idle_cpumask(node)->cpu;
-#else
- return cpu_none_mask;
-#endif
}
/**
@@ -1091,14 +1062,10 @@ __bpf_kfunc const struct cpumask *scx_bpf_get_idle_smtmask(void)
if (!check_builtin_idle_enabled())
return cpu_none_mask;
-#ifdef CONFIG_SMP
if (sched_smt_active())
return idle_cpumask(NUMA_NO_NODE)->smt;
else
return idle_cpumask(NUMA_NO_NODE)->cpu;
-#else
- return cpu_none_mask;
-#endif
}
/**
--
2.49.0