Message-Id: <20220124103302.3124748-1-foxhoundsk.tw@gmail.com>
Date: Mon, 24 Jan 2022 18:33:02 +0800
From: Huichun Feng <foxhoundsk.tw@...il.com>
To: henrybear327@...il.com
Cc: bristot@...hat.com, bsegall@...gle.com, christian@...uner.io,
dietmar.eggemann@....com, juri.lelli@...hat.com,
linux-kernel@...r.kernel.org, mgorman@...e.de, mingo@...hat.com,
peterz@...radead.org, rostedt@...dmis.org,
vincent.guittot@...aro.org
Subject: Re: [PATCH v2] sched: Simplify __sched_init runtime checks

Improve the runtime check in sched_init() by replacing the if (ptr)
conditional with preprocessor directives, so the group scheduling setup
block is compiled out entirely when neither CONFIG_FAIR_GROUP_SCHED nor
CONFIG_RT_GROUP_SCHED is enabled.

Signed-off-by: Chun-Hung Tseng <henrybear327@...il.com>
Signed-off-by: Huichun Feng <foxhoundsk.tw@...il.com>
---
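
Note for readers of the hunk below: the change swaps the runtime
"if (ptr)" guard for a compile-time
#if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
block, so the kzalloc() and the root_task_group pointer setup drop out
at preprocessing time when neither option is configured. What follows
is only a minimal userspace sketch of that pattern, not kernel code:
CONFIG_FOO/CONFIG_BAR, the array sizes, and the calloc() stand-in for
kzalloc() are invented for illustration.

#include <stdio.h>
#include <stdlib.h>

/*
 * Hypothetical stand-ins for the Kconfig options; build with
 * -DCONFIG_FOO and/or -DCONFIG_BAR to compile the setup block in.
 */
static void *foo_state;
static void *bar_state;

static void init_groups(void)
{
        size_t bytes = 0;

#ifdef CONFIG_FOO
        bytes += 4 * sizeof(void *);    /* pretend: a 4-entry pointer array */
#endif
#ifdef CONFIG_BAR
        bytes += 4 * sizeof(void *);
#endif

#if defined(CONFIG_FOO) || defined(CONFIG_BAR)
        /*
         * This block is compiled in only when at least one option is
         * set, so no runtime "if (bytes)" test is needed; it mirrors
         * the shape of the patch.
         */
        char *ptr = calloc(1, bytes);   /* userspace stand-in for kzalloc() */

#ifdef CONFIG_FOO
        foo_state = ptr;
        ptr += 4 * sizeof(void *);
#endif
#ifdef CONFIG_BAR
        bar_state = ptr;
        ptr += 4 * sizeof(void *);
#endif
#endif /* CONFIG_FOO || CONFIG_BAR */
}

int main(void)
{
        init_groups();
        printf("foo_state=%p bar_state=%p\n", foo_state, bar_state);
        return 0;
}

Building it with -DCONFIG_FOO, -DCONFIG_BAR, both, or neither covers
the same four configurations the kernel hunk has to handle.
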
 kernel/sched/core.c | 29 +++++++++++++++--------------
 1 file changed, 15 insertions(+), 14 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 848eaa0efe0e..1b27ca7f485a 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -9308,28 +9308,29 @@ void __init sched_init(void)
 #ifdef CONFIG_RT_GROUP_SCHED
         ptr += 2 * nr_cpu_ids * sizeof(void **);
 #endif
-        if (ptr) {
-                ptr = (unsigned long)kzalloc(ptr, GFP_NOWAIT);
+
+#if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
+        ptr = (unsigned long)kzalloc(ptr, GFP_NOWAIT);
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-                root_task_group.se = (struct sched_entity **)ptr;
-                ptr += nr_cpu_ids * sizeof(void **);
+        root_task_group.se = (struct sched_entity **)ptr;
+        ptr += nr_cpu_ids * sizeof(void **);
 
-                root_task_group.cfs_rq = (struct cfs_rq **)ptr;
-                ptr += nr_cpu_ids * sizeof(void **);
+        root_task_group.cfs_rq = (struct cfs_rq **)ptr;
+        ptr += nr_cpu_ids * sizeof(void **);
 
-                root_task_group.shares = ROOT_TASK_GROUP_LOAD;
-                init_cfs_bandwidth(&root_task_group.cfs_bandwidth);
+        root_task_group.shares = ROOT_TASK_GROUP_LOAD;
+        init_cfs_bandwidth(&root_task_group.cfs_bandwidth);
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 #ifdef CONFIG_RT_GROUP_SCHED
-                root_task_group.rt_se = (struct sched_rt_entity **)ptr;
-                ptr += nr_cpu_ids * sizeof(void **);
-
-                root_task_group.rt_rq = (struct rt_rq **)ptr;
-                ptr += nr_cpu_ids * sizeof(void **);
+        root_task_group.rt_se = (struct sched_rt_entity **)ptr;
+        ptr += nr_cpu_ids * sizeof(void **);
+        root_task_group.rt_rq = (struct rt_rq **)ptr;
+        ptr += nr_cpu_ids * sizeof(void **);
 #endif /* CONFIG_RT_GROUP_SCHED */
-        }
+#endif /* CONFIG_FAIR_GROUP_SCHED || CONFIG_RT_GROUP_SCHED */
+
 #ifdef CONFIG_CPUMASK_OFFSTACK
         for_each_possible_cpu(i) {
                 per_cpu(load_balance_mask, i) = (cpumask_var_t)kzalloc_node(
--
2.34.1