[<prev] [next>] [<thread-prev] [day] [month] [year] [list]
Message-ID: <CAA5xa-==1d2UATF1Q1YnijD4_j-sBY=E5P52qrigX4rKFjbMxw@mail.gmail.com>
Date: Fri, 20 May 2022 08:54:44 +0200
From: Henry Tseng <henrybear327@...il.com>
To: mingo@...hat.com, Peter Zijlstra <peterz@...radead.org>,
juri.lelli@...hat.com, vincent.guittot@...aro.org,
dietmar.eggemann@....com, rostedt@...dmis.org,
Benjamin Segall <bsegall@...gle.com>, mgorman@...e.de,
bristot@...hat.com, christian@...uner.io,
linux-kernel@...r.kernel.org, Jim Huang <jserv@...s.ncku.edu.tw>,
Huichun Feng <foxhoundsk.tw@...il.com>
Subject: Re: [PATCH v3] sched: Simplify __sched_init runtime checks
A friendly ping on this patch!
On Thu, Mar 3, 2022 at 2:19 PM Chun-Hung Tseng <henrybear327@...il.com> wrote:
>
> Improve sched_init(void) by replacing runtime if-condition checks
> with compile-time preprocessor directives.
>
> Signed-off-by: Chun-Hung Tseng <henrybear327@...il.com>
> ---
> kernel/sched/core.c | 29 +++++++++++++++--------------
> 1 file changed, 15 insertions(+), 14 deletions(-)
>
> diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> index 9745613d531c..003e8677f6ba 100644
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -9317,28 +9317,29 @@ void __init sched_init(void)
> #ifdef CONFIG_RT_GROUP_SCHED
> ptr += 2 * nr_cpu_ids * sizeof(void **);
> #endif
> - if (ptr) {
> - ptr = (unsigned long)kzalloc(ptr, GFP_NOWAIT);
> +
> +#if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
> + ptr = (unsigned long)kzalloc(ptr, GFP_NOWAIT);
>
> #ifdef CONFIG_FAIR_GROUP_SCHED
> - root_task_group.se = (struct sched_entity **)ptr;
> - ptr += nr_cpu_ids * sizeof(void **);
> + root_task_group.se = (struct sched_entity **)ptr;
> + ptr += nr_cpu_ids * sizeof(void **);
>
> - root_task_group.cfs_rq = (struct cfs_rq **)ptr;
> - ptr += nr_cpu_ids * sizeof(void **);
> + root_task_group.cfs_rq = (struct cfs_rq **)ptr;
> + ptr += nr_cpu_ids * sizeof(void **);
>
> - root_task_group.shares = ROOT_TASK_GROUP_LOAD;
> - init_cfs_bandwidth(&root_task_group.cfs_bandwidth);
> + root_task_group.shares = ROOT_TASK_GROUP_LOAD;
> + init_cfs_bandwidth(&root_task_group.cfs_bandwidth);
> #endif /* CONFIG_FAIR_GROUP_SCHED */
> #ifdef CONFIG_RT_GROUP_SCHED
> - root_task_group.rt_se = (struct sched_rt_entity **)ptr;
> - ptr += nr_cpu_ids * sizeof(void **);
> -
> - root_task_group.rt_rq = (struct rt_rq **)ptr;
> - ptr += nr_cpu_ids * sizeof(void **);
> + root_task_group.rt_se = (struct sched_rt_entity **)ptr;
> + ptr += nr_cpu_ids * sizeof(void **);
>
> + root_task_group.rt_rq = (struct rt_rq **)ptr;
> + ptr += nr_cpu_ids * sizeof(void **);
> #endif /* CONFIG_RT_GROUP_SCHED */
> - }
> +#endif /* CONFIG_FAIR_GROUP_SCHED || CONFIG_RT_GROUP_SCHED */
> +
> #ifdef CONFIG_CPUMASK_OFFSTACK
> for_each_possible_cpu(i) {
> per_cpu(load_balance_mask, i) = (cpumask_var_t)kzalloc_node(
> --
> 2.35.1
>
--
Best wishes,
Henry
Powered by blists - more mailing lists