Clean up some of the excessive ifdeffery introduced in the last patch:
move the CONFIG_FAIR_GROUP_SCHED and CONFIG_RT_GROUP_SCHED code into
per-class alloc/free helpers (with empty inline stubs for the disabled
case), so that free_sched_group() and sched_create_group() read
straight through.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
---
 kernel/sched.c |  150 ++++++++++++++++++++++++++++++++++++++-------------------
 1 file changed, 100 insertions(+), 50 deletions(-)

Index: linux-2.6/kernel/sched.c
===================================================================
--- linux-2.6.orig/kernel/sched.c
+++ linux-2.6/kernel/sched.c
@@ -7563,56 +7563,29 @@ static int load_balance_monitor(void *un
 }
 #endif	/* CONFIG_SMP */
 
-static void free_sched_group(struct task_group *tg)
+#ifdef CONFIG_FAIR_GROUP_SCHED
+static void free_fair_sched_group(struct task_group *tg)
 {
 	int i;
 
 	for_each_possible_cpu(i) {
-#ifdef CONFIG_FAIR_GROUP_SCHED
 		if (tg->cfs_rq)
 			kfree(tg->cfs_rq[i]);
 		if (tg->se)
 			kfree(tg->se[i]);
-#endif
-#ifdef CONFIG_RT_GROUP_SCHED
-		if (tg->rt_rq)
-			kfree(tg->rt_rq[i]);
-		if (tg->rt_se)
-			kfree(tg->rt_se[i]);
-#endif
 	}
 
-#ifdef CONFIG_FAIR_GROUP_SCHED
 	kfree(tg->cfs_rq);
 	kfree(tg->se);
-#endif
-#ifdef CONFIG_RT_GROUP_SCHED
-	kfree(tg->rt_rq);
-	kfree(tg->rt_se);
-#endif
-	kfree(tg);
 }
 
-/* allocate runqueue etc for a new task group */
-struct task_group *sched_create_group(void)
+static int alloc_fair_sched_group(struct task_group *tg)
 {
-	struct task_group *tg;
-#ifdef CONFIG_FAIR_GROUP_SCHED
 	struct cfs_rq *cfs_rq;
 	struct sched_entity *se;
-#endif
-#ifdef CONFIG_RT_GROUP_SCHED
-	struct rt_rq *rt_rq;
-	struct sched_rt_entity *rt_se;
-#endif
 	struct rq *rq;
 	int i;
 
-	tg = kzalloc(sizeof(*tg), GFP_KERNEL);
-	if (!tg)
-		return ERR_PTR(-ENOMEM);
-
-#ifdef CONFIG_FAIR_GROUP_SCHED
 	tg->cfs_rq = kzalloc(sizeof(cfs_rq) * NR_CPUS, GFP_KERNEL);
 	if (!tg->cfs_rq)
 		goto err;
@@ -7621,23 +7594,10 @@ struct task_group *sched_create_group(vo
 		goto err;
 
 	tg->shares = NICE_0_LOAD;
-#endif
-
-#ifdef CONFIG_RT_GROUP_SCHED
-	tg->rt_rq = kzalloc(sizeof(rt_rq) * NR_CPUS, GFP_KERNEL);
-	if (!tg->rt_rq)
-		goto err;
-	tg->rt_se = kzalloc(sizeof(rt_se) * NR_CPUS, GFP_KERNEL);
-	if (!tg->rt_se)
-		goto err;
-
-	tg->rt_runtime = 0;
-#endif
 
 	for_each_possible_cpu(i) {
 		rq = cpu_rq(i);
 
-#ifdef CONFIG_FAIR_GROUP_SCHED
 		cfs_rq = kmalloc_node(sizeof(struct cfs_rq),
 				GFP_KERNEL|__GFP_ZERO, cpu_to_node(i));
 		if (!cfs_rq)
@@ -7649,9 +7609,68 @@ struct task_group *sched_create_group(vo
 			goto err;
 
 		init_tg_cfs_entry(rq, tg, cfs_rq, se, i, 0);
+	}
+
+	lock_task_group_list();
+	for_each_possible_cpu(i) {
+		rq = cpu_rq(i);
+		cfs_rq = tg->cfs_rq[i];
+		list_add_rcu(&cfs_rq->leaf_cfs_rq_list, &rq->leaf_cfs_rq_list);
+	}
+	list_add_rcu(&tg->list, &task_groups);
+	unlock_task_group_list();
+
+	return 1;
+
+ err:
+	return 0;
+}
+#else
+static inline void free_fair_sched_group(struct task_group *tg)
+{
+}
+
+static inline int alloc_fair_sched_group(struct task_group *tg)
+{
+	return 1;
+}
 #endif
 
 #ifdef CONFIG_RT_GROUP_SCHED
+static void free_rt_sched_group(struct task_group *tg)
+{
+	int i;
+
+	for_each_possible_cpu(i) {
+		if (tg->rt_rq)
+			kfree(tg->rt_rq[i]);
+		if (tg->rt_se)
+			kfree(tg->rt_se[i]);
+	}
+
+	kfree(tg->rt_rq);
+	kfree(tg->rt_se);
+}
+
+static int alloc_rt_sched_group(struct task_group *tg)
+{
+	struct rt_rq *rt_rq;
+	struct sched_rt_entity *rt_se;
+	struct rq *rq;
+	int i;
+
+	tg->rt_rq = kzalloc(sizeof(rt_rq) * NR_CPUS, GFP_KERNEL);
+	if (!tg->rt_rq)
+		goto err;
+	tg->rt_se = kzalloc(sizeof(rt_se) * NR_CPUS, GFP_KERNEL);
+	if (!tg->rt_se)
+		goto err;
+
+	tg->rt_runtime = 0;
+
+	for_each_possible_cpu(i) {
+		rq = cpu_rq(i);
+
 		rt_rq = kmalloc_node(sizeof(struct rt_rq),
 				GFP_KERNEL|__GFP_ZERO, cpu_to_node(i));
 		if (!rt_rq)
@@ -7663,24 +7682,55 @@ struct task_group *sched_create_group(vo
 			goto err;
 
 		init_tg_rt_entry(rq, tg, rt_rq, rt_se, i, 0);
-#endif
 	}
 
 	lock_task_group_list();
 	for_each_possible_cpu(i) {
 		rq = cpu_rq(i);
-#ifdef CONFIG_FAIR_GROUP_SCHED
-		cfs_rq = tg->cfs_rq[i];
-		list_add_rcu(&cfs_rq->leaf_cfs_rq_list, &rq->leaf_cfs_rq_list);
-#endif
-#ifdef CONFIG_RT_GROUP_SCHED
 		rt_rq = tg->rt_rq[i];
 		list_add_rcu(&rt_rq->leaf_rt_rq_list, &rq->leaf_rt_rq_list);
-#endif
 	}
 	list_add_rcu(&tg->list, &task_groups);
 	unlock_task_group_list();
 
+	return 1;
+
+ err:
+	return 0;
+}
+#else
+static inline void free_rt_sched_group(struct task_group *tg)
+{
+}
+
+static inline int alloc_rt_sched_group(struct task_group *tg)
+{
+	return 1;
+}
+#endif
+
+static void free_sched_group(struct task_group *tg)
+{
+	free_fair_sched_group(tg);
+	free_rt_sched_group(tg);
+	kfree(tg);
+}
+
+/* allocate runqueue etc for a new task group */
+struct task_group *sched_create_group(void)
+{
+	struct task_group *tg;
+
+	tg = kzalloc(sizeof(*tg), GFP_KERNEL);
+	if (!tg)
+		return ERR_PTR(-ENOMEM);
+
+	if (!alloc_fair_sched_group(tg))
+		goto err;
+
+	if (!alloc_rt_sched_group(tg))
+		goto err;
+
 	return tg;
 
 err:
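For readers less familiar with the idiom the patch applies: each config
option gets real alloc/free helpers when it is enabled and empty
static-inline stubs when it is not, so the common code can call the
helpers unconditionally and the #ifdefs stay confined to one spot.
Below is a minimal, self-contained sketch of that stub pattern --
CONFIG_FANCY, struct widget and the helper names are invented for
illustration and are not from kernel/sched.c:

#include <stdio.h>
#include <stdlib.h>

struct widget {
#ifdef CONFIG_FANCY
	int *fancy_state;	/* only exists when the feature is built in */
#endif
	int id;
};

#ifdef CONFIG_FANCY
/* Real helpers, compiled only when the feature is configured in. */
static int alloc_fancy_state(struct widget *w)
{
	w->fancy_state = calloc(1, sizeof(*w->fancy_state));
	return w->fancy_state != NULL;	/* 1 on success, 0 on failure */
}

static void free_fancy_state(struct widget *w)
{
	free(w->fancy_state);
}
#else
/* Empty stubs; the compiler optimizes the call sites away. */
static inline int alloc_fancy_state(struct widget *w)
{
	return 1;
}

static inline void free_fancy_state(struct widget *w)
{
}
#endif

/* The common code reads straight through, with no ifdeffery. */
static struct widget *create_widget(int id)
{
	struct widget *w = calloc(1, sizeof(*w));

	if (!w)
		return NULL;
	w->id = id;

	if (!alloc_fancy_state(w)) {
		free(w);
		return NULL;
	}
	return w;
}

static void destroy_widget(struct widget *w)
{
	free_fancy_state(w);
	free(w);
}

int main(void)
{
	struct widget *w = create_widget(42);

	if (!w)
		return 1;
	printf("widget %d created\n", w->id);
	destroy_widget(w);
	return 0;
}

Build it with and without -DCONFIG_FANCY and both configurations
compile unchanged; that is exactly what the stubs above buy
free_sched_group() and sched_create_group().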