 kernel/sched/core.c | 2 +-
 kernel/sched/fair.c | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index bc1638b33449..6980b7ad6da1 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6852,7 +6852,7 @@ struct task_group root_task_group;
 LIST_HEAD(task_groups);
 #endif

-DECLARE_PER_CPU(cpumask_var_t, load_balance_mask);
+DECLARE_PER_CPU(struct cpumask *, load_balance_mask);

 void __init sched_init(void)
 {
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index fea7d3335e1f..ef84a37ba19a 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6421,7 +6421,7 @@ static struct rq *find_busiest_queue(struct lb_env *env,
 #define MAX_PINNED_INTERVAL	512

 /* Working cpumask for load_balance and load_balance_newidle. */
-DEFINE_PER_CPU(cpumask_var_t, load_balance_mask);
+DEFINE_PER_CPU(struct cpumask *, load_balance_mask);

 static int need_active_balance(struct lb_env *env)
 {
@@ -6490,7 +6490,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 	struct sched_group *group;
 	struct rq *busiest;
 	unsigned long flags;
-	struct cpumask *cpus = __get_cpu_var(load_balance_mask);
+	struct cpumask *cpus = __this_cpu_read(load_balance_mask);

 	struct lb_env env = {
 		.sd		= sd,
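
The accessor change depends on the type change: cpumask_var_t expands to
struct cpumask[1] when CONFIG_CPUMASK_OFFSTACK is disabled, i.e. an array
rather than the scalar that __this_cpu_read() expects, whereas a plain
struct cpumask pointer is a readable scalar in both configurations. A
consequence is that every CPU's mask now needs backing storage before
load_balance() first runs. A minimal sketch of such an allocation loop,
assuming it sits early in sched_init() (illustrative only, not part of
this patch):

	int i;

	/*
	 * Illustrative sketch: point each CPU's load_balance_mask slot at
	 * real storage so that __this_cpu_read() in load_balance() never
	 * yields a NULL mask.
	 */
	for_each_possible_cpu(i)
		per_cpu(load_balance_mask, i) =
			kzalloc_node(cpumask_size(), GFP_KERNEL, cpu_to_node(i));

Without some such setup, the first cpumask_copy() onto the mask in
load_balance() would dereference a NULL pointer.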