Signed-off-by: Peter Zijlstra (Intel)
---
 kernel/sched/core.c  |  6 +++---
 kernel/sched/fair.c  | 23 +++++++++++++++--------
 kernel/sched/sched.h | 11 ++++++-----
 3 files changed, 24 insertions(+), 16 deletions(-)

--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -608,7 +608,7 @@ static inline bool got_nohz_idle_kick(vo
 {
 	int cpu = smp_processor_id();
 
-	if (!test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu)))
+	if (!(atomic_read(nohz_flags(cpu)) & NOHZ_BALANCE_KICK))
 		return false;
 
 	if (idle_cpu(cpu) && !need_resched())
@@ -618,7 +618,7 @@ static inline bool got_nohz_idle_kick(vo
 	 * We can't run Idle Load Balance on this CPU for this time so we
 	 * cancel it and clear NOHZ_BALANCE_KICK
 	 */
-	clear_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
+	atomic_andnot(NOHZ_BALANCE_KICK, nohz_flags(cpu));
 	return false;
 }
 
@@ -6002,7 +6002,7 @@ void __init sched_init(void)
 		rq_attach_root(rq, &def_root_domain);
 #ifdef CONFIG_NO_HZ_COMMON
 		rq->last_load_update_tick = jiffies;
-		rq->nohz_flags = 0;
+		atomic_set(&rq->nohz_flags, 0);
 #endif
 #ifdef CONFIG_NO_HZ_FULL
 		rq->last_sched_tick = 0;
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8991,6 +8991,7 @@ static inline int find_new_ilb(void)
  */
 static void nohz_balancer_kick(void)
 {
+	unsigned int flags;
 	int ilb_cpu;
 
 	nohz.next_balance++;
@@ -9000,7 +9001,8 @@ static void nohz_balancer_kick(void)
 	if (ilb_cpu >= nr_cpu_ids)
 		return;
 
-	if (test_and_set_bit(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu)))
+	flags = atomic_fetch_or(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu));
+	if (flags & NOHZ_BALANCE_KICK)
 		return;
 	/*
 	 * Use smp_send_reschedule() instead of resched_cpu().
@@ -9014,7 +9016,9 @@ static void nohz_balancer_kick(void)
 
 void nohz_balance_exit_idle(unsigned int cpu)
 {
-	if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
+	unsigned int flags = atomic_read(nohz_flags(cpu));
+
+	if (unlikely(flags & NOHZ_TICK_STOPPED)) {
 		/*
 		 * Completely isolated CPUs don't ever set, so we must test.
 		 */
@@ -9022,7 +9026,8 @@ void nohz_balance_exit_idle(unsigned int
 			cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
 			atomic_dec(&nohz.nr_cpus);
 		}
-		clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
+
+		atomic_andnot(NOHZ_TICK_STOPPED, nohz_flags(cpu));
 	}
 }
 
@@ -9076,7 +9081,7 @@ void nohz_balance_enter_idle(int cpu)
 	if (!housekeeping_cpu(cpu, HK_FLAG_SCHED))
 		return;
 
-	if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
+	if (atomic_read(nohz_flags(cpu)) & NOHZ_TICK_STOPPED)
 		return;
 
 	/*
@@ -9087,7 +9092,7 @@ void nohz_balance_enter_idle(int cpu)
 
 	cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
 	atomic_inc(&nohz.nr_cpus);
-	set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
+	atomic_or(NOHZ_TICK_STOPPED, nohz_flags(cpu));
 }
 #endif
 
@@ -9225,8 +9230,10 @@ static void nohz_idle_balance(struct rq
 	unsigned long next_balance = jiffies + 60*HZ;
 	int update_next_balance = 0;
 
-	if (idle != CPU_IDLE ||
-	    !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)))
+	if (!(atomic_read(nohz_flags(this_cpu)) & NOHZ_BALANCE_KICK))
+		return;
+
+	if (idle != CPU_IDLE)
 		goto end;
 
 	for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
@@ -9272,7 +9279,7 @@ static void nohz_idle_balance(struct rq
 	if (likely(update_next_balance))
 		nohz.next_balance = next_balance;
 end:
-	clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu));
+	atomic_andnot(NOHZ_BALANCE_KICK, nohz_flags(this_cpu));
 }
 
 /*
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -723,7 +723,7 @@ struct rq {
 #ifdef CONFIG_SMP
 	unsigned long last_load_update_tick;
 #endif /* CONFIG_SMP */
-	unsigned long nohz_flags;
+	atomic_t nohz_flags;
 #endif /* CONFIG_NO_HZ_COMMON */
 #ifdef CONFIG_NO_HZ_FULL
 	unsigned long last_sched_tick;
@@ -2003,10 +2003,11 @@ extern void cfs_bandwidth_usage_inc(void
 extern void cfs_bandwidth_usage_dec(void);
 
 #ifdef CONFIG_NO_HZ_COMMON
-enum rq_nohz_flag_bits {
-	NOHZ_TICK_STOPPED,
-	NOHZ_BALANCE_KICK,
-};
+#define NOHZ_TICK_STOPPED_BIT	0
+#define NOHZ_BALANCE_KICK_BIT	1
+
+#define NOHZ_TICK_STOPPED	BIT(NOHZ_TICK_STOPPED_BIT)
+#define NOHZ_BALANCE_KICK	BIT(NOHZ_BALANCE_KICK_BIT)
 
 #define nohz_flags(cpu)	(&cpu_rq(cpu)->nohz_flags)
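
Not part of the patch, for review convenience only: a minimal userspace sketch of the
conversion pattern above, using C11 <stdatomic.h> in place of the kernel's atomic_t API
(the names demo_kick() and the file-scope nohz_flags here are made up for illustration).
Like the kernel's atomic_fetch_or(), the C11 one returns the *old* value, so testing
"old & NOHZ_BALANCE_KICK" preserves the only-first-caller-wins semantics that
test_and_set_bit() used to provide:

	#include <stdatomic.h>
	#include <stdio.h>

	#define NOHZ_TICK_STOPPED	(1U << 0)
	#define NOHZ_BALANCE_KICK	(1U << 1)

	static atomic_uint nohz_flags;	/* stands in for rq->nohz_flags */

	/* Hypothetical analogue of the nohz_balancer_kick() fast path. */
	static int demo_kick(void)
	{
		/* C11 atomic_fetch_or() returns the old value, like the kernel's. */
		unsigned int flags = atomic_fetch_or(&nohz_flags, NOHZ_BALANCE_KICK);

		if (flags & NOHZ_BALANCE_KICK)
			return 0;	/* kick already pending; old code: test_and_set_bit() == 1 */

		return 1;		/* we set the bit first; the real code sends the IPI here */
	}

	int main(void)
	{
		printf("first kick:  %d\n", demo_kick());	/* 1: would send the IPI */
		printf("second kick: %d\n", demo_kick());	/* 0: already pending */

		/* Userspace analogue of atomic_andnot(NOHZ_BALANCE_KICK, ...). */
		atomic_fetch_and(&nohz_flags, ~NOHZ_BALANCE_KICK);

		printf("after clear: %d\n", demo_kick());	/* 1 again */
		return 0;
	}

The clear side works the same way: atomic_fetch_and() with the complemented mask is the
portable stand-in for the kernel's atomic_andnot(), which is what the patch uses in
got_nohz_idle_kick(), nohz_balance_exit_idle() and nohz_idle_balance().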