Message-ID: <tip-683be13a284720205228e29207ef11a1c3c322b9@git.kernel.org>
Date: Fri, 19 Jun 2015 06:24:13 -0700
From: tip-bot for Thomas Gleixner <tipbot@...or.com>
To: linux-tip-commits@...r.kernel.org
Cc: joonwoop@...eaurora.org, tglx@...utronix.de, edumazet@...gle.com,
paulmck@...ux.vnet.ibm.com, peterz@...radead.org,
linux-kernel@...r.kernel.org, hpa@...or.com, fweisbec@...il.com,
john.stultz@...aro.org, wenbo.wang@...blaze.com, mingo@...nel.org,
viresh.kumar@...aro.org
Subject: [tip:timers/core] timer: Minimize nohz off overhead
Commit-ID: 683be13a284720205228e29207ef11a1c3c322b9
Gitweb: http://git.kernel.org/tip/683be13a284720205228e29207ef11a1c3c322b9
Author: Thomas Gleixner <tglx@...utronix.de>
AuthorDate: Tue, 26 May 2015 22:50:35 +0000
Committer: Thomas Gleixner <tglx@...utronix.de>
CommitDate: Fri, 19 Jun 2015 15:18:28 +0200
timer: Minimize nohz off overhead
If nohz is disabled on the kernel command line, the [hr]timer code
still calls wake_up_nohz_cpu() and tick_nohz_full_cpu(), which is a
pretty pointless exercise. Cache nohz_active in the [hr]timer per-cpu
bases and avoid the overhead.
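The pattern is simple enough to show in a minimal standalone sketch;
the struct layout, the stub helpers and the 'deferrable' parameter
below are stand-ins for illustration, not the kernel API:

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the kernel helpers wake_up_nohz_cpu() and
 * tick_nohz_full_cpu(); here they only model the behaviour. */
static void wake_up_nohz_cpu(int cpu)
{
	printf("kick CPU %d to re-evaluate its tick\n", cpu);
}

static bool tick_nohz_full_cpu(int cpu)
{
	(void)cpu;
	return false;		/* pretend no CPU is in the nohz_full set */
}

/* Simplified per-CPU timer base: only the fields relevant here. */
struct timer_base {
	int cpu;
	bool nohz_active;	/* set once, when nohz actually activates */
};

static void internal_add_timer(struct timer_base *base, bool deferrable)
{
	/* ... the timer would be enqueued here ... */

	/*
	 * The added guard: skip the nohz checks and the remote kick
	 * entirely unless nohz was ever activated on this base.
	 */
	if (base->nohz_active) {
		if (!deferrable || tick_nohz_full_cpu(base->cpu))
			wake_up_nohz_cpu(base->cpu);
	}
}

int main(void)
{
	struct timer_base base = { .cpu = 0, .nohz_active = false };

	internal_add_timer(&base, false);	/* nohz off: no kick at all */

	base.nohz_active = true;		/* as timers_update_migration(true) does */
	internal_add_timer(&base, false);	/* nohz on: kick path is taken */

	return 0;
}

With nohz disabled, the add path is thus reduced to a test of a flag
in the per-cpu base, which the before/after profiles below reflect:
wake_up_nohz_cpu() drops out of the profile entirely.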
Before:
  48.10%  hog       [.] main
  15.25%  [kernel]  [k] _raw_spin_lock_irqsave
   9.76%  [kernel]  [k] _raw_spin_unlock_irqrestore
   6.50%  [kernel]  [k] mod_timer
   6.44%  [kernel]  [k] lock_timer_base.isra.38
   3.87%  [kernel]  [k] detach_if_pending
   3.80%  [kernel]  [k] del_timer
   2.67%  [kernel]  [k] internal_add_timer
   1.33%  [kernel]  [k] __internal_add_timer
   0.73%  [kernel]  [k] timerfn
   0.54%  [kernel]  [k] wake_up_nohz_cpu

After:
  48.73%  hog       [.] main
  15.36%  [kernel]  [k] _raw_spin_lock_irqsave
   9.77%  [kernel]  [k] _raw_spin_unlock_irqrestore
   6.61%  [kernel]  [k] lock_timer_base.isra.38
   6.42%  [kernel]  [k] mod_timer
   3.90%  [kernel]  [k] detach_if_pending
   3.76%  [kernel]  [k] del_timer
   2.41%  [kernel]  [k] internal_add_timer
   1.39%  [kernel]  [k] __internal_add_timer
   0.76%  [kernel]  [k] timerfn
We should probably cache a nohz_full value in the per-cpu bases as
well to avoid the cpumask check. The base cache line is hot already;
the cpumask is not necessarily.
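For illustration only, a hypothetical sketch of that follow-up; the
nohz_full field, its name and the place where it would be populated
are assumptions, not part of this patch:

#include <stdbool.h>

/* Stub as in the sketch further up. */
static void wake_up_nohz_cpu(int cpu) { (void)cpu; }

/* Hypothetical extension: cache whether the base's CPU sits in the
 * nohz_full set next to nohz_active, so the add path never has to
 * read the cpumask. */
struct timer_base {
	int cpu;
	bool nohz_active;
	bool nohz_full;		/* would be filled in once at nohz_full setup */
};

static void internal_add_timer(struct timer_base *base, bool deferrable)
{
	if (base->nohz_active) {
		/* base->nohz_full would replace the tick_nohz_full_cpu() lookup */
		if (!deferrable || base->nohz_full)
			wake_up_nohz_cpu(base->cpu);
	}
}

int main(void)
{
	struct timer_base base = { .cpu = 1, .nohz_active = true, .nohz_full = true };

	internal_add_timer(&base, true);	/* deferrable, but nohz_full CPU: kick */
	return 0;
}

The deferrable-timer case would then only touch the base cache line
instead of the nohz_full cpumask.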
Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
Cc: Peter Zijlstra <peterz@...radead.org>
Cc: Paul McKenney <paulmck@...ux.vnet.ibm.com>
Cc: Frederic Weisbecker <fweisbec@...il.com>
Cc: Eric Dumazet <edumazet@...gle.com>
Cc: Viresh Kumar <viresh.kumar@...aro.org>
Cc: John Stultz <john.stultz@...aro.org>
Cc: Joonwoo Park <joonwoop@...eaurora.org>
Cc: Wenbo Wang <wenbo.wang@...blaze.com>
Link: http://lkml.kernel.org/r/20150526224512.207378134@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
---
 include/linux/hrtimer.h     |  2 ++
 kernel/time/hrtimer.c       |  3 ++-
 kernel/time/tick-internal.h |  4 ++--
 kernel/time/tick-sched.c    |  2 +-
 kernel/time/timer.c         | 16 ++++++++++++----
 5 files changed, 19 insertions(+), 8 deletions(-)
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 6955102..76dd4f0 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -164,6 +164,7 @@ enum hrtimer_base_type {
  * @active_bases:	Bitfield to mark bases with active timers
  * @clock_was_set_seq:	Sequence counter of clock was set events
  * @migration_enabled:	The migration of hrtimers to other cpus is enabled
+ * @nohz_active:	The nohz functionality is enabled
  * @expires_next:	absolute time of the next event which was scheduled
  *			via clock_set_next_event()
  * @next_timer:		Pointer to the first expiring timer
@@ -188,6 +189,7 @@ struct hrtimer_cpu_base {
 	unsigned int			active_bases;
 	unsigned int			clock_was_set_seq;
 	bool				migration_enabled;
+	bool				nohz_active;
 #ifdef CONFIG_HIGH_RES_TIMERS
 	unsigned int			in_hrtirq	: 1,
 					hres_active	: 1,
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 6115f4d..db5c950 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -994,7 +994,8 @@ void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
 		 * Kick to reschedule the next tick to handle the new timer
 		 * on dynticks target.
 		 */
-		wake_up_nohz_cpu(new_base->cpu_base->cpu);
+		if (new_base->cpu_base->nohz_active)
+			wake_up_nohz_cpu(new_base->cpu_base->cpu);
 	} else {
 		hrtimer_reprogram(timer, new_base);
 	}
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index 2edde84..966a5a6 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -156,9 +156,9 @@ extern unsigned long tick_nohz_active;
 #endif
 
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
-extern void timers_update_migration(void);
+extern void timers_update_migration(bool update_nohz);
 #else
-static inline void timers_update_migration(void) { }
+static inline void timers_update_migration(bool update_nohz) { }
 #endif
 
 DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index b1cb016..c792429 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -963,7 +963,7 @@ static inline void tick_nohz_activate(struct tick_sched *ts, int mode)
 	ts->nohz_mode = mode;
 	/* One update is enough */
 	if (!test_and_set_bit(0, &tick_nohz_active))
-		timers_update_migration();
+		timers_update_migration(true);
 }
 
 /**
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 343142e..520499d 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -86,6 +86,7 @@ struct tvec_base {
 	unsigned long all_timers;
 	int cpu;
 	bool migration_enabled;
+	bool nohz_active;
 	struct tvec_root tv1;
 	struct tvec tv2;
 	struct tvec tv3;
@@ -99,7 +100,7 @@ static DEFINE_PER_CPU(struct tvec_base, tvec_bases);
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
 unsigned int sysctl_timer_migration = 1;
 
-void timers_update_migration(void)
+void timers_update_migration(bool update_nohz)
 {
 	bool on = sysctl_timer_migration && tick_nohz_active;
 	unsigned int cpu;
@@ -111,6 +112,10 @@ void timers_update_migration(void)
 	for_each_possible_cpu(cpu) {
 		per_cpu(tvec_bases.migration_enabled, cpu) = on;
 		per_cpu(hrtimer_bases.migration_enabled, cpu) = on;
+		if (!update_nohz)
+			continue;
+		per_cpu(tvec_bases.nohz_active, cpu) = true;
+		per_cpu(hrtimer_bases.nohz_active, cpu) = true;
 	}
 }
@@ -124,7 +129,7 @@ int timer_migration_handler(struct ctl_table *table, int write,
 	mutex_lock(&mutex);
 	ret = proc_dointvec(table, write, buffer, lenp, ppos);
 	if (!ret && write)
-		timers_update_migration();
+		timers_update_migration(false);
 	mutex_unlock(&mutex);
 	return ret;
 }
@@ -436,8 +441,11 @@ static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
 	 * require special care against races with idle_cpu(), lets deal
 	 * with that later.
 	 */
-	if (!(timer->flags & TIMER_DEFERRABLE) || tick_nohz_full_cpu(base->cpu))
-		wake_up_nohz_cpu(base->cpu);
+	if (base->nohz_active) {
+		if (!(timer->flags & TIMER_DEFERRABLE) ||
+		    tick_nohz_full_cpu(base->cpu))
+			wake_up_nohz_cpu(base->cpu);
+	}
 }
 
 #ifdef CONFIG_TIMER_STATS
--