diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 51556b9..b43bccb 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -422,13 +422,12 @@ static inline void tick_nohz_switch_to_nohz(void) { }
 #ifdef CONFIG_HIGH_RES_TIMERS
 /*
  * We rearm the timer until we get disabled by the idle code
- * Called with interrupts disabled and timer->base->cpu_base->lock held.
+ * Called with interrupts disabled and timer->base->cpu_base->lock *not* held.
  */
 static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
 {
 	struct tick_sched *ts =
 		container_of(timer, struct tick_sched, sched_timer);
-	struct hrtimer_cpu_base *base = timer->base->cpu_base;
 	struct pt_regs *regs = get_irq_regs();
 	ktime_t now = ktime_get();
 
@@ -454,13 +453,12 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
 		}
 		/*
 		 * update_process_times() might take tasklist_lock, hence
-		 * drop the base lock. sched-tick hrtimers are per-CPU and
-		 * never accessible by userspace APIs, so this is safe to do.
+		 * we don't attempt to grab the base lock here.
+		 * sched-tick hrtimers are per-CPU and never accessible by
+		 * userspace APIs, so this is safe to do.
 		 */
-		spin_unlock(&base->lock);
 		update_process_times(user_mode(regs));
 		profile_tick(CPU_PROFILING);
-		spin_lock(&base->lock);
 	}
 
 	/* Do not restart, when we are in the idle loop */
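
For context on the locking convention the updated comment describes, here is a minimal,
self-contained sketch (not part of the patch) of a self-rearming hrtimer callback written
against the post-patch assumption: the callback runs with interrupts disabled but without
timer->base->cpu_base->lock held, so it may call code that takes other locks, the way
tick_sched_timer() calls update_process_times(), with no spin_unlock()/spin_lock() dance.
The demo_timer, demo_period and demo_timer_fn names are hypothetical illustration only;
the hrtimer calls themselves (hrtimer_init(), hrtimer_start(), hrtimer_forward_now(),
hrtimer_cancel()) are the regular kernel API.

#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/module.h>

static struct hrtimer demo_timer;	/* hypothetical example timer */
static ktime_t demo_period;

static enum hrtimer_restart demo_timer_fn(struct hrtimer *timer)
{
	/*
	 * Invoked with interrupts disabled; the hrtimer base lock is
	 * not held here, so functions that may take other locks can be
	 * called directly without dropping/retaking any base lock.
	 */

	/* ... periodic work would go here ... */

	hrtimer_forward_now(timer, demo_period);
	return HRTIMER_RESTART;		/* rearm until hrtimer_cancel() */
}

static int __init demo_init(void)
{
	demo_period = ktime_set(0, NSEC_PER_SEC / HZ);
	hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	demo_timer.function = demo_timer_fn;
	hrtimer_start(&demo_timer, demo_period, HRTIMER_MODE_REL);
	return 0;
}

static void __exit demo_exit(void)
{
	hrtimer_cancel(&demo_timer);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");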