diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index c20c9e7..e06ecab 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -295,8 +295,8 @@ sysret_careful:
 	/* Handle a signal */
 sysret_signal:
 	TRACE_IRQS_ON
-	ENABLE_INTERRUPTS(CLBR_NONE)
-	testl $_TIF_DO_NOTIFY_MASK,%edx
+	sti
+	testl $(_TIF_SIGPENDING|_TIF_SINGLESTEP|_TIF_MCE_NOTIFY),%edx
 	jz    1f

 	/* Really a signal */
@@ -390,7 +390,7 @@ int_very_careful:
 	jmp int_restore_rest

 int_signal:
-	testl $_TIF_DO_NOTIFY_MASK,%edx
+	testl $(_TIF_SIGPENDING|_TIF_SINGLESTEP|_TIF_MCE_NOTIFY),%edx
 	jz 1f
 	movq %rsp,%rdi		# &ptregs -> arg1
 	xorl %esi,%esi		# oldset -> arg2
@@ -637,7 +637,7 @@ retint_careful:
 	jmp retint_check

 retint_signal:
-	testl $_TIF_DO_NOTIFY_MASK,%edx
+	testl $(_TIF_SIGPENDING|_TIF_SINGLESTEP|_TIF_MCE_NOTIFY),%edx
 	jz    retint_swapgs
 	TRACE_IRQS_ON
 	ENABLE_INTERRUPTS(CLBR_NONE)
diff --git a/arch/x86/kernel/signal_32.c b/arch/x86/kernel/signal_32.c
index 0157a6f..1b085d2 100644
--- a/arch/x86/kernel/signal_32.c
+++ b/arch/x86/kernel/signal_32.c
@@ -667,9 +667,6 @@ void do_notify_resume(struct pt_regs *regs, void *_unused,
 	/* deal with pending signal delivery */
 	if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
 		do_signal(regs);
-
-	if (thread_info_flags & _TIF_HRTICK_RESCHED)
-		hrtick_resched();

 	clear_thread_flag(TIF_IRET);
 }
diff --git a/arch/x86/kernel/signal_64.c b/arch/x86/kernel/signal_64.c
index 1c83e51..9691bb8 100644
--- a/arch/x86/kernel/signal_64.c
+++ b/arch/x86/kernel/signal_64.c
@@ -504,9 +504,6 @@ do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags)
 	/* deal with pending signal delivery */
 	if (thread_info_flags & (_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK))
 		do_signal(regs);
-
-	if (thread_info_flags & _TIF_HRTICK_RESCHED)
-		hrtick_resched();
 }

 void signal_fault(struct pt_regs *regs, void __user *frame, char *where)
diff --git a/include/asm-x86/thread_info_32.h b/include/asm-x86/thread_info_32.h
index 5bd5082..0514e3b 100644
--- a/include/asm-x86/thread_info_32.h
+++ b/include/asm-x86/thread_info_32.h
@@ -132,7 +132,6 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_SYSCALL_AUDIT	6	/* syscall auditing active */
 #define TIF_SECCOMP		7	/* secure computing */
 #define TIF_RESTORE_SIGMASK	8	/* restore signal mask in do_signal() */
-#define TIF_HRTICK_RESCHED	9	/* reprogram hrtick timer */
 #define TIF_MEMDIE		16
 #define TIF_DEBUG		17	/* uses debug registers */
 #define TIF_IO_BITMAP		18	/* uses I/O bitmap */
@@ -152,7 +151,6 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_SYSCALL_AUDIT	(1<base->get_time();
 }

-static inline int hrtimer_is_hres_active(struct hrtimer *timer)
-{
-	return timer->base->cpu_base->hres_active;
-}
-
 /*
  * The resolution of the clocks. The resolution value is returned in
  * the clock_getres() system call to give application programmers an
@@ -253,10 +248,6 @@ static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer)
 	return timer->base->softirq_time;
 }

-static inline int hrtimer_is_hres_active(struct hrtimer *timer)
-{
-	return 0;
-}
 #endif

 extern ktime_t ktime_get(void);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 6a1e7af..7b7905d 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -285,7 +285,6 @@ extern void trap_init(void);
 extern void account_process_tick(struct task_struct *task, int user);
 extern void update_process_times(int user);
 extern void scheduler_tick(void);
-extern void hrtick_resched(void);

 extern void sched_show_task(struct task_struct *p);
@@ -887,7 +886,7 @@ struct sched_class {
 #endif

 	void (*set_curr_task) (struct rq *rq);
-	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
+	void (*task_tick) (struct rq *rq, struct task_struct *p);
 	void (*task_new) (struct rq *rq, struct task_struct *p);

 	void (*set_cpus_allowed)(struct task_struct *p, cpumask_t *newmask);
diff --git a/kernel/Kconfig.hz b/kernel/Kconfig.hz
index 526128a..4af1580 100644
--- a/kernel/Kconfig.hz
+++ b/kernel/Kconfig.hz
@@ -54,5 +54,3 @@ config HZ
 	default 300 if HZ_300
 	default 1000 if HZ_1000

-config SCHED_HRTICK
-	def_bool HIGH_RES_TIMERS && X86
diff --git a/kernel/sched.c b/kernel/sched.c
index 8dcdec6..c481937 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -65,7 +65,6 @@
 #include 
 #include 
 #include 
-#include 

 #include 
 #include 
@@ -452,12 +451,6 @@ struct rq {
 	struct list_head migration_queue;
 #endif

-#ifdef CONFIG_SCHED_HRTICK
-	unsigned long hrtick_flags;
-	ktime_t hrtick_expire;
-	struct hrtimer hrtick_timer;
-#endif
-
 #ifdef CONFIG_SCHEDSTATS
 	/* latency stats */
 	struct sched_info rq_sched_info;
@@ -594,16 +587,16 @@ enum {
 	SCHED_FEAT_NEW_FAIR_SLEEPERS	= 1,
 	SCHED_FEAT_WAKEUP_PREEMPT	= 2,
 	SCHED_FEAT_START_DEBIT		= 4,
-	SCHED_FEAT_HRTICK		= 8,
-	SCHED_FEAT_DOUBLE_TICK		= 16,
+	SCHED_FEAT_TREE_AVG		= 8,
+	SCHED_FEAT_APPROX_AVG		= 16,
 };

 const_debug unsigned int sysctl_sched_features =
 		SCHED_FEAT_NEW_FAIR_SLEEPERS	* 1 |
 		SCHED_FEAT_WAKEUP_PREEMPT	* 1 |
 		SCHED_FEAT_START_DEBIT		* 1 |
-		SCHED_FEAT_HRTICK		* 1 |
-		SCHED_FEAT_DOUBLE_TICK		* 0;
+		SCHED_FEAT_TREE_AVG		* 0 |
+		SCHED_FEAT_APPROX_AVG		* 0;

 #define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x)
@@ -841,173 +834,6 @@ void sched_clock_idle_wakeup_event(u64 delta_ns)
 }
 EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);

-static void __resched_task(struct task_struct *p, int tif_bit);
-
-static inline void resched_task(struct task_struct *p)
-{
-	__resched_task(p, TIF_NEED_RESCHED);
-}
-
-#ifdef CONFIG_SCHED_HRTICK
-/*
- * Use HR-timers to deliver accurate preemption points.
- *
- * Its all a bit involved since we cannot program an hrt while holding the
- * rq->lock. So what we do is store a state in in rq->hrtick_* and ask for a
- * reschedule event.
- *
- * When we get rescheduled we reprogram the hrtick_timer outside of the
- * rq->lock.
- */
-static inline void resched_hrt(struct task_struct *p)
-{
-	__resched_task(p, TIF_HRTICK_RESCHED);
-}
-
-static inline void resched_rq(struct rq *rq)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&rq->lock, flags);
-	resched_task(rq->curr);
-	spin_unlock_irqrestore(&rq->lock, flags);
-}
-
-enum {
-	HRTICK_SET,		/* re-programm hrtick_timer */
-	HRTICK_RESET,		/* not a new slice */
-};
-
-/*
- * Use hrtick when:
- *  - enabled by features
- *  - hrtimer is actually high res
- */
-static inline int hrtick_enabled(struct rq *rq)
-{
-	if (!sched_feat(HRTICK))
-		return 0;
-	return hrtimer_is_hres_active(&rq->hrtick_timer);
-}
-
-/*
- * Called to set the hrtick timer state.
- *
- * called with rq->lock held and irqs disabled
- */
-static void hrtick_start(struct rq *rq, u64 delay, int reset)
-{
-	assert_spin_locked(&rq->lock);
-
-	/*
-	 * preempt at: now + delay
-	 */
-	rq->hrtick_expire =
-		ktime_add_ns(rq->hrtick_timer.base->get_time(), delay);
-	/*
-	 * indicate we need to program the timer
-	 */
-	__set_bit(HRTICK_SET, &rq->hrtick_flags);
-	if (reset)
-		__set_bit(HRTICK_RESET, &rq->hrtick_flags);
-
-	/*
-	 * New slices are called from the schedule path and don't need a
-	 * forced reschedule.
-	 */
-	if (reset)
-		resched_hrt(rq->curr);
-}
-
-static void hrtick_clear(struct rq *rq)
-{
-	if (hrtimer_active(&rq->hrtick_timer))
-		hrtimer_cancel(&rq->hrtick_timer);
-}
-
-/*
- * Update the timer from the possible pending state.
- */
-static void hrtick_set(struct rq *rq)
-{
-	ktime_t time;
-	int set, reset;
-	unsigned long flags;
-
-	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
-
-	spin_lock_irqsave(&rq->lock, flags);
-	set = __test_and_clear_bit(HRTICK_SET, &rq->hrtick_flags);
-	reset = __test_and_clear_bit(HRTICK_RESET, &rq->hrtick_flags);
-	time = rq->hrtick_expire;
-	clear_thread_flag(TIF_HRTICK_RESCHED);
-	spin_unlock_irqrestore(&rq->lock, flags);
-
-	if (set) {
-		hrtimer_start(&rq->hrtick_timer, time, HRTIMER_MODE_ABS);
-		if (reset && !hrtimer_active(&rq->hrtick_timer))
-			resched_rq(rq);
-	} else
-		hrtick_clear(rq);
-}
-
-/*
- * High-resolution timer tick.
- * Runs from hardirq context with interrupts disabled.
- */
-static enum hrtimer_restart hrtick(struct hrtimer *timer)
-{
-	struct rq *rq = container_of(timer, struct rq, hrtick_timer);
-
-	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
-
-	spin_lock(&rq->lock);
-	__update_rq_clock(rq);
-	rq->curr->sched_class->task_tick(rq, rq->curr, 1);
-	spin_unlock(&rq->lock);
-
-	return HRTIMER_NORESTART;
-}
-
-static inline void init_rq_hrtick(struct rq *rq)
-{
-	rq->hrtick_flags = 0;
-	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-	rq->hrtick_timer.function = hrtick;
-	rq->hrtick_timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ;
-}
-
-void hrtick_resched(void)
-{
-	struct rq *rq;
-	unsigned long flags;
-
-	if (!test_thread_flag(TIF_HRTICK_RESCHED))
-		return;
-
-	local_irq_save(flags);
-	rq = cpu_rq(smp_processor_id());
-	hrtick_set(rq);
-	local_irq_restore(flags);
-}
-#else
-static inline void hrtick_clear(struct rq *rq)
-{
-}
-
-static inline void hrtick_set(struct rq *rq)
-{
-}
-
-static inline void init_rq_hrtick(struct rq *rq)
-{
-}
-
-void hrtick_resched(void)
-{
-}
-#endif
-
 /*
  * resched_task - mark a task 'to be rescheduled now'.
 *
@@ -1021,16 +847,16 @@ void hrtick_resched(void)
 #define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
 #endif

-static void __resched_task(struct task_struct *p, int tif_bit)
+static void resched_task(struct task_struct *p)
 {
 	int cpu;

 	assert_spin_locked(&task_rq(p)->lock);

-	if (unlikely(test_tsk_thread_flag(p, tif_bit)))
+	if (unlikely(test_tsk_thread_flag(p, TIF_NEED_RESCHED)))
 		return;

-	set_tsk_thread_flag(p, tif_bit);
+	set_tsk_thread_flag(p, TIF_NEED_RESCHED);

 	cpu = task_cpu(p);
 	if (cpu == smp_processor_id())
@@ -1096,10 +922,10 @@ void wake_up_idle_cpu(int cpu)
 #endif

 #else
-static void __resched_task(struct task_struct *p, int tif_bit)
+static inline void resched_task(struct task_struct *p)
 {
 	assert_spin_locked(&task_rq(p)->lock);
-	set_tsk_thread_flag(p, tif_bit);
+	set_tsk_need_resched(p);
 }
 #endif

@@ -3766,8 +3592,8 @@ void scheduler_tick(void)
 	}
 	rq->tick_timestamp = rq->clock;
 	update_cpu_load(rq);
-	curr->sched_class->task_tick(rq, curr, 0);
-	update_sched_rt_period(rq);
+	if (curr != rq->idle) /* FIXME: needed? */
+		curr->sched_class->task_tick(rq, curr);
 	spin_unlock(&rq->lock);

 #ifdef CONFIG_SMP
@@ -3913,8 +3739,6 @@ need_resched_nonpreemptible:

 	schedule_debug(prev);

-	hrtick_clear(rq);
-
 	/*
 	 * Do the rq-clock update outside the rq lock:
 	 */
@@ -3952,20 +3776,14 @@ need_resched_nonpreemptible:
 		++*switch_count;

 		context_switch(rq, prev, next); /* unlocks the rq */
-		/*
-		 * the context switch might have flipped the stack from under
-		 * us, hence refresh the local variables.
-		 */
-		cpu = smp_processor_id();
-		rq = cpu_rq(cpu);
 	} else
 		spin_unlock_irq(&rq->lock);

-	hrtick_set(rq);
-
-	if (unlikely(reacquire_kernel_lock(current) < 0))
+	if (unlikely(reacquire_kernel_lock(current) < 0)) {
+		cpu = smp_processor_id();
+		rq = cpu_rq(cpu);
 		goto need_resched_nonpreemptible;
-
+	}
 	preempt_enable_no_resched();
 	if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
 		goto need_resched;
@@ -7248,7 +7066,6 @@ void __init sched_init(void)
 		INIT_LIST_HEAD(&rq->migration_queue);
 		rq_attach_root(rq, &def_root_domain);
 #endif
-		init_rq_hrtick(rq);
 		atomic_set(&rq->nr_iowait, 0);
 		highest_cpu = i;
 	}
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 0080968..14a5af5 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -677,29 +677,13 @@ static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
 	cfs_rq->curr = NULL;
 }

-static void
-entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
+static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 {
 	/*
 	 * Update run-time statistics of the 'current'.
 	 */
 	update_curr(cfs_rq);

-#ifdef CONFIG_SCHED_HRTICK
-	/*
-	 * queued ticks are scheduled to match the slice, so don't bother
-	 * validating it and just reschedule.
-	 */
-	if (queued)
-		return resched_task(rq_of(cfs_rq)->curr);
-	/*
-	 * don't let the period tick interfere with the hrtick preemption
-	 */
-	if (!sched_feat(DOUBLE_TICK) &&
-			hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
-		return;
-#endif
-
 	if (cfs_rq->nr_running > 1 || !sched_feat(WAKEUP_PREEMPT))
 		check_preempt_tick(cfs_rq, curr);
 }
@@ -803,43 +787,6 @@ static inline struct sched_entity *parent_entity(struct sched_entity *se)
 #endif	/* CONFIG_FAIR_GROUP_SCHED */

-#ifdef CONFIG_SCHED_HRTICK
-static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
-{
-	int requeue = rq->curr == p;
-	struct sched_entity *se = &p->se;
-	struct cfs_rq *cfs_rq = cfs_rq_of(se);
-
-	WARN_ON(task_rq(p) != rq);
-
-	if (hrtick_enabled(rq) && cfs_rq->nr_running > 1) {
-		u64 slice = sched_slice(cfs_rq, se);
-		u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
-		s64 delta = slice - ran;
-
-		if (delta < 0) {
-			if (rq->curr == p)
-				resched_task(p);
-			return;
-		}
-
-		/*
-		 * Don't schedule slices shorter than 10000ns, that just
-		 * doesn't make sense. Rely on vruntime for fairness.
-		 */
-		if (!requeue)
-			delta = max(10000LL, delta);
-
-		hrtick_start(rq, delta, requeue);
-	}
-}
-#else
-static inline void
-hrtick_start_fair(struct rq *rq, struct task_struct *p)
-{
-}
-#endif
-
 /*
  * The enqueue_task method is called before nr_running is
  * increased. Here we update the fair scheduling stats and
@@ -857,8 +804,6 @@ static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
 		enqueue_entity(cfs_rq, se, wakeup);
 		wakeup = 1;
 	}
-
-	hrtick_start_fair(rq, rq->curr);
 }

 /*
@@ -879,8 +824,6 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
 			break;
 		sleep = 1;
 	}
-
-	hrtick_start_fair(rq, rq->curr);
 }

 /*
@@ -1152,7 +1095,6 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
 static struct task_struct *pick_next_task_fair(struct rq *rq)
 {
-	struct task_struct *p;
 	struct cfs_rq *cfs_rq = &rq->cfs;
 	struct sched_entity *se;
@@ -1164,10 +1106,7 @@ static struct task_struct *pick_next_task_fair(struct rq *rq)
 		cfs_rq = group_cfs_rq(se);
 	} while (cfs_rq);

-	p = task_of(se);
-	hrtick_start_fair(rq, p);
-
-	return p;
+	return task_of(se);
 }

 /*
@@ -1322,14 +1261,14 @@ move_one_task_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 /*
  * scheduler tick hitting a task of our scheduling class:
  */
-static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
+static void task_tick_fair(struct rq *rq, struct task_struct *curr)
 {
 	struct cfs_rq *cfs_rq;
 	struct sched_entity *se = &curr->se;

 	for_each_sched_entity(se) {
 		cfs_rq = cfs_rq_of(se);
-		entity_tick(cfs_rq, se, queued);
+		entity_tick(cfs_rq, se);
 	}
 }
diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c
index 2bcafa3..ef7a266 100644
--- a/kernel/sched_idletask.c
+++ b/kernel/sched_idletask.c
@@ -61,7 +61,7 @@ move_one_task_idle(struct rq *this_rq, int this_cpu, struct rq *busiest,
 }
 #endif

-static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
+static void task_tick_idle(struct rq *rq, struct task_struct *curr)
 {
 }
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 0a6d2e5..7f67b1a 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -1149,7 +1149,7 @@ static void watchdog(struct rq *rq, struct task_struct *p)
 	}
 }

-static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
+static void task_tick_rt(struct rq *rq, struct task_struct *p)
 {
 	update_curr_rt(rq);