There is only a single PREEMPT_ACTIVE use in the regular __schedule()
path and that is to circumvent the task->state check. Since the code
setting PREEMPT_ACTIVE is the immediate caller of __schedule(), we can
replace this with a function argument.

Signed-off-by: Peter Zijlstra (Intel)
---
 kernel/sched/core.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3045,7 +3045,7 @@ pick_next_task(struct rq *rq, struct tas
  *
  * WARNING: must be called with preemption disabled!
  */
-static void __sched __schedule(void)
+static void __sched __schedule(bool preempt)
 {
 	struct task_struct *prev, *next;
 	unsigned long *switch_count;
@@ -3085,7 +3085,7 @@ static void __sched __schedule(void)
 		rq->clock_skip_update <<= 1; /* promote REQ to ACT */
 
 	switch_count = &prev->nivcsw;
-	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
+	if (!preempt && prev->state) {
 		if (unlikely(signal_pending_state(prev->state, prev))) {
 			prev->state = TASK_RUNNING;
 		} else {
@@ -3150,7 +3150,7 @@ asmlinkage __visible void __sched schedu
 	sched_submit_work(tsk);
 	do {
 		preempt_disable();
-		__schedule();
+		__schedule(false);
 		sched_preempt_enable_no_resched();
 	} while (need_resched());
 }
@@ -3191,7 +3191,7 @@ static void __sched notrace preempt_sche
 {
 	do {
 		preempt_active_enter();
-		__schedule();
+		__schedule(true);
 		preempt_active_exit();
 
 		/*
@@ -3256,7 +3256,7 @@ asmlinkage __visible void __sched notrac
 	 * an infinite recursion.
 	 */
 	prev_ctx = exception_enter();
-	__schedule();
+	__schedule(true);
 	exception_exit(prev_ctx);
 
 	barrier();
@@ -3285,7 +3285,7 @@ asmlinkage __visible void __sched preemp
 	do {
 		preempt_active_enter();
 		local_irq_enable();
-		__schedule();
+		__schedule(true);
 		local_irq_disable();
 		preempt_active_exit();
 	} while (need_resched());
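
As a quick illustrative sketch (not part of the patch), this is how the old
implicit PREEMPT_ACTIVE test maps onto the new explicit argument; it only
restates the hunks above, with the caller summary inferred from the changed
call sites:

	/* Before: __schedule() inferred "this is a preemption" from the
	 * preempt count that its caller had set up. */
	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
		/* voluntary sleep: dequeue prev */
	}

	/* After: the immediate caller states its intent directly. */
	if (!preempt && prev->state) {
		/* voluntary sleep: dequeue prev */
	}

	/* Call sites after the patch:
	 *   schedule()                      -> __schedule(false)
	 *   the preempt_schedule*() paths   -> __schedule(true)
	 */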