diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index da7c061..2288ad8 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -511,7 +511,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 
 	if (!initial) {
 		/* sleeps upto a single latency don't count. */
-		if (sched_feat(NEW_FAIR_SLEEPERS) && entity_is_task(se))
+		if (sched_feat(NEW_FAIR_SLEEPERS))
 			vruntime -= sysctl_sched_latency;
 
 		/* ensure we never gain time by being placed backwards. */
@@ -867,7 +867,11 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
 	}
 
 	gran = sysctl_sched_wakeup_granularity;
-	if (unlikely(se->load.weight != NICE_0_LOAD))
+	/*
+	 * More easily preempt - nice tasks, while not making
+	 * it harder for + nice tasks.
+	 */
+	if (unlikely(se->load.weight > NICE_0_LOAD))
 		gran = calc_delta_fair(gran, &se->load);
 
 	if (pse->vruntime + gran < se->vruntime)
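
For illustration, a small user-space sketch (not kernel code) of what the second hunk changes, assuming calc_delta_fair(gran, &se->load) scales gran by roughly NICE_0_LOAD / se->load.weight; the weight values for nice -5/0/+5 and the 10ms granularity are assumptions used only for this example.

/*
 * User-space sketch only. Models the old vs. new test in the
 * check_preempt_wakeup() hunk above, under the assumption that
 * calc_delta_fair(gran, load) behaves like gran * NICE_0_LOAD / weight.
 */
#include <stdio.h>

#define NICE_0_LOAD	1024ULL

/* rough model of calc_delta_fair(): delta * NICE_0_LOAD / weight */
static unsigned long long scale_gran(unsigned long long gran,
				     unsigned long long weight)
{
	return gran * NICE_0_LOAD / weight;
}

int main(void)
{
	unsigned long long gran = 10000000ULL;		/* ~10ms, in ns (assumed) */
	unsigned long long weight[] = { 3121, 1024, 335 };	/* assumed weights */
	const char *label[] = { "nice -5", "nice  0", "nice +5" };

	for (int i = 0; i < 3; i++) {
		/* old check: scale whenever weight != NICE_0_LOAD */
		unsigned long long old_gran = weight[i] != NICE_0_LOAD ?
			scale_gran(gran, weight[i]) : gran;
		/* new check: scale only when weight > NICE_0_LOAD */
		unsigned long long new_gran = weight[i] > NICE_0_LOAD ?
			scale_gran(gran, weight[i]) : gran;

		printf("%s: old gran %llu ns, new gran %llu ns\n",
		       label[i], old_gran, new_gran);
	}
	return 0;
}

Under these assumptions, a +5-niced current task had gran inflated to roughly 30ms by the old test (harder to preempt), while the new test leaves it at the default; a -5-niced task keeps the shrunken ~3ms gran either way.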