From 88f52c2df8a2d92423ddd12c92edec949148bf3c Mon Sep 17 00:00:00 2001
From: "Gautham R. Shenoy"
Date: Fri, 23 Jun 2023 23:25:04 +0530
Subject: [PATCH 2/2] swqueue: Only pull a task with valid affinity from
 swqueue

Currently swqueue_pull_task() dequeues the task at the head of the
shared-wakequeue and then tries to migrate the task onto the current
CPU. This may fail, since the current CPU may not be set in the task's
affinity mask.

Hence in swqueue_pull_task(), pull the first task from the
shared-wakequeue that can run on this CPU. With this,
swqueue_pick_next_task() can return 0/1 instead of 0/-1/1 as it does
now.

Signed-off-by: Gautham R. Shenoy
---
 kernel/sched/fair.c | 22 ++++++++++++----------
 1 file changed, 12 insertions(+), 10 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index fe33f6b13299..e78b8302b4c8 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -195,17 +195,21 @@ static struct swqueue *rq_swqueue(struct rq *rq)
 	return rq->cfs.swqueue;
 }
 
-static struct task_struct *swqueue_pull_task(struct swqueue *swqueue)
+static struct task_struct *swqueue_pull_task(struct swqueue *swqueue, int cpu)
 {
 	unsigned long flags;
 
 	struct task_struct *p;
 
 	spin_lock_irqsave(&swqueue->lock, flags);
-	p = list_first_entry_or_null(&swqueue->list, struct task_struct,
-				     swqueue_node);
-	if (p)
-		list_del_init(&p->swqueue_node);
+	list_for_each_entry(p, &swqueue->list, swqueue_node) {
+		if (cpumask_test_cpu(cpu, p->cpus_ptr)) {
+			list_del_init(&p->swqueue_node);
+			goto found;
+		}
+	}
+	p = NULL;
+found:
 	spin_unlock_irqrestore(&swqueue->lock, flags);
 
 	return p;
@@ -238,11 +242,11 @@ static int swqueue_pick_next_task(struct rq *rq, struct rq_flags *rf)
 	struct task_struct *p = NULL;
 	struct rq *src_rq;
 	struct rq_flags src_rf;
-	int ret;
+	int ret = 0;
 
 	swqueue = rq_swqueue(rq);
 	if (!list_empty(&swqueue->list))
-		p = swqueue_pull_task(swqueue);
+		p = swqueue_pull_task(swqueue, rq->cpu);
 
 	if (!p)
 		return 0;
@@ -255,10 +259,8 @@ static int swqueue_pick_next_task(struct rq *rq, struct rq_flags *rf)
 	if (task_on_rq_queued(p) && !task_on_cpu(rq, p))
 		src_rq = migrate_task_to(src_rq, &src_rf, p, cpu_of(rq));
 
-	if (src_rq->cpu != rq->cpu)
+	if (src_rq->cpu == rq->cpu)
 		ret = 1;
-	else
-		ret = -1;
 
 	task_rq_unlock(src_rq, p, &src_rf);
-- 
2.25.1
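
[Editor's note: for readers following along outside the kernel tree, below is a
minimal standalone C sketch of the affinity-filtered pull that this patch
introduces. It is not kernel code: struct task, cpus_mask, and
pull_task_for_cpu() are hypothetical stand-ins for task_struct, p->cpus_ptr,
and swqueue_pull_task(), a singly linked list stands in for the kernel's
list_head machinery, and the swqueue->lock spinlock is omitted.]

#include <stdio.h>

/* Hypothetical userspace stand-in for a shared-wakequeue entry. */
struct task {
	int pid;
	unsigned long cpus_mask;	/* bit N set => task may run on CPU N */
	struct task *next;
};

/*
 * Scan the queue in order and unlink the first task that is allowed to
 * run on @cpu, instead of blindly dequeueing the head. Returns NULL if
 * no queued task may run here. The patch does the same scan under
 * swqueue->lock using list_for_each_entry() and cpumask_test_cpu().
 */
static struct task *pull_task_for_cpu(struct task **head, int cpu)
{
	struct task **pp, *p;

	for (pp = head; (p = *pp) != NULL; pp = &p->next) {
		if (p->cpus_mask & (1UL << cpu)) {
			*pp = p->next;	/* unlink, like list_del_init() */
			p->next = NULL;
			return p;
		}
	}
	return NULL;
}

int main(void)
{
	struct task t2 = { .pid = 2, .cpus_mask = 1UL << 3, .next = NULL };
	struct task t1 = { .pid = 1, .cpus_mask = 1UL << 0, .next = &t2 };
	struct task *head = &t1;

	/*
	 * Pulling for CPU 3 skips pid 1 (affined to CPU 0 only) and
	 * returns pid 2. A head-only dequeue would have grabbed pid 1
	 * and then failed to migrate it, which is the case the patch
	 * fixes.
	 */
	struct task *p = pull_task_for_cpu(&head, 3);

	printf("pulled pid %d\n", p ? p->pid : -1);	/* prints: pulled pid 2 */
	return 0;
}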