Message-ID: <20221026035724.GA21523@ranerica-svr.sc.intel.com>
Date: Tue, 25 Oct 2022 20:57:24 -0700
From: Ricardo Neri <ricardo.neri-calderon@...ux.intel.com>
To: Peter Zijlstra <peterz@...radead.org>
Cc: Juri Lelli <juri.lelli@...hat.com>,
Vincent Guittot <vincent.guittot@...aro.org>,
Ricardo Neri <ricardo.neri@...el.com>,
"Ravi V. Shankar" <ravi.v.shankar@...el.com>,
Ben Segall <bsegall@...gle.com>,
Daniel Bristot de Oliveira <bristot@...hat.com>,
Dietmar Eggemann <dietmar.eggemann@....com>,
Len Brown <len.brown@...el.com>, Mel Gorman <mgorman@...e.de>,
"Rafael J. Wysocki" <rafael.j.wysocki@...el.com>,
Srinivas Pandruvada <srinivas.pandruvada@...ux.intel.com>,
Steven Rostedt <rostedt@...dmis.org>,
Tim Chen <tim.c.chen@...ux.intel.com>,
Valentin Schneider <vschneid@...hat.com>, x86@...nel.org,
linux-kernel@...r.kernel.org, "Tim C . Chen" <tim.c.chen@...el.com>
Subject: Re: [RFC PATCH 08/23] sched/fair: Compute task-class performance
scores for load balancing
On Tue, Sep 27, 2022 at 11:15:46AM +0200, Peter Zijlstra wrote:
> On Fri, Sep 09, 2022 at 04:11:50PM -0700, Ricardo Neri wrote:
>
> > +static void compute_ilb_sg_task_class_scores(struct sg_lb_task_class_stats *class_sgs,
> > + struct sg_lb_stats *sgs,
> > + int dst_cpu)
> > +{
> > + int group_score, group_score_without, score_on_dst_cpu;
> > + int busy_cpus = sgs->group_weight - sgs->idle_cpus;
> > +
> > + if (!sched_task_classes_enabled())
> > + return;
> > +
> > + /* No busy CPUs in the group. No tasks to move. */
> > + if (!busy_cpus)
> > + return;
> > +
> > + score_on_dst_cpu = arch_get_task_class_score(class_sgs->p_min_score->class,
> > + dst_cpu);
> > +
> > + /*
> > + * The simplest case. The single busy CPU in the current group will
> > + * become idle after pulling its current task. The destination CPU is
> > + * idle.
> > + */
> > + if (busy_cpus == 1) {
> > + sgs->task_class_score_before = class_sgs->sum_score;
> > + sgs->task_class_score_after = score_on_dst_cpu;
> > + return;
> > + }
> > +
> > + /*
> > + * Now compute the group score with and without the task with the
> > + * lowest score. We assume that the tasks that remain in the group share
> > + * the CPU resources equally.
> > + */
> > + group_score = class_sgs->sum_score / busy_cpus;
> > +
> > + group_score_without = (class_sgs->sum_score - class_sgs->min_score) /
> > + (busy_cpus - 1);
> > +
> > + sgs->task_class_score_after = group_score_without + score_on_dst_cpu;
> > + sgs->task_class_score_before = group_score;
> > +}
(I am sorry, Peter, I just found that several emails were sitting in my drafts
directory.)
>
> That's just plain broken; also lots of cleanups done...
Thank you very much for your suggestions. They make sense to me; I only
have one comment below.
Do you want me to add your Signed-off-by and Co-developed-by tags?
>
> ---
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -8405,12 +8405,14 @@ struct sg_lb_stats {
> enum group_type group_type;
> unsigned int group_asym_packing; /* Tasks should be moved to preferred CPU */
> unsigned long group_misfit_task_load; /* A CPU has a task too big for its capacity */
> - long task_class_score_after; /* Prospective task-class score after load balancing */
> - long task_class_score_before; /* Task-class score before load balancing */
> #ifdef CONFIG_NUMA_BALANCING
> unsigned int nr_numa_running;
> unsigned int nr_preferred_running;
> #endif
> +#ifdef CONFIG_SCHED_TASK_CLASSES
> + long task_class_score_after; /* Prospective task-class score after load balancing */
> + long task_class_score_before; /* Task-class score before load balancing */
> +#endif
> };
>
> /*
> @@ -8689,58 +8691,54 @@ group_type group_classify(unsigned int i
> }
>
> struct sg_lb_task_class_stats {
> - /*
> - * Score of the task with lowest score among the current tasks (i.e.,
> - * runqueue::curr) of all runqueues in the scheduling group.
> - */
> - int min_score;
> - /*
> - * Sum of the scores of the current tasks of all runqueues in the
> - * scheduling group.
> - */
> - long sum_score;
> - /* The task with score equal to @min_score */
> - struct task_struct *p_min_score;
> + int min_score; /* Min(rq->curr->score) */
> + int min_class;
> + long sum_score; /* Sum(rq->curr->score) */
> };
>
> #ifdef CONFIG_SCHED_TASK_CLASSES
> -static void init_rq_task_classes_stats(struct sg_lb_task_class_stats *class_sgs)
> +static void init_sg_lb_task_class_stats(struct sg_lb_task_class_stats *sgcs)
> {
> - class_sgs->min_score = INT_MAX;
> - class_sgs->sum_score = 0;
> - class_sgs->p_min_score = NULL;
> + *sgcs = (struct sg_lb_task_class_stats){
> + .min_score = INT_MAX,
> + };
> }
>
> /** Called only if cpu_of(@rq) is not idle and has tasks running. */
> -static void update_rq_task_classes_stats(struct sg_lb_task_class_stats *class_sgs,
> - struct rq *rq)
> +static void update_sg_lb_task_class_stats(struct sg_lb_task_class_stats *sgcs,
> + struct rq *rq)
> {
> - int score;
> + struct task_struct *curr;
> + int class, score;
>
> if (!sched_task_classes_enabled())
> return;
>
> + curr = rcu_dereference(rq->curr);
> + if (!curr || (curr->flags & PF_EXITING) || is_idle_task(curr))
> + return;
> +
> /*
> * TODO: if nr_running > 1 we may want go through all the tasks behind
> * rq->curr.
> */
> - score = arch_get_task_class_score(rq->curr->class, cpu_of(rq));
> -
> - class_sgs->sum_score += score;
> + class = curr->class;
> + score = arch_get_task_class_score(class, cpu_of(rq));
>
> - if (score >= class_sgs->min_score)
> - return;
> + sgcs->sum_score += score;
>
> - class_sgs->min_score = score;
> - class_sgs->p_min_score = rq->curr;
> + if (score < sgcs->min_score) {
> + sgcs->min_score = score;
> + sgcs->min_class = class;
> + }
> }
>
> -static void compute_ilb_sg_task_class_scores(struct sg_lb_task_class_stats *class_sgs,
> - struct sg_lb_stats *sgs,
> - int dst_cpu)
> +static void update_sg_lb_stats_scores(struct sg_lb_task_class_stats *sgcs,
> + struct sg_lb_stats *sgs,
> + int dst_cpu)
> {
> - int group_score, group_score_without, score_on_dst_cpu;
> int busy_cpus = sgs->group_weight - sgs->idle_cpus;
> + long before, after;
>
> if (!sched_task_classes_enabled())
> return;
> @@ -8749,32 +8747,18 @@ static void compute_ilb_sg_task_class_sc
> if (!busy_cpus)
> return;
>
> - score_on_dst_cpu = arch_get_task_class_score(class_sgs->p_min_score->class,
> - dst_cpu);
> + score_on_dst_cpu = arch_get_task_class_score(sgcs->min_class, dst_cpu);
>
> - /*
> -	 * The simplest case. The single busy CPU in the current group will
> - * become idle after pulling its current task. The destination CPU is
> - * idle.
> - */
> - if (busy_cpus == 1) {
> - sgs->task_class_score_before = class_sgs->sum_score;
> - sgs->task_class_score_after = score_on_dst_cpu;
> - return;
> - }
> +	before = sgcs->sum_score;
> + after = before - sgcs->min_score + score_on_dst_cpu;
This works when the sched group being evaluated has only one busy CPU: in that
case sum_score == min_score, so 'after' reduces to score_on_dst_cpu, and the
busy CPU becomes idle once the (previously idle) destination CPU pulls its
current task.
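As a sanity check (only a stand-alone user-space sketch with made-up scores,
not kernel code), the simplified formula collapses to the removed
busy_cpus == 1 branch:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	/* Made-up scores; with one busy CPU, sum_score == min_score. */
	long sum_score = 10, min_score = 10;
	long score_on_dst_cpu = 30;

	long before = sum_score;
	long after = before - min_score + score_on_dst_cpu;

	/* Same result as the removed busy_cpus == 1 branch. */
	assert(before == sum_score);
	assert(after == score_on_dst_cpu);
	printf("before=%ld after=%ld\n", before, after);

	return 0;
}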
>
> - /*
> - * Now compute the group score with and without the task with the
> - * lowest score. We assume that the tasks that remain in the group share
> - * the CPU resources equally.
> - */
> - group_score = class_sgs->sum_score / busy_cpus;
> -
> - group_score_without = (class_sgs->sum_score - class_sgs->min_score) /
> - (busy_cpus - 1);
> + if (busy_cpus > 1) {
> + before /= busy_cpus;
> + after /= busy_cpus;
However, I don't think this works when the sched group has more than one
busy CPU. At this point 'after' already includes score_on_dst_cpu, so it
reflects the throughput of the sched group *and* of the destination CPU;
dividing it by busy_cpus incorrectly scales the destination CPU's
contribution along with the group's. Also, one of the CPUs in the sched
group becomes idle after the balance, so the tasks that remain share
busy_cpus - 1 CPUs, not busy_cpus.
We should instead divide (sum_score - min_score) by busy_cpus - 1 and only
then add score_on_dst_cpu.
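Something along these lines is what I have in mind (again only a
stand-alone sketch with made-up numbers, not a finished patch):

#include <stdio.h>

int main(void)
{
	/* Hypothetical values: three busy CPUs in the group. */
	long sum_score = 90, min_score = 10, score_on_dst_cpu = 30;
	int busy_cpus = 3;

	/* Throughput of the group before the balance. */
	long before = sum_score / busy_cpus;

	/*
	 * After the balance the remaining tasks share busy_cpus - 1
	 * CPUs; only then add the score of the pulled task on the
	 * destination CPU.
	 */
	long after = (sum_score - min_score) / (busy_cpus - 1) +
		     score_on_dst_cpu;

	printf("before=%ld after=%ld\n", before, after);

	return 0;
}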
> + }
>
> - sgs->task_class_score_after = group_score_without + score_on_dst_cpu;
> - sgs->task_class_score_before = group_score;
> + sgs->task_class_score_before = before;
> + sgs->task_class_score_after = after;
>
Thanks and BR,
Ricardo
>