Message-ID: <CAKfTPtDXdo16gTPUf8RwXDuESTdn09c2W7J0YCCYjCxg8uuh6A@mail.gmail.com>
Date: Fri, 1 Jun 2018 16:37:32 +0200
From: Vincent Guittot <vincent.guittot@...aro.org>
To: Peter Zijlstra <peterz@...radead.org>
Cc: Ingo Molnar <mingo@...nel.org>,
linux-kernel <linux-kernel@...r.kernel.org>,
Patrick Bellasi <patrick.bellasi@....com>,
viresh kumar <viresh.kumar@...aro.org>,
Thomas Gleixner <tglx@...utronix.de>,
Mike Galbraith <efault@....de>,
Juri Lelli <juri.lelli@...hat.com>,
"# v4 . 16+" <stable@...r.kernel.org>
Subject: Re: [PATCH v2] sched/rt: fix call to cpufreq_update_util
Hi Peter,
On 17 May 2018 at 22:12, Vincent Guittot <vincent.guittot@...aro.org> wrote:
> On Thursday 17 May 2018 at 11:32:06 (+0200), Peter Zijlstra wrote:
>> On Thu, May 17, 2018 at 11:05:30AM +0200, Peter Zijlstra wrote:
>> > Hurm.. I think this is also wrong. See how dequeue_rt_stack() is also
>> > called from the enqueue path. Also, nothing calls cpufreq_update_util()
>> > on the throttle path now.
>
> Yes, I missed the point that rt_entities are dequeued and then re-enqueued
> when a task is either enqueued or dequeued.
>
> That also means that enqueue_top_rt_rq() is always called when a task is
> enqueued or dequeued, and also when groups are throttled or unthrottled.
> In fact, the only point where it is not called is when the root rt_rq is
> throttled. So I have prepared the patch below, which calls
> cpufreq_update_util() in enqueue_top_rt_rq() and also when we throttle
> the root rt_rq.
> According to the tests I have done, it seems to work for the
> enqueue/dequeue and throttle/unthrottle use cases.
>
> I have re-used your rt_rq_is_runnable(), which takes rt_queued into account.
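
For reference, the call graph that reasoning relies on looks roughly like
this (my simplified sketch of kernel/sched/rt.c, not verbatim code):

    enqueue_task_rt() / dequeue_task_rt()
      -> enqueue_rt_entity() / dequeue_rt_entity()
           -> dequeue_rt_stack()   -> dequeue_top_rt_rq()
           -> enqueue_top_rt_rq()  /* always reached at the end */

    throttle root rt_rq: sched_rt_rq_dequeue() -> dequeue_top_rt_rq()
    unthrottle:          sched_rt_rq_enqueue() -> enqueue_top_rt_rq()

So enqueue_top_rt_rq() is the single common point for the cpufreq kick,
and the root rt_rq throttle path is the one case that has to do the
kick itself.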
What is the status of this problem?
The patch below conflicts with commit
b29bbbbff0d6 ('sched/cpufreq: always consider blocked FAIR utilization'),
which has been merged into tip/sched/core.
Do you want me to rebase it?
Regards,
Vincent
>
> ---
>  kernel/sched/cpufreq_schedutil.c |  2 +-
>  kernel/sched/rt.c                | 16 ++++++++++------
>  kernel/sched/sched.h             |  5 +++++
>  3 files changed, 16 insertions(+), 7 deletions(-)
>
> diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
> index e13df95..1751f96 100644
> --- a/kernel/sched/cpufreq_schedutil.c
> +++ b/kernel/sched/cpufreq_schedutil.c
> @@ -185,7 +185,7 @@ static unsigned long sugov_aggregate_util(struct sugov_cpu *sg_cpu)
>  	struct rq *rq = cpu_rq(sg_cpu->cpu);
>  	unsigned long util;
>
> -	if (rq->rt.rt_nr_running) {
> +	if (rt_rq_is_runnable(&rq->rt)) {
>  		util = sg_cpu->max;
>  	} else {
>  		util = sg_cpu->util_dl;
> diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
> index 7aef6b4..d6b9517 100644
> --- a/kernel/sched/rt.c
> +++ b/kernel/sched/rt.c
> @@ -508,8 +508,11 @@ static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
>
>  	rt_se = rt_rq->tg->rt_se[cpu];
>
> -	if (!rt_se)
> +	if (!rt_se) {
>  		dequeue_top_rt_rq(rt_rq);
> +		/* Kick cpufreq (see the comment in kernel/sched/sched.h). */
> +		cpufreq_update_util(rq_of_rt_rq(rt_rq), 0);
> +	}
>  	else if (on_rt_rq(rt_se))
>  		dequeue_rt_entity(rt_se, 0);
>  }
> @@ -1001,8 +1004,6 @@ dequeue_top_rt_rq(struct rt_rq *rt_rq)
>  	sub_nr_running(rq, rt_rq->rt_nr_running);
>  	rt_rq->rt_queued = 0;
>
> -	/* Kick cpufreq (see the comment in kernel/sched/sched.h). */
> -	cpufreq_update_util(rq, 0);
>  }
>
>  static void
> @@ -1014,11 +1015,14 @@ enqueue_top_rt_rq(struct rt_rq *rt_rq)
>
>  	if (rt_rq->rt_queued)
>  		return;
> -	if (rt_rq_throttled(rt_rq) || !rt_rq->rt_nr_running)
> +
> +	if (rt_rq_throttled(rt_rq))
>  		return;
>
> -	add_nr_running(rq, rt_rq->rt_nr_running);
> -	rt_rq->rt_queued = 1;
> +	if (rt_rq->rt_nr_running) {
> +		add_nr_running(rq, rt_rq->rt_nr_running);
> +		rt_rq->rt_queued = 1;
> +	}
>
>  	/* Kick cpufreq (see the comment in kernel/sched/sched.h). */
>  	cpufreq_update_util(rq, 0);
> diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
> index c9895d3..bbebcfe 100644
> --- a/kernel/sched/sched.h
> +++ b/kernel/sched/sched.h
> @@ -609,6 +609,11 @@ struct rt_rq {
>  #endif
>  };
>
> +static inline bool rt_rq_is_runnable(struct rt_rq *rt_rq)
> +{
> +	return rt_rq->rt_queued && rt_rq->rt_nr_running;
> +}
> +
>  /* Deadline class' related fields in a runqueue */
>  struct dl_rq {
>  	/* runqueue is an rbtree, ordered by deadline */
> --
> 2.7.4
>
>> >
>> > And talking about throttle; I think we wanted to check rt_queued in that
>> > case.
>>
>> Bah, clearly you also want to update on unthrottle.. but I think there's
>> more buggered wrt throttling.
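
As a footnote for anyone following the schedutil side of this: below is a
tiny userspace model (hypothetical, not kernel code; it only mirrors the
two struct rt_rq fields the helper reads) showing why rt_rq_is_runnable()
is false for a throttled root rt_rq even though rt_nr_running is non-zero,
which is what lets sugov_aggregate_util() stop forcing the max frequency:

#include <stdbool.h>
#include <stdio.h>

struct rt_rq {
	int rt_queued;              /* set by enqueue_top_rt_rq(),
				       cleared by dequeue_top_rt_rq() */
	unsigned int rt_nr_running; /* rt tasks accounted on this rq */
};

static inline bool rt_rq_is_runnable(struct rt_rq *rt_rq)
{
	return rt_rq->rt_queued && rt_rq->rt_nr_running;
}

int main(void)
{
	/* Throttled root rt_rq: tasks are still accounted, but
	 * dequeue_top_rt_rq() cleared rt_queued. */
	struct rt_rq throttled = { .rt_queued = 0, .rt_nr_running = 2 };
	/* Queued and running: sugov should request the max frequency. */
	struct rt_rq runnable  = { .rt_queued = 1, .rt_nr_running = 2 };

	printf("throttled runnable? %d\n", rt_rq_is_runnable(&throttled));
	printf("queued    runnable? %d\n", rt_rq_is_runnable(&runnable));
	return 0;
}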