lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date:   Wed, 5 Oct 2016 10:38:25 +0100
From:   Dietmar Eggemann <dietmar.eggemann@....com>
To:     Vincent Guittot <vincent.guittot@...aro.org>,
        <peterz@...radead.org>, <mingo@...nel.org>,
        <linux-kernel@...r.kernel.org>, <kernellwp@...il.com>
CC:     <yuyang.du@...el.com>, <Morten.Rasmussen@....com>,
        <linaro-kernel@...ts.linaro.org>, <pjt@...gle.com>,
        <bsegall@...gle.com>
Subject: Re: [PATCH 1/7 v4] sched: factorize attach entity

On 09/26/2016 01:19 PM, Vincent Guittot wrote:
> Factorize post_init_entity_util_avg and part of attach_task_cfs_rq
> in one function attach_entity_cfs_rq
>
> Signed-off-by: Vincent Guittot <vincent.guittot@...aro.org>
> ---
>  kernel/sched/fair.c | 19 +++++++++++--------
>  1 file changed, 11 insertions(+), 8 deletions(-)
>
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 986c10c..e8ed8d1 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -697,9 +697,7 @@ void init_entity_runnable_average(struct sched_entity *se)
>  }
>
>  static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
> -static int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq);
> -static void update_tg_load_avg(struct cfs_rq *cfs_rq, int force);
> -static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se);
> +static void attach_entity_cfs_rq(struct sched_entity *se);
>
>  /*
>   * With new tasks being created, their initial util_avgs are extrapolated
> @@ -764,9 +762,7 @@ void post_init_entity_util_avg(struct sched_entity *se)
>               }
>       }

You could now move the 'u64 now = cfs_rq_clock_task(cfs_rq);' into the
if condition to handle !fair_sched_class tasks.

> -     update_cfs_rq_load_avg(now, cfs_rq, false);
> -     attach_entity_load_avg(cfs_rq, se);
> -     update_tg_load_avg(cfs_rq, false);
> +     attach_entity_cfs_rq(se);
>  }
>
>  #else /* !CONFIG_SMP */
> @@ -8501,9 +8497,8 @@ static void detach_task_cfs_rq(struct task_struct *p)
>       update_tg_load_avg(cfs_rq, false);
>  }
>
> -static void attach_task_cfs_rq(struct task_struct *p)
> +static void attach_entity_cfs_rq(struct sched_entity *se)
>  {
> -     struct sched_entity *se = &p->se;
>       struct cfs_rq *cfs_rq = cfs_rq_of(se);

Both callers of attach_entity_cfs_rq() already compute cfs_rq_of(se), so
you could pass the cfs_rq in as a parameter instead of recomputing it.

>       u64 now = cfs_rq_clock_task(cfs_rq);
> @@ -8519,6 +8514,14 @@ static void attach_task_cfs_rq(struct task_struct *p)

The old comment /* Synchronize task ... */ should be changed to /*
Synchronize entity ... */, since the function now operates on a
sched_entity rather than a task.

>       update_cfs_rq_load_avg(now, cfs_rq, false);
>       attach_entity_load_avg(cfs_rq, se);
>       update_tg_load_avg(cfs_rq, false);
> +}
> +
> +static void attach_task_cfs_rq(struct task_struct *p)
> +{
> +     struct sched_entity *se = &p->se;
> +     struct cfs_rq *cfs_rq = cfs_rq_of(se);
> +
> +     attach_entity_cfs_rq(se);
>
>       if (!vruntime_normalized(p))
>               se->vruntime += cfs_rq->min_vruntime;
>

IMPORTANT NOTICE: The contents of this email and any attachments are confidential and may also be privileged. If you are not the intended recipient, please notify the sender immediately and do not disclose the contents to any other person, use it for any purpose, or store or copy the information in any medium. Thank you.

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ