lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <CAKfTPtA5-S_EyzZMDMr9SuVQmWZNdLXOVSLMAMTD+6Bow4jJBQ@mail.gmail.com>
Date:   Thu, 16 Apr 2020 09:42:36 +0200
From:   Vincent Guittot <vincent.guittot@...aro.org>
To:     Valentin Schneider <valentin.schneider@....com>
Cc:     linux-kernel <linux-kernel@...r.kernel.org>,
        Ingo Molnar <mingo@...nel.org>,
        Peter Zijlstra <peterz@...radead.org>,
        Dietmar Eggemann <dietmar.eggemann@....com>,
        Ingo Molnar <mingo@...hat.com>,
        Juri Lelli <juri.lelli@...hat.com>,
        Steven Rostedt <rostedt@...dmis.org>
Subject: Re: [PATCH v3 6/9] sched: Kill select_task_rq()'s sd_flag parameter

On Wed, 15 Apr 2020 at 23:05, Valentin Schneider
<valentin.schneider@....com> wrote:
>
> Only select_task_rq_fair() uses that parameter to do an actual domain
> search, other classes only care about what kind of wakeup is happening
> (fork, exec, or "regular") and thus just translate the flag into a wakeup
> type.
>
> WF_TTWU and WF_EXEC have just been added, use these along with WF_FORK to
> encode the wakeup types we care about. This cleans up the API a
> bit, but adds an extra conversion layer within select_task_rq_fair().
>
> Cc: Ingo Molnar <mingo@...hat.com>
> Cc: Peter Zijlstra <peterz@...radead.org>
> Cc: Juri Lelli <juri.lelli@...hat.com>
> Cc: Vincent Guittot <vincent.guittot@...aro.org>
> Cc: Dietmar Eggemann <dietmar.eggemann@....com>
> Cc: Steven Rostedt <rostedt@...dmis.org>
> Signed-off-by: Valentin Schneider <valentin.schneider@....com>
> ---
>  kernel/sched/core.c      | 10 +++++-----
>  kernel/sched/deadline.c  |  4 ++--
>  kernel/sched/fair.c      | 18 +++++++++++++++---
>  kernel/sched/idle.c      |  2 +-
>  kernel/sched/rt.c        |  4 ++--
>  kernel/sched/sched.h     |  2 +-
>  kernel/sched/stop_task.c |  2 +-
>  7 files changed, 27 insertions(+), 15 deletions(-)
>
> diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> index 3a61a3b8eaa9..aea9badd397a 100644
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -2094,12 +2094,12 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
>   * The caller (fork, wakeup) owns p->pi_lock, ->cpus_ptr is stable.
>   */
>  static inline
> -int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
> +int select_task_rq(struct task_struct *p, int cpu, int wake_flags)
>  {
>         lockdep_assert_held(&p->pi_lock);
>
>         if (p->nr_cpus_allowed > 1)
> -               cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags);
> +               cpu = p->sched_class->select_task_rq(p, cpu, wake_flags);
>         else
>                 cpu = cpumask_any(p->cpus_ptr);
>
> @@ -2612,7 +2612,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
>                 atomic_dec(&task_rq(p)->nr_iowait);
>         }
>
> -       cpu = select_task_rq(p, p->wake_cpu, SD_BALANCE_WAKE, wake_flags);
> +       cpu = select_task_rq(p, p->wake_cpu, wake_flags | WF_TTWU);
>         if (task_cpu(p) != cpu) {
>                 wake_flags |= WF_MIGRATED;
>                 psi_ttwu_dequeue(p);
> @@ -2945,7 +2945,7 @@ void wake_up_new_task(struct task_struct *p)
>          * as we're not fully set-up yet.
>          */
>         p->recent_used_cpu = task_cpu(p);
> -       __set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0));
> +       __set_task_cpu(p, select_task_rq(p, task_cpu(p), WF_FORK));
>  #endif
>         rq = __task_rq_lock(p, &rf);
>         update_rq_clock(rq);
> @@ -3486,7 +3486,7 @@ void sched_exec(void)
>         int dest_cpu;
>
>         raw_spin_lock_irqsave(&p->pi_lock, flags);
> -       dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), SD_BALANCE_EXEC, 0);
> +       dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), WF_EXEC);
>         if (dest_cpu == smp_processor_id())
>                 goto unlock;
>
> diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
> index 504d2f51b0d6..0e96b435c51b 100644
> --- a/kernel/sched/deadline.c
> +++ b/kernel/sched/deadline.c
> @@ -1601,12 +1601,12 @@ static void yield_task_dl(struct rq *rq)
>  static int find_later_rq(struct task_struct *task);
>
>  static int
> -select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
> +select_task_rq_dl(struct task_struct *p, int cpu, int flags)
>  {
>         struct task_struct *curr;
>         struct rq *rq;
>
> -       if (sd_flag != SD_BALANCE_WAKE)
> +       if (!(flags & WF_TTWU))
>                 goto out;
>
>         rq = cpu_rq(cpu);
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 3d34b4e4060f..b0bf98e6798b 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -6611,7 +6611,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
>
>  /*
>   * select_task_rq_fair: Select target runqueue for the waking task in domains
> - * that have the 'sd_flag' flag set. In practice, this is SD_BALANCE_WAKE,
> + * that have the relevant SD flag set. In practice, this is SD_BALANCE_WAKE,
>   * SD_BALANCE_FORK, or SD_BALANCE_EXEC.
>   *
>   * Balances load by selecting the idlest CPU in the idlest group, or under
> @@ -6622,13 +6622,25 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
>   * preempt must be disabled.
>   */
>  static int
> -select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags)
> +select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags)
>  {
> +       int sync = (wake_flags & WF_SYNC) && !(current->flags & PF_EXITING);
>         struct sched_domain *tmp, *sd = NULL;
>         int cpu = smp_processor_id();
>         int new_cpu = prev_cpu;
>         int want_affine = 0;
> -       int sync = (wake_flags & WF_SYNC) && !(current->flags & PF_EXITING);
> +       int sd_flag;
> +
> +       switch (wake_flags & (WF_TTWU | WF_FORK | WF_EXEC)) {

You remove a function parameter, which was directly set with the right
flag, but then you add a switch statement to recreate this sd_flag
internally. Not sure we can say that it's a real benefit.

> +       case WF_TTWU:
> +               sd_flag = SD_BALANCE_WAKE;
> +               break;
> +       case WF_FORK:
> +               sd_flag = SD_BALANCE_FORK;
> +               break;
> +       default:
> +               sd_flag = SD_BALANCE_EXEC;
> +       }
>
>         if (sd_flag & SD_BALANCE_WAKE) {
>                 record_wakee(p);
> diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
> index b743bf38f08f..e9c6a27f0647 100644
> --- a/kernel/sched/idle.c
> +++ b/kernel/sched/idle.c
> @@ -367,7 +367,7 @@ void cpu_startup_entry(enum cpuhp_state state)
>
>  #ifdef CONFIG_SMP
>  static int
> -select_task_rq_idle(struct task_struct *p, int cpu, int sd_flag, int flags)
> +select_task_rq_idle(struct task_struct *p, int cpu, int flags)
>  {
>         return task_cpu(p); /* IDLE tasks as never migrated */
>  }
> diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
> index df11d88c9895..88427ea0231b 100644
> --- a/kernel/sched/rt.c
> +++ b/kernel/sched/rt.c
> @@ -1426,14 +1426,14 @@ static void yield_task_rt(struct rq *rq)
>  static int find_lowest_rq(struct task_struct *task);
>
>  static int
> -select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
> +select_task_rq_rt(struct task_struct *p, int cpu, int flags)
>  {
>         struct task_struct *curr;
>         struct rq *rq;
>         bool test;
>
>         /* For anything but wake ups, just return the task_cpu */
> -       if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
> +       if (!(flags & (WF_TTWU | WF_FORK)))
>                 goto out;
>
>         rq = cpu_rq(cpu);
> diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
> index f32c5fa229af..448f5d630544 100644
> --- a/kernel/sched/sched.h
> +++ b/kernel/sched/sched.h
> @@ -1767,7 +1767,7 @@ struct sched_class {
>
>  #ifdef CONFIG_SMP
>         int (*balance)(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
> -       int  (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
> +       int  (*select_task_rq)(struct task_struct *p, int task_cpu, int flags);
>         void (*migrate_task_rq)(struct task_struct *p, int new_cpu);
>
>         void (*task_woken)(struct rq *this_rq, struct task_struct *task);
> diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c
> index 4c9e9975684f..4f061ddf8470 100644
> --- a/kernel/sched/stop_task.c
> +++ b/kernel/sched/stop_task.c
> @@ -11,7 +11,7 @@
>
>  #ifdef CONFIG_SMP
>  static int
> -select_task_rq_stop(struct task_struct *p, int cpu, int sd_flag, int flags)
> +select_task_rq_stop(struct task_struct *p, int cpu, int flags)
>  {
>         return task_cpu(p); /* stop tasks as never migrate */
>  }
> --
> 2.24.0
>

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ