Message-ID: <CAEXW_YTHNbzdSn3RpJwnwKZOJX=rGdMabfsCw0ZuMZhsXZMjQA@mail.gmail.com>
Date:   Wed, 10 May 2023 22:23:19 -0700
From:   Joel Fernandes <joel@...lfernandes.org>
To:     "Paul E. McKenney" <paulmck@...nel.org>
Cc:     rcu@...r.kernel.org, linux-kernel@...r.kernel.org,
        kernel-team@...a.com, rostedt@...dmis.org,
        Qiuxu Zhuo <qiuxu.zhuo@...el.com>
Subject: Re: [PATCH rcu 3/6] rcu/rcuscale: Move rcu_scale_*() after kfree_scale_cleanup()

On Wed, May 10, 2023 at 10:12 AM Paul E. McKenney <paulmck@...nel.org> wrote:
>
> From: Qiuxu Zhuo <qiuxu.zhuo@...el.com>
>
> This code-movement-only commit moves the rcu_scale_cleanup() and
> rcu_scale_shutdown() functions to follow kfree_scale_cleanup().
> This code movement is in preparation for a bug-fix patch that invokes
> kfree_scale_cleanup() from rcu_scale_cleanup().
>
> Signed-off-by: Qiuxu Zhuo <qiuxu.zhuo@...el.com>
> Signed-off-by: Paul E. McKenney <paulmck@...nel.org>
> ---
>  kernel/rcu/rcuscale.c | 194 +++++++++++++++++++++---------------------
>  1 file changed, 97 insertions(+), 97 deletions(-)

I wish diff were better at showing what really changed. The meld tool
can help, but it's a GUI...

Should I run meld later (I'm out at a conference, so I have no access to
meld-capable machines), or are we sufficiently confident that the lines
were moved as-is? :)
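
For what it's worth, a GUI-free way to sanity-check a code-movement-only
commit is git's own move detection; just a sketch from memory, assuming a
git new enough to support --color-moved:

    # Paint blocks of lines that moved verbatim in a distinct color;
    # lines that moved *and* changed still show up as plain adds/removes.
    git show --color-moved=zebra <commit>

If every removed hunk in rcuscale.c is rendered as "moved", the functions
went across unchanged.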

 - Joel


>
> diff --git a/kernel/rcu/rcuscale.c b/kernel/rcu/rcuscale.c
> index e82ec9f9a5d8..7e8965b0827a 100644
> --- a/kernel/rcu/rcuscale.c
> +++ b/kernel/rcu/rcuscale.c
> @@ -522,89 +522,6 @@ rcu_scale_print_module_parms(struct rcu_scale_ops *cur_ops, const char *tag)
>                  scale_type, tag, nrealreaders, nrealwriters, verbose, shutdown);
>  }
>
> -static void
> -rcu_scale_cleanup(void)
> -{
> -       int i;
> -       int j;
> -       int ngps = 0;
> -       u64 *wdp;
> -       u64 *wdpp;
> -
> -       /*
> -        * Would like warning at start, but everything is expedited
> -        * during the mid-boot phase, so have to wait till the end.
> -        */
> -       if (rcu_gp_is_expedited() && !rcu_gp_is_normal() && !gp_exp)
> -               SCALEOUT_ERRSTRING("All grace periods expedited, no normal ones to measure!");
> -       if (rcu_gp_is_normal() && gp_exp)
> -               SCALEOUT_ERRSTRING("All grace periods normal, no expedited ones to measure!");
> -       if (gp_exp && gp_async)
> -               SCALEOUT_ERRSTRING("No expedited async GPs, so went with async!");
> -
> -       if (torture_cleanup_begin())
> -               return;
> -       if (!cur_ops) {
> -               torture_cleanup_end();
> -               return;
> -       }
> -
> -       if (reader_tasks) {
> -               for (i = 0; i < nrealreaders; i++)
> -                       torture_stop_kthread(rcu_scale_reader,
> -                                            reader_tasks[i]);
> -               kfree(reader_tasks);
> -       }
> -
> -       if (writer_tasks) {
> -               for (i = 0; i < nrealwriters; i++) {
> -                       torture_stop_kthread(rcu_scale_writer,
> -                                            writer_tasks[i]);
> -                       if (!writer_n_durations)
> -                               continue;
> -                       j = writer_n_durations[i];
> -                       pr_alert("%s%s writer %d gps: %d\n",
> -                                scale_type, SCALE_FLAG, i, j);
> -                       ngps += j;
> -               }
> -               pr_alert("%s%s start: %llu end: %llu duration: %llu gps: %d batches: %ld\n",
> -                        scale_type, SCALE_FLAG,
> -                        t_rcu_scale_writer_started, t_rcu_scale_writer_finished,
> -                        t_rcu_scale_writer_finished -
> -                        t_rcu_scale_writer_started,
> -                        ngps,
> -                        rcuscale_seq_diff(b_rcu_gp_test_finished,
> -                                          b_rcu_gp_test_started));
> -               for (i = 0; i < nrealwriters; i++) {
> -                       if (!writer_durations)
> -                               break;
> -                       if (!writer_n_durations)
> -                               continue;
> -                       wdpp = writer_durations[i];
> -                       if (!wdpp)
> -                               continue;
> -                       for (j = 0; j < writer_n_durations[i]; j++) {
> -                               wdp = &wdpp[j];
> -                               pr_alert("%s%s %4d writer-duration: %5d %llu\n",
> -                                       scale_type, SCALE_FLAG,
> -                                       i, j, *wdp);
> -                               if (j % 100 == 0)
> -                                       schedule_timeout_uninterruptible(1);
> -                       }
> -                       kfree(writer_durations[i]);
> -               }
> -               kfree(writer_tasks);
> -               kfree(writer_durations);
> -               kfree(writer_n_durations);
> -       }
> -
> -       /* Do torture-type-specific cleanup operations.  */
> -       if (cur_ops->cleanup != NULL)
> -               cur_ops->cleanup();
> -
> -       torture_cleanup_end();
> -}
> -
>  /*
>   * Return the number if non-negative.  If -1, the number of CPUs.
>   * If less than -1, that much less than the number of CPUs, but
> @@ -624,20 +541,6 @@ static int compute_real(int n)
>         return nr;
>  }
>
> -/*
> - * RCU scalability shutdown kthread.  Just waits to be awakened, then shuts
> - * down system.
> - */
> -static int
> -rcu_scale_shutdown(void *arg)
> -{
> -       wait_event_idle(shutdown_wq, atomic_read(&n_rcu_scale_writer_finished) >= nrealwriters);
> -       smp_mb(); /* Wake before output. */
> -       rcu_scale_cleanup();
> -       kernel_power_off();
> -       return -EINVAL;
> -}
> -
>  /*
>   * kfree_rcu() scalability tests: Start a kfree_rcu() loop on all CPUs for number
>   * of iterations and measure total time and number of GP for all iterations to complete.
> @@ -874,6 +777,103 @@ kfree_scale_init(void)
>         return firsterr;
>  }
>
> +static void
> +rcu_scale_cleanup(void)
> +{
> +       int i;
> +       int j;
> +       int ngps = 0;
> +       u64 *wdp;
> +       u64 *wdpp;
> +
> +       /*
> +        * Would like warning at start, but everything is expedited
> +        * during the mid-boot phase, so have to wait till the end.
> +        */
> +       if (rcu_gp_is_expedited() && !rcu_gp_is_normal() && !gp_exp)
> +               SCALEOUT_ERRSTRING("All grace periods expedited, no normal ones to measure!");
> +       if (rcu_gp_is_normal() && gp_exp)
> +               SCALEOUT_ERRSTRING("All grace periods normal, no expedited ones to measure!");
> +       if (gp_exp && gp_async)
> +               SCALEOUT_ERRSTRING("No expedited async GPs, so went with async!");
> +
> +       if (torture_cleanup_begin())
> +               return;
> +       if (!cur_ops) {
> +               torture_cleanup_end();
> +               return;
> +       }
> +
> +       if (reader_tasks) {
> +               for (i = 0; i < nrealreaders; i++)
> +                       torture_stop_kthread(rcu_scale_reader,
> +                                            reader_tasks[i]);
> +               kfree(reader_tasks);
> +       }
> +
> +       if (writer_tasks) {
> +               for (i = 0; i < nrealwriters; i++) {
> +                       torture_stop_kthread(rcu_scale_writer,
> +                                            writer_tasks[i]);
> +                       if (!writer_n_durations)
> +                               continue;
> +                       j = writer_n_durations[i];
> +                       pr_alert("%s%s writer %d gps: %d\n",
> +                                scale_type, SCALE_FLAG, i, j);
> +                       ngps += j;
> +               }
> +               pr_alert("%s%s start: %llu end: %llu duration: %llu gps: %d batches: %ld\n",
> +                        scale_type, SCALE_FLAG,
> +                        t_rcu_scale_writer_started, t_rcu_scale_writer_finished,
> +                        t_rcu_scale_writer_finished -
> +                        t_rcu_scale_writer_started,
> +                        ngps,
> +                        rcuscale_seq_diff(b_rcu_gp_test_finished,
> +                                          b_rcu_gp_test_started));
> +               for (i = 0; i < nrealwriters; i++) {
> +                       if (!writer_durations)
> +                               break;
> +                       if (!writer_n_durations)
> +                               continue;
> +                       wdpp = writer_durations[i];
> +                       if (!wdpp)
> +                               continue;
> +                       for (j = 0; j < writer_n_durations[i]; j++) {
> +                               wdp = &wdpp[j];
> +                               pr_alert("%s%s %4d writer-duration: %5d %llu\n",
> +                                       scale_type, SCALE_FLAG,
> +                                       i, j, *wdp);
> +                               if (j % 100 == 0)
> +                                       schedule_timeout_uninterruptible(1);
> +                       }
> +                       kfree(writer_durations[i]);
> +               }
> +               kfree(writer_tasks);
> +               kfree(writer_durations);
> +               kfree(writer_n_durations);
> +       }
> +
> +       /* Do torture-type-specific cleanup operations.  */
> +       if (cur_ops->cleanup != NULL)
> +               cur_ops->cleanup();
> +
> +       torture_cleanup_end();
> +}
> +
> +/*
> + * RCU scalability shutdown kthread.  Just waits to be awakened, then shuts
> + * down system.
> + */
> +static int
> +rcu_scale_shutdown(void *arg)
> +{
> +       wait_event_idle(shutdown_wq, atomic_read(&n_rcu_scale_writer_finished) >= nrealwriters);
> +       smp_mb(); /* Wake before output. */
> +       rcu_scale_cleanup();
> +       kernel_power_off();
> +       return -EINVAL;
> +}
> +
>  static int __init
>  rcu_scale_init(void)
>  {
> --
> 2.40.1
>
