Message-ID: <CAEXW_YRatZhejsW-o1PXK3V8QxOcc-krvRdjwaDudFSfUotAKw@mail.gmail.com>
Date: Wed, 15 Mar 2023 10:12:05 -0400
From: Joel Fernandes <joel@...lfernandes.org>
To: "Zhuo, Qiuxu" <qiuxu.zhuo@...el.com>
Cc: "paulmck@...nel.org" <paulmck@...nel.org>,
Frederic Weisbecker <frederic@...nel.org>,
Josh Triplett <josh@...htriplett.org>,
Neeraj Upadhyay <quic_neeraju@...cinc.com>,
Davidlohr Bueso <dave@...olabs.net>,
Steven Rostedt <rostedt@...dmis.org>,
Mathieu Desnoyers <mathieu.desnoyers@...icios.com>,
Lai Jiangshan <jiangshanlai@...il.com>,
"rcu@...r.kernel.org" <rcu@...r.kernel.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>
Subject: Re: [PATCH 1/1] rcu/rcuscale: Stop kfree_scale_thread thread(s) after
unloading rcuscale
On Wed, Mar 15, 2023 at 10:07 AM Zhuo, Qiuxu <qiuxu.zhuo@...el.com> wrote:
[...]
> >
> > I do applaud minimizing the size of the patch, but in this case could you
> > please pull the kfree_scale_cleanup() function ahead of its first use?
> >
>
> I thought about it, but I was also concerned that it would make the patch
> bigger while the effective changes are only a few lines ...
>
> Pulling the kfree_scale_cleanup() function ahead of rcu_scale_cleanup() also
> requires pulling ahead the two kfree_* variables used by kfree_scale_cleanup().
> That would mix the kfree_* and rcu_scale_* functions/variables together in the source file.
>
> How about moving the rcu_scale_cleanup() function after kfree_scale_cleanup() instead?
> That keeps the kfree_* functions grouped together and the rcu_scale_* functions
> grouped together, so the code looks cleaner.
> So, do you think the changes below are better?
IMHO, I don't think doing such a code move is better. Just add a new
header file and declare the function there. But see what Paul says
first.
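
For illustration, here is a minimal sketch of that approach (the header
name, its location, and the #include are only assumptions for the
example, not something proposed in this thread):

/* kernel/rcu/rcuscale.h (hypothetical) */
/* SPDX-License-Identifier: GPL-2.0+ */
#ifndef KERNEL_RCU_RCUSCALE_H
#define KERNEL_RCU_RCUSCALE_H

/*
 * Forward declaration so that rcu_scale_cleanup() can call
 * kfree_scale_cleanup() even though its definition appears later in
 * kernel/rcu/rcuscale.c.
 */
static void kfree_scale_cleanup(void);

#endif /* KERNEL_RCU_RCUSCALE_H */

and near the top of kernel/rcu/rcuscale.c:

#include "rcuscale.h"

That keeps the diff small; declaring the function static in a header is
fine as long as rcuscale.c is the only file that includes it.
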
thanks,
- Joel
>
> ---
> kernel/rcu/rcuscale.c | 201 ++++++++++++++++++++++--------------------
> 1 file changed, 103 insertions(+), 98 deletions(-)
>
> diff --git a/kernel/rcu/rcuscale.c b/kernel/rcu/rcuscale.c
> index 91fb5905a008..5a000d26f03e 100644
> --- a/kernel/rcu/rcuscale.c
> +++ b/kernel/rcu/rcuscale.c
> @@ -522,89 +522,6 @@ rcu_scale_print_module_parms(struct rcu_scale_ops *cur_ops, const char *tag)
>           scale_type, tag, nrealreaders, nrealwriters, verbose, shutdown);
>  }
> 
> -static void
> -rcu_scale_cleanup(void)
> -{
> -        int i;
> -        int j;
> -        int ngps = 0;
> -        u64 *wdp;
> -        u64 *wdpp;
> -
> -        /*
> -         * Would like warning at start, but everything is expedited
> -         * during the mid-boot phase, so have to wait till the end.
> -         */
> -        if (rcu_gp_is_expedited() && !rcu_gp_is_normal() && !gp_exp)
> -                SCALEOUT_ERRSTRING("All grace periods expedited, no normal ones to measure!");
> -        if (rcu_gp_is_normal() && gp_exp)
> -                SCALEOUT_ERRSTRING("All grace periods normal, no expedited ones to measure!");
> -        if (gp_exp && gp_async)
> -                SCALEOUT_ERRSTRING("No expedited async GPs, so went with async!");
> -
> -        if (torture_cleanup_begin())
> -                return;
> -        if (!cur_ops) {
> -                torture_cleanup_end();
> -                return;
> -        }
> -
> -        if (reader_tasks) {
> -                for (i = 0; i < nrealreaders; i++)
> -                        torture_stop_kthread(rcu_scale_reader,
> -                                             reader_tasks[i]);
> -                kfree(reader_tasks);
> -        }
> -
> -        if (writer_tasks) {
> -                for (i = 0; i < nrealwriters; i++) {
> -                        torture_stop_kthread(rcu_scale_writer,
> -                                             writer_tasks[i]);
> -                        if (!writer_n_durations)
> -                                continue;
> -                        j = writer_n_durations[i];
> -                        pr_alert("%s%s writer %d gps: %d\n",
> -                                 scale_type, SCALE_FLAG, i, j);
> -                        ngps += j;
> -                }
> -                pr_alert("%s%s start: %llu end: %llu duration: %llu gps: %d batches: %ld\n",
> -                         scale_type, SCALE_FLAG,
> -                         t_rcu_scale_writer_started, t_rcu_scale_writer_finished,
> -                         t_rcu_scale_writer_finished -
> -                         t_rcu_scale_writer_started,
> -                         ngps,
> -                         rcuscale_seq_diff(b_rcu_gp_test_finished,
> -                                           b_rcu_gp_test_started));
> -                for (i = 0; i < nrealwriters; i++) {
> -                        if (!writer_durations)
> -                                break;
> -                        if (!writer_n_durations)
> -                                continue;
> -                        wdpp = writer_durations[i];
> -                        if (!wdpp)
> -                                continue;
> -                        for (j = 0; j < writer_n_durations[i]; j++) {
> -                                wdp = &wdpp[j];
> -                                pr_alert("%s%s %4d writer-duration: %5d %llu\n",
> -                                         scale_type, SCALE_FLAG,
> -                                         i, j, *wdp);
> -                                if (j % 100 == 0)
> -                                        schedule_timeout_uninterruptible(1);
> -                        }
> -                        kfree(writer_durations[i]);
> -                }
> -                kfree(writer_tasks);
> -                kfree(writer_durations);
> -                kfree(writer_n_durations);
> -        }
> -
> -        /* Do torture-type-specific cleanup operations. */
> -        if (cur_ops->cleanup != NULL)
> -                cur_ops->cleanup();
> -
> -        torture_cleanup_end();
> -}
> -
>  /*
>   * Return the number if non-negative. If -1, the number of CPUs.
>   * If less than -1, that much less than the number of CPUs, but
> @@ -624,21 +541,6 @@ static int compute_real(int n)
>          return nr;
>  }
> 
> -/*
> - * RCU scalability shutdown kthread. Just waits to be awakened, then shuts
> - * down system.
> - */
> -static int
> -rcu_scale_shutdown(void *arg)
> -{
> -        wait_event(shutdown_wq,
> -                   atomic_read(&n_rcu_scale_writer_finished) >= nrealwriters);
> -        smp_mb(); /* Wake before output. */
> -        rcu_scale_cleanup();
> -        kernel_power_off();
> -        return -EINVAL;
> -}
> -
>  /*
>   * kfree_rcu() scalability tests: Start a kfree_rcu() loop on all CPUs for number
>   * of iterations and measure total time and number of GP for all iterations to complete.
> @@ -875,6 +777,109 @@ kfree_scale_init(void)
>          return firsterr;
>  }
> 
> +static void
> +rcu_scale_cleanup(void)
> +{
> +        int i;
> +        int j;
> +        int ngps = 0;
> +        u64 *wdp;
> +        u64 *wdpp;
> +
> +        /*
> +         * Would like warning at start, but everything is expedited
> +         * during the mid-boot phase, so have to wait till the end.
> +         */
> +        if (rcu_gp_is_expedited() && !rcu_gp_is_normal() && !gp_exp)
> +                SCALEOUT_ERRSTRING("All grace periods expedited, no normal ones to measure!");
> +        if (rcu_gp_is_normal() && gp_exp)
> +                SCALEOUT_ERRSTRING("All grace periods normal, no expedited ones to measure!");
> +        if (gp_exp && gp_async)
> +                SCALEOUT_ERRSTRING("No expedited async GPs, so went with async!");
> +
> +        if (kfree_rcu_test) {
> +                kfree_scale_cleanup();
> +                return;
> +        }
> +
> +        if (torture_cleanup_begin())
> +                return;
> +        if (!cur_ops) {
> +                torture_cleanup_end();
> +                return;
> +        }
> +
> +        if (reader_tasks) {
> +                for (i = 0; i < nrealreaders; i++)
> +                        torture_stop_kthread(rcu_scale_reader,
> +                                             reader_tasks[i]);
> +                kfree(reader_tasks);
> +        }
> +
> +        if (writer_tasks) {
> +                for (i = 0; i < nrealwriters; i++) {
> +                        torture_stop_kthread(rcu_scale_writer,
> +                                             writer_tasks[i]);
> +                        if (!writer_n_durations)
> +                                continue;
> +                        j = writer_n_durations[i];
> +                        pr_alert("%s%s writer %d gps: %d\n",
> +                                 scale_type, SCALE_FLAG, i, j);
> +                        ngps += j;
> +                }
> +                pr_alert("%s%s start: %llu end: %llu duration: %llu gps: %d batches: %ld\n",
> +                         scale_type, SCALE_FLAG,
> +                         t_rcu_scale_writer_started, t_rcu_scale_writer_finished,
> +                         t_rcu_scale_writer_finished -
> +                         t_rcu_scale_writer_started,
> +                         ngps,
> +                         rcuscale_seq_diff(b_rcu_gp_test_finished,
> +                                           b_rcu_gp_test_started));
> +                for (i = 0; i < nrealwriters; i++) {
> +                        if (!writer_durations)
> +                                break;
> +                        if (!writer_n_durations)
> +                                continue;
> +                        wdpp = writer_durations[i];
> +                        if (!wdpp)
> +                                continue;
> +                        for (j = 0; j < writer_n_durations[i]; j++) {
> +                                wdp = &wdpp[j];
> +                                pr_alert("%s%s %4d writer-duration: %5d %llu\n",
> +                                         scale_type, SCALE_FLAG,
> +                                         i, j, *wdp);
> +                                if (j % 100 == 0)
> +                                        schedule_timeout_uninterruptible(1);
> +                        }
> +                        kfree(writer_durations[i]);
> +                }
> +                kfree(writer_tasks);
> +                kfree(writer_durations);
> +                kfree(writer_n_durations);
> +        }
> +
> +        /* Do torture-type-specific cleanup operations. */
> +        if (cur_ops->cleanup != NULL)
> +                cur_ops->cleanup();
> +
> +        torture_cleanup_end();
> +}
> +
> +/*
> + * RCU scalability shutdown kthread. Just waits to be awakened, then shuts
> + * down system.
> + */
> +static int
> +rcu_scale_shutdown(void *arg)
> +{
> +        wait_event(shutdown_wq,
> +                   atomic_read(&n_rcu_scale_writer_finished) >= nrealwriters);
> +        smp_mb(); /* Wake before output. */
> +        rcu_scale_cleanup();
> +        kernel_power_off();
> +        return -EINVAL;
> +}
> +
>  static int __init
>  rcu_scale_init(void)
>  {
> --
> 2.17.1
>
> > [...]
>