Message-ID: <YuKZmloAcZWY5of8@infradead.org>
Date: Thu, 28 Jul 2022 07:13:46 -0700
From: Christoph Hellwig <hch@...radead.org>
To: Logan Gunthorpe <logang@...tatee.com>
Cc: linux-kernel@...r.kernel.org, linux-raid@...r.kernel.org,
Song Liu <song@...nel.org>,
Christoph Hellwig <hch@...radead.org>,
Guoqing Jiang <guoqing.jiang@...ux.dev>,
Stephen Bates <sbates@...thlin.com>,
Martin Oliveira <Martin.Oliveira@...eticom.com>,
David Sloan <David.Sloan@...eticom.com>
Subject: Re: [PATCH 1/5] md/raid5: Refactor raid5_get_active_stripe()
On Wed, Jul 27, 2022 at 03:05:56PM -0600, Logan Gunthorpe wrote:
> Refactor raid5_get_active_stripe() so that it reads more linearly, in
> the order it is typically executed.
>
> init_stripe() is now called only when a free stripe is found, and the
> function exits early in that case, which removes a lot of if (sh)
> checks and unindents the following code.
>
> Remove the while loop in favour of the 'goto retry' pattern, which
> reduces indentation further. Use a 'goto wait_for_stripe' instead of
> an additional level of indentation, since that is the unusual path;
> this makes the code easier to read.
>
> No functional changes intended. This will make the changes in
> subsequent patches easier to understand.
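For reference, the refactored flow this commit message describes (the
version merged into md-next, which the diff below replaces again) reads
roughly as follows. This is a sketch reconstructed from the removed
lines of that diff, not verbatim kernel source:

	spin_lock_irq(conf->hash_locks + hash);
retry:
	if (!noquiesce && conf->quiesce) {
		/*
		 * Must release the reference to batch_last before waiting
		 * on quiesce, otherwise batch_last holds a stripe reference
		 * and raid5_quiesce() deadlocks waiting for active_stripes
		 * to go to zero.
		 */
		if (ctx && ctx->batch_last) {
			raid5_release_stripe(ctx->batch_last);
			ctx->batch_last = NULL;
		}

		wait_event_lock_irq(conf->wait_for_quiescent, !conf->quiesce,
				    *(conf->hash_locks + hash));
	}

	/* Fast path: the stripe is already in the cache. */
	sh = find_get_stripe(conf, sector, conf->generation - previous, hash);
	if (sh)
		goto out;

	if (test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state))
		goto wait_for_stripe;

	/* Grab a free stripe and initialize it for this sector. */
	sh = get_free_stripe(conf, hash);
	if (sh) {
		r5c_check_stripe_cache_usage(conf);
		init_stripe(sh, sector, previous);
		atomic_inc(&sh->count);
		goto out;
	}

	if (!test_bit(R5_DID_ALLOC, &conf->cache_state))
		set_bit(R5_ALLOC_MORE, &conf->cache_state);

wait_for_stripe:
	if (noblock)
		goto out;

	/* Unusual path: block until an inactive stripe becomes available. */
	set_bit(R5_INACTIVE_BLOCKED, &conf->cache_state);
	r5l_wake_reclaim(conf->log, 0);
	wait_event_lock_irq(conf->wait_for_stripe,
			    is_inactive_blocked(conf, hash),
			    *(conf->hash_locks + hash));
	clear_bit(R5_INACTIVE_BLOCKED, &conf->cache_state);
	goto retry;

out:
	spin_unlock_irq(conf->hash_locks + hash);
	return sh;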
I find the new loop even more confusing than the old one. I'd go
with something like the version below (on top of the whole md-next tree,
which pulled this in way too fast..)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 4456ac51f7c53..cd8ec4995a49b 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -811,54 +811,54 @@ static struct stripe_head *__raid5_get_active_stripe(struct r5conf *conf,
spin_lock_irq(conf->hash_locks + hash);
-retry:
- if (!noquiesce && conf->quiesce) {
- /*
- * Must release the reference to batch_last before waiting,
- * on quiesce, otherwise the batch_last will hold a reference
- * to a stripe and raid5_quiesce() will deadlock waiting for
- * active_stripes to go to zero.
- */
- if (ctx && ctx->batch_last) {
- raid5_release_stripe(ctx->batch_last);
- ctx->batch_last = NULL;
- }
-
- wait_event_lock_irq(conf->wait_for_quiescent, !conf->quiesce,
- *(conf->hash_locks + hash));
- }
+ for (;;) {
+ if (!noquiesce && conf->quiesce) {
+ /*
+ * Must release the reference to batch_last before
+ * waiting on quiesce, otherwise the batch_last will
+ * hold a reference to a stripe and raid5_quiesce()
+ * will deadlock waiting for active_stripes to go to
+ * zero.
+ */
+ if (ctx && ctx->batch_last) {
+ raid5_release_stripe(ctx->batch_last);
+ ctx->batch_last = NULL;
+ }
- sh = find_get_stripe(conf, sector, conf->generation - previous, hash);
- if (sh)
- goto out;
+ wait_event_lock_irq(conf->wait_for_quiescent,
+ !conf->quiesce,
+ *(conf->hash_locks + hash));
+ }
- if (test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state))
- goto wait_for_stripe;
+ sh = find_get_stripe(conf, sector, conf->generation - previous,
+ hash);
+ if (sh)
+ break;
- sh = get_free_stripe(conf, hash);
- if (sh) {
- r5c_check_stripe_cache_usage(conf);
- init_stripe(sh, sector, previous);
- atomic_inc(&sh->count);
- goto out;
- }
+ if (!test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state)) {
+ sh = get_free_stripe(conf, hash);
+ if (sh) {
+ r5c_check_stripe_cache_usage(conf);
+ init_stripe(sh, sector, previous);
+ atomic_inc(&sh->count);
+ break;
+ }
- if (!test_bit(R5_DID_ALLOC, &conf->cache_state))
- set_bit(R5_ALLOC_MORE, &conf->cache_state);
+ if (!test_bit(R5_DID_ALLOC, &conf->cache_state))
+ set_bit(R5_ALLOC_MORE, &conf->cache_state);
+ }
-wait_for_stripe:
- if (noblock)
- goto out;
+ if (noblock)
+ break;
- set_bit(R5_INACTIVE_BLOCKED, &conf->cache_state);
- r5l_wake_reclaim(conf->log, 0);
- wait_event_lock_irq(conf->wait_for_stripe,
- is_inactive_blocked(conf, hash),
- *(conf->hash_locks + hash));
- clear_bit(R5_INACTIVE_BLOCKED, &conf->cache_state);
- goto retry;
+ set_bit(R5_INACTIVE_BLOCKED, &conf->cache_state);
+ r5l_wake_reclaim(conf->log, 0);
+ wait_event_lock_irq(conf->wait_for_stripe,
+ is_inactive_blocked(conf, hash),
+ *(conf->hash_locks + hash));
+ clear_bit(R5_INACTIVE_BLOCKED, &conf->cache_state);
+ }
-out:
spin_unlock_irq(conf->hash_locks + hash);
return sh;
}
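With this applied, the body of __raid5_get_active_stripe() would read
roughly as follows (again a sketch, assembled from the '+' lines above
and untested):

	spin_lock_irq(conf->hash_locks + hash);

	for (;;) {
		if (!noquiesce && conf->quiesce) {
			/*
			 * Must release the reference to batch_last before
			 * waiting on quiesce, otherwise the batch_last will
			 * hold a reference to a stripe and raid5_quiesce()
			 * will deadlock waiting for active_stripes to go to
			 * zero.
			 */
			if (ctx && ctx->batch_last) {
				raid5_release_stripe(ctx->batch_last);
				ctx->batch_last = NULL;
			}

			wait_event_lock_irq(conf->wait_for_quiescent,
					    !conf->quiesce,
					    *(conf->hash_locks + hash));
		}

		/* Fast path: the stripe is already in the cache. */
		sh = find_get_stripe(conf, sector, conf->generation - previous,
				     hash);
		if (sh)
			break;

		if (!test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state)) {
			sh = get_free_stripe(conf, hash);
			if (sh) {
				r5c_check_stripe_cache_usage(conf);
				init_stripe(sh, sector, previous);
				atomic_inc(&sh->count);
				break;
			}

			if (!test_bit(R5_DID_ALLOC, &conf->cache_state))
				set_bit(R5_ALLOC_MORE, &conf->cache_state);
		}

		if (noblock)
			break;

		/* Block until an inactive stripe is available, then retry. */
		set_bit(R5_INACTIVE_BLOCKED, &conf->cache_state);
		r5l_wake_reclaim(conf->log, 0);
		wait_event_lock_irq(conf->wait_for_stripe,
				    is_inactive_blocked(conf, hash),
				    *(conf->hash_locks + hash));
		clear_bit(R5_INACTIVE_BLOCKED, &conf->cache_state);
	}

	spin_unlock_irq(conf->hash_locks + hash);
	return sh;

Every exit path is a break to the common unlock-and-return tail, so the
retry, wait_for_stripe and out labels all go away.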