Message-ID: <bec8776a-f0d4-2ec3-4455-9976ad87775e@huaweicloud.com>
Date: Fri, 21 Feb 2025 17:55:53 +0800
From: Yu Kuai <yukuai1@...weicloud.com>
To: Zheng Qixing <zhengqixing@...weicloud.com>, axboe@...nel.dk,
song@...nel.org, colyli@...nel.org, dan.j.williams@...el.com,
vishal.l.verma@...el.com, dave.jiang@...el.com, ira.weiny@...el.com,
dlemoal@...nel.org, yanjun.zhu@...ux.dev, kch@...dia.com, hare@...e.de,
zhengqixing@...wei.com, john.g.garry@...cle.com, geliang@...nel.org,
xni@...hat.com, colyli@...e.de
Cc: linux-block@...r.kernel.org, linux-kernel@...r.kernel.org,
linux-raid@...r.kernel.org, nvdimm@...ts.linux.dev, yi.zhang@...wei.com,
yangerkun@...wei.com, Li Nan <linan122@...wei.com>,
"yukuai (C)" <yukuai3@...wei.com>
Subject: Re: [PATCH 04/12] badblocks: return error directly when setting
badblocks exceeds 512
Hi,
+CC Li Nan
On 2025/02/21 16:11, Zheng Qixing wrote:
> From: Li Nan <linan122@...wei.com>
>
> In the current handling of badblocks settings, a lot of processing has
> been done for scenarios where the number of badblocks exceeds 512.
> This makes the code look quite complex and also introduces some issues,
It would be better to describe these issues here, in the commit message.
>
> Fixing those issues wouldn’t be too complicated, but it wouldn’t
> simplify the code. In fact, a disk shouldn’t have too many badblocks,
> and for disks with 512 badblocks, attempting to set more bad blocks
> doesn’t make much sense. At that point, the more appropriate action
> would be to replace the disk. Therefore, to resolve these issues and
> simplify the code somewhat, return error directly when setting badblocks
> exceeds 512.
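(Side note for readers following the series: the practical effect is that a
full table now makes badblocks_set() fail outright instead of trying a
best-effort merge. A minimal caller-side sketch, assuming the current
convention of 0 on success and non-zero on failure still holds at this point
in the series; handle_media_error() and the -ENOSPC mapping are made up for
illustration, not taken from the patch:

    #include <linux/badblocks.h>

    /* Hypothetical caller: record a bad range, give up once the table is full. */
    static int handle_media_error(struct badblocks *bb, sector_t s, int sectors)
    {
            /* non-zero now also covers "table already holds 512 entries" */
            if (badblocks_set(bb, s, sectors, 0 /* not yet acknowledged */))
                    return -ENOSPC; /* replace the disk rather than retry */

            return 0;
    }

i.e. callers are presumably expected to surface the error rather than loop
and retry.)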
>
> Fixes: aa511ff8218b ("badblocks: switch to the improved badblock handling code")
> Signed-off-by: Li Nan <linan122@...wei.com>
> ---
> block/badblocks.c | 121 ++++++++--------------------------------------
> 1 file changed, 19 insertions(+), 102 deletions(-)
>
Reviewed-by: Yu Kuai <yukuai3@...wei.com>
> diff --git a/block/badblocks.c b/block/badblocks.c
> index ad8652fbe1c8..1c8b8f65f6df 100644
> --- a/block/badblocks.c
> +++ b/block/badblocks.c
> @@ -527,51 +527,6 @@ static int prev_badblocks(struct badblocks *bb, struct badblocks_context *bad,
> return ret;
> }
>
> -/*
> - * Return 'true' if the range indicated by 'bad' can be backward merged
> - * with the bad range (from the bad table) index by 'behind'.
> - */
> -static bool can_merge_behind(struct badblocks *bb,
> - struct badblocks_context *bad, int behind)
> -{
> - sector_t sectors = bad->len;
> - sector_t s = bad->start;
> - u64 *p = bb->page;
> -
> - if ((s < BB_OFFSET(p[behind])) &&
> - ((s + sectors) >= BB_OFFSET(p[behind])) &&
> - ((BB_END(p[behind]) - s) <= BB_MAX_LEN) &&
> - BB_ACK(p[behind]) == bad->ack)
> - return true;
> - return false;
> -}
> -
> -/*
> - * Do backward merge for range indicated by 'bad' and the bad range
> - * (from the bad table) indexed by 'behind'. The return value is merged
> - * sectors from bad->len.
> - */
> -static int behind_merge(struct badblocks *bb, struct badblocks_context *bad,
> - int behind)
> -{
> - sector_t sectors = bad->len;
> - sector_t s = bad->start;
> - u64 *p = bb->page;
> - int merged = 0;
> -
> - WARN_ON(s >= BB_OFFSET(p[behind]));
> - WARN_ON((s + sectors) < BB_OFFSET(p[behind]));
> -
> - if (s < BB_OFFSET(p[behind])) {
> - merged = BB_OFFSET(p[behind]) - s;
> - p[behind] = BB_MAKE(s, BB_LEN(p[behind]) + merged, bad->ack);
> -
> - WARN_ON((BB_LEN(p[behind]) + merged) >= BB_MAX_LEN);
> - }
> -
> - return merged;
> -}
> -
> /*
> * Return 'true' if the range indicated by 'bad' can be forward
> * merged with the bad range (from the bad table) indexed by 'prev'.
> @@ -884,11 +839,9 @@ static bool try_adjacent_combine(struct badblocks *bb, int prev)
> static int _badblocks_set(struct badblocks *bb, sector_t s, int sectors,
> int acknowledged)
> {
> - int retried = 0, space_desired = 0;
> - int orig_len, len = 0, added = 0;
> + int len = 0, added = 0;
> struct badblocks_context bad;
> int prev = -1, hint = -1;
> - sector_t orig_start;
> unsigned long flags;
> int rv = 0;
> u64 *p;
> @@ -912,8 +865,6 @@ static int _badblocks_set(struct badblocks *bb, sector_t s, int sectors,
>
> write_seqlock_irqsave(&bb->lock, flags);
>
> - orig_start = s;
> - orig_len = sectors;
> bad.ack = acknowledged;
> p = bb->page;
>
> @@ -922,6 +873,11 @@ static int _badblocks_set(struct badblocks *bb, sector_t s, int sectors,
> bad.len = sectors;
> len = 0;
>
> + if (badblocks_full(bb)) {
> + rv = 1;
> + goto out;
> + }
> +
> if (badblocks_empty(bb)) {
> len = insert_at(bb, 0, &bad);
> bb->count++;
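(For readers without the tree at hand: badblocks_full() is just the "table
already holds the maximum number of entries" test; as I read the current
code it is roughly

    /* MAX_BADBLOCKS is PAGE_SIZE/8, i.e. 512 entries with 4K pages */
    static inline bool badblocks_full(struct badblocks *bb)
    {
            return (bb->count >= MAX_BADBLOCKS);
    }

so doing this check up front, before the empty-table and insert/merge paths,
makes the "more than 512" policy explicit.)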
> @@ -933,32 +889,14 @@ static int _badblocks_set(struct badblocks *bb, sector_t s, int sectors,
>
> /* start before all badblocks */
> if (prev < 0) {
> - if (!badblocks_full(bb)) {
> - /* insert on the first */
> - if (bad.len > (BB_OFFSET(p[0]) - bad.start))
> - bad.len = BB_OFFSET(p[0]) - bad.start;
> - len = insert_at(bb, 0, &bad);
> - bb->count++;
> - added++;
> - hint = 0;
> - goto update_sectors;
> - }
> -
> - /* No sapce, try to merge */
> - if (overlap_behind(bb, &bad, 0)) {
> - if (can_merge_behind(bb, &bad, 0)) {
> - len = behind_merge(bb, &bad, 0);
> - added++;
> - } else {
> - len = BB_OFFSET(p[0]) - s;
> - space_desired = 1;
> - }
> - hint = 0;
> - goto update_sectors;
> - }
> -
> - /* no table space and give up */
> - goto out;
> + /* insert on the first */
> + if (bad.len > (BB_OFFSET(p[0]) - bad.start))
> + bad.len = BB_OFFSET(p[0]) - bad.start;
> + len = insert_at(bb, 0, &bad);
> + bb->count++;
> + added++;
> + hint = 0;
> + goto update_sectors;
> }
>
> /* in case p[prev-1] can be merged with p[prev] */
> @@ -978,6 +916,11 @@ static int _badblocks_set(struct badblocks *bb, sector_t s, int sectors,
> int extra = 0;
>
> if (!can_front_overwrite(bb, prev, &bad, &extra)) {
> + if (extra > 0) {
> + rv = 1;
> + goto out;
> + }
> +
> len = min_t(sector_t,
> BB_END(p[prev]) - s, sectors);
> hint = prev;
> @@ -1004,24 +947,6 @@ static int _badblocks_set(struct badblocks *bb, sector_t s, int sectors,
> goto update_sectors;
> }
>
> - /* if no space in table, still try to merge in the covered range */
> - if (badblocks_full(bb)) {
> - /* skip the cannot-merge range */
> - if (((prev + 1) < bb->count) &&
> - overlap_behind(bb, &bad, prev + 1) &&
> - ((s + sectors) >= BB_END(p[prev + 1]))) {
> - len = BB_END(p[prev + 1]) - s;
> - hint = prev + 1;
> - goto update_sectors;
> - }
> -
> - /* no retry any more */
> - len = sectors;
> - space_desired = 1;
> - hint = -1;
> - goto update_sectors;
> - }
> -
> /* cannot merge and there is space in bad table */
> if ((prev + 1) < bb->count &&
> overlap_behind(bb, &bad, prev + 1))
> @@ -1049,14 +974,6 @@ static int _badblocks_set(struct badblocks *bb, sector_t s, int sectors,
> */
> try_adjacent_combine(bb, prev);
>
> - if (space_desired && !badblocks_full(bb)) {
> - s = orig_start;
> - sectors = orig_len;
> - space_desired = 0;
> - if (retried++ < 3)
> - goto re_insert;
> - }
> -
> out:
> if (added) {
> set_changed(bb);
>