[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20230803132426.2688608-14-yukuai1@huaweicloud.com>
Date: Thu, 3 Aug 2023 21:24:26 +0800
From: Yu Kuai <yukuai1@...weicloud.com>
To: song@...nel.org, xni@...hat.com
Cc: linux-raid@...r.kernel.org, linux-kernel@...r.kernel.org,
yukuai3@...wei.com, yukuai1@...weicloud.com, yi.zhang@...wei.com,
yangerkun@...wei.com
Subject: [PATCH -next 13/13] md: delay remove_and_add_spares() for read only array to md_start_sync()
From: Yu Kuai <yukuai3@...wei.com>
Before this patch, for read-only array:
md_check_recovery() checks that 'MD_RECOVERY_NEEDED' is set, then it will
call remove_and_add_spares() directly to try to remove and add rdevs
from the array.
After this patch:
1) md_check_recovery() checks that 'MD_RECOVERY_NEEDED' is set, that the
worker 'sync_work' is not pending, and that there are rdevs that can be
added or removed; then it will queue new work md_start_sync();
2) md_start_sync() will call remove_and_add_spares() and exit;
This change makes sure that array reconfiguration is independent of the
daemon, and it'll be much easier to synchronize it with io, considering
that io may rely on the daemon thread to be done.
Signed-off-by: Yu Kuai <yukuai3@...wei.com>
---
drivers/md/md.c | 37 +++++++++++++++++++++++++++----------
1 file changed, 27 insertions(+), 10 deletions(-)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index ef88581d9a39..f6e024c15530 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -9188,6 +9188,16 @@ static bool rdev_addable(struct md_rdev *rdev)
return true;
}
+static bool md_spares_need_change(struct mddev *mddev)
+{
+ struct md_rdev *rdev;
+
+ rdev_for_each(rdev, mddev)
+ if (rdev_removeable(rdev) || rdev_addable(rdev))
+ return true;
+ return false;
+}
+
static bool rdev_is_spare(struct md_rdev *rdev)
{
return !test_bit(Candidate, &rdev->flags) && rdev->raid_disk >= 0 &&
@@ -9265,6 +9275,12 @@ static void md_start_sync(struct work_struct *ws)
mddev_lock_nointr(mddev);
+ if (!md_is_rdwr(mddev)) {
+ remove_and_add_spares(mddev);
+ mddev_unlock(mddev);
+ return;
+ }
+
/*
* No recovery is running, remove any failed drives, then add spares if
* possible. Spares are also removed and re-added, to allow the
@@ -9381,7 +9397,8 @@ void md_check_recovery(struct mddev *mddev)
}
if (!md_is_rdwr(mddev) &&
- !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
+ (!test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
+ work_pending(&mddev->sync_work)))
return;
if ( ! (
(mddev->sb_flags & ~ (1<<MD_SB_CHANGE_PENDING)) ||
@@ -9409,15 +9426,8 @@ void md_check_recovery(struct mddev *mddev)
*/
rdev_for_each(rdev, mddev)
clear_bit(Blocked, &rdev->flags);
- /* On a read-only array we can:
- * - remove failed devices
- * - add already-in_sync devices if the array itself
- * is in-sync.
- * As we only add devices that are already in-sync,
- * we can activate the spares immediately.
- */
- remove_and_add_spares(mddev);
- /* There is no thread, but we need to call
+ /*
+ * There is no thread, but we need to call
* ->spare_active and clear saved_raid_disk
*/
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
@@ -9425,6 +9435,13 @@ void md_check_recovery(struct mddev *mddev)
clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
+
+ /*
+ * Let md_start_sync() to remove and add rdevs to the
+ * array.
+ */
+ if (md_spares_need_change(mddev))
+ queue_work(md_misc_wq, &mddev->sync_work);
goto unlock;
}
--
2.39.2
Powered by blists - more mailing lists