Message-Id: <20250917093508.456790-5-linan666@huaweicloud.com>
Date: Wed, 17 Sep 2025 17:35:05 +0800
From: linan666@...weicloud.com
To: song@...nel.org,
yukuai3@...wei.com,
neil@...wn.name,
namhyung@...il.com
Cc: linux-raid@...r.kernel.org,
linux-kernel@...r.kernel.org,
linan666@...weicloud.com,
yangerkun@...wei.com,
yi.zhang@...wei.com
Subject: [PATCH 4/7] md: factor out sync completion update into helper

From: Li Nan <linan122@...wei.com>

Repeatedly reading the 'mddev->recovery' flags in md_do_sync() is
racy: if the flags are modified while the sync is running, the
completion offsets can be updated incorrectly. Therefore, replace the
direct 'mddev->recovery' checks with 'action', which is read once near
the start of md_do_sync().
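
The hazard, in a minimal userspace sketch (hypothetical flag names and
stand-in comments, not the md code itself): deriving every decision
from one snapshot of the flag word keeps the branches mutually
consistent, while re-reading the flags does not.

#include <stdatomic.h>

#define SYNC_FLAG    (1UL << 0)	/* hypothetical stand-in flags */
#define RECOVER_FLAG (1UL << 1)

static _Atomic unsigned long recovery;	/* a writer may flip bits anytime */

static void finish_racy(void)
{
	/* Two reads: a concurrent flag update between them can make
	 * both (or neither) branch fire for a single completion. */
	if (atomic_load(&recovery) & SYNC_FLAG)
		;	/* update the resync offset ... */
	if (atomic_load(&recovery) & RECOVER_FLAG)
		;	/* ... and/or the per-rdev recovery offsets */
}

static void finish_safe(void)
{
	/* One read: all decisions derive from the same snapshot, as
	 * md_do_sync() now derives them from 'action'. */
	unsigned long snap = atomic_load(&recovery);

	if (snap & SYNC_FLAG)
		;	/* update the resync offset ... */
	if (snap & RECOVER_FLAG)
		;	/* ... consistent with the branch above */
}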

Move the sync completion update logic into a new helper,
md_finish_sync(), which improves readability and maintainability.

The reshape completion update remains safe, as the reshape offset is
only updated after a successful reshape, i.e. when MD_RECOVERY_INTR is
not set and 'curr_resync' equals 'max_sectors'.
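
For reference, that guard looks roughly like the following (a
paraphrase of the md_do_sync() condition; update_reshape_state() is a
hypothetical placeholder, not a real md function):

	/* Paraphrased guard, not a verbatim quote of md.c. */
	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
	    !test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
	    mddev->curr_resync == max_sectors)
		update_reshape_state(mddev);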

Signed-off-by: Li Nan <linan122@...wei.com>
---
 drivers/md/md.c | 82 ++++++++++++++++++++++++++++---------------------
 1 file changed, 47 insertions(+), 35 deletions(-)

diff --git a/drivers/md/md.c b/drivers/md/md.c
index c4d765d57af7..f4f80d32db98 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -9301,6 +9301,51 @@ static bool sync_io_within_limit(struct mddev *mddev)
 	       (raid_is_456(mddev) ? 8 : 128) * sync_io_depth(mddev);
 }
+/*
+ * Update sync offset and mddev status when sync completes
+ */
+static void md_finish_sync(struct mddev *mddev, enum sync_action action)
+{
+	struct md_rdev *rdev;
+
+	switch (action) {
+	case ACTION_RESYNC:
+	case ACTION_REPAIR:
+		if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
+			mddev->curr_resync = MaxSector;
+		mddev->resync_offset = mddev->curr_resync;
+		break;
+	case ACTION_RECOVER:
+		if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
+			mddev->curr_resync = MaxSector;
+		rcu_read_lock();
+		rdev_for_each_rcu(rdev, mddev)
+			if (mddev->delta_disks >= 0 &&
+			    rdev_needs_recovery(rdev, mddev->curr_resync))
+				rdev->recovery_offset = mddev->curr_resync;
+		rcu_read_unlock();
+		break;
+	case ACTION_RESHAPE:
+		if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
+		    mddev->delta_disks > 0 &&
+		    mddev->pers->finish_reshape &&
+		    mddev->pers->size &&
+		    !mddev_is_dm(mddev)) {
+			mddev_lock_nointr(mddev);
+			md_set_array_sectors(mddev, mddev->pers->size(mddev, 0, 0));
+			mddev_unlock(mddev);
+			if (!mddev_is_clustered(mddev))
+				set_capacity_and_notify(mddev->gendisk,
+							mddev->array_sectors);
+		}
+		break;
+	/* ACTION_CHECK does not update any offsets */
+	case ACTION_CHECK:
+	default:
+		break;
+	}
+}
+
 #define SYNC_MARKS	10
 #define	SYNC_MARK_STEP	(3*HZ)
 #define UPDATE_FREQUENCY (5*60*HZ)
@@ -9316,7 +9361,6 @@ void md_do_sync(struct md_thread *thread)
 	int last_mark,m;
 	sector_t last_check;
 	int skipped = 0;
-	struct md_rdev *rdev;
 	enum sync_action action;
 	const char *desc;
 	struct blk_plug plug;
@@ -9603,46 +9647,14 @@ void md_do_sync(struct md_thread *thread)
 	}
 	mddev->pers->sync_request(mddev, max_sectors, max_sectors, &skipped);
-	if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
-	    mddev->curr_resync > MD_RESYNC_ACTIVE) {
-		if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
-			mddev->curr_resync = MaxSector;
-
-		if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
-			mddev->resync_offset = mddev->curr_resync;
-		} else {
-			if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
-			    test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) {
-				rcu_read_lock();
-				rdev_for_each_rcu(rdev, mddev)
-					if (mddev->delta_disks >= 0 &&
-					    rdev_needs_recovery(rdev, mddev->curr_resync))
-						rdev->recovery_offset = mddev->curr_resync;
-				rcu_read_unlock();
-			}
-		}
-	}
+	if (mddev->curr_resync > MD_RESYNC_ACTIVE)
+		md_finish_sync(mddev, action);
 skip:
 	/* set CHANGE_PENDING here since maybe another update is needed,
 	 * so other nodes are informed. It should be harmless for normal
 	 * raid */
 	set_mask_bits(&mddev->sb_flags, 0,
 		      BIT(MD_SB_CHANGE_PENDING) | BIT(MD_SB_CHANGE_DEVS));
-
-	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
-	    !test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
-	    mddev->delta_disks > 0 &&
-	    mddev->pers->finish_reshape &&
-	    mddev->pers->size &&
-	    !mddev_is_dm(mddev)) {
-		mddev_lock_nointr(mddev);
-		md_set_array_sectors(mddev, mddev->pers->size(mddev, 0, 0));
-		mddev_unlock(mddev);
-		if (!mddev_is_clustered(mddev))
-			set_capacity_and_notify(mddev->gendisk,
-						mddev->array_sectors);
-	}
-
 	spin_lock(&mddev->lock);
 	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
 		/* We completed so min/max setting can be forgotten if used. */
--
2.39.2