Message-Id: <20250911020441.2858174-3-linan666@huaweicloud.com>
Date: Thu, 11 Sep 2025 10:04:41 +0800
From: linan666@...weicloud.com
To: song@...nel.org,
yukuai3@...wei.com,
neil@...wn.name,
namhyung@...il.com
Cc: linux-raid@...r.kernel.org,
linux-kernel@...r.kernel.org,
linan666@...weicloud.com,
yangerkun@...wei.com,
yi.zhang@...wei.com
Subject: [PATCH 3/3] md: factor out sync completion update into helper
From: Li Nan <linan122@...wei.com>
Repeatedly reading the 'mddev->recovery' flags in md_do_sync() carries a
potential risk: if the flags are modified while a sync is in progress, the
completion code may update the wrong offsets. Therefore, replace the direct
'mddev->recovery' checks with the 'action' value read once at the start of
the sync (a sketch of this pattern follows the '---' below).

Move the sync completion update logic into a new helper, md_finish_sync(),
which improves readability and maintainability.

The reshape completion update remains safe, as it is only performed after a
successful reshape, when MD_RECOVERY_INTR is not set and 'curr_resync'
equals 'max_sectors'.
Signed-off-by: Li Nan <linan122@...wei.com>
---
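Note for reviewers: a minimal sketch (not part of the patch) of the pattern
this change relies on. It assumes md_do_sync() keeps taking a single
snapshot of the requested action via md_sync_action(), as it does today;
sync_worker() below is a hypothetical stand-in for the md_do_sync() flow,
not a function added by this series.

/* Hypothetical illustration only -- not applied by this patch. */
static void sync_worker(struct mddev *mddev)
{
	/* Read the live recovery flags once; 'action' is stable from here. */
	enum sync_action action = md_sync_action(mddev);

	/* ... the sync runs; mddev->recovery may change concurrently ... */

	/*
	 * Completion decisions branch on the snapshot, not on the live
	 * flags, so a flag flipped mid-sync cannot redirect the offset
	 * update to the wrong case in md_finish_sync().
	 */
	if (mddev->curr_resync > MD_RESYNC_ACTIVE)
		md_finish_sync(mddev, action);
}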
drivers/md/md.c | 80 +++++++++++++++++++++++++++----------------------
1 file changed, 45 insertions(+), 35 deletions(-)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index f3abfc140481..a77c59527d4c 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -9298,6 +9298,49 @@ static bool sync_io_within_limit(struct mddev *mddev)
 		(raid_is_456(mddev) ? 8 : 128) * sync_io_depth(mddev);
 }
 
+/*
+ * Update sync offset and mddev status when sync completes
+ */
+static void md_finish_sync(struct mddev *mddev, enum sync_action action)
+{
+	struct md_rdev *rdev;
+
+	switch (action) {
+	case ACTION_RESYNC:
+	case ACTION_REPAIR:
+		if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
+			mddev->curr_resync_completed = MaxSector;
+		mddev->resync_offset = mddev->curr_resync_completed;
+		break;
+	case ACTION_RECOVER:
+		if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
+			mddev->curr_resync_completed = MaxSector;
+		rcu_read_lock();
+		rdev_for_each_rcu(rdev, mddev)
+			if (mddev->delta_disks >= 0 &&
+			    rdev_needs_recovery(rdev, mddev->curr_resync_completed))
+				rdev->recovery_offset = mddev->curr_resync_completed;
+		rcu_read_unlock();
+		break;
+	case ACTION_RESHAPE:
+		if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
+		    mddev->delta_disks > 0 &&
+		    mddev->pers->finish_reshape &&
+		    mddev->pers->size &&
+		    !mddev_is_dm(mddev)) {
+			mddev_lock_nointr(mddev);
+			md_set_array_sectors(mddev, mddev->pers->size(mddev, 0, 0));
+			mddev_unlock(mddev);
+			if (!mddev_is_clustered(mddev))
+				set_capacity_and_notify(mddev->gendisk,
+							mddev->array_sectors);
+		}
+		break;
+	default:
+		break;
+	}
+}
+
 #define SYNC_MARKS 10
 #define SYNC_MARK_STEP (3*HZ)
 #define UPDATE_FREQUENCY (5*60*HZ)
@@ -9313,7 +9356,6 @@ void md_do_sync(struct md_thread *thread)
 	int last_mark,m;
 	sector_t last_check;
 	int skipped = 0;
-	struct md_rdev *rdev;
 	enum sync_action action;
 	const char *desc;
 	struct blk_plug plug;
@@ -9603,46 +9645,14 @@ void md_do_sync(struct md_thread *thread)
 	}
 	mddev->pers->sync_request(mddev, max_sectors, max_sectors, &skipped);
 
-	if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
-	    mddev->curr_resync_completed > MD_RESYNC_ACTIVE) {
-		if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
-			mddev->curr_resync_completed = MaxSector;
-
-		if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
-			mddev->resync_offset = mddev->curr_resync_completed;
-		} else {
-			if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
-			    test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) {
-				rcu_read_lock();
-				rdev_for_each_rcu(rdev, mddev)
-					if (mddev->delta_disks >= 0 &&
-					    rdev_needs_recovery(rdev, mddev->curr_resync_completed))
-						rdev->recovery_offset = mddev->curr_resync_completed;
-				rcu_read_unlock();
-			}
-		}
-	}
+	if (mddev->curr_resync > MD_RESYNC_ACTIVE)
+		md_finish_sync(mddev, action);
 skip:
 	/* set CHANGE_PENDING here since maybe another update is needed,
 	 * so other nodes are informed. It should be harmless for normal
 	 * raid */
 	set_mask_bits(&mddev->sb_flags, 0,
 		      BIT(MD_SB_CHANGE_PENDING) | BIT(MD_SB_CHANGE_DEVS));
-
-	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
-	    !test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
-	    mddev->delta_disks > 0 &&
-	    mddev->pers->finish_reshape &&
-	    mddev->pers->size &&
-	    !mddev_is_dm(mddev)) {
-		mddev_lock_nointr(mddev);
-		md_set_array_sectors(mddev, mddev->pers->size(mddev, 0, 0));
-		mddev_unlock(mddev);
-		if (!mddev_is_clustered(mddev))
-			set_capacity_and_notify(mddev->gendisk,
-						mddev->array_sectors);
-	}
-
 	spin_lock(&mddev->lock);
 	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
 		/* We completed so min/max setting can be forgotten if used. */
--
2.39.2