Message-ID: <3b5e935b-0a1b-bf23-8ceb-b45e0b5f1b4b@huaweicloud.com>
Date: Tue, 6 Jan 2026 20:59:53 +0800
From: Li Nan <linan666@...weicloud.com>
To: Zheng Qixing <zhengqixing@...weicloud.com>, song@...nel.org,
yukuai@...as.com
Cc: linux-raid@...r.kernel.org, linux-kernel@...r.kernel.org,
yi.zhang@...wei.com, yangerkun@...wei.com, houtao1@...wei.com,
zhengqixing@...wei.com, linan122@...artners.com
Subject: Re: [RFC PATCH 1/5] md: add helpers for requested sync action

On 2025/12/31 15:09, Zheng Qixing wrote:
> From: Zheng Qixing <zhengqixing@...wei.com>
>
> Add helpers for handling requested sync action.
>
> In handle_requested_sync_action(), add mutual exclusivity checks between
> check/repair operations. This prevents the scenario where one operation
> is requested, but before MD_RECOVERY_RUNNING is set, another operation is
> requested, resulting in neither an EBUSY return nor proper execution of
> the second operation.
>
> Signed-off-by: Zheng Qixing <zhengqixing@...wei.com>
> ---
> drivers/md/md.c | 87 +++++++++++++++++++++++++++++++++++++------------
> 1 file changed, 66 insertions(+), 21 deletions(-)
>
> diff --git a/drivers/md/md.c b/drivers/md/md.c
> index 5df2220b1bd1..ccaa2e6fe079 100644
> --- a/drivers/md/md.c
> +++ b/drivers/md/md.c
> @@ -665,6 +665,59 @@ void mddev_put(struct mddev *mddev)
> spin_unlock(&all_mddevs_lock);
> }
>
> +static int __handle_requested_sync_action(struct mddev *mddev,
> +                                          enum sync_action action)
> +{
> +        switch (action) {
> +        case ACTION_CHECK:
> +                set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
> +                fallthrough;
> +        case ACTION_REPAIR:
> +                set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
> +                set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
> +                return 0;
> +        default:
> +                return -EINVAL;
> +        }
> +}
> +
> +static int handle_requested_sync_action(struct mddev *mddev,
> +                                        enum sync_action action)
> +{
> +        if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
> +                return -EBUSY;
This -EBUSY return changes the original logic; please split this into two
patches: one that purely factors out the helper, and one that adds the fix.
> +        return __handle_requested_sync_action(mddev, action);
> +}
> +
Also, __handle_requested_sync_action does not need to be split out as a
separate helper.
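
Something like the following untested sketch, with the two helpers folded
together, should be enough (the -EBUSY check then belongs in the separate
fix patch):

static int handle_requested_sync_action(struct mddev *mddev,
                                        enum sync_action action)
{
        /* A previous check/repair request has not started running yet. */
        if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
                return -EBUSY;

        switch (action) {
        case ACTION_CHECK:
                set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
                fallthrough;
        case ACTION_REPAIR:
                set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
                set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
                return 0;
        default:
                return -EINVAL;
        }
}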
> +static enum sync_action __get_recovery_sync_action(struct mddev *mddev)
> +{
> +        if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
> +                return ACTION_CHECK;
> +        if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
> +                return ACTION_REPAIR;
> +        return ACTION_RESYNC;
> +}
> +
> +static enum sync_action get_recovery_sync_action(struct mddev *mddev)
> +{
> +        return __get_recovery_sync_action(mddev);
> +}
> +
Likewise, __get_recovery_sync_action does not need to be split out; the
get_recovery_sync_action wrapper adds nothing.
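
i.e. something like:

static enum sync_action get_recovery_sync_action(struct mddev *mddev)
{
        if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
                return ACTION_CHECK;
        if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
                return ACTION_REPAIR;
        return ACTION_RESYNC;
}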
> +static void init_recovery_position(struct mddev *mddev)
> +{
> +        mddev->resync_min = 0;
> +}
> +
> +static void set_requested_position(struct mddev *mddev, sector_t value)
> +{
> +        mddev->resync_min = value;
> +}
> +
> +static sector_t get_requested_position(struct mddev *mddev)
> +{
> +        return mddev->resync_min;
> +}
> +
There is no need to factor out these accessors for resync_min either; the
'rectify_min' that a later patch in this series introduces can re-use
'resync_min' directly.
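
i.e. md_sync_position() and the other call sites can simply keep using
the field directly, e.g.:

        case ACTION_CHECK:
        case ACTION_REPAIR:
                return mddev->resync_min;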
> static void md_safemode_timeout(struct timer_list *t);
> static void md_start_sync(struct work_struct *ws);
>
> @@ -781,7 +834,7 @@ int mddev_init(struct mddev *mddev)
>         mddev->reshape_position = MaxSector;
>         mddev->reshape_backwards = 0;
>         mddev->last_sync_action = ACTION_IDLE;
> -        mddev->resync_min = 0;
> +        init_recovery_position(mddev);
>         mddev->resync_max = MaxSector;
>         mddev->level = LEVEL_NONE;
> @@ -5101,17 +5154,9 @@ enum sync_action md_sync_action(struct mddev *mddev)
>         if (test_bit(MD_RECOVERY_RECOVER, &recovery))
>                 return ACTION_RECOVER;
>
> -        if (test_bit(MD_RECOVERY_SYNC, &recovery)) {
> -                /*
> -                 * MD_RECOVERY_CHECK must be paired with
> -                 * MD_RECOVERY_REQUESTED.
> -                 */
> -                if (test_bit(MD_RECOVERY_CHECK, &recovery))
> -                        return ACTION_CHECK;
> -                if (test_bit(MD_RECOVERY_REQUESTED, &recovery))
> -                        return ACTION_REPAIR;
> -                return ACTION_RESYNC;
> -        }
> +        /* MD_RECOVERY_CHECK must be paired with MD_RECOVERY_REQUESTED. */
> +        if (test_bit(MD_RECOVERY_SYNC, &recovery))
> +                return get_recovery_sync_action(mddev);
>
>         /*
>          * MD_RECOVERY_NEEDED or MD_RECOVERY_RUNNING is set, however, no
> @@ -5300,11 +5345,10 @@ action_store(struct mddev *mddev, const char *page, size_t len)
>                 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
>                 break;
>         case ACTION_CHECK:
> -                set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
> -                fallthrough;
>         case ACTION_REPAIR:
> -                set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
> -                set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
> +                ret = handle_requested_sync_action(mddev, action);
> +                if (ret)
> +                        goto out;
>                 fallthrough;
>         case ACTION_RESYNC:
>         case ACTION_IDLE:
> @@ -6783,7 +6827,7 @@ static void md_clean(struct mddev *mddev)
>         mddev->dev_sectors = 0;
>         mddev->raid_disks = 0;
>         mddev->resync_offset = 0;
> -        mddev->resync_min = 0;
> +        init_recovery_position(mddev);
>         mddev->resync_max = MaxSector;
>         mddev->reshape_position = MaxSector;
>         /* we still need mddev->external in export_rdev, do not clear it yet */
> @@ -9370,7 +9414,7 @@ static sector_t md_sync_position(struct mddev *mddev, enum sync_action action)
>         switch (action) {
>         case ACTION_CHECK:
>         case ACTION_REPAIR:
> -                return mddev->resync_min;
> +                return get_requested_position(mddev);
>         case ACTION_RESYNC:
>                 if (!mddev->bitmap)
>                         return mddev->resync_offset;
> @@ -9795,10 +9839,11 @@ void md_do_sync(struct md_thread *thread)
>         if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
>                 /* We completed so min/max setting can be forgotten if used. */
>                 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
> -                        mddev->resync_min = 0;
> +                        set_requested_position(mddev, 0);
>                 mddev->resync_max = MaxSector;
> -        } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
> -                mddev->resync_min = mddev->curr_resync_completed;
> +        } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
> +                set_requested_position(mddev, mddev->curr_resync_completed);
> +        }
>         set_bit(MD_RECOVERY_DONE, &mddev->recovery);
>         mddev->curr_resync = MD_RESYNC_NONE;
>         spin_unlock(&mddev->lock);
--
Thanks,
Nan