Message-Id: <0b8ba3a3-ce7e-4fd0-aacb-3ea62876acc8@fnnas.com>
Date: Thu, 5 Feb 2026 00:52:24 +0800
From: "Yu Kuai" <yukuai@...as.com>
To: <linan666@...weicloud.com>, <song@...nel.org>
Cc: <xni@...hat.com>, <linux-raid@...r.kernel.org>, 
	<linux-kernel@...r.kernel.org>, <yangerkun@...wei.com>, 
	<yi.zhang@...wei.com>, <yukuai@...as.com>
Subject: Re: [PATCH v2 06/14] md: Clean up folio sync support related code

Hi,

On 2026/1/28 15:57, linan666@...weicloud.com wrote:
> From: Li Nan <linan122@...wei.com>
>
> 1. Remove resync_get_folio() and invoke folio_get() directly instead.
> 2. Remove the redundant do-while(0) loop in md_bio_reset_resync_folio().
> 3. Drop the local bio variable in r1buf_pool_alloc() and
>     r10buf_pool_alloc() by referencing r1_bio->bios[j] and
>     r10_bio->devs[j].bio directly.
> 4. Remove the now-unused RESYNC_PAGES macro.
>
> Signed-off-by: Li Nan <linan122@...wei.com>
> Reviewed-by: Xiao Ni <xni@...hat.com>
> ---
>   drivers/md/raid1-10.c | 22 ++++++----------------
>   drivers/md/raid1.c    |  6 ++----
>   drivers/md/raid10.c   |  6 ++----
>   3 files changed, 10 insertions(+), 24 deletions(-)

I think this patch can be merged into patch 5.
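
A side note, not a request for change: with resync_get_folio() gone, the
refcount intent is visible right at the call site. A minimal sketch of the
shared-folio pattern in the pool allocators, not the actual driver code
('first' and 'clone' are made-up names):

	struct resync_folio first, clone;

	if (resync_alloc_folio(&first, gfp_flags))	/* one real allocation */
		return NULL;				/* error handling elided */

	memcpy(&clone, &first, sizeof(clone));		/* clone shares the same folio */
	folio_get(clone.folio);				/* extra reference for the clone */

	/*
	 * Each holder later drops its own reference via resync_free_folio(),
	 * i.e. folio_put(); the final put frees the folio.
	 */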

> diff --git a/drivers/md/raid1-10.c b/drivers/md/raid1-10.c
> index 300fbe9dc02e..568ab002691f 100644
> --- a/drivers/md/raid1-10.c
> +++ b/drivers/md/raid1-10.c
> @@ -1,7 +1,6 @@
>   // SPDX-License-Identifier: GPL-2.0
>   /* Maximum size of each resync request */
>   #define RESYNC_BLOCK_SIZE (64*1024)
> -#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
>   #define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
>   
>   /*
> @@ -56,11 +55,6 @@ static inline void resync_free_folio(struct resync_folio *rf)
>   	folio_put(rf->folio);
>   }
>   
> -static inline void resync_get_folio(struct resync_folio *rf)
> -{
> -	folio_get(rf->folio);
> -}
> -
>   static inline struct folio *resync_fetch_folio(struct resync_folio *rf)
>   {
>   	return rf->folio;
> @@ -80,16 +74,12 @@ static void md_bio_reset_resync_folio(struct bio *bio, struct resync_folio *rf,
>   			       int size)
>   {
>   	/* initialize bvec table again */
> -	do {
> -		struct folio *folio = resync_fetch_folio(rf);
> -		int len = min_t(int, size, RESYNC_BLOCK_SIZE);
> -
> -		if (WARN_ON(!bio_add_folio(bio, folio, len, 0))) {
> -			bio->bi_status = BLK_STS_RESOURCE;
> -			bio_endio(bio);
> -			return;
> -		}
> -	} while (0);
> +	if (WARN_ON(!bio_add_folio(bio, resync_fetch_folio(rf),
> +				   min_t(int, size, RESYNC_BLOCK_SIZE),
> +				   0))) {
> +		bio->bi_status = BLK_STS_RESOURCE;
> +		bio_endio(bio);
> +	}
>   }
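
For what it's worth, behavior is unchanged here: the removed 'return' only
broke out of a single-pass do/while that was already the last statement of
the function, so the plain 'if' is equivalent. In general (generic sketch,
not driver code):

	do {
		if (err) {
			handle(err);
			return;	/* redundant when nothing follows the loop */
		}
	} while (0);
	/* function ends here either way */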
>   
>   
> diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
> index d9c106529289..5954ead7dfd4 100644
> --- a/drivers/md/raid1.c
> +++ b/drivers/md/raid1.c
> @@ -181,18 +181,16 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
>   	for (j = 0; j < conf->raid_disks * 2; j++) {
>   		struct resync_folio *rf = &rfs[j];
>   
> -		bio = r1_bio->bios[j];
> -
>   		if (j < need_folio) {
>   			if (resync_alloc_folio(rf, gfp_flags))
>   				goto out_free_folio;
>   		} else {
>   			memcpy(rf, &rfs[0], sizeof(*rf));
> -			resync_get_folio(rf);
> +			folio_get(rf->folio);
>   		}
>   
>   		rf->raid_bio = r1_bio;
> -		bio->bi_private = rf;
> +		r1_bio->bios[j]->bi_private = rf;
>   	}
>   
>   	r1_bio->master_bio = NULL;
> diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
> index 7533aeb23819..5c0975ec8809 100644
> --- a/drivers/md/raid10.c
> +++ b/drivers/md/raid10.c
> @@ -183,19 +183,17 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
>   		if (rbio)
>   			rf_repl = &rfs[nalloc + j];
>   
> -		bio = r10_bio->devs[j].bio;
> -
>   		if (!j || test_bit(MD_RECOVERY_SYNC,
>   				   &conf->mddev->recovery)) {
>   			if (resync_alloc_folio(rf, gfp_flags))
>   				goto out_free_folio;
>   		} else {
>   			memcpy(rf, &rfs[0], sizeof(*rf));
> -			resync_get_folio(rf);
> +			folio_get(rf->folio);
>   		}
>   
>   		rf->raid_bio = r10_bio;
> -		bio->bi_private = rf;
> +		r10_bio->devs[j].bio->bi_private = rf;
>   		if (rbio) {
>   			memcpy(rf_repl, rf, sizeof(*rf));
>   			rbio->bi_private = rf_repl;

-- 
Thanks,
Kuai
