[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <4b57e9b4-e660-a7df-4655-93c2815ec758@huaweicloud.com>
Date: Thu, 5 Feb 2026 15:23:05 +0800
From: Li Nan <linan666@...weicloud.com>
To: yukuai@...as.com, linan666@...weicloud.com, song@...nel.org
Cc: xni@...hat.com, linux-raid@...r.kernel.org, linux-kernel@...r.kernel.org,
yangerkun@...wei.com, yi.zhang@...wei.com
Subject: Re: [PATCH v2 03/14] md/raid1: use folio for tmppage
在 2026/2/5 0:45, Yu Kuai 写道:
> Hi,
>
> 在 2026/1/28 15:56, linan666@...weicloud.com 写道:
>> From: Li Nan <linan122@...wei.com>
>>
>> Convert tmppage to tmpfolio and use it throughout in raid1.
>>
>> Signed-off-by: Li Nan <linan122@...wei.com>
>> Reviewed-by: Xiao Ni <xni@...hat.com>
>> ---
>> drivers/md/raid1.h | 2 +-
>> drivers/md/raid1.c | 18 ++++++++++--------
>> 2 files changed, 11 insertions(+), 9 deletions(-)
>>
>> diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
>> index c98d43a7ae99..d480b3a8c2c4 100644
>> --- a/drivers/md/raid1.h
>> +++ b/drivers/md/raid1.h
>> @@ -101,7 +101,7 @@ struct r1conf {
>> /* temporary buffer to synchronous IO when attempting to repair
>> * a read error.
>> */
>> - struct page *tmppage;
>> + struct folio *tmpfolio;
>>
>> /* When taking over an array from a different personality, we store
>> * the new thread here until we fully activate the array.
>> diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
>> index 407925951299..43453f1a04f4 100644
>> --- a/drivers/md/raid1.c
>> +++ b/drivers/md/raid1.c
>> @@ -2417,8 +2417,8 @@ static void fix_read_error(struct r1conf *conf, struct r1bio *r1_bio)
>> rdev->recovery_offset >= sect + s)) &&
>> rdev_has_badblock(rdev, sect, s) == 0) {
>> atomic_inc(&rdev->nr_pending);
>> - if (sync_page_io(rdev, sect, s<<9,
>> - conf->tmppage, REQ_OP_READ, false))
>> + if (sync_folio_io(rdev, sect, s<<9, 0,
>> + conf->tmpfolio, REQ_OP_READ, false))
>> success = 1;
>> rdev_dec_pending(rdev, mddev);
>> if (success)
>> @@ -2447,7 +2447,8 @@ static void fix_read_error(struct r1conf *conf, struct r1bio *r1_bio)
>> !test_bit(Faulty, &rdev->flags)) {
>> atomic_inc(&rdev->nr_pending);
>> r1_sync_page_io(rdev, sect, s,
>> - conf->tmppage, REQ_OP_WRITE);
>> + folio_page(conf->tmpfolio, 0),
>> + REQ_OP_WRITE);
>> rdev_dec_pending(rdev, mddev);
>> }
>> }
>> @@ -2461,7 +2462,8 @@ static void fix_read_error(struct r1conf *conf, struct r1bio *r1_bio)
>> !test_bit(Faulty, &rdev->flags)) {
>> atomic_inc(&rdev->nr_pending);
>> if (r1_sync_page_io(rdev, sect, s,
>> - conf->tmppage, REQ_OP_READ)) {
>> + folio_page(conf->tmpfolio, 0),
>> + REQ_OP_READ)) {
>> atomic_add(s, &rdev->corrected_errors);
>> pr_info("md/raid1:%s: read error corrected (%d sectors at %llu on %pg)\n",
>> mdname(mddev), s,
>> @@ -3120,8 +3122,8 @@ static struct r1conf *setup_conf(struct mddev *mddev)
>> if (!conf->mirrors)
>> goto abort;
>>
>> - conf->tmppage = alloc_page(GFP_KERNEL);
>> - if (!conf->tmppage)
>> + conf->tmpfolio = folio_alloc(GFP_KERNEL, 0);
>> + if (!conf->tmpfolio)
>> goto abort;
>>
>> r1bio_size = offsetof(struct r1bio, bios[mddev->raid_disks * 2]);
>> @@ -3196,7 +3198,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
>> if (conf) {
>> mempool_destroy(conf->r1bio_pool);
>> kfree(conf->mirrors);
>> - safe_put_page(conf->tmppage);
>> + folio_put(conf->tmpfolio);
>
> Is this safe? folio_put() can't be called with NULL.
>
Yeah. Should we introduce a safe_put_folio() helper, or just add a NULL check here?
>> kfree(conf->nr_pending);
>> kfree(conf->nr_waiting);
>> kfree(conf->nr_queued);
>> @@ -3310,7 +3312,7 @@ static void raid1_free(struct mddev *mddev, void *priv)
>>
>> mempool_destroy(conf->r1bio_pool);
>> kfree(conf->mirrors);
>> - safe_put_page(conf->tmppage);
>> + folio_put(conf->tmpfolio);
>
> Same here.
>
By the time raid1_free() is called, setup_conf() has already succeeded, so
conf->tmpfolio cannot be NULL. Calling folio_put() here is safe.
>> kfree(conf->nr_pending);
>> kfree(conf->nr_waiting);
>> kfree(conf->nr_queued);
>
--
Thanks,
Nan
Powered by blists - more mailing lists