lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [day] [month] [year] [list]
Message-ID: <1f4b9b25-799c-4145-82a3-3cdbac8eccf6@gmail.com>
Date: Mon, 30 Jun 2025 11:49:40 +0800
From: Wang Jinchao <wangjinchao600@...il.com>
To: Yu Kuai <yukuai1@...weicloud.com>, Song Liu <song@...nel.org>
Cc: linux-raid@...r.kernel.org, linux-kernel@...r.kernel.org,
 "yukuai (C)" <yukuai3@...wei.com>
Subject: Re: [PATCH v3 1/2] md/raid1: change r1conf->r1bio_pool to a pointer
 type

On 6/28/25 11:17, Yu Kuai wrote:
> Hi,
> 
> 在 2025/06/24 9:55, Wang Jinchao 写道:
>> In raid1_reshape(), newpool is a stack variable.
>> mempool_init() initializes newpool->wait with the stack address.
>> After assigning newpool to conf->r1bio_pool, the wait queue
>> needs to be reinitialized, which is not ideal.
>>
>> Change raid1_conf->r1bio_pool to a pointer type and
>> replace mempool_init() with mempool_create_kmalloc_pool() to
>> avoid referencing a stack-based wait queue.
>>
>> Signed-off-by: Wang Jinchao <wangjinchao600@...il.com>
>> ---
>>   drivers/md/raid1.c | 39 ++++++++++++++++++---------------------
>>   drivers/md/raid1.h |  2 +-
>>   2 files changed, 19 insertions(+), 22 deletions(-)
>>
>> diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
>> index fd4ce2a4136f..8249cbb89fec 100644
>> --- a/drivers/md/raid1.c
>> +++ b/drivers/md/raid1.c
>> @@ -255,7 +255,7 @@ static void free_r1bio(struct r1bio *r1_bio)
>>       struct r1conf *conf = r1_bio->mddev->private;
>>       put_all_bios(conf, r1_bio);
>> -    mempool_free(r1_bio, &conf->r1bio_pool);
>> +    mempool_free(r1_bio, conf->r1bio_pool);
>>   }
>>   static void put_buf(struct r1bio *r1_bio)
>> @@ -1305,9 +1305,8 @@ alloc_r1bio(struct mddev *mddev, struct bio *bio)
>>       struct r1conf *conf = mddev->private;
>>       struct r1bio *r1_bio;
>> -    r1_bio = mempool_alloc(&conf->r1bio_pool, GFP_NOIO);
>> -    /* Ensure no bio records IO_BLOCKED */
>> -    memset(r1_bio->bios, 0, conf->raid_disks * sizeof(r1_bio->bios[0]));
>> +    r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
>> +    memset(r1_bio, 0, offsetof(struct r1bio, bios[conf->raid_disks * 
>> 2]));
>>       init_r1bio(r1_bio, mddev, bio);
>>       return r1_bio;
>>   }
>> @@ -3084,6 +3083,7 @@ static struct r1conf *setup_conf(struct mddev 
>> *mddev)
>>       int i;
>>       struct raid1_info *disk;
>>       struct md_rdev *rdev;
>> +    size_t r1bio_size;
>>       int err = -ENOMEM;
>>       conf = kzalloc(sizeof(struct r1conf), GFP_KERNEL);
>> @@ -3124,9 +3124,10 @@ static struct r1conf *setup_conf(struct mddev 
>> *mddev)
>>       if (!conf->poolinfo)
>>           goto abort;
>>       conf->poolinfo->raid_disks = mddev->raid_disks * 2;
>> -    err = mempool_init(&conf->r1bio_pool, NR_RAID_BIOS, 
>> r1bio_pool_alloc,
>> -               rbio_pool_free, conf->poolinfo);
>> -    if (err)
>> +
>> +    r1bio_size = offsetof(struct r1bio, bios[mddev->raid_disks * 2]);
> 
> The local variable doesn't look necessary, it's just used once anyway.
>> +    conf->r1bio_pool = mempool_create_kmalloc_pool(NR_RAID_BIOS, 
>> r1bio_size);
>> +    if (!conf->r1bio_pool)
>>           goto abort;
>>       err = bioset_init(&conf->bio_split, BIO_POOL_SIZE, 0, 0);
>> @@ -3197,7 +3198,7 @@ static struct r1conf *setup_conf(struct mddev 
>> *mddev)
>>    abort:
>>       if (conf) {
>> -        mempool_exit(&conf->r1bio_pool);
>> +        mempool_destroy(conf->r1bio_pool);
>>           kfree(conf->mirrors);
>>           safe_put_page(conf->tmppage);
>>           kfree(conf->poolinfo);
>> @@ -3310,7 +3311,7 @@ static void raid1_free(struct mddev *mddev, void 
>> *priv)
>>   {
>>       struct r1conf *conf = priv;
>> -    mempool_exit(&conf->r1bio_pool);
>> +    mempool_destroy(conf->r1bio_pool);
>>       kfree(conf->mirrors);
>>       safe_put_page(conf->tmppage);
>>       kfree(conf->poolinfo);
>> @@ -3366,17 +3367,14 @@ static int raid1_reshape(struct mddev *mddev)
>>        * At the same time, we "pack" the devices so that all the missing
>>        * devices have the higher raid_disk numbers.
>>        */
>> -    mempool_t newpool, oldpool;
>> +    mempool_t *newpool, *oldpool;
>>       struct pool_info *newpoolinfo;
>> +    size_t new_r1bio_size;
>>       struct raid1_info *newmirrors;
>>       struct r1conf *conf = mddev->private;
>>       int cnt, raid_disks;
>>       unsigned long flags;
>>       int d, d2;
>> -    int ret;
>> -
>> -    memset(&newpool, 0, sizeof(newpool));
>> -    memset(&oldpool, 0, sizeof(oldpool));
>>       /* Cannot change chunk_size, layout, or level */
>>       if (mddev->chunk_sectors != mddev->new_chunk_sectors ||
>> @@ -3408,18 +3406,18 @@ static int raid1_reshape(struct mddev *mddev)
>>       newpoolinfo->mddev = mddev;
>>       newpoolinfo->raid_disks = raid_disks * 2;
>> -    ret = mempool_init(&newpool, NR_RAID_BIOS, r1bio_pool_alloc,
>> -               rbio_pool_free, newpoolinfo);
>> -    if (ret) {
>> +    new_r1bio_size = offsetof(struct r1bio, bios[raid_disks * 2]);
> same here. Otherwise looks good to me.
> 
> Reviewed-by: Yu Kuai <yukuai3@...wei.com>
>> +    newpool = mempool_create_kmalloc_pool(NR_RAID_BIOS, new_r1bio_size);
>> +    if (!newpool) {
>>           kfree(newpoolinfo);
>> -        return ret;
>> +        return -ENOMEM;
>>       }
>>       newmirrors = kzalloc(array3_size(sizeof(struct raid1_info),
>>                        raid_disks, 2),
>>                    GFP_KERNEL);
>>       if (!newmirrors) {
>>           kfree(newpoolinfo);
>> -        mempool_exit(&newpool);
>> +        mempool_destroy(newpool);
>>           return -ENOMEM;
>>       }
>> @@ -3428,7 +3426,6 @@ static int raid1_reshape(struct mddev *mddev)
>>       /* ok, everything is stopped */
>>       oldpool = conf->r1bio_pool;
>>       conf->r1bio_pool = newpool;
>> -    init_waitqueue_head(&conf->r1bio_pool.wait);
>>       for (d = d2 = 0; d < conf->raid_disks; d++) {
>>           struct md_rdev *rdev = conf->mirrors[d].rdev;
>> @@ -3460,7 +3457,7 @@ static int raid1_reshape(struct mddev *mddev)
>>       set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
>>       md_wakeup_thread(mddev->thread);
>> -    mempool_exit(&oldpool);
>> +    mempool_destroy(oldpool);
>>       return 0;
>>   }
>> diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
>> index 33f318fcc268..652c347b1a70 100644
>> --- a/drivers/md/raid1.h
>> +++ b/drivers/md/raid1.h
>> @@ -118,7 +118,7 @@ struct r1conf {
>>        * mempools - it changes when the array grows or shrinks
>>        */
>>       struct pool_info    *poolinfo;
>> -    mempool_t        r1bio_pool;
>> +    mempool_t        *r1bio_pool;
>>       mempool_t        r1buf_pool;
>>       struct bio_set        bio_split;
>>
> 
Thanks for pointing that out.

I originally introduced the local variable to avoid these checkpatch.pl 
messages:
     CHECK: Alignment should match open parenthesis
     WARNING: line length of xxx exceeds 100 columns
But I agree that using a temporary variable in this case adds 
unnecessary noise, since the value is only used once.

Based on your review and a re-read of the kernel documentation, I 
understand that CHECK:-level warnings do not strictly require 
fixing—especially when fixing them would harm clarity. Please let me 
know if I’ve misunderstood it.

I'll drop the local variable and update the patch accordingly in the 
next version.

Thanks again for the feedback.

-- 
Best regards,
Jinchao

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ