[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <47e62021-fd2c-44ba-be34-e12b2a486efb@huawei.com>
Date: Mon, 21 Jul 2025 20:33:31 +0800
From: Baokun Li <libaokun1@...wei.com>
To: Jan Kara <jack@...e.cz>, <tytso@....edu>
CC: <linux-ext4@...r.kernel.org>, <adilger.kernel@...ger.ca>,
<linux-kernel@...r.kernel.org>, <ojaswin@...ux.ibm.com>,
<julia.lawall@...ia.fr>, <yi.zhang@...wei.com>, <yangerkun@...wei.com>,
<libaokun@...weicloud.com>
Subject: Re: [PATCH v3 15/17] ext4: convert free groups order lists to xarrays
On 2025/7/21 19:07, Jan Kara wrote:
> On Mon 14-07-25 21:03:25, Baokun Li wrote:
>> |CPU: Kunpeng 920 | P80 | P1 |
>> |Memory: 512GB |------------------------|-------------------------|
>> |960GB SSD (0.5GB/s)| base | patched | base | patched |
>> |-------------------|-------|----------------|--------|----------------|
>> |mb_optimize_scan=0 | 20097 | 19555 (-2.6%) | 316141 | 315636 (-0.2%) |
>> |mb_optimize_scan=1 | 13318 | 15496 (+16.3%) | 325273 | 323569 (-0.5%) |
>>
>> |CPU: AMD 9654 * 2 | P96 | P1 |
>> |Memory: 1536GB |------------------------|-------------------------|
>> |960GB SSD (1GB/s) | base | patched | base | patched |
>> |-------------------|-------|----------------|--------|----------------|
>> |mb_optimize_scan=0 | 53603 | 53192 (-0.7%) | 214243 | 212678 (-0.7%) |
>> |mb_optimize_scan=1 | 20887 | 37636 (+80.1%) | 213632 | 214189 (+0.2%) |
>>
>> Signed-off-by: Baokun Li <libaokun1@...wei.com>
> The patch looks good and the results are nice. I've just noticed two typos:
>
>> +static inline void ext4_mb_avg_fragment_size_destory(struct ext4_sb_info *sbi)
> ^^^ destroy
>
>
>> +static inline void ext4_mb_largest_free_orders_destory(struct ext4_sb_info *sbi)
> ^^^ destroy
Hi Jan, thanks for the review! While examining this patch, I also
identified a comment formatting error that I regret overlooking previously.
My apologies for this oversight.
Hey Ted, could you please help apply the following diff to correct the
spelling errors and comment formatting issues? Or would you prefer I send
out a new patch series or a separate cleanup patch?
Thanks,
Baokun
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index a9eb997b8c9b..c61955cba370 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -863,10 +863,10 @@ mb_update_avg_fragment_size(struct super_block *sb, struct ext4_group_info *grp)
grp->bb_avg_fragment_size_order = new;
if (new >= 0) {
/*
- * Cannot use __GFP_NOFAIL because we hold the group lock.
- * Although allocation for insertion may fails, it's not fatal
- * as we have linear traversal to fall back on.
- */
+ * Cannot use __GFP_NOFAIL because we hold the group lock.
+	 * Although allocation for insertion may fail, it's not fatal
+ * as we have linear traversal to fall back on.
+ */
int err = xa_insert(&sbi->s_mb_avg_fragment_size[new],
grp->bb_group, grp, GFP_ATOMIC);
if (err)
@@ -1201,10 +1201,10 @@ mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp)
grp->bb_largest_free_order = new;
if (test_opt2(sb, MB_OPTIMIZE_SCAN) && new >= 0 && grp->bb_free) {
/*
- * Cannot use __GFP_NOFAIL because we hold the group lock.
- * Although allocation for insertion may fails, it's not fatal
- * as we have linear traversal to fall back on.
- */
+ * Cannot use __GFP_NOFAIL because we hold the group lock.
+	 * Although allocation for insertion may fail, it's not fatal
+ * as we have linear traversal to fall back on.
+ */
int err = xa_insert(&sbi->s_mb_largest_free_orders[new],
grp->bb_group, grp, GFP_ATOMIC);
if (err)
@@ -3657,14 +3657,14 @@ static void ext4_discard_work(struct work_struct *work)
ext4_mb_unload_buddy(&e4b);
}
-static inline void ext4_mb_avg_fragment_size_destory(struct ext4_sb_info *sbi)
+static inline void ext4_mb_avg_fragment_size_destroy(struct ext4_sb_info *sbi)
{
for (int i = 0; i < MB_NUM_ORDERS(sbi->s_sb); i++)
xa_destroy(&sbi->s_mb_avg_fragment_size[i]);
kfree(sbi->s_mb_avg_fragment_size);
}
-static inline void ext4_mb_largest_free_orders_destory(struct ext4_sb_info *sbi)
+static inline void ext4_mb_largest_free_orders_destroy(struct ext4_sb_info *sbi)
{
for (int i = 0; i < MB_NUM_ORDERS(sbi->s_sb); i++)
xa_destroy(&sbi->s_mb_largest_free_orders[i]);
@@ -3818,8 +3818,8 @@ int ext4_mb_init(struct super_block *sb)
kfree(sbi->s_mb_last_groups);
sbi->s_mb_last_groups = NULL;
out:
- ext4_mb_avg_fragment_size_destory(sbi);
- ext4_mb_largest_free_orders_destory(sbi);
+ ext4_mb_avg_fragment_size_destroy(sbi);
+ ext4_mb_largest_free_orders_destroy(sbi);
kfree(sbi->s_mb_offsets);
sbi->s_mb_offsets = NULL;
kfree(sbi->s_mb_maxs);
@@ -3886,8 +3886,8 @@ void ext4_mb_release(struct super_block *sb)
kvfree(group_info);
rcu_read_unlock();
}
- ext4_mb_avg_fragment_size_destory(sbi);
- ext4_mb_largest_free_orders_destory(sbi);
+ ext4_mb_avg_fragment_size_destroy(sbi);
+ ext4_mb_largest_free_orders_destroy(sbi);
kfree(sbi->s_mb_offsets);
kfree(sbi->s_mb_maxs);
iput(sbi->s_buddy_cache);
Powered by blists - more mailing lists