Message-ID: <4c8e7fa4-9381-1f30-bb65-17b28df9b3dd@huaweicloud.com>
Date: Mon, 4 Sep 2023 10:54:45 +0800
From: Kemeng Shi <shikemeng@...weicloud.com>
To: Ritesh Harjani <ritesh.list@...il.com>, tytso@....edu,
adilger.kernel@...ger.ca, linux-ext4@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH v6 04/11] ext4: call ext4_mb_mark_context in
ext4_mb_mark_diskspace_used
On 9/1/2023 11:51 AM, Ritesh Harjani wrote:
> Kemeng Shi <shikemeng@...weicloud.com> writes:
>
>> Call ext4_mb_mark_context in ext4_mb_mark_diskspace_used to:
>> 1. remove repeated code that updates the bitmap and group descriptor
>> on disk in the normal path.
>> 2. call ext4_mb_mark_context instead of only setting bits in the block
>> bitmap to fix the bitmap. ext4_mb_mark_context also updates the bitmap
>> checksum and other counters along with the bit change, keeping them
>> consistent with it; otherwise the block bitmap would be marked corrupted
>> because its checksum is left in an inconsistent state.
>>
>> Signed-off-by: Kemeng Shi <shikemeng@...weicloud.com>
>> Reviewed-by: Ojaswin Mujoo <ojaswin@...ux.ibm.com>
>> ---
>> fs/ext4/mballoc.c | 86 +++++++++++------------------------------------
>> 1 file changed, 20 insertions(+), 66 deletions(-)
>
> I was wondering whether checking for !ext4_inode_block_valid() could also
> be part of ext4_mb_mark_context() by passing an EXT4_MB_METABLOCKS_VALID_CHECK
> flag.
Looks great to me. Thanks for the suggestion; I will do this in the next version.
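Roughly what I have in mind for the next version (only a sketch against
this series, not compile-tested; the flag name follows your suggestion
and the exact plumbing, e.g. how the helper reaches the allocating
inode, may still change): the caller drops its open-coded
ext4_inode_block_valid() check and just passes the flag, something like:

	/*
	 * Sketch only: assumes ext4_mb_mark_context() can reach the
	 * allocating inode (e.g. via struct ext4_mark_context) so it can
	 * run ext4_inode_block_valid() itself when this flag is set.
	 */
	flags |= EXT4_MB_METABLOCKS_VALID_CHECK;
#ifdef AGGRESSIVE_CHECK
	flags |= EXT4_MB_BITMAP_MARKED_CHECK;
#endif
	err = ext4_mb_mark_context(&mc, ac->ac_b_ex.fe_group,
				   ac->ac_b_ex.fe_start,
				   ac->ac_b_ex.fe_len,
				   flags);
	/*
	 * On overlap with fs metadata the helper would fix the bitmap
	 * (bits plus checksums) itself and hand back -EFSCORRUPTED, so
	 * the caller only needs to propagate the error and skip the
	 * counter updates, as the current code does.
	 */
	if (err == -EFSCORRUPTED || (err && mc.changed == 0))
		return err;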
>
> But as for this patch, it looks good to me. Please feel free to add -
>
> Reviewed-by: Ritesh Harjani (IBM) <ritesh.list@...il.com>
>
> -ritesh
>
>>
>> diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
>> index b066ee018cdb..e650eac22237 100644
>> --- a/fs/ext4/mballoc.c
>> +++ b/fs/ext4/mballoc.c
>> @@ -4084,46 +4084,28 @@ static noinline_for_stack int
>> ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
>> handle_t *handle, unsigned int reserv_clstrs)
>> {
>> - struct buffer_head *bitmap_bh = NULL;
>> + struct ext4_mark_context mc;
>> struct ext4_group_desc *gdp;
>> - struct buffer_head *gdp_bh;
>> struct ext4_sb_info *sbi;
>> struct super_block *sb;
>> ext4_fsblk_t block;
>> int err, len;
>> + int flags = 0;
>>
>> BUG_ON(ac->ac_status != AC_STATUS_FOUND);
>> BUG_ON(ac->ac_b_ex.fe_len <= 0);
>>
>> sb = ac->ac_sb;
>> sbi = EXT4_SB(sb);
>> + ext4_mb_prepare_mark_context(&mc, handle, sb, 1);
>>
>> - bitmap_bh = ext4_read_block_bitmap(sb, ac->ac_b_ex.fe_group);
>> - if (IS_ERR(bitmap_bh)) {
>> - return PTR_ERR(bitmap_bh);
>> - }
>> -
>> - BUFFER_TRACE(bitmap_bh, "getting write access");
>> - err = ext4_journal_get_write_access(handle, sb, bitmap_bh,
>> - EXT4_JTR_NONE);
>> - if (err)
>> - goto out_err;
>> -
>> - err = -EIO;
>> - gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, &gdp_bh);
>> + gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, NULL);
>> if (!gdp)
>> - goto out_err;
>> -
>> + return -EIO;
>> ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group,
>> ext4_free_group_clusters(sb, gdp));
>>
>> - BUFFER_TRACE(gdp_bh, "get_write_access");
>> - err = ext4_journal_get_write_access(handle, sb, gdp_bh, EXT4_JTR_NONE);
>> - if (err)
>> - goto out_err;
>> -
>> block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
>> -
>> len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
>> if (!ext4_inode_block_valid(ac->ac_inode, block, len)) {
>> ext4_error(sb, "Allocating blocks %llu-%llu which overlap "
>> @@ -4132,41 +4114,28 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
>> * Fix the bitmap and return EFSCORRUPTED
>> * We leak some of the blocks here.
>> */
>> - ext4_lock_group(sb, ac->ac_b_ex.fe_group);
>> - mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
>> - ac->ac_b_ex.fe_len);
>> - ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
>> - err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
>> + err = ext4_mb_mark_context(&mc, ac->ac_b_ex.fe_group,
>> + ac->ac_b_ex.fe_start,
>> + ac->ac_b_ex.fe_len,
>> + 0);
>> if (!err)
>> err = -EFSCORRUPTED;
>> - goto out_err;
>> + return err;
>> }
>>
>> - ext4_lock_group(sb, ac->ac_b_ex.fe_group);
>> #ifdef AGGRESSIVE_CHECK
>> - {
>> - int i;
>> - for (i = 0; i < ac->ac_b_ex.fe_len; i++) {
>> - BUG_ON(mb_test_bit(ac->ac_b_ex.fe_start + i,
>> - bitmap_bh->b_data));
>> - }
>> - }
>> + flags |= EXT4_MB_BITMAP_MARKED_CHECK;
>> #endif
>> - mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
>> - ac->ac_b_ex.fe_len);
>> - if (ext4_has_group_desc_csum(sb) &&
>> - (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
>> - gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
>> - ext4_free_group_clusters_set(sb, gdp,
>> - ext4_free_clusters_after_init(sb,
>> - ac->ac_b_ex.fe_group, gdp));
>> - }
>> - len = ext4_free_group_clusters(sb, gdp) - ac->ac_b_ex.fe_len;
>> - ext4_free_group_clusters_set(sb, gdp, len);
>> - ext4_block_bitmap_csum_set(sb, gdp, bitmap_bh);
>> - ext4_group_desc_csum_set(sb, ac->ac_b_ex.fe_group, gdp);
>> + err = ext4_mb_mark_context(&mc, ac->ac_b_ex.fe_group,
>> + ac->ac_b_ex.fe_start, ac->ac_b_ex.fe_len,
>> + flags);
>> +
>> + if (err && mc.changed == 0)
>> + return err;
>>
>> - ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
>> +#ifdef AGGRESSIVE_CHECK
>> + BUG_ON(mc.changed != ac->ac_b_ex.fe_len);
>> +#endif
>> percpu_counter_sub(&sbi->s_freeclusters_counter, ac->ac_b_ex.fe_len);
>> /*
>> * Now reduce the dirty block count also. Should not go negative
>> @@ -4176,21 +4145,6 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
>> percpu_counter_sub(&sbi->s_dirtyclusters_counter,
>> reserv_clstrs);
>>
>> - if (sbi->s_log_groups_per_flex) {
>> - ext4_group_t flex_group = ext4_flex_group(sbi,
>> - ac->ac_b_ex.fe_group);
>> - atomic64_sub(ac->ac_b_ex.fe_len,
>> - &sbi_array_rcu_deref(sbi, s_flex_groups,
>> - flex_group)->free_clusters);
>> - }
>> -
>> - err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
>> - if (err)
>> - goto out_err;
>> - err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh);
>> -
>> -out_err:
>> - brelse(bitmap_bh);
>> return err;
>> }
>>
>> --
>> 2.30.0
>