Message-ID: <ZGCq2yKNyp5VMUM4@li-bb2b2a4c-3307-11b2-a85c-8fa5c3a69313.ibm.com>
Date: Sun, 14 May 2023 15:03:15 +0530
From: Ojaswin Mujoo <ojaswin@...ux.ibm.com>
To: Kemeng Shi <shikemeng@...weicloud.com>
Cc: tytso@....edu, adilger.kernel@...ger.ca,
linux-ext4@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH v3 14/19] ext4: extend ext4_mb_mark_group_bb to support
 allocation under journal
On Mon, Apr 17, 2023 at 07:06:12PM +0800, Kemeng Shi wrote:
> Previously, ext4_mb_mark_group_bb was only called under the fast commit
> replay path, so there was no valid handle when we updated the block
> bitmap and group descriptor. This patch extends ext4_mb_mark_group_bb so
> that it can also be used by code running under a journal handle. There
> are several improvements:
> 1. add "handle_t *handle" to struct ext4_mark_context to accept a handle
>    used to journal the block bitmap and group descriptor updates inside
>    ext4_mb_mark_group_bb (the added journal code is based on the journal
>    code in ext4_mb_mark_diskspace_used, where ext4_mb_mark_group_bb
>    is going to be used.)
> 2. add an EXT4_MB_BITMAP_MARKED_CHECK flag to control whether to check
>    if bits in the block bitmap are already marked, since allocation code
>    under the journal asserts that none of the bits to be changed were
>    marked before.
> 3. add "ext4_grpblk_t changed" to struct ext4_mark_context to report the
>    number of bits in the block bitmap that actually changed.
>
> Signed-off-by: Kemeng Shi <shikemeng@...weicloud.com>
Looks good, feel free to add:
Reviewed-by: Ojaswin Mujoo <ojaswin@...ux.ibm.com>
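
For anyone following along, this is roughly how I'd expect a journalled
caller (e.g. the later ext4_mb_mark_diskspace_used conversion) to use the
extended helper. Just an illustrative sketch, not code from this series;
mark_clusters_used() and the -EFSCORRUPTED fallback are made up here to
show the intended usage of the new fields and flags:

	/*
	 * Illustrative only: mark @len clusters starting at @blkoff in
	 * @group as allocated, journalling the bitmap and gdp updates
	 * through the caller's existing @handle.
	 */
	static int mark_clusters_used(handle_t *handle, struct super_block *sb,
				      ext4_group_t group, ext4_grpblk_t blkoff,
				      ext4_grpblk_t len)
	{
		struct ext4_mark_context mc = {
			.handle = handle,	/* journal bitmap + gdp updates */
			.sb = sb,
			.state = 1,		/* set bits, i.e. mark as used */
		};
		int err;

		/*
		 * EXT4_MB_BITMAP_MARKED_CHECK makes the helper count bits
		 * that were already set, so the caller can verify nothing
		 * was double-allocated (mc.changed should equal len). No
		 * EXT4_MB_SYNC_UPDATE here: the journal orders the writes,
		 * so sync_dirty_buffer() is not needed.
		 */
		err = ext4_mb_mark_group_bb(&mc, group, blkoff, len,
					    EXT4_MB_BITMAP_MARKED_CHECK);
		if (!err && mc.changed != len)
			err = -EFSCORRUPTED;	/* some bits were already in use */

		return err;
	}

The key difference from the fast commit replay path is that the handle
journals the bitmap/gdp buffers, so EXT4_MB_SYNC_UPDATE (and its
sync_dirty_buffer() calls) is only needed by callers without a handle.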
Regards,
ojaswin
> ---
> fs/ext4/mballoc.c | 59 ++++++++++++++++++++++++++++++++++++-----------
> 1 file changed, 45 insertions(+), 14 deletions(-)
>
> diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
> index 623508115d98..c3e620f6eded 100644
> --- a/fs/ext4/mballoc.c
> +++ b/fs/ext4/mballoc.c
> @@ -3730,32 +3730,54 @@ void ext4_exit_mballoc(void)
> ext4_groupinfo_destroy_slabs();
> }
>
> +#define EXT4_MB_BITMAP_MARKED_CHECK 0x0001
> +#define EXT4_MB_SYNC_UPDATE 0x0002
> struct ext4_mark_context {
> + handle_t *handle;
> struct super_block *sb;
> int state;
> + ext4_grpblk_t changed;
> };
>
> static int
> ext4_mb_mark_group_bb(struct ext4_mark_context *mc, ext4_group_t group,
> - ext4_grpblk_t blkoff, ext4_grpblk_t len)
> + ext4_grpblk_t blkoff, ext4_grpblk_t len, int flags)
> {
> + handle_t *handle = mc->handle;
> struct super_block *sb = mc->sb;
> struct ext4_sb_info *sbi = EXT4_SB(sb);
> struct buffer_head *bitmap_bh = NULL;
> struct ext4_group_desc *gdp;
> struct buffer_head *gdp_bh;
> int err;
> - unsigned int i, already, changed;
> + unsigned int i, already, changed = len;
>
> + mc->changed = 0;
> bitmap_bh = ext4_read_block_bitmap(sb, group);
> if (IS_ERR(bitmap_bh))
> return PTR_ERR(bitmap_bh);
>
> + if (handle) {
> + BUFFER_TRACE(bitmap_bh, "getting write access");
> + err = ext4_journal_get_write_access(handle, sb, bitmap_bh,
> + EXT4_JTR_NONE);
> + if (err)
> + goto out_err;
> + }
> +
> err = -EIO;
> gdp = ext4_get_group_desc(sb, group, &gdp_bh);
> if (!gdp)
> goto out_err;
>
> + if (handle) {
> + BUFFER_TRACE(gdp_bh, "get_write_access");
> + err = ext4_journal_get_write_access(handle, sb, gdp_bh,
> + EXT4_JTR_NONE);
> + if (err)
> + goto out_err;
> + }
> +
> ext4_lock_group(sb, group);
> if (ext4_has_group_desc_csum(sb) &&
> (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
> @@ -3764,12 +3786,14 @@ ext4_mb_mark_group_bb(struct ext4_mark_context *mc, ext4_group_t group,
> ext4_free_clusters_after_init(sb, group, gdp));
> }
>
> - already = 0;
> - for (i = 0; i < len; i++)
> - if (mb_test_bit(blkoff + i, bitmap_bh->b_data) ==
> - mc->state)
> - already++;
> - changed = len - already;
> + if (flags & EXT4_MB_BITMAP_MARKED_CHECK) {
> + already = 0;
> + for (i = 0; i < len; i++)
> + if (mb_test_bit(blkoff + i, bitmap_bh->b_data) ==
> + mc->state)
> + already++;
> + changed = len - already;
> + }
>
> if (mc->state) {
> mb_set_bits(bitmap_bh->b_data, blkoff, len);
> @@ -3784,6 +3808,7 @@ ext4_mb_mark_group_bb(struct ext4_mark_context *mc, ext4_group_t group,
> ext4_block_bitmap_csum_set(sb, gdp, bitmap_bh);
> ext4_group_desc_csum_set(sb, group, gdp);
> ext4_unlock_group(sb, group);
> + mc->changed = changed;
>
> if (sbi->s_log_groups_per_flex) {
> ext4_group_t flex_group = ext4_flex_group(sbi, group);
> @@ -3796,15 +3821,17 @@ ext4_mb_mark_group_bb(struct ext4_mark_context *mc, ext4_group_t group,
> atomic64_add(changed, &fg->free_clusters);
> }
>
> - err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh);
> + err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
> if (err)
> goto out_err;
> - err = ext4_handle_dirty_metadata(NULL, NULL, gdp_bh);
> + err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh);
> if (err)
> goto out_err;
>
> - sync_dirty_buffer(bitmap_bh);
> - sync_dirty_buffer(gdp_bh);
> + if (flags & EXT4_MB_SYNC_UPDATE) {
> + sync_dirty_buffer(bitmap_bh);
> + sync_dirty_buffer(gdp_bh);
> + }
>
> out_err:
> brelse(bitmap_bh);
> @@ -3968,7 +3995,9 @@ void ext4_mb_mark_bb(struct super_block *sb, ext4_fsblk_t block,
> break;
> }
>
> - err = ext4_mb_mark_group_bb(&mc, group, blkoff, clen);
> + err = ext4_mb_mark_group_bb(&mc, group, blkoff, clen,
> + EXT4_MB_BITMAP_MARKED_CHECK |
> + EXT4_MB_SYNC_UPDATE);
> if (err)
> break;
>
> @@ -6072,7 +6101,9 @@ static void ext4_free_blocks_simple(struct inode *inode, ext4_fsblk_t block,
> ext4_grpblk_t blkoff;
>
> ext4_get_group_no_and_offset(sb, block, &group, &blkoff);
> - ext4_mb_mark_group_bb(&mc, group, blkoff, count);
> + ext4_mb_mark_group_bb(&mc, group, blkoff, count,
> + EXT4_MB_BITMAP_MARKED_CHECK |
> + EXT4_MB_SYNC_UPDATE);
> }
>
> /**
> --
> 2.30.0
>