[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20230826155028.4019470-4-shikemeng@huaweicloud.com>
Date: Sat, 26 Aug 2023 23:50:20 +0800
From: Kemeng Shi <shikemeng@...weicloud.com>
To: tytso@....edu, adilger.kernel@...ger.ca, ritesh.list@...il.com,
linux-ext4@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: [PATCH v6 03/11] ext4: extend ext4_mb_mark_context to support allocation under journal
Previously, ext4_mb_mark_context was only called in the fast commit
replay path, so there was no valid handle when we updated the block bitmap
and group descriptor. This patch extends ext4_mb_mark_context
to be usable by code running under the journal. There are several improvements:
1. add "handle_t *handle" to struct ext4_mark_context to accept handle
to journal block bitmap and group descriptor update inside
ext4_mb_mark_context (the added journal code is based on the journal
code in ext4_mb_mark_diskspace_used where ext4_mb_mark_context
is going to be used.)
2. add EXT4_MB_BITMAP_MARKED_CHECK flag to control whether to check if bits
in the block bitmap are already marked, as allocation code under the
journal asserts that all bits to be changed are not marked beforehand.
3. add "ext4_grpblk_t changed" to struct ext4_mark_context to report the
number of bits in the block bitmap that have changed.
Signed-off-by: Kemeng Shi <shikemeng@...weicloud.com>
Reviewed-by: Ojaswin Mujoo <ojaswin@...ux.ibm.com>
---
fs/ext4/mballoc.c | 65 +++++++++++++++++++++++++++++++++++------------
1 file changed, 49 insertions(+), 16 deletions(-)
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index c803f74aaf63..b066ee018cdb 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -3952,6 +3952,8 @@ void ext4_exit_mballoc(void)
ext4_groupinfo_destroy_slabs();
}
+#define EXT4_MB_BITMAP_MARKED_CHECK 0x0001
+#define EXT4_MB_SYNC_UPDATE 0x0002
/*
* Collect global setting to reduce the number of variable passing to
* ext4_mb_mark_context. Pass target group blocks range directly to
@@ -3959,39 +3961,61 @@ void ext4_exit_mballoc(void)
* to show clearly the specific block range will be marked.
*/
struct ext4_mark_context {
+ handle_t *handle;
struct super_block *sb;
int state;
+ ext4_grpblk_t changed;
};
static inline void ext4_mb_prepare_mark_context(struct ext4_mark_context *mc,
+ handle_t *handle,
struct super_block *sb,
int state)
{
+ mc->handle = handle;
mc->sb = sb;
mc->state = state;
}
static int
ext4_mb_mark_context(struct ext4_mark_context *mc, ext4_group_t group,
- ext4_grpblk_t blkoff, ext4_grpblk_t len)
+ ext4_grpblk_t blkoff, ext4_grpblk_t len, int flags)
{
+ handle_t *handle = mc->handle;
struct super_block *sb = mc->sb;
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct buffer_head *bitmap_bh = NULL;
struct ext4_group_desc *gdp;
struct buffer_head *gdp_bh;
int err;
- unsigned int i, already, changed;
+ unsigned int i, already, changed = len;
+ mc->changed = 0;
bitmap_bh = ext4_read_block_bitmap(sb, group);
if (IS_ERR(bitmap_bh))
return PTR_ERR(bitmap_bh);
+ if (handle) {
+ BUFFER_TRACE(bitmap_bh, "getting write access");
+ err = ext4_journal_get_write_access(handle, sb, bitmap_bh,
+ EXT4_JTR_NONE);
+ if (err)
+ goto out_err;
+ }
+
err = -EIO;
gdp = ext4_get_group_desc(sb, group, &gdp_bh);
if (!gdp)
goto out_err;
+ if (handle) {
+ BUFFER_TRACE(gdp_bh, "get_write_access");
+ err = ext4_journal_get_write_access(handle, sb, gdp_bh,
+ EXT4_JTR_NONE);
+ if (err)
+ goto out_err;
+ }
+
ext4_lock_group(sb, group);
if (ext4_has_group_desc_csum(sb) &&
(gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
@@ -4000,12 +4024,14 @@ ext4_mb_mark_context(struct ext4_mark_context *mc, ext4_group_t group,
ext4_free_clusters_after_init(sb, group, gdp));
}
- already = 0;
- for (i = 0; i < len; i++)
- if (mb_test_bit(blkoff + i, bitmap_bh->b_data) ==
- mc->state)
- already++;
- changed = len - already;
+ if (flags & EXT4_MB_BITMAP_MARKED_CHECK) {
+ already = 0;
+ for (i = 0; i < len; i++)
+ if (mb_test_bit(blkoff + i, bitmap_bh->b_data) ==
+ mc->state)
+ already++;
+ changed = len - already;
+ }
if (mc->state) {
mb_set_bits(bitmap_bh->b_data, blkoff, len);
@@ -4020,6 +4046,7 @@ ext4_mb_mark_context(struct ext4_mark_context *mc, ext4_group_t group,
ext4_block_bitmap_csum_set(sb, gdp, bitmap_bh);
ext4_group_desc_csum_set(sb, group, gdp);
ext4_unlock_group(sb, group);
+ mc->changed = changed;
if (sbi->s_log_groups_per_flex) {
ext4_group_t flex_group = ext4_flex_group(sbi, group);
@@ -4032,15 +4059,17 @@ ext4_mb_mark_context(struct ext4_mark_context *mc, ext4_group_t group,
atomic64_add(changed, &fg->free_clusters);
}
- err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh);
+ err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
if (err)
goto out_err;
- err = ext4_handle_dirty_metadata(NULL, NULL, gdp_bh);
+ err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh);
if (err)
goto out_err;
- sync_dirty_buffer(bitmap_bh);
- sync_dirty_buffer(gdp_bh);
+ if (flags & EXT4_MB_SYNC_UPDATE) {
+ sync_dirty_buffer(bitmap_bh);
+ sync_dirty_buffer(gdp_bh);
+ }
out_err:
brelse(bitmap_bh);
@@ -4179,7 +4208,7 @@ void ext4_mb_mark_bb(struct super_block *sb, ext4_fsblk_t block,
int err = 0;
unsigned int clen, thisgrp_len;
- ext4_mb_prepare_mark_context(&mc, sb, state);
+ ext4_mb_prepare_mark_context(&mc, NULL, sb, state);
while (len > 0) {
ext4_get_group_no_and_offset(sb, block, &group, &blkoff);
@@ -4202,7 +4231,9 @@ void ext4_mb_mark_bb(struct super_block *sb, ext4_fsblk_t block,
break;
}
- err = ext4_mb_mark_context(&mc, group, blkoff, clen);
+ err = ext4_mb_mark_context(&mc, group, blkoff, clen,
+ EXT4_MB_BITMAP_MARKED_CHECK |
+ EXT4_MB_SYNC_UPDATE);
if (err)
break;
@@ -6419,9 +6450,11 @@ static void ext4_free_blocks_simple(struct inode *inode, ext4_fsblk_t block,
ext4_group_t group;
ext4_grpblk_t blkoff;
- ext4_mb_prepare_mark_context(&mc, sb, 0);
+ ext4_mb_prepare_mark_context(&mc, NULL, sb, 0);
ext4_get_group_no_and_offset(sb, block, &group, &blkoff);
- ext4_mb_mark_context(&mc, group, blkoff, count);
+ ext4_mb_mark_context(&mc, group, blkoff, count,
+ EXT4_MB_BITMAP_MARKED_CHECK |
+ EXT4_MB_SYNC_UPDATE);
}
/**
--
2.30.0
Powered by blists - more mailing lists