Message-ID: <Z2PHRaj+S5M+fZ5U@li-bb2b2a4c-3307-11b2-a85c-8fa5c3a69313.ibm.com>
Date: Thu, 19 Dec 2024 12:42:05 +0530
From: Ojaswin Mujoo <ojaswin@...ux.ibm.com>
To: Zhang Yi <yi.zhang@...weicloud.com>
Cc: linux-ext4@...r.kernel.org, linux-fsdevel@...r.kernel.org,
linux-kernel@...r.kernel.org, tytso@....edu, adilger.kernel@...ger.ca,
jack@...e.cz, yi.zhang@...wei.com, chengzhihao1@...wei.com,
yukuai3@...wei.com, yangerkun@...wei.com
Subject: Re: [PATCH v4 05/10] ext4: refactor ext4_zero_range()
On Mon, Dec 16, 2024 at 09:39:10AM +0800, Zhang Yi wrote:
> From: Zhang Yi <yi.zhang@...wei.com>
>
> The current implementation of ext4_zero_range() contains complex
> position calculations and stale error tags. Clean up the code and
> improve its readability and maintainability by: a) simplifying and
> renaming variables so the style matches ext4_punch_hole(); b)
> eliminating unnecessary position calculations, writing back all data
> in data=journal mode, and dropping the page cache from the original
> offset to the end rather than from aligned block boundaries; c)
> renaming the stale out_mutex tags.
>
> Signed-off-by: Zhang Yi <yi.zhang@...wei.com>
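A small aside for anyone skimming the new alignment logic: here is a
quick standalone sketch (my own illustration, not code from this patch)
of the arithmetic the refactor boils down to. The helpers are simplified
userspace stand-ins for EXT4_B_TO_LBLK() and IS_ALIGNED(); the point is
just that the whole blocks inside the range get converted to unwritten
extents while only the unaligned edges need explicit zeroing.

#include <stdio.h>
#include <stdint.h>

typedef uint32_t lblk_t;

/* Round a byte offset up to the next block boundary, in block units
 * (simplified stand-in for what EXT4_B_TO_LBLK() does in the patch). */
static lblk_t bytes_to_lblk_roundup(uint64_t off, unsigned int blkbits)
{
	return (lblk_t)((off + (1ULL << blkbits) - 1) >> blkbits);
}

/* Simplified stand-in for the kernel's IS_ALIGNED() macro. */
static int is_aligned(uint64_t x, uint64_t blocksize)
{
	return (x & (blocksize - 1)) == 0;
}

int main(void)
{
	uint64_t offset = 1000, len = 10000;	/* arbitrary example range */
	unsigned int blkbits = 12;		/* 4K blocks */
	uint64_t blocksize = 1ULL << blkbits;
	uint64_t end = offset + len;

	/* Whole blocks inside the range: these become unwritten extents. */
	lblk_t start_lblk = bytes_to_lblk_roundup(offset, blkbits);
	lblk_t end_lblk = (lblk_t)(end >> blkbits);

	printf("zero whole blocks [%u, %u)\n",
	       (unsigned int)start_lblk, (unsigned int)end_lblk);

	/* Unaligned edges still need explicit partial-block zeroing. */
	if (!is_aligned(offset | end, blocksize))
		printf("partial edges remain around %llu and/or %llu\n",
		       (unsigned long long)offset, (unsigned long long)end);
	return 0;
}
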
Looks good, Zhang. Feel free to add:
Reviewed-by: Ojaswin Mujoo <ojaswin@...ux.ibm.com>
Regards,
ojaswin
> ---
> fs/ext4/extents.c | 142 +++++++++++++++++++---------------------------
> 1 file changed, 57 insertions(+), 85 deletions(-)
>
> diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
> index 7fb38aab241d..97ad6fea58d3 100644
> --- a/fs/ext4/extents.c
> +++ b/fs/ext4/extents.c
> @@ -4570,40 +4570,15 @@ static long ext4_zero_range(struct file *file, loff_t offset,
> struct inode *inode = file_inode(file);
> struct address_space *mapping = file->f_mapping;
> handle_t *handle = NULL;
> - unsigned int max_blocks;
> loff_t new_size = 0;
> - int ret = 0;
> - int flags;
> - int credits;
> - int partial_begin, partial_end;
> - loff_t start, end;
> - ext4_lblk_t lblk;
> + loff_t end = offset + len;
> + ext4_lblk_t start_lblk, end_lblk;
> + unsigned int blocksize = i_blocksize(inode);
> unsigned int blkbits = inode->i_blkbits;
> + int ret, flags, credits;
>
> trace_ext4_zero_range(inode, offset, len, mode);
>
> - /*
> - * Round up offset. This is not fallocate, we need to zero out
> - * blocks, so convert interior block aligned part of the range to
> - * unwritten and possibly manually zero out unaligned parts of the
> - * range. Here, start and partial_begin are inclusive, end and
> - * partial_end are exclusive.
> - */
> - start = round_up(offset, 1 << blkbits);
> - end = round_down((offset + len), 1 << blkbits);
> -
> - if (start < offset || end > offset + len)
> - return -EINVAL;
> - partial_begin = offset & ((1 << blkbits) - 1);
> - partial_end = (offset + len) & ((1 << blkbits) - 1);
> -
> - lblk = start >> blkbits;
> - max_blocks = (end >> blkbits);
> - if (max_blocks < lblk)
> - max_blocks = 0;
> - else
> - max_blocks -= lblk;
> -
> inode_lock(inode);
>
> /*
> @@ -4611,77 +4586,70 @@ static long ext4_zero_range(struct file *file, loff_t offset,
> */
> if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
> ret = -EOPNOTSUPP;
> - goto out_mutex;
> + goto out;
> }
>
> if (!(mode & FALLOC_FL_KEEP_SIZE) &&
> - (offset + len > inode->i_size ||
> - offset + len > EXT4_I(inode)->i_disksize)) {
> - new_size = offset + len;
> + (end > inode->i_size || end > EXT4_I(inode)->i_disksize)) {
> + new_size = end;
> ret = inode_newsize_ok(inode, new_size);
> if (ret)
> - goto out_mutex;
> + goto out;
> }
>
> - flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;
> -
> /* Wait all existing dio workers, newcomers will block on i_rwsem */
> inode_dio_wait(inode);
>
> ret = file_modified(file);
> if (ret)
> - goto out_mutex;
> -
> - /* Preallocate the range including the unaligned edges */
> - if (partial_begin || partial_end) {
> - ret = ext4_alloc_file_blocks(file,
> - round_down(offset, 1 << blkbits) >> blkbits,
> - (round_up((offset + len), 1 << blkbits) -
> - round_down(offset, 1 << blkbits)) >> blkbits,
> - new_size, flags);
> - if (ret)
> - goto out_mutex;
> + goto out;
>
> - }
> + /*
> + * Prevent page faults from reinstantiating pages we have released
> + * from page cache.
> + */
> + filemap_invalidate_lock(mapping);
>
> - /* Zero range excluding the unaligned edges */
> - if (max_blocks > 0) {
> - flags |= (EXT4_GET_BLOCKS_CONVERT_UNWRITTEN |
> - EXT4_EX_NOCACHE);
> + ret = ext4_break_layouts(inode);
> + if (ret)
> + goto out_invalidate_lock;
>
> - /*
> - * Prevent page faults from reinstantiating pages we have
> - * released from page cache.
> - */
> - filemap_invalidate_lock(mapping);
> + flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;
> + /* Preallocate the range including the unaligned edges */
> + if (!IS_ALIGNED(offset | end, blocksize)) {
> + ext4_lblk_t alloc_lblk = offset >> blkbits;
> + ext4_lblk_t len_lblk = EXT4_MAX_BLOCKS(len, offset, blkbits);
>
> - ret = ext4_break_layouts(inode);
> - if (ret) {
> - filemap_invalidate_unlock(mapping);
> - goto out_mutex;
> - }
> + ret = ext4_alloc_file_blocks(file, alloc_lblk, len_lblk,
> + new_size, flags);
> + if (ret)
> + goto out_invalidate_lock;
> + }
>
> - ret = ext4_update_disksize_before_punch(inode, offset, len);
> - if (ret) {
> - filemap_invalidate_unlock(mapping);
> - goto out_mutex;
> - }
> + ret = ext4_update_disksize_before_punch(inode, offset, len);
> + if (ret)
> + goto out_invalidate_lock;
>
> - /* Now release the pages and zero block aligned part of pages */
> - ret = ext4_truncate_page_cache_block_range(inode, start, end);
> - if (ret) {
> - filemap_invalidate_unlock(mapping);
> - goto out_mutex;
> - }
> + /* Now release the pages and zero block aligned part of pages */
> + ret = ext4_truncate_page_cache_block_range(inode, offset, end);
> + if (ret)
> + goto out_invalidate_lock;
>
> - ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size,
> - flags);
> - filemap_invalidate_unlock(mapping);
> + /* Zero range excluding the unaligned edges */
> + start_lblk = EXT4_B_TO_LBLK(inode, offset);
> + end_lblk = end >> blkbits;
> + if (end_lblk > start_lblk) {
> + ext4_lblk_t zero_blks = end_lblk - start_lblk;
> +
> + flags |= (EXT4_GET_BLOCKS_CONVERT_UNWRITTEN | EXT4_EX_NOCACHE);
> + ret = ext4_alloc_file_blocks(file, start_lblk, zero_blks,
> + new_size, flags);
> if (ret)
> - goto out_mutex;
> + goto out_invalidate_lock;
> }
> - if (!partial_begin && !partial_end)
> - goto out_mutex;
> + /* Finish zeroing out if it doesn't contain partial block */
> + if (IS_ALIGNED(offset | end, blocksize))
> + goto out_invalidate_lock;
>
> /*
> * In worst case we have to writeout two nonadjacent unwritten
> @@ -4694,25 +4662,29 @@ static long ext4_zero_range(struct file *file, loff_t offset,
> if (IS_ERR(handle)) {
> ret = PTR_ERR(handle);
> ext4_std_error(inode->i_sb, ret);
> - goto out_mutex;
> + goto out_invalidate_lock;
> }
>
> + /* Zero out partial block at the edges of the range */
> + ret = ext4_zero_partial_blocks(handle, inode, offset, len);
> + if (ret)
> + goto out_handle;
> +
> if (new_size)
> ext4_update_inode_size(inode, new_size);
> ret = ext4_mark_inode_dirty(handle, inode);
> if (unlikely(ret))
> goto out_handle;
> - /* Zero out partial block at the edges of the range */
> - ret = ext4_zero_partial_blocks(handle, inode, offset, len);
> - if (ret >= 0)
> - ext4_update_inode_fsync_trans(handle, inode, 1);
>
> + ext4_update_inode_fsync_trans(handle, inode, 1);
> if (file->f_flags & O_SYNC)
> ext4_handle_sync(handle);
>
> out_handle:
> ext4_journal_stop(handle);
> -out_mutex:
> +out_invalidate_lock:
> + filemap_invalidate_unlock(mapping);
> +out:
> inode_unlock(inode);
> return ret;
> }
> --
> 2.46.1
>