Message-ID: <74ab3c3e-3daf-5374-75e5-bcb25ffdb527@huaweicloud.com>
Date: Sat, 17 Feb 2024 16:55:51 +0800
From: Zhang Yi <yi.zhang@...weicloud.com>
To: Christoph Hellwig <hch@...radead.org>
Cc: linux-ext4@...r.kernel.org, linux-fsdevel@...r.kernel.org,
linux-mm@...ck.org, linux-kernel@...r.kernel.org, tytso@....edu,
adilger.kernel@...ger.ca, jack@...e.cz, ritesh.list@...il.com,
djwong@...nel.org, willy@...radead.org, zokeefe@...gle.com,
yi.zhang@...wei.com, chengzhihao1@...wei.com, yukuai3@...wei.com,
wangkefeng.wang@...wei.com
Subject: Re: [RFC PATCH v3 07/26] iomap: don't increase i_size if it's not a
write operation
On 2024/2/13 13:46, Christoph Hellwig wrote:
> Wouldn't it make more sense to just move the size manipulation to the
> write-only code? An untested version of that is below. With this
Sorry for the late reply, and thanks for your suggestion. The reason I
introduced the new helper iomap_write_end_simple() is that I didn't want
to open-code __iomap_put_folio() in each caller: since it pairs with
iomap_write_begin(), putting and unlocking the folio is the
responsibility of iomap_write_end_*(), so I'd like to keep that there.
But I don't feel strongly about it; moving the size manipulation to the
write-only code is also fine by me if you think that's better.
> the naming of the status variable becomes even more confusing than
> it already is, maybe we need to do a cleanup of the *_write_end
> calling conventions as it always returns the passed in copied value
> or 0.
Indeed, it becomes more confusing and deserves a cleanup.
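One possible shape for that cleanup (only a sketch of the direction, not
something from this series): since these helpers always return either
the full copied length or 0, they could return a bool and let the
callers keep using 'copied' directly, e.g.:

/*
 * Sketch of one possible cleanup (not from this series): the helpers
 * either accept the whole copied range or nothing, so returning a bool
 * avoids echoing 'copied' back through the confusing 'status' variable.
 */
static bool iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
		size_t copied, struct folio *folio)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);

	if (srcmap->type == IOMAP_INLINE)
		return iomap_write_end_inline(iter, folio, pos, copied) == copied;
	if (srcmap->flags & IOMAP_F_BUFFER_HEAD)
		return block_write_end(NULL, iter->inode->i_mapping, pos, len,
				copied, &folio->page, NULL) == copied;
	return __iomap_write_end(iter->inode, pos, len, copied, folio) == copied;
}
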
Thanks,
Yi.
>
> diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
> index 3dab060aed6d7b..8401a9ca702fc0 100644
> --- a/fs/iomap/buffered-io.c
> +++ b/fs/iomap/buffered-io.c
> @@ -876,34 +876,13 @@ static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
> size_t copied, struct folio *folio)
> {
> const struct iomap *srcmap = iomap_iter_srcmap(iter);
> - loff_t old_size = iter->inode->i_size;
> - size_t ret;
> -
> - if (srcmap->type == IOMAP_INLINE) {
> - ret = iomap_write_end_inline(iter, folio, pos, copied);
> - } else if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
> - ret = block_write_end(NULL, iter->inode->i_mapping, pos, len,
> - copied, &folio->page, NULL);
> - } else {
> - ret = __iomap_write_end(iter->inode, pos, len, copied, folio);
> - }
> -
> - /*
> - * Update the in-memory inode size after copying the data into the page
> - * cache. It's up to the file system to write the updated size to disk,
> - * preferably after I/O completion so that no stale data is exposed.
> - */
> - if (pos + ret > old_size) {
> - i_size_write(iter->inode, pos + ret);
> - iter->iomap.flags |= IOMAP_F_SIZE_CHANGED;
> - }
> - __iomap_put_folio(iter, pos, ret, folio);
>
> - if (old_size < pos)
> - pagecache_isize_extended(iter->inode, old_size, pos);
> - if (ret < len)
> - iomap_write_failed(iter->inode, pos + ret, len - ret);
> - return ret;
> + if (srcmap->type == IOMAP_INLINE)
> + return iomap_write_end_inline(iter, folio, pos, copied);
> + if (srcmap->flags & IOMAP_F_BUFFER_HEAD)
> + return block_write_end(NULL, iter->inode->i_mapping, pos, len,
> + copied, &folio->page, NULL);
> + return __iomap_write_end(iter->inode, pos, len, copied, folio);
> }
>
> static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
> @@ -918,6 +897,7 @@ static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
>
> do {
> struct folio *folio;
> + loff_t old_size;
> size_t offset; /* Offset into folio */
> size_t bytes; /* Bytes to write to folio */
> size_t copied; /* Bytes copied from user */
> @@ -964,7 +944,24 @@ static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
>
> copied = copy_folio_from_iter_atomic(folio, offset, bytes, i);
> status = iomap_write_end(iter, pos, bytes, copied, folio);
> + /*
> + * Update the in-memory inode size after copying the data into
> + * the page cache. It's up to the file system to write the
> + * updated size to disk, preferably after I/O completion so that
> + * no stale data is exposed.
> + */
> + old_size = iter->inode->i_size;
> + if (pos + status > old_size) {
> + i_size_write(iter->inode, pos + status);
> + iter->iomap.flags |= IOMAP_F_SIZE_CHANGED;
> + }
> + __iomap_put_folio(iter, pos, status, folio);
>
> + if (old_size < pos)
> + pagecache_isize_extended(iter->inode, old_size, pos);
> + if (status < bytes)
> + iomap_write_failed(iter->inode, pos + status,
> + bytes - status);
> if (unlikely(copied != status))
> iov_iter_revert(i, copied - status);
>
> @@ -1334,6 +1331,7 @@ static loff_t iomap_unshare_iter(struct iomap_iter *iter)
> bytes = folio_size(folio) - offset;
>
> bytes = iomap_write_end(iter, pos, bytes, bytes, folio);
> + __iomap_put_folio(iter, pos, bytes, folio);
> if (WARN_ON_ONCE(bytes == 0))
> return -EIO;
>
> @@ -1398,6 +1396,7 @@ static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
> folio_mark_accessed(folio);
>
> bytes = iomap_write_end(iter, pos, bytes, bytes, folio);
> + __iomap_put_folio(iter, pos, bytes, folio);
> if (WARN_ON_ONCE(bytes == 0))
> return -EIO;
>
>