Message-Id: <20240127015825.1608160-8-yi.zhang@huaweicloud.com>
Date: Sat, 27 Jan 2024 09:58:06 +0800
From: Zhang Yi <yi.zhang@...weicloud.com>
To: linux-ext4@...r.kernel.org
Cc: linux-fsdevel@...r.kernel.org,
linux-mm@...ck.org,
linux-kernel@...r.kernel.org,
tytso@....edu,
adilger.kernel@...ger.ca,
jack@...e.cz,
ritesh.list@...il.com,
hch@...radead.org,
djwong@...nel.org,
willy@...radead.org,
zokeefe@...gle.com,
yi.zhang@...wei.com,
yi.zhang@...weicloud.com,
chengzhihao1@...wei.com,
yukuai3@...wei.com,
wangkefeng.wang@...wei.com
Subject: [RFC PATCH v3 07/26] iomap: don't increase i_size if it's not a write operation
From: Zhang Yi <yi.zhang@...wei.com>
Increasing i_size in iomap_zero_range() and iomap_unshare_iter() is not
needed; the caller should handle it. In particular, when truncating a
partial block, we must not increase i_size beyond the new EOF here. This
doesn't affect xfs and gfs2 at the moment because they reset the new
file size after zeroing out, so a brief increase of i_size does not
matter there, but it will affect ext4 because ext4 sets the file size
before truncating. At the same time, iomap_write_failed() is not needed
in these two cases either, so introduce a new helper,
iomap_write_end_simple(), to replace the common iomap_write_end()
helper that is designed for buffered writes, and move
iomap_write_failed() out of iomap_write_begin() and into
iomap_write_iter().
Signed-off-by: Zhang Yi <yi.zhang@...wei.com>
---
fs/iomap/buffered-io.c | 28 ++++++++++++++++++++++++----
1 file changed, 24 insertions(+), 4 deletions(-)
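
[Note for context, not part of the patch: the sketch below paraphrases the
i_size handling in the common iomap_write_end() that the new
iomap_write_end_simple() deliberately omits. It is simplified for
illustration and may differ in detail from the tree this series applies
to; see fs/iomap/buffered-io.c for the authoritative code.]

	/*
	 * Illustrative sketch only: roughly what iomap_write_end() does
	 * after copying data, and what iomap_write_end_simple() skips.
	 */
	static size_t iomap_write_end_sketch(struct iomap_iter *iter,
			loff_t pos, size_t len, size_t copied,
			struct folio *folio)
	{
		loff_t old_size = iter->inode->i_size;
		size_t ret;

		/* ... copy into the folio, as iomap_write_end_simple() does ... */
		ret = copied;

		/*
		 * Buffered writes may extend the file, so the common helper
		 * bumps the in-memory i_size here.  Zeroing and unsharing
		 * never write beyond EOF, so iomap_write_end_simple() leaves
		 * any size update to the caller (e.g. ext4 sets the size
		 * before truncating a partial block).
		 */
		if (pos + ret > old_size) {
			i_size_write(iter->inode, pos + ret);
			iter->iomap.flags |= IOMAP_F_SIZE_CHANGED;
		}
		__iomap_put_folio(iter, pos, ret, folio);

		if (old_size < pos)
			pagecache_isize_extended(iter->inode, old_size, pos);
		/* short copy cleanup, also unnecessary for zero/unshare */
		if (ret < len)
			iomap_write_failed(iter->inode, pos + ret, len - ret);
		return ret;
	}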
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index e0c9cede82ee..2ae936e5af74 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -834,7 +834,6 @@ static int iomap_write_begin(struct iomap_iter *iter, loff_t pos,
out_unlock:
__iomap_put_folio(iter, pos, 0, folio);
- iomap_write_failed(iter->inode, pos, len);
return status;
}
@@ -881,6 +880,25 @@ static size_t iomap_write_end_inline(const struct iomap_iter *iter,
return copied;
}
+static size_t iomap_write_end_simple(struct iomap_iter *iter, loff_t pos,
+ size_t len, struct folio *folio)
+{
+ const struct iomap *srcmap = iomap_iter_srcmap(iter);
+ size_t ret;
+
+ if (srcmap->type == IOMAP_INLINE) {
+ ret = iomap_write_end_inline(iter, folio, pos, len);
+ } else if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
+ ret = block_write_end(NULL, iter->inode->i_mapping, pos, len,
+ len, &folio->page, NULL);
+ } else {
+ ret = __iomap_write_end(iter->inode, pos, len, len, folio);
+ }
+
+ __iomap_put_folio(iter, pos, ret, folio);
+ return ret;
+}
+
/* Returns the number of bytes copied. May be 0. Cannot be an errno. */
static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
size_t copied, struct folio *folio)
@@ -960,8 +978,10 @@ static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
}
status = iomap_write_begin(iter, pos, bytes, &folio);
- if (unlikely(status))
+ if (unlikely(status)) {
+ iomap_write_failed(iter->inode, pos, bytes);
break;
+ }
if (iter->iomap.flags & IOMAP_F_STALE)
break;
@@ -1343,7 +1363,7 @@ static loff_t iomap_unshare_iter(struct iomap_iter *iter)
if (bytes > folio_size(folio) - offset)
bytes = folio_size(folio) - offset;
- bytes = iomap_write_end(iter, pos, bytes, bytes, folio);
+ bytes = iomap_write_end_simple(iter, pos, bytes, folio);
if (WARN_ON_ONCE(bytes == 0))
return -EIO;
@@ -1407,7 +1427,7 @@ static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
folio_zero_range(folio, offset, bytes);
folio_mark_accessed(folio);
- bytes = iomap_write_end(iter, pos, bytes, bytes, folio);
+ bytes = iomap_write_end_simple(iter, pos, bytes, folio);
if (WARN_ON_ONCE(bytes == 0))
return -EIO;
--
2.39.2