Message-Id: <20240319011102.2929635-9-yi.zhang@huaweicloud.com>
Date: Tue, 19 Mar 2024 09:11:01 +0800
From: Zhang Yi <yi.zhang@...weicloud.com>
To: linux-xfs@...r.kernel.org,
linux-fsdevel@...r.kernel.org
Cc: linux-kernel@...r.kernel.org,
djwong@...nel.org,
hch@...radead.org,
brauner@...nel.org,
david@...morbit.com,
tytso@....edu,
jack@...e.cz,
yi.zhang@...wei.com,
yi.zhang@...weicloud.com,
chengzhihao1@...wei.com,
yukuai3@...wei.com
Subject: [PATCH v3 8/9] iomap: make iomap_write_end() return a boolean
From: Zhang Yi <yi.zhang@...wei.com>

At this point, iomap_write_end() is guaranteed to return either 0 or
the number of copied bytes. So instead of returning the written byte
count, convert it to return a boolean indicating whether the copied
bytes have been written to the pagecache.

Signed-off-by: Zhang Yi <yi.zhang@...wei.com>
Reviewed-by: Christoph Hellwig <hch@....de>
---
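[ Illustration added in editing, not part of the patch: a minimal
  userspace sketch of the contract change. write_end_old() and
  write_end_new() are hypothetical stand-ins for the old and new
  iomap_write_end() return conventions, and the caller line mirrors
  the "written = ... ? copied : 0" pattern in iomap_write_iter() in
  the diff below. ]

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Old contract: return the written byte count; 0 on failure. */
    static size_t write_end_old(size_t len, size_t copied, bool uptodate)
    {
        if (copied < len && !uptodate)
            return 0;    /* short copy into a non-uptodate folio */
        return copied;
    }

    /* New contract: true iff all copied bytes reached the pagecache. */
    static bool write_end_new(size_t len, size_t copied, bool uptodate)
    {
        return !(copied < len && !uptodate);
    }

    int main(void)
    {
        size_t len = 8, copied = 3;

        /* Callers that need a count now derive it from the boolean. */
        size_t written = write_end_new(len, copied, false) ? copied : 0;

        printf("old=%zu new=%zu\n",
               write_end_old(len, copied, false), written);
        return 0;
    }

[ Both print 0 here: a short copy into a non-uptodate folio is the one
  case where nothing is committed to the pagecache. ]
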
fs/iomap/buffered-io.c | 50 +++++++++++++++++++++++++++---------------
1 file changed, 32 insertions(+), 18 deletions(-)
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 291648c61a32..004673ea8bc1 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -790,7 +790,7 @@ static int iomap_write_begin(struct iomap_iter *iter, loff_t pos,
return status;
}
-static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
+static bool __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
size_t copied, struct folio *folio)
{
flush_dcache_folio(folio);
@@ -807,14 +807,14 @@ static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
* redo the whole thing.
*/
if (unlikely(copied < len && !folio_test_uptodate(folio)))
- return 0;
+ return false;
iomap_set_range_uptodate(folio, offset_in_folio(folio, pos), len);
iomap_set_range_dirty(folio, offset_in_folio(folio, pos), copied);
filemap_dirty_folio(inode->i_mapping, folio);
- return copied;
+ return true;
}
-static size_t iomap_write_end_inline(const struct iomap_iter *iter,
+static void iomap_write_end_inline(const struct iomap_iter *iter,
struct folio *folio, loff_t pos, size_t copied)
{
const struct iomap *iomap = &iter->iomap;
@@ -829,21 +829,32 @@ static size_t iomap_write_end_inline(const struct iomap_iter *iter,
kunmap_local(addr);
mark_inode_dirty(iter->inode);
- return copied;
}
-/* Returns the number of bytes copied. May be 0. Cannot be an errno. */
-static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
+/*
+ * Returns true if all copied bytes have been written to the pagecache,
+ * otherwise returns false.
+ */
+static bool iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
size_t copied, struct folio *folio)
{
const struct iomap *srcmap = iomap_iter_srcmap(iter);
+ bool ret = true;
- if (srcmap->type == IOMAP_INLINE)
- return iomap_write_end_inline(iter, folio, pos, copied);
- if (srcmap->flags & IOMAP_F_BUFFER_HEAD)
- return block_write_end(NULL, iter->inode->i_mapping, pos, len,
- copied, &folio->page, NULL);
- return __iomap_write_end(iter->inode, pos, len, copied, folio);
+ if (srcmap->type == IOMAP_INLINE) {
+ iomap_write_end_inline(iter, folio, pos, copied);
+ } else if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
+ size_t bh_written;
+
+ bh_written = block_write_end(NULL, iter->inode->i_mapping, pos,
+ len, copied, &folio->page, NULL);
+ WARN_ON_ONCE(bh_written != copied && bh_written != 0);
+ ret = bh_written == copied;
+ } else {
+ ret = __iomap_write_end(iter->inode, pos, len, copied, folio);
+ }
+
+ return ret;
}
static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
@@ -907,7 +918,8 @@ static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
flush_dcache_folio(folio);
copied = copy_folio_from_iter_atomic(folio, offset, bytes, i);
- written = iomap_write_end(iter, pos, bytes, copied, folio);
+ written = iomap_write_end(iter, pos, bytes, copied, folio) ?
+ copied : 0;
/*
* Update the in-memory inode size after copying the data into
@@ -1285,6 +1297,7 @@ static loff_t iomap_unshare_iter(struct iomap_iter *iter)
int status;
size_t offset;
size_t bytes = min_t(u64, SIZE_MAX, length);
+ bool ret;
status = iomap_write_begin(iter, pos, bytes, &folio);
if (unlikely(status))
@@ -1296,9 +1309,9 @@ static loff_t iomap_unshare_iter(struct iomap_iter *iter)
if (bytes > folio_size(folio) - offset)
bytes = folio_size(folio) - offset;
- bytes = iomap_write_end(iter, pos, bytes, bytes, folio);
+ ret = iomap_write_end(iter, pos, bytes, bytes, folio);
__iomap_put_folio(iter, pos, bytes, folio);
- if (WARN_ON_ONCE(bytes == 0))
+ if (WARN_ON_ONCE(!ret))
return -EIO;
cond_resched();
@@ -1347,6 +1360,7 @@ static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
int status;
size_t offset;
size_t bytes = min_t(u64, SIZE_MAX, length);
+ bool ret;
status = iomap_write_begin(iter, pos, bytes, &folio);
if (status)
@@ -1361,9 +1375,9 @@ static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
folio_zero_range(folio, offset, bytes);
folio_mark_accessed(folio);
- bytes = iomap_write_end(iter, pos, bytes, bytes, folio);
+ ret = iomap_write_end(iter, pos, bytes, bytes, folio);
__iomap_put_folio(iter, pos, bytes, folio);
- if (WARN_ON_ONCE(bytes == 0))
+ if (WARN_ON_ONCE(!ret))
return -EIO;
pos += bytes;
--
2.39.2