[<prev] [next>] [thread-next>] [day] [month] [year] [list]
Message-Id: <20210721082323.41933-1-hsiangkao@linux.alibaba.com>
Date: Wed, 21 Jul 2021 16:23:23 +0800
From: Gao Xiang <hsiangkao@...ux.alibaba.com>
To: linux-erofs@...ts.ozlabs.org, linux-fsdevel@...r.kernel.org
Cc: LKML <linux-kernel@...r.kernel.org>,
Gao Xiang <hsiangkao@...ux.alibaba.com>,
Christoph Hellwig <hch@....de>,
"Darrick J . Wong" <djwong@...nel.org>,
Matthew Wilcox <willy@...radead.org>,
Andreas Gruenbacher <andreas.gruenbacher@...il.com>
Subject: [PATCH v5] iomap: support tail packing inline read
This adds tail-packing inline read support to iomap, which can handle
several inline tail blocks. Similar to the previous approach, it zeroes
the post-EOF part of the page in the same iteration.
The write path remains untouched since EROFS cannot be used to test it.
It'd be better to implement it once upcoming real users need it, rather
than leave untested dead code around.
Cc: Christoph Hellwig <hch@....de>
Cc: Darrick J. Wong <djwong@...nel.org>
Cc: Matthew Wilcox <willy@...radead.org>
Cc: Andreas Gruenbacher <andreas.gruenbacher@...il.com>
Signed-off-by: Gao Xiang <hsiangkao@...ux.alibaba.com>
---
v4: https://lore.kernel.org/r/20210720133554.44058-1-hsiangkao@linux.alibaba.com
changes since v4:
- turn to WARN_ON_ONCE() suggested by Darrick;
- fix size to "min(iomap->length + iomap->offset - pos,
PAGE_SIZE - poff)"
fs/iomap/buffered-io.c | 58 +++++++++++++++++++++++++++---------------
fs/iomap/direct-io.c | 13 +++++++---
2 files changed, 47 insertions(+), 24 deletions(-)
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 87ccb3438bec..d8436d34a159 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -205,25 +205,27 @@ struct iomap_readpage_ctx {
struct readahead_control *rac;
};
-static void
+static int
iomap_read_inline_data(struct inode *inode, struct page *page,
- struct iomap *iomap)
+ struct iomap *iomap, loff_t pos)
{
- size_t size = i_size_read(inode);
+ unsigned int size, poff = offset_in_page(pos);
void *addr;
- if (PageUptodate(page))
- return;
-
- BUG_ON(page_has_private(page));
- BUG_ON(page->index);
- BUG_ON(size > PAGE_SIZE - offset_in_page(iomap->inline_data));
+ /* inline source data must be inside a single page */
+ if (WARN_ON_ONCE(iomap->length > PAGE_SIZE -
+ offset_in_page(iomap->inline_data)))
+ return -EIO;
+ /* handle tail-packing blocks that cross the current page into the next */
+ size = min_t(unsigned int, iomap->length + iomap->offset - pos,
+ PAGE_SIZE - poff);
addr = kmap_atomic(page);
- memcpy(addr, iomap->inline_data, size);
- memset(addr + size, 0, PAGE_SIZE - size);
+ memcpy(addr + poff, iomap->inline_data - iomap->offset + pos, size);
+ memset(addr + poff + size, 0, PAGE_SIZE - poff - size);
kunmap_atomic(addr);
- SetPageUptodate(page);
+ iomap_set_range_uptodate(page, poff, PAGE_SIZE - poff);
+ return PAGE_SIZE - poff;
}
static inline bool iomap_block_needs_zeroing(struct inode *inode,
@@ -245,19 +247,23 @@ iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
loff_t orig_pos = pos;
unsigned poff, plen;
sector_t sector;
+ int ret;
- if (iomap->type == IOMAP_INLINE) {
- WARN_ON_ONCE(pos);
- iomap_read_inline_data(inode, page, iomap);
- return PAGE_SIZE;
- }
-
- /* zero post-eof blocks as the page may be mapped */
iop = iomap_page_create(inode, page);
+ /* needs to skip some leading uptodate blocks */
iomap_adjust_read_range(inode, iop, &pos, length, &poff, &plen);
if (plen == 0)
goto done;
+ if (iomap->type == IOMAP_INLINE) {
+ ret = iomap_read_inline_data(inode, page, iomap, pos);
+ if (ret < 0)
+ return ret;
+ plen = ret;
+ goto done;
+ }
+
+ /* zero post-eof blocks as the page may be mapped */
if (iomap_block_needs_zeroing(inode, iomap, pos)) {
zero_user(page, poff, plen);
iomap_set_range_uptodate(page, poff, plen);
@@ -589,6 +595,18 @@ __iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, int flags,
return 0;
}
+static int iomap_write_begin_inline(struct inode *inode, loff_t pos,
+ struct page *page, struct iomap *srcmap)
+{
+ /* needs more work for the tailpacking case, disable for now */
+ if (WARN_ON_ONCE(srcmap->offset != 0))
+ return -EIO;
+ if (PageUptodate(page))
+ return 0;
+ iomap_read_inline_data(inode, page, srcmap, 0);
+ return 0;
+}
+
static int
iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
struct page **pagep, struct iomap *iomap, struct iomap *srcmap)
@@ -618,7 +636,7 @@ iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
}
if (srcmap->type == IOMAP_INLINE)
- iomap_read_inline_data(inode, page, srcmap);
+ status = iomap_write_begin_inline(inode, pos, page, srcmap);
else if (iomap->flags & IOMAP_F_BUFFER_HEAD)
status = __block_write_begin_int(page, pos, len, NULL, srcmap);
else
diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c
index 9398b8c31323..cbadb99fb88c 100644
--- a/fs/iomap/direct-io.c
+++ b/fs/iomap/direct-io.c
@@ -379,22 +379,27 @@ iomap_dio_inline_actor(struct inode *inode, loff_t pos, loff_t length,
{
struct iov_iter *iter = dio->submit.iter;
size_t copied;
+ void *dst = iomap->inline_data + pos - iomap->offset;
- BUG_ON(pos + length > PAGE_SIZE - offset_in_page(iomap->inline_data));
+ /* inline data must be inside a single page */
+ if (WARN_ON_ONCE(length > PAGE_SIZE -
+ offset_in_page(iomap->inline_data)))
+ return -EIO;
if (dio->flags & IOMAP_DIO_WRITE) {
loff_t size = inode->i_size;
if (pos > size)
- memset(iomap->inline_data + size, 0, pos - size);
- copied = copy_from_iter(iomap->inline_data + pos, length, iter);
+ memset(iomap->inline_data + size - iomap->offset,
+ 0, pos - size);
+ copied = copy_from_iter(dst, length, iter);
if (copied) {
if (pos + copied > size)
i_size_write(inode, pos + copied);
mark_inode_dirty(inode);
}
} else {
- copied = copy_to_iter(iomap->inline_data + pos, length, iter);
+ copied = copy_to_iter(dst, length, iter);
}
dio->size += copied;
return copied;
--
2.24.4
Powered by blists - more mailing lists