Message-Id: <20211108040551.1942823-17-willy@infradead.org>
Date: Mon, 8 Nov 2021 04:05:39 +0000
From: "Matthew Wilcox (Oracle)" <willy@...radead.org>
To: "Darrick J . Wong " <djwong@...nel.org>
Cc: "Matthew Wilcox (Oracle)" <willy@...radead.org>,
linux-xfs@...r.kernel.org, linux-fsdevel@...r.kernel.org,
linux-kernel@...r.kernel.org, linux-block@...r.kernel.org,
Jens Axboe <axboe@...nel.dk>,
Christoph Hellwig <hch@...radead.org>,
Christoph Hellwig <hch@....de>
Subject: [PATCH v2 16/28] iomap: Convert iomap_read_inline_data to take a folio

We still only support up to a single page of inline data (at least,
per call to iomap_read_inline_data()), but it can now be written into
the middle of a folio in case we decide to allocate a 16KiB page for
a file that's 8.1KiB in size.

Signed-off-by: Matthew Wilcox (Oracle) <willy@...radead.org>
Reviewed-by: Darrick J. Wong <djwong@...nel.org>
Reviewed-by: Christoph Hellwig <hch@....de>
---
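(Not part of the patch: a minimal sketch of the offset arithmetic this
enables, assuming 4KiB pages, a 16KiB folio that maps the start of the
file, and an inline extent at file offset 8192, i.e. the 8.1KiB example
above. The helpers are the ones the patch itself uses; the constants are
made up for illustration.)

	/* Illustration only: poff and offset diverge once inline data
	 * can land in the middle of a folio.
	 */
	size_t size   = i_size_read(iter->inode) - iomap->offset; /* ~100 bytes */
	size_t poff   = offset_in_page(iomap->offset);            /* 0    */
	size_t offset = offset_in_folio(folio, iomap->offset);    /* 8192 */
	void *addr;

	addr = kmap_local_folio(folio, offset);  /* byte 8192 of the folio */
	memcpy(addr, iomap->inline_data, size);
	memset(addr + size, 0, PAGE_SIZE - poff - size);
	kunmap_local(addr);

Since poff + size <= PAGE_SIZE (enforced by the WARN_ON_ONCE below), the
copy never crosses a page boundary, so kmap_local_folio() only ever needs
to map a single page of the folio.
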
fs/iomap/buffered-io.c | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index c7c4ae735620..96a404f11a3b 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -195,9 +195,8 @@ struct iomap_readpage_ctx {
};

static loff_t iomap_read_inline_data(const struct iomap_iter *iter,
- struct page *page)
+ struct folio *folio)
{
- struct folio *folio = page_folio(page);
struct iomap_page *iop;
const struct iomap *iomap = iomap_iter_srcmap(iter);
size_t size = i_size_read(iter->inode) - iomap->offset;
@@ -205,7 +204,7 @@ static loff_t iomap_read_inline_data(const struct iomap_iter *iter,
size_t offset = offset_in_folio(folio, iomap->offset);
void *addr;

- if (PageUptodate(page))
+ if (folio_test_uptodate(folio))
return PAGE_SIZE - poff;

if (WARN_ON_ONCE(size > PAGE_SIZE - poff))
@@ -220,7 +219,7 @@ static loff_t iomap_read_inline_data(const struct iomap_iter *iter,
else
iop = to_iomap_page(folio);

- addr = kmap_local_page(page) + poff;
+ addr = kmap_local_folio(folio, offset);
memcpy(addr, iomap->inline_data, size);
memset(addr + size, 0, PAGE_SIZE - poff - size);
kunmap_local(addr);
@@ -252,7 +251,7 @@ static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
sector_t sector;

if (iomap->type == IOMAP_INLINE)
- return min(iomap_read_inline_data(iter, page), length);
+ return min(iomap_read_inline_data(iter, folio), length);

/* zero post-eof blocks as the page may be mapped */
iop = iomap_page_create(iter->inode, folio);
@@ -586,12 +585,13 @@ static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
static int iomap_write_begin_inline(const struct iomap_iter *iter,
struct page *page)
{
+ struct folio *folio = page_folio(page);
int ret;

/* needs more work for the tailpacking case; disable for now */
if (WARN_ON_ONCE(iomap_iter_srcmap(iter)->offset != 0))
return -EIO;
- ret = iomap_read_inline_data(iter, page);
+ ret = iomap_read_inline_data(iter, folio);
if (ret < 0)
return ret;
return 0;
--
2.33.0