Message-Id: <20211216210715.3801857-4-willy@infradead.org>
Date: Thu, 16 Dec 2021 21:06:53 +0000
From: "Matthew Wilcox (Oracle)" <willy@...radead.org>
To: "Darrick J. Wong" <djwong@...nel.org>
Cc: "Matthew Wilcox (Oracle)" <willy@...radead.org>,
linux-xfs@...r.kernel.org, linux-fsdevel@...r.kernel.org,
linux-kernel@...r.kernel.org, Christoph Hellwig <hch@....de>
Subject: [PATCH v3 03/25] fs/buffer: Convert __block_write_begin_int() to take a folio
There are no plans to convert the buffer_head infrastructure to use large
folios, but __block_write_begin_int() is called from iomap, and it is
more convenient and less error-prone if iomap passes in a folio.
This also saves almost 200 bytes of code by removing repeated calls
to compound_head().
Signed-off-by: Matthew Wilcox (Oracle) <willy@...radead.org>
Reviewed-by: Christoph Hellwig <hch@....de>
Reviewed-by: Darrick J. Wong <djwong@...nel.org>
---
fs/buffer.c | 23 ++++++++++++-----------
fs/internal.h | 2 +-
fs/iomap/buffered-io.c | 7 +++++--
3 files changed, 18 insertions(+), 14 deletions(-)
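
[Editor's note, not part of the patch: the ~200-byte saving mentioned above
comes from the page-flag helpers each calling compound_head() before testing
a flag, whereas the folio helpers operate on the head page directly. The
sketch below is a simplified illustration of that difference, not the actual
kernel definitions (which also involve memory barriers and flag policies);
the _sketch suffixes are invented for this example.]

	/*
	 * Illustrative only -- simplified stand-ins for the real helpers.
	 * The point is where compound_head() appears.
	 */
	static inline int PageUptodate_sketch(struct page *page)
	{
		/* every page-flag test pays for a compound_head() lookup */
		page = compound_head(page);
		return test_bit(PG_uptodate, &page->flags);
	}

	static inline bool folio_test_uptodate_sketch(struct folio *folio)
	{
		/* a folio already refers to the head page, so no conversion */
		return test_bit(PG_uptodate, &folio->flags);
	}

Passing a folio from iomap therefore lets the loop below test flags once per
buffer without repeatedly recomputing the head page.
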
diff --git a/fs/buffer.c b/fs/buffer.c
index 46bc589b7a03..8e112b6bd371 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1969,34 +1969,34 @@ iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
}
}
-int __block_write_begin_int(struct page *page, loff_t pos, unsigned len,
+int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len,
get_block_t *get_block, const struct iomap *iomap)
{
unsigned from = pos & (PAGE_SIZE - 1);
unsigned to = from + len;
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
unsigned block_start, block_end;
sector_t block;
int err = 0;
unsigned blocksize, bbits;
struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
- BUG_ON(!PageLocked(page));
+ BUG_ON(!folio_test_locked(folio));
BUG_ON(from > PAGE_SIZE);
BUG_ON(to > PAGE_SIZE);
BUG_ON(from > to);
- head = create_page_buffers(page, inode, 0);
+ head = create_page_buffers(&folio->page, inode, 0);
blocksize = head->b_size;
bbits = block_size_bits(blocksize);
- block = (sector_t)page->index << (PAGE_SHIFT - bbits);
+ block = (sector_t)folio->index << (PAGE_SHIFT - bbits);
for(bh = head, block_start = 0; bh != head || !block_start;
block++, block_start=block_end, bh = bh->b_this_page) {
block_end = block_start + blocksize;
if (block_end <= from || block_start >= to) {
- if (PageUptodate(page)) {
+ if (folio_test_uptodate(folio)) {
if (!buffer_uptodate(bh))
set_buffer_uptodate(bh);
}
@@ -2016,20 +2016,20 @@ int __block_write_begin_int(struct page *page, loff_t pos, unsigned len,
if (buffer_new(bh)) {
clean_bdev_bh_alias(bh);
- if (PageUptodate(page)) {
+ if (folio_test_uptodate(folio)) {
clear_buffer_new(bh);
set_buffer_uptodate(bh);
mark_buffer_dirty(bh);
continue;
}
if (block_end > to || block_start < from)
- zero_user_segments(page,
+ folio_zero_segments(folio,
to, block_end,
block_start, from);
continue;
}
}
- if (PageUptodate(page)) {
+ if (folio_test_uptodate(folio)) {
if (!buffer_uptodate(bh))
set_buffer_uptodate(bh);
continue;
@@ -2050,14 +2050,15 @@ int __block_write_begin_int(struct page *page, loff_t pos, unsigned len,
err = -EIO;
}
if (unlikely(err))
- page_zero_new_buffers(page, from, to);
+ page_zero_new_buffers(&folio->page, from, to);
return err;
}
int __block_write_begin(struct page *page, loff_t pos, unsigned len,
get_block_t *get_block)
{
- return __block_write_begin_int(page, pos, len, get_block, NULL);
+ return __block_write_begin_int(page_folio(page), pos, len, get_block,
+ NULL);
}
EXPORT_SYMBOL(__block_write_begin);
diff --git a/fs/internal.h b/fs/internal.h
index 7979ff8d168c..8590c973c2f4 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -37,7 +37,7 @@ static inline int emergency_thaw_bdev(struct super_block *sb)
/*
* buffer.c
*/
-int __block_write_begin_int(struct page *page, loff_t pos, unsigned len,
+int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len,
get_block_t *get_block, const struct iomap *iomap);
/*
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 71a36ae120ee..ecb65167715b 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -603,6 +603,7 @@ static int iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
const struct iomap_page_ops *page_ops = iter->iomap.page_ops;
const struct iomap *srcmap = iomap_iter_srcmap(iter);
struct page *page;
+ struct folio *folio;
int status = 0;
BUG_ON(pos + len > iter->iomap.offset + iter->iomap.length);
@@ -624,11 +625,12 @@ static int iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
status = -ENOMEM;
goto out_no_page;
}
+ folio = page_folio(page);
if (srcmap->type == IOMAP_INLINE)
status = iomap_write_begin_inline(iter, page);
else if (srcmap->flags & IOMAP_F_BUFFER_HEAD)
- status = __block_write_begin_int(page, pos, len, NULL, srcmap);
+ status = __block_write_begin_int(folio, pos, len, NULL, srcmap);
else
status = __iomap_write_begin(iter, pos, len, page);
@@ -960,11 +962,12 @@ EXPORT_SYMBOL_GPL(iomap_truncate_page);
static loff_t iomap_page_mkwrite_iter(struct iomap_iter *iter,
struct page *page)
{
+ struct folio *folio = page_folio(page);
loff_t length = iomap_length(iter);
int ret;
if (iter->iomap.flags & IOMAP_F_BUFFER_HEAD) {
- ret = __block_write_begin_int(page, iter->pos, length, NULL,
+ ret = __block_write_begin_int(folio, iter->pos, length, NULL,
&iter->iomap);
if (ret)
return ret;
--
2.33.0