Message-Id: <20210715033704.692967-106-willy@infradead.org>
Date: Thu, 15 Jul 2021 04:36:31 +0100
From: "Matthew Wilcox (Oracle)" <willy@...radead.org>
To: linux-kernel@...r.kernel.org
Cc: "Matthew Wilcox (Oracle)" <willy@...radead.org>,
linux-mm@...ck.org, linux-fsdevel@...r.kernel.org
Subject: [PATCH v14 105/138] iomap: Convert iomap_add_to_ioend to take a folio

We still iterate one block at a time, but now we call compound_head()
less often. Rename file_offset to pos to match the rest of the file.

Signed-off-by: Matthew Wilcox (Oracle) <willy@...radead.org>
---
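A note for reviewers, not part of the commit: bio_add_folio(), introduced
earlier in this series, returns false only when the folio fragment cannot
be appended, which folds the old __bio_try_merge_page() / bio_full() /
bio_add_page() sequence into a single call. A minimal sketch of the
resulting pattern follows; add_block_to_bio() is a hypothetical wrapper
(not in the patch), while iomap_chain_bio() is the existing fallback in
fs/iomap/buffered-io.c:

	/*
	 * Append one block of a folio to a bio, chaining a fresh bio
	 * when the current one is full.
	 */
	static void add_block_to_bio(struct bio **bio, struct folio *folio,
			size_t len, size_t poff)
	{
		if (!bio_add_folio(*bio, folio, len, poff)) {
			/* The bio was full; chain a new one and retry. */
			*bio = iomap_chain_bio(*bio);
			bio_add_folio(*bio, folio, len, poff);
		}
	}
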
fs/iomap/buffered-io.c | 66 +++++++++++++++++++-----------------------
1 file changed, 30 insertions(+), 36 deletions(-)

diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index ac33f19325ab..8e767aec8d07 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -1252,36 +1252,29 @@ iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t offset,
* first, otherwise finish off the current ioend and start another.
*/
static void
-iomap_add_to_ioend(struct inode *inode, loff_t offset, struct page *page,
+iomap_add_to_ioend(struct inode *inode, loff_t pos, struct folio *folio,
struct iomap_page *iop, struct iomap_writepage_ctx *wpc,
struct writeback_control *wbc, struct list_head *iolist)
{
- sector_t sector = iomap_sector(&wpc->iomap, offset);
+ sector_t sector = iomap_sector(&wpc->iomap, pos);
unsigned len = i_blocksize(inode);
- unsigned poff = offset & (PAGE_SIZE - 1);
- bool merged, same_page = false;
+ size_t poff = offset_in_folio(folio, pos);
- if (!wpc->ioend || !iomap_can_add_to_ioend(wpc, offset, sector)) {
+ if (!wpc->ioend || !iomap_can_add_to_ioend(wpc, pos, sector)) {
if (wpc->ioend)
list_add(&wpc->ioend->io_list, iolist);
- wpc->ioend = iomap_alloc_ioend(inode, wpc, offset, sector, wbc);
+ wpc->ioend = iomap_alloc_ioend(inode, wpc, pos, sector, wbc);
}
- merged = __bio_try_merge_page(wpc->ioend->io_bio, page, len, poff,
- &same_page);
if (iop)
atomic_add(len, &iop->write_bytes_pending);
-
- if (!merged) {
- if (bio_full(wpc->ioend->io_bio, len)) {
- wpc->ioend->io_bio =
- iomap_chain_bio(wpc->ioend->io_bio);
- }
- bio_add_page(wpc->ioend->io_bio, page, len, poff);
+ if (!bio_add_folio(wpc->ioend->io_bio, folio, len, poff)) {
+ wpc->ioend->io_bio = iomap_chain_bio(wpc->ioend->io_bio);
+ bio_add_folio(wpc->ioend->io_bio, folio, len, poff);
}
wpc->ioend->io_size += len;
- wbc_account_cgroup_owner(wbc, page, len);
+ wbc_account_cgroup_owner(wbc, &folio->page, len);
}
/*
@@ -1309,40 +1302,41 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc,
struct iomap_page *iop = to_iomap_page(folio);
struct iomap_ioend *ioend, *next;
unsigned len = i_blocksize(inode);
- u64 file_offset; /* file offset of page */
+ unsigned nblocks = i_blocks_per_folio(inode, folio);
+ loff_t pos = folio_pos(folio);
int error = 0, count = 0, i;
LIST_HEAD(submit_list);
- WARN_ON_ONCE(i_blocks_per_page(inode, page) > 1 && !iop);
+ WARN_ON_ONCE(nblocks > 1 && !iop);
WARN_ON_ONCE(iop && atomic_read(&iop->write_bytes_pending) != 0);
/*
- * Walk through the page to find areas to write back. If we run off the
- * end of the current map or find the current map invalid, grab a new
- * one.
+ * Walk through the folio to find areas to write back. If we
+ * run off the end of the current map or find the current map
+ * invalid, grab a new one.
*/
- for (i = 0, file_offset = page_offset(page);
- i < (PAGE_SIZE >> inode->i_blkbits) && file_offset < end_offset;
- i++, file_offset += len) {
+ for (i = 0; i < nblocks; i++, pos += len) {
+ if (pos >= end_offset)
+ break;
if (iop && !test_bit(i, iop->uptodate))
continue;
- error = wpc->ops->map_blocks(wpc, inode, file_offset);
+ error = wpc->ops->map_blocks(wpc, inode, pos);
if (error)
break;
if (WARN_ON_ONCE(wpc->iomap.type == IOMAP_INLINE))
continue;
if (wpc->iomap.type == IOMAP_HOLE)
continue;
- iomap_add_to_ioend(inode, file_offset, page, iop, wpc, wbc,
+ iomap_add_to_ioend(inode, pos, folio, iop, wpc, wbc,
&submit_list);
count++;
}
WARN_ON_ONCE(!wpc->ioend && !list_empty(&submit_list));
- WARN_ON_ONCE(!PageLocked(page));
- WARN_ON_ONCE(PageWriteback(page));
- WARN_ON_ONCE(PageDirty(page));
+ WARN_ON_ONCE(!folio_test_locked(folio));
+ WARN_ON_ONCE(folio_test_writeback(folio));
+ WARN_ON_ONCE(folio_test_dirty(folio));
/*
* We cannot cancel the ioend directly here on error. We may have
@@ -1358,16 +1352,16 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc,
* now.
*/
if (wpc->ops->discard_page)
- wpc->ops->discard_page(page, file_offset);
+ wpc->ops->discard_page(&folio->page, pos);
if (!count) {
- ClearPageUptodate(page);
- unlock_page(page);
+ folio_clear_uptodate(folio);
+ folio_unlock(folio);
goto done;
}
}
- set_page_writeback(page);
- unlock_page(page);
+ folio_start_writeback(folio);
+ folio_unlock(folio);
/*
* Preserve the original error if there was one, otherwise catch
@@ -1388,9 +1382,9 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc,
* with a partial page truncate on a sub-page block sized filesystem.
*/
if (!count)
- end_page_writeback(page);
+ folio_end_writeback(folio);
done:
- mapping_set_error(page->mapping, error);
+ mapping_set_error(folio->mapping, error);
return error;
}
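
A note on the loop bounds, not part of the commit: the old walk iterated
i < (PAGE_SIZE >> inode->i_blkbits), which can never cover a folio larger
than a single page. i_blocks_per_folio() scales with folio_size(), so the
rewritten walk visits every block of a multi-page folio. A sketch of the
new loop shape, assuming inode, folio and end_offset from the surrounding
function, with the mapping and ioend handling elided:

	unsigned len = i_blocksize(inode);
	unsigned nblocks = i_blocks_per_folio(inode, folio);
	loff_t pos = folio_pos(folio);
	int i;

	for (i = 0; i < nblocks; i++, pos += len) {
		if (pos >= end_offset)
			break;	/* past the part of the file being written */
		/* skip !uptodate blocks, map the block, add it to the ioend */
	}
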
--
2.30.2