Message-Id: <20231114084436.2755-11-konishi.ryusuke@gmail.com>
Date: Tue, 14 Nov 2023 17:44:26 +0900
From: Ryusuke Konishi <konishi.ryusuke@...il.com>
To: Andrew Morton <akpm@...ux-foundation.org>
Cc: linux-nilfs@...r.kernel.org, Matthew Wilcox <willy@...radead.org>,
linux-kernel@...r.kernel.org
Subject: [PATCH 10/20] nilfs2: Convert nilfs_segctor_prepare_write to use folios

From: "Matthew Wilcox (Oracle)" <willy@...radead.org>

Use the new folio APIs, saving 17 hidden calls to compound_head().

Signed-off-by: Matthew Wilcox (Oracle) <willy@...radead.org>
Signed-off-by: Ryusuke Konishi <konishi.ryusuke@...il.com>
---
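A note on the compound_head() savings claimed in the commit message: the
legacy page helpers (lock_page(), PageWriteback(), and friends) each
resolve the head page of a possibly-compound page internally, while the
folio variants operate directly on the folio they are handed. A rough,
illustrative sketch of the difference (the real kernel macros vary by
version; the helper name here is invented):

	/* Page-based: every call pays for a hidden head-page lookup. */
	static inline bool page_writeback_sketch(struct page *page)
	{
		return folio_test_writeback(page_folio(page));
	}

	/* Folio-based: the caller already holds the folio. */
	if (folio_test_writeback(folio))
		/* ... */;

The 17 saved calls break down as one folio_test_writeback() plus the
four lock/clear-dirty/start-writeback/unlock conversions in
nilfs_begin_folio_io(), and three more four-call blocks open-coded in
nilfs_segctor_prepare_write().
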
fs/nilfs2/segment.c | 58 ++++++++++++++++++++++-----------------------
1 file changed, 29 insertions(+), 29 deletions(-)
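
The conversion keeps the existing write-out pattern: walk each segment's
buffer heads in submission order and, whenever the walk crosses into a
new backing folio, start writeback on the folio just finished. As a
stand-alone sketch of that pattern (hypothetical helper, invented for
illustration; the patch open-codes the equivalent for bd_folio):

	/*
	 * Kick off writeback on the previous folio once all of its
	 * buffers have been visited, i.e. when bh lands on a new folio.
	 */
	static void flush_prev_folio(struct buffer_head *bh,
				     struct folio **prev)
	{
		if (bh->b_folio == *prev)
			return;		/* same folio, keep walking */
		if (*prev)
			nilfs_begin_folio_io(*prev);
		*prev = bh->b_folio;
	}
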
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 8c675c118c66..52995838f2de 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -1665,39 +1665,39 @@ static int nilfs_segctor_assign(struct nilfs_sc_info *sci, int mode)
 	return 0;
 }
 
-static void nilfs_begin_page_io(struct page *page)
+static void nilfs_begin_folio_io(struct folio *folio)
 {
-	if (!page || PageWriteback(page))
+	if (!folio || folio_test_writeback(folio))
 		/*
 		 * For split b-tree node pages, this function may be called
 		 * twice.  We ignore the 2nd or later calls by this check.
 		 */
 		return;
 
-	lock_page(page);
-	clear_page_dirty_for_io(page);
-	set_page_writeback(page);
-	unlock_page(page);
+	folio_lock(folio);
+	folio_clear_dirty_for_io(folio);
+	folio_start_writeback(folio);
+	folio_unlock(folio);
 }
 
 static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
 {
 	struct nilfs_segment_buffer *segbuf;
-	struct page *bd_page = NULL, *fs_page = NULL;
+	struct folio *bd_folio = NULL, *fs_folio = NULL;
 
 	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
 		struct buffer_head *bh;
 
 		list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
 				    b_assoc_buffers) {
-			if (bh->b_page != bd_page) {
-				if (bd_page) {
-					lock_page(bd_page);
-					clear_page_dirty_for_io(bd_page);
-					set_page_writeback(bd_page);
-					unlock_page(bd_page);
+			if (bh->b_folio != bd_folio) {
+				if (bd_folio) {
+					folio_lock(bd_folio);
+					folio_clear_dirty_for_io(bd_folio);
+					folio_start_writeback(bd_folio);
+					folio_unlock(bd_folio);
 				}
-				bd_page = bh->b_page;
+				bd_folio = bh->b_folio;
 			}
 		}
 
@@ -1705,28 +1705,28 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
 				    b_assoc_buffers) {
 			set_buffer_async_write(bh);
 			if (bh == segbuf->sb_super_root) {
-				if (bh->b_page != bd_page) {
-					lock_page(bd_page);
-					clear_page_dirty_for_io(bd_page);
-					set_page_writeback(bd_page);
-					unlock_page(bd_page);
-					bd_page = bh->b_page;
+				if (bh->b_folio != bd_folio) {
+					folio_lock(bd_folio);
+					folio_clear_dirty_for_io(bd_folio);
+					folio_start_writeback(bd_folio);
+					folio_unlock(bd_folio);
+					bd_folio = bh->b_folio;
 				}
 				break;
 			}
-			if (bh->b_page != fs_page) {
-				nilfs_begin_page_io(fs_page);
-				fs_page = bh->b_page;
+			if (bh->b_folio != fs_folio) {
+				nilfs_begin_folio_io(fs_folio);
+				fs_folio = bh->b_folio;
 			}
 		}
 	}
-	if (bd_page) {
-		lock_page(bd_page);
-		clear_page_dirty_for_io(bd_page);
-		set_page_writeback(bd_page);
-		unlock_page(bd_page);
+	if (bd_folio) {
+		folio_lock(bd_folio);
+		folio_clear_dirty_for_io(bd_folio);
+		folio_start_writeback(bd_folio);
+		folio_unlock(bd_folio);
 	}
-	nilfs_begin_page_io(fs_page);
+	nilfs_begin_folio_io(fs_folio);
 }
 
 static int nilfs_segctor_write(struct nilfs_sc_info *sci,
-- 
2.34.1