Message-Id: <20231218153553.807799-13-hch@lst.de>
Date: Mon, 18 Dec 2023 16:35:48 +0100
From: Christoph Hellwig <hch@....de>
To: linux-mm@...ck.org
Cc: "Matthew Wilcox (Oracle)" <willy@...radead.org>,
Jan Kara <jack@...e.com>,
David Howells <dhowells@...hat.com>,
Brian Foster <bfoster@...hat.com>,
linux-fsdevel@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: [PATCH 12/17] writeback: Factor writeback_iter_init() out of write_cache_pages()
From: "Matthew Wilcox (Oracle)" <willy@...radead.org>
Make writeback_iter_init() return the first folio in the batch so that
write_cache_pages() can use it in a typical for () loop.

Signed-off-by: Matthew Wilcox (Oracle) <willy@...radead.org>
Signed-off-by: Christoph Hellwig <hch@....de>
---
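Not part of the patch itself, just an illustration: below is a minimal
userspace model of the "init returns the first element" iterator shape
that the writeback_iter_init()/writeback_get_next() pairing gives
write_cache_pages(). The names in the sketch (iter, iter_init,
iter_next, dirty) are invented for the example and are not kernel
interfaces.

/*
 * Illustrative userspace model of the "init returns the first element"
 * iterator shape; iter, iter_init and iter_next are made-up names, not
 * kernel interfaces.
 */
#include <stddef.h>
#include <stdio.h>

struct iter {
	const int *items;
	size_t count;
	size_t pos;
};

/* Set up the cursor and hand back the first element, or NULL if empty. */
static const int *iter_init(struct iter *it, const int *items, size_t count)
{
	it->items = items;
	it->count = count;
	it->pos = 0;
	return count ? &it->items[it->pos++] : NULL;
}

/* Advance the cursor; a NULL return terminates the walk. */
static const int *iter_next(struct iter *it)
{
	return it->pos < it->count ? &it->items[it->pos++] : NULL;
}

int main(void)
{
	const int dirty[] = { 3, 1, 4, 1, 5 };
	struct iter it;
	const int *item;

	/* Same for () shape as the reworked write_cache_pages() loop. */
	for (item = iter_init(&it, dirty, sizeof(dirty) / sizeof(dirty[0]));
	     item;
	     item = iter_next(&it))
		printf("%d\n", *item);

	return 0;
}

Having the init helper return the first element means the caller needs
neither a special-cased first iteration nor a mid-loop break; the NULL
return is the only termination condition.
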
mm/page-writeback.c | 39 ++++++++++++++++++++-------------------
 1 file changed, 20 insertions(+), 19 deletions(-)

diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 70f42fd9a95b5d..efcfffa800884d 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2442,6 +2442,22 @@ static bool should_writeback_folio(struct address_space *mapping,
return true;
}
+static struct folio *writeback_iter_init(struct address_space *mapping,
+ struct writeback_control *wbc)
+{
+ if (wbc->range_cyclic)
+ wbc->index = mapping->writeback_index; /* prev offset */
+ else
+ wbc->index = wbc->range_start >> PAGE_SHIFT;
+
+ if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
+ tag_pages_for_writeback(mapping, wbc->index, wbc_end(wbc));
+
+ wbc->err = 0;
+ folio_batch_init(&wbc->fbatch);
+ return writeback_get_next(mapping, wbc);
+}
+
/**
* write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
* @mapping: address space structure to write
@@ -2477,29 +2493,14 @@ int write_cache_pages(struct address_space *mapping,
struct writeback_control *wbc, writepage_t writepage,
void *data)
{
+ struct folio *folio;
int error;
- pgoff_t end; /* Inclusive */
- if (wbc->range_cyclic) {
- wbc->index = mapping->writeback_index; /* prev offset */
- end = -1;
- } else {
- wbc->index = wbc->range_start >> PAGE_SHIFT;
- end = wbc->range_end >> PAGE_SHIFT;
- }
- if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
- tag_pages_for_writeback(mapping, wbc->index, end);
-
- folio_batch_init(&wbc->fbatch);
- wbc->err = 0;
-
- for (;;) {
- struct folio *folio = writeback_get_next(mapping, wbc);
+ for (folio = writeback_iter_init(mapping, wbc);
+ folio;
+ folio = writeback_get_next(mapping, wbc)) {
unsigned long nr;
- if (!folio)
- break;
-
folio_lock(folio);
if (!should_writeback_folio(mapping, wbc, folio)) {
folio_unlock(folio);
--
2.39.2