Message-Id: <20240125085758.2393327-12-hch@lst.de>
Date: Thu, 25 Jan 2024 09:57:50 +0100
From: Christoph Hellwig <hch@....de>
To: linux-mm@...ck.org
Cc: Matthew Wilcox <willy@...radead.org>,
Jan Kara <jack@...e.com>,
David Howells <dhowells@...hat.com>,
Brian Foster <bfoster@...hat.com>,
Christian Brauner <brauner@...nel.org>,
"Darrick J. Wong" <djwong@...nel.org>,
linux-xfs@...r.kernel.org,
linux-fsdevel@...r.kernel.org,
linux-kernel@...r.kernel.org,
Jan Kara <jack@...e.cz>,
Dave Chinner <dchinner@...hat.com>
Subject: [PATCH 11/19] writeback: Use the folio_batch queue iterator

From: "Matthew Wilcox (Oracle)" <willy@...radead.org>

Instead of keeping our own local iterator variable, use the one just
added to folio_batch.
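
For context, the queue iterator this patch switches to was added by an
earlier patch in this series (in include/linux/pagevec.h).  A rough
sketch of what it provides is shown below; this is illustrative only
and the real definition may differ in detail:

	/*
	 * Sketch: return the next folio in the batch, or NULL once the
	 * batch is exhausted, advancing the batch's internal cursor.
	 * Relies on the folios[], nr and i members of struct folio_batch.
	 */
	static inline struct folio *folio_batch_next(struct folio_batch *fbatch)
	{
		if (fbatch->i == fbatch->nr)		/* nothing left in this batch */
			return NULL;
		return fbatch->folios[fbatch->i++];	/* hand out the next folio */
	}

With an iterator like this, writeback_get_folio() below can simply pull
the next folio and refill the batch when NULL is returned, instead of
write_cache_pages() tracking its own index into fbatch.folios[].
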
Signed-off-by: Matthew Wilcox (Oracle) <willy@...radead.org>
Signed-off-by: Christoph Hellwig <hch@....de>
Reviewed-by: Jan Kara <jack@...e.cz>
Acked-by: Dave Chinner <dchinner@...hat.com>
---
mm/page-writeback.c | 29 +++++++++++++++--------------
1 file changed, 15 insertions(+), 14 deletions(-)

diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index d6ac414ddce9ca..432bb42d0829d1 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2429,13 +2429,21 @@ static bool folio_prepare_writeback(struct address_space *mapping,
return true;
}
-static void writeback_get_batch(struct address_space *mapping,
+static struct folio *writeback_get_folio(struct address_space *mapping,
struct writeback_control *wbc)
{
- folio_batch_release(&wbc->fbatch);
- cond_resched();
- filemap_get_folios_tag(mapping, &wbc->index, wbc_end(wbc),
- wbc_to_tag(wbc), &wbc->fbatch);
+ struct folio *folio;
+
+ folio = folio_batch_next(&wbc->fbatch);
+ if (!folio) {
+ folio_batch_release(&wbc->fbatch);
+ cond_resched();
+ filemap_get_folios_tag(mapping, &wbc->index, wbc_end(wbc),
+ wbc_to_tag(wbc), &wbc->fbatch);
+ folio = folio_batch_next(&wbc->fbatch);
+ }
+
+ return folio;
}
/**
@@ -2475,7 +2483,6 @@ int write_cache_pages(struct address_space *mapping,
{
int error;
pgoff_t end; /* Inclusive */
- int i = 0;
if (wbc->range_cyclic) {
wbc->index = mapping->writeback_index; /* prev offset */
@@ -2491,18 +2498,12 @@ int write_cache_pages(struct address_space *mapping,
wbc->err = 0;
for (;;) {
- struct folio *folio;
+ struct folio *folio = writeback_get_folio(mapping, wbc);
unsigned long nr;
- if (i == wbc->fbatch.nr) {
- writeback_get_batch(mapping, wbc);
- i = 0;
- }
- if (wbc->fbatch.nr == 0)
+ if (!folio)
break;
- folio = wbc->fbatch.folios[i++];
-
folio_lock(folio);
if (!folio_prepare_writeback(mapping, wbc, folio)) {
folio_unlock(folio);
--
2.39.2