Message-ID: <ZJsOLJLtc+SRUU/L@infradead.org>
Date: Tue, 27 Jun 2023 09:28:28 -0700
From: Christoph Hellwig <hch@...radead.org>
To: Matthew Wilcox <willy@...radead.org>
Cc: Christoph Hellwig <hch@...radead.org>, linux-mm@...ck.org,
linux-fsdevel@...r.kernel.org, linux-kernel@...r.kernel.org,
Jan Kara <jack@...e.com>, David Howells <dhowells@...hat.com>
Subject: Re: [PATCH 09/12] writeback: Factor writeback_iter_next() out of
write_cache_pages()
On Tue, Jun 27, 2023 at 04:31:17PM +0100, Matthew Wilcox wrote:
> It makes the callers neater. Compare:
>
> 	if (!folio)
> 		return writeback_finish(mapping, wbc, false);
> vs
> 	if (!folio) {
> 		writeback_finish(mapping, wbc, false);
> 		return NULL;
> 	}
>
> Similarly for the other two callers.
Not sure I agree. See my quickly cooked up patch below. But in the
end this is completely superficial and I won't complain; do it the way
you prefer.
>
> > > +		if (error == AOP_WRITEPAGE_ACTIVATE) {
> > > +			folio_unlock(folio);
> > > +			error = 0;
> >
> > Note there really shouldn't be any need for this outside of the
> > legacy ->writepage case. But it might make sense to delay the removal
> > until after ->writepage is gone to avoid bugs in conversions.
>
> ext4_journalled_writepage_callback() still returns
> AOP_WRITEPAGE_ACTIVATE, and that's used by a direct call to
> write_cache_pages().
Yeah. But that could trivially do the open-coded unlock_page instead.
Probably not worth mixing that into this series, though.
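To illustrate (a rough sketch only, separate from the quickly cooked up
patch below, and assuming the existing buffer walk in the ext4 callback
stays as it is), the callback could unlock the folio itself and return 0
instead of relying on write_cache_pages() to handle AOP_WRITEPAGE_ACTIVATE:

static int ext4_journalled_writepage_callback(struct folio *folio,
		struct writeback_control *wbc, void *data)
{
	/* ... existing buffer checks that may redirty the folio ... */

	/* open-coded unlock instead of returning AOP_WRITEPAGE_ACTIVATE */
	folio_unlock(folio);
	return 0;
}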
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 55832679af2194..07bbbc0dec4d00 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2360,7 +2360,7 @@ void tag_pages_for_writeback(struct address_space *mapping,
 }
 EXPORT_SYMBOL(tag_pages_for_writeback);
 
-static struct folio *writeback_finish(struct address_space *mapping,
+static void writeback_finish(struct address_space *mapping,
 		struct writeback_control *wbc, bool done)
 {
 	folio_batch_release(&wbc->fbatch);
@@ -2374,8 +2374,6 @@ static struct folio *writeback_finish(struct address_space *mapping,
 		wbc->done_index = 0;
 	if (wbc->range_cyclic || (wbc->range_whole && wbc->nr_to_write > 0))
 		mapping->writeback_index = wbc->done_index;
-
-	return NULL;
 }
 
 static struct folio *writeback_get_next(struct address_space *mapping,
@@ -2435,20 +2433,19 @@ static struct folio *writeback_get_folio(struct address_space *mapping,
 {
 	struct folio *folio;
 
-	for (;;) {
-		folio = writeback_get_next(mapping, wbc);
-		if (!folio)
-			return writeback_finish(mapping, wbc, false);
+	while ((folio = writeback_get_next(mapping, wbc))) {
 		wbc->done_index = folio->index;
 
 		folio_lock(folio);
-		if (likely(should_writeback_folio(mapping, wbc, folio)))
-			break;
+		if (likely(should_writeback_folio(mapping, wbc, folio))) {
+			trace_wbc_writepage(wbc, inode_to_bdi(mapping->host));
+			return folio;
+		}
 		folio_unlock(folio);
 	}
 
-	trace_wbc_writepage(wbc, inode_to_bdi(mapping->host));
-	return folio;
+	writeback_finish(mapping, wbc, false);
+	return NULL;
 }
 
 struct folio *writeback_iter_init(struct address_space *mapping,
@@ -2494,7 +2491,7 @@ struct folio *writeback_iter_next(struct address_space *mapping,
 			wbc->err = error;
 			wbc->done_index = folio->index +
 					folio_nr_pages(folio);
-			return writeback_finish(mapping, wbc, true);
+			goto done;
 		}
 		if (!wbc->err)
 			wbc->err = error;
@@ -2507,9 +2504,12 @@ struct folio *writeback_iter_next(struct address_space *mapping,
 	 * to entering this loop.
	 */
 	if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE)
-		return writeback_finish(mapping, wbc, true);
+		goto done;
 
 	return writeback_get_folio(mapping, wbc);
+done:
+	writeback_finish(mapping, wbc, true);
+	return NULL;
 }
 
 /**
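For reference, this is roughly how the iterator ends up being consumed (a
minimal sketch assumed from the rest of the series, not something changed
by the patch above; writepage and data stand in for the write_cache_pages()
callback and its cookie). Neither variant of writeback_finish() affects it:

	struct folio *folio;
	int error = 0;

	for (folio = writeback_iter_init(mapping, wbc); folio;
	     folio = writeback_iter_next(mapping, wbc, folio, error))
		error = writepage(folio, wbc, data);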