Message-Id: <20210118170148.3126186-19-willy@infradead.org>
Date: Mon, 18 Jan 2021 17:01:39 +0000
From: "Matthew Wilcox (Oracle)" <willy@...radead.org>
To: linux-fsdevel@...r.kernel.org, linux-mm@...ck.org
Cc: "Matthew Wilcox (Oracle)" <willy@...radead.org>,
linux-kernel@...r.kernel.org
Subject: [PATCH v2 18/27] mm/filemap: Convert wait_on_page_locked_async to wait_on_folio_locked_async
This saves a few calls to compound_head().
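The page-flag tests, the trylock and the waiter bookkeeping all go through
compound_head() to find the head page, whereas the folio variants operate on
the folio directly, so any head lookup happens once in the caller. Roughly,
as a simplified sketch rather than the exact page-flags macro expansion:

        /* Page variant: may be handed a tail page, so resolve the head first. */
        static inline bool PageLocked(struct page *page)
        {
                return test_bit(PG_locked, &compound_head(page)->flags);
        }

        /* Folio variant: a folio is never a tail page, so test the flag directly. */
        static inline bool FolioLocked(struct folio *folio)
        {
                return test_bit(PG_locked, &folio->page.flags);
        }

With the async lock/wait helpers taking a folio, the one remaining page-based
caller does a single page_folio() conversion up front.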
Signed-off-by: Matthew Wilcox (Oracle) <willy@...radead.org>
---
mm/filemap.c | 22 +++++++++++-----------
1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 648f78577ab7..e997f4424ed9 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1338,22 +1338,22 @@ int wait_on_page_bit_killable(struct page *page, int bit_nr)
 }
 EXPORT_SYMBOL(wait_on_page_bit_killable);
 
-static int __wait_on_page_locked_async(struct page *page,
+static int __wait_on_folio_locked_async(struct folio *folio,
                                 struct wait_page_queue *wait, bool set)
 {
-        struct wait_queue_head *q = page_waitqueue(page);
+        struct wait_queue_head *q = page_waitqueue(&folio->page);
         int ret = 0;
 
-        wait->page = page;
+        wait->page = &folio->page;
         wait->bit_nr = PG_locked;
 
         spin_lock_irq(&q->lock);
         __add_wait_queue_entry_tail(q, &wait->wait);
-        SetPageWaiters(page);
+        SetFolioWaiters(folio);
         if (set)
-                ret = !trylock_page(page);
+                ret = !trylock_folio(folio);
         else
-                ret = PageLocked(page);
+                ret = FolioLocked(folio);
         /*
          * If we were successful now, we know we're still on the
          * waitqueue as we're still under the lock. This means it's
@@ -1368,12 +1368,12 @@ static int __wait_on_page_locked_async(struct page *page,
         return ret;
 }
 
-static int wait_on_page_locked_async(struct page *page,
+static int wait_on_folio_locked_async(struct folio *folio,
                                      struct wait_page_queue *wait)
 {
-        if (!PageLocked(page))
+        if (!FolioLocked(folio))
                 return 0;
-        return __wait_on_page_locked_async(compound_head(page), wait, false);
+        return __wait_on_folio_locked_async(folio, wait, false);
 }
 
 /**
@@ -1539,7 +1539,7 @@ EXPORT_SYMBOL_GPL(__lock_folio_killable);
 
 int __lock_folio_async(struct folio *folio, struct wait_page_queue *wait)
 {
-        return __wait_on_page_locked_async(&folio->page, wait, true);
+        return __wait_on_folio_locked_async(folio, wait, true);
 }
 
 /*
@@ -2256,7 +2256,7 @@ generic_file_buffered_read_pagenotuptodate(struct kiocb *iocb,
          * serialisations and why it's safe.
          */
         if (iocb->ki_flags & IOCB_WAITQ) {
-                error = wait_on_page_locked_async(page,
+                error = wait_on_folio_locked_async(page_folio(page),
                                 iocb->ki_waitq);
         } else {
                 error = wait_on_page_locked_killable(page);
--
2.29.2