Message-Id: <20201208194653.19180-8-willy@infradead.org>
Date: Tue, 8 Dec 2020 19:46:49 +0000
From: "Matthew Wilcox (Oracle)" <willy@...radead.org>
To: linux-fsdevel@...r.kernel.org, linux-mm@...ck.org
Cc: "Matthew Wilcox (Oracle)" <willy@...radead.org>,
linux-kernel@...r.kernel.org
Subject: [RFC PATCH 07/11] mm: Add lock_folio_killable
This is like lock_page_killable() but for use by callers who
know they have a folio. Convert __lock_page_killable() to be
__lock_folio_killable(). This saves one call to compound_head() per
contended call to lock_page_killable().
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
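For illustration only, not part of the patch to apply: a minimal sketch of
a caller that already has a folio and wants a killable lock.
example_touch_folio() is hypothetical; lock_folio_killable() is the helper
added below, and unlock_page(&folio->page) is used so the sketch does not
depend on an unlock_folio() wrapper being available at this point in the
series.

#include <linux/pagemap.h>

/*
 * Hypothetical caller: lock a folio we already have, bailing out if a
 * fatal signal arrives.  Calling lock_folio_killable() directly avoids
 * the compound_head() lookup that lock_page_killable() would repeat.
 */
static int example_touch_folio(struct folio *folio)
{
	int err;

	err = lock_folio_killable(folio);
	if (err)
		return err;	/* -EINTR: task was fatally signalled */

	/* ... operate on the locked folio ... */

	unlock_page(&folio->page);
	return 0;
}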
include/linux/pagemap.h | 15 ++++++++++-----
mm/filemap.c | 17 +++++++++--------
2 files changed, 19 insertions(+), 13 deletions(-)
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 1d4a1828a434..060faeb8d701 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -584,7 +584,7 @@ static inline bool wake_page_match(struct wait_page_queue *wait_page,
}
extern void __lock_folio(struct folio *folio);
-extern int __lock_page_killable(struct page *page);
+extern int __lock_folio_killable(struct folio *folio);
extern int __lock_page_async(struct page *page, struct wait_page_queue *wait);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
unsigned int flags);
@@ -632,6 +632,14 @@ static inline void lock_page(struct page *page)
lock_folio(page_folio(page));
}
+static inline int lock_folio_killable(struct folio *folio)
+{
+ might_sleep();
+ if (!trylock_folio(folio))
+ return __lock_folio_killable(folio);
+ return 0;
+}
+
/*
* lock_page_killable is like lock_page but can be interrupted by fatal
* signals. It returns 0 if it locked the page and -EINTR if it was
@@ -639,10 +647,7 @@ static inline void lock_page(struct page *page)
*/
static inline int lock_page_killable(struct page *page)
{
- might_sleep();
- if (!trylock_page(page))
- return __lock_page_killable(page);
- return 0;
+ return lock_folio_killable(page_folio(page));
}
/*
diff --git a/mm/filemap.c b/mm/filemap.c
index 8e87906f5dd6..50535b21b452 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1534,14 +1534,13 @@ void __lock_folio(struct folio *folio)
}
EXPORT_SYMBOL(__lock_folio);
-int __lock_page_killable(struct page *__page)
+int __lock_folio_killable(struct folio *folio)
{
- struct page *page = compound_head(__page);
- wait_queue_head_t *q = page_waitqueue(page);
- return wait_on_page_bit_common(q, page, PG_locked, TASK_KILLABLE,
+ wait_queue_head_t *q = page_waitqueue(&folio->page);
+ return wait_on_page_bit_common(q, &folio->page, PG_locked, TASK_KILLABLE,
EXCLUSIVE);
}
-EXPORT_SYMBOL_GPL(__lock_page_killable);
+EXPORT_SYMBOL_GPL(__lock_folio_killable);
int __lock_page_async(struct page *page, struct wait_page_queue *wait)
{
@@ -1562,6 +1561,8 @@ int __lock_page_async(struct page *page, struct wait_page_queue *wait)
int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
unsigned int flags)
{
+ struct folio *folio = page_folio(page);
+
if (fault_flag_allow_retry_first(flags)) {
/*
* CAUTION! In this case, mmap_lock is not released
@@ -1580,13 +1581,13 @@ int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
if (flags & FAULT_FLAG_KILLABLE) {
int ret;
- ret = __lock_page_killable(page);
+ ret = __lock_folio_killable(folio);
if (ret) {
mmap_read_unlock(mm);
return 0;
}
} else {
- __lock_folio(page_folio(page));
+ __lock_folio(folio);
}
return 1;
@@ -2778,7 +2779,7 @@ static int lock_page_maybe_drop_mmap(struct vm_fault *vmf, struct page *page,
*fpin = maybe_unlock_mmap_for_io(vmf, *fpin);
if (vmf->flags & FAULT_FLAG_KILLABLE) {
- if (__lock_page_killable(&folio->page)) {
+ if (__lock_folio_killable(folio)) {
/*
* We didn't have the right flags to drop the mmap_lock,
* but all fault_handlers only check for fatal signals
--
2.29.2