Message-Id: <20210712190204.80979-20-willy@infradead.org>
Date: Mon, 12 Jul 2021 20:01:51 +0100
From: "Matthew Wilcox (Oracle)" <willy@...radead.org>
To: akpm@linux-foundation.org
Cc: "Matthew Wilcox (Oracle)" <willy@...radead.org>,
linux-kernel@...r.kernel.org, linux-mm@...ck.org,
linux-fsdevel@...r.kernel.org, Christoph Hellwig <hch@....de>,
Jeff Layton <jlayton@...nel.org>,
"Kirill A . Shutemov" <kirill.shutemov@...ux.intel.com>,
Vlastimil Babka <vbabka@...e.cz>,
William Kucharski <william.kucharski@...cle.com>
Subject: [PATCH v13 19/32] mm/filemap: Add folio_lock_killable()

This is like lock_page_killable() but for use by callers who
know they have a folio. Convert __lock_page_killable() to be
__folio_lock_killable(). This saves one call to compound_head() per
contended call to lock_page_killable().

__folio_lock_killable() is 19 bytes smaller than __lock_page_killable()
was. filemap_fault() shrinks by 74 bytes and __lock_page_or_retry()
shrinks by 71 bytes. That's a total of 164 bytes of text saved.
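
For illustration, a minimal caller-side sketch (hypothetical helper, not
part of this patch; folio_unlock() comes from earlier in this series):

/*
 * Hypothetical example: a caller that already has a folio can take
 * the killable lock directly, avoiding the compound_head() call
 * hidden behind lock_page_killable().
 */
static int example_write_begin(struct folio *folio)
{
	int err;

	/* Returns 0 with the folio locked, or -EINTR on a fatal signal. */
	err = folio_lock_killable(folio);
	if (err)
		return err;

	/* ... operate on the locked folio ... */
	folio_unlock(folio);
	return 0;
}
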
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Acked-by: Jeff Layton <jlayton@kernel.org>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
---
include/linux/pagemap.h | 15 ++++++++++-----
mm/filemap.c            | 17 +++++++++--------
2 files changed, 19 insertions(+), 13 deletions(-)

diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 5c82933af2fe..3d78d96c163d 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -654,7 +654,7 @@ static inline bool wake_page_match(struct wait_page_queue *wait_page,
}

void __folio_lock(struct folio *folio);
-extern int __lock_page_killable(struct page *page);
+int __folio_lock_killable(struct folio *folio);
extern int __lock_page_async(struct page *page, struct wait_page_queue *wait);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
unsigned int flags);
@@ -694,6 +694,14 @@ static inline void lock_page(struct page *page)
__folio_lock(folio);
}

+static inline int folio_lock_killable(struct folio *folio)
+{
+ might_sleep();
+ if (!folio_trylock(folio))
+ return __folio_lock_killable(folio);
+ return 0;
+}
+
/*
* lock_page_killable is like lock_page but can be interrupted by fatal
* signals. It returns 0 if it locked the page and -EINTR if it was
@@ -701,10 +709,7 @@ static inline void lock_page(struct page *page)
*/
static inline int lock_page_killable(struct page *page)
{
- might_sleep();
- if (!trylock_page(page))
- return __lock_page_killable(page);
- return 0;
+ return folio_lock_killable(page_folio(page));
}

/*
diff --git a/mm/filemap.c b/mm/filemap.c
index cc2682b72584..108cc825638f 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1589,14 +1589,13 @@ void __folio_lock(struct folio *folio)
}
EXPORT_SYMBOL(__folio_lock);

-int __lock_page_killable(struct page *__page)
+int __folio_lock_killable(struct folio *folio)
{
- struct page *page = compound_head(__page);
-	wait_queue_head_t *q = page_waitqueue(page);
- return wait_on_page_bit_common(q, page, PG_locked, TASK_KILLABLE,
+ wait_queue_head_t *q = page_waitqueue(&folio->page);
+ return wait_on_page_bit_common(q, &folio->page, PG_locked, TASK_KILLABLE,
EXCLUSIVE);
}
-EXPORT_SYMBOL_GPL(__lock_page_killable);
+EXPORT_SYMBOL_GPL(__folio_lock_killable);

int __lock_page_async(struct page *page, struct wait_page_queue *wait)
{
@@ -1638,6 +1637,8 @@ int __lock_page_async(struct page *page, struct wait_page_queue *wait)
int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
unsigned int flags)
{
+ struct folio *folio = page_folio(page);
+
if (fault_flag_allow_retry_first(flags)) {
/*
* CAUTION! In this case, mmap_lock is not released
@@ -1656,13 +1657,13 @@ int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
if (flags & FAULT_FLAG_KILLABLE) {
int ret;

- ret = __lock_page_killable(page);
+ ret = __folio_lock_killable(folio);
if (ret) {
mmap_read_unlock(mm);
return 0;
}
} else {
- __folio_lock(page_folio(page));
+ __folio_lock(folio);
}

return 1;
@@ -2851,7 +2852,7 @@ static int lock_page_maybe_drop_mmap(struct vm_fault *vmf, struct page *page,

*fpin = maybe_unlock_mmap_for_io(vmf, *fpin);
if (vmf->flags & FAULT_FLAG_KILLABLE) {
- if (__lock_page_killable(&folio->page)) {
+ if (__folio_lock_killable(folio)) {
/*
* We didn't have the right flags to drop the mmap_lock,
* but all fault_handlers only check for fatal signals
--
2.30.2