Message-Id: <20220204195852.1751729-48-willy@infradead.org>
Date: Fri, 4 Feb 2022 19:58:24 +0000
From: "Matthew Wilcox (Oracle)" <willy@...radead.org>
To: linux-mm@...ck.org
Cc: "Matthew Wilcox (Oracle)" <willy@...radead.org>,
linux-kernel@...r.kernel.org
Subject: [PATCH 47/75] mm/mlock: Turn mlock_vma_page() into mlock_vma_folio()

Add mlock_vma_page() back as a wrapper. Saves a few calls to
compound_head() and an assertion that the page is not a tail page.

Signed-off-by: Matthew Wilcox (Oracle) <willy@...radead.org>
---
 mm/folio-compat.c |  5 +++++
 mm/internal.h     |  3 ++-
 mm/mlock.c        | 18 +++++++++---------
 3 files changed, 16 insertions(+), 10 deletions(-)

diff --git a/mm/folio-compat.c b/mm/folio-compat.c
index bcb037d9cec3..9cb0867d5b38 100644
--- a/mm/folio-compat.c
+++ b/mm/folio-compat.c
@@ -169,3 +169,8 @@ void clear_page_mlock(struct page *page)
 {
 	folio_end_mlock(page_folio(page));
 }
+
+void mlock_vma_page(struct page *page)
+{
+	mlock_vma_folio(page_folio(page));
+}
diff --git a/mm/internal.h b/mm/internal.h
index 041c76a4c284..18b024aa7e59 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -411,7 +411,8 @@ static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
 /*
  * must be called with vma's mmap_lock held for read or write, and page locked.
  */
-extern void mlock_vma_page(struct page *page);
+void mlock_vma_page(struct page *page);
+void mlock_vma_folio(struct folio *folio);
 extern unsigned int munlock_vma_page(struct page *page);
 
 extern int mlock_future_check(struct mm_struct *mm, unsigned long flags,
diff --git a/mm/mlock.c b/mm/mlock.c
index ff067d64acc5..d998fd5c84bf 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -94,21 +94,21 @@ void folio_end_mlock(struct folio *folio)
  * Mark page as mlocked if not already.
  * If page on LRU, isolate and putback to move to unevictable list.
  */
-void mlock_vma_page(struct page *page)
+void mlock_vma_folio(struct folio *folio)
 {
 	/* Serialize with page migration */
-	BUG_ON(!PageLocked(page));
+	BUG_ON(!folio_test_locked(folio));
 
-	VM_BUG_ON_PAGE(PageTail(page), page);
-	VM_BUG_ON_PAGE(PageCompound(page) && PageDoubleMap(page), page);
+	VM_BUG_ON_FOLIO(folio_test_large(folio) && folio_test_double_map(folio),
+			folio);
 
-	if (!TestSetPageMlocked(page)) {
-		int nr_pages = thp_nr_pages(page);
+	if (!folio_test_set_mlocked(folio)) {
+		long nr_pages = folio_nr_pages(folio);
 
-		mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages);
+		zone_stat_mod_folio(folio, NR_MLOCK, nr_pages);
 		count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
-		if (!isolate_lru_page(page))
-			putback_lru_page(page);
+		if (!folio_isolate_lru(folio))
+			folio_putback_lru(folio);
 	}
 }
 
--
2.34.1
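
[Not part of the patch, just an illustration of the calling convention it
creates: folio-aware code inside mm/ can call mlock_vma_folio() directly
and skip the compound_head() lookup hidden in page_folio(), while
page-based callers keep using the mlock_vma_page() wrapper added in
mm/folio-compat.c. The two functions below are hypothetical examples and
assume the caller already holds the vma's mmap_lock and has the folio
locked, as the comment in mm/internal.h requires.]

/* Hypothetical sketch only -- would live in a file under mm/ so that
 * the declarations from mm/internal.h are visible. */
#include "internal.h"

static void example_mlock_folio(struct folio *folio)
{
	/* Folio-aware caller: no compound_head() call needed here. */
	mlock_vma_folio(folio);
}

static void example_mlock_page(struct page *page)
{
	/*
	 * Page-based caller: goes through the compat wrapper, which
	 * performs the single page_folio() (compound_head()) conversion.
	 */
	mlock_vma_page(page);
}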