Message-Id: <20220204195852.1751729-49-willy@infradead.org>
Date: Fri, 4 Feb 2022 19:58:25 +0000
From: "Matthew Wilcox (Oracle)" <willy@...radead.org>
To: linux-mm@...ck.org
Cc: "Matthew Wilcox (Oracle)" <willy@...radead.org>,
	linux-kernel@...r.kernel.org
Subject: [PATCH 48/75] mm/rmap: Turn page_mlock() into folio_mlock()

Add back page_mlock() as a wrapper around folio_mlock(). This removes
a few hidden calls to compound_head().

Signed-off-by: Matthew Wilcox (Oracle) <willy@...radead.org>
---
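For review context, a minimal, compilable userspace sketch of why the
wrapper direction saves work (the struct layouts, flag bit, and
compound_head encoding below are simplified stand-ins, not the kernel's
real definitions): every PageFoo(page) test hides a compound_head()
lookup, while page_folio() performs that lookup once and the
folio_test_foo() calls then read the flags directly.

#include <stdbool.h>
#include <stdio.h>

struct page {
	unsigned long flags;
	unsigned long compound_head;	/* tail pages point at their head */
};

struct folio {
	struct page page;		/* a folio is never a tail page */
};

/* Resolve a (possibly tail) page to its head page. */
static struct page *compound_head(struct page *page)
{
	if (page->compound_head & 1)	/* low bit set: this is a tail */
		return (struct page *)(page->compound_head - 1);
	return page;
}

static struct folio *page_folio(struct page *page)
{
	return (struct folio *)compound_head(page);
}

/* Old style: the head lookup is repeated inside every flag test. */
static bool PageLocked_old(struct page *page)
{
	return compound_head(page)->flags & 1;
}

/* New style: the caller resolved the head once; test the flags directly. */
static bool folio_test_locked(struct folio *folio)
{
	return folio->page.flags & 1;
}

int main(void)
{
	struct page head = { .flags = 1 };
	struct page tail = { .compound_head = (unsigned long)&head | 1 };
	struct folio *folio = page_folio(&tail);	/* one lookup */

	/* Both print 1, but only the first repeats the head lookup. */
	printf("%d %d\n", PageLocked_old(&tail), folio_test_locked(folio));
	return 0;
}

With the wrapper in this patch, legacy page_mlock() callers pay that
single page_folio() conversion in mm/folio-compat.c, while folio-aware
callers avoid the hidden lookups entirely.
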
 include/linux/rmap.h |  1 +
 mm/folio-compat.c    |  6 ++++++
 mm/rmap.c            | 31 +++++++++++++++++--------------
 3 files changed, 24 insertions(+), 14 deletions(-)

diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 00b772cdaaaa..31f3a299ef66 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -261,6 +261,7 @@ int folio_mkclean(struct folio *);
  * the page mlocked.
  */
 void page_mlock(struct page *page);
+void folio_mlock(struct folio *folio);
 
 void remove_migration_ptes(struct page *old, struct page *new, bool locked);
 
diff --git a/mm/folio-compat.c b/mm/folio-compat.c
index 9cb0867d5b38..90f03187a5e3 100644
--- a/mm/folio-compat.c
+++ b/mm/folio-compat.c
@@ -7,6 +7,7 @@
 #include <linux/migrate.h>
 #include <linux/pagemap.h>
 #include <linux/swap.h>
+#include <linux/rmap.h>
 #include "internal.h"
 
 struct address_space *page_mapping(struct page *page)
@@ -174,3 +175,8 @@ void mlock_vma_page(struct page *page)
 {
 	mlock_vma_folio(page_folio(page));
 }
+
+void page_mlock(struct page *page)
+{
+	folio_mlock(page_folio(page));
+}
diff --git a/mm/rmap.c b/mm/rmap.c
index 1cedcfd6105c..a383e25fb196 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -2001,6 +2001,7 @@ void try_to_migrate(struct page *page, enum ttu_flags flags)
 static bool page_mlock_one(struct page *page, struct vm_area_struct *vma,
 				 unsigned long address, void *unused)
 {
+	struct folio *folio = page_folio(page);
 	struct page_vma_mapped_walk pvmw = {
 		.vma = vma,
 		.address = address,
@@ -2024,9 +2025,9 @@ static bool page_mlock_one(struct page *page, struct vm_area_struct *vma,
 		 * nor on an Anon THP (which may still be PTE-mapped
 		 * after DoubleMap was cleared).
 		 */
-		mlock_vma_page(page);
+		mlock_vma_folio(folio);
 		/*
-		 * No need to scan further once the page is marked
+		 * No need to scan further once the folio is marked
 		 * as mlocked.
 		 */
 		page_vma_mapped_walk_done(&pvmw);
@@ -2038,14 +2039,14 @@ static bool page_mlock_one(struct page *page, struct vm_area_struct *vma,
 }
 
 /**
- * page_mlock - try to mlock a page
- * @page: the page to be mlocked
+ * folio_mlock() - Try to mlock a folio.
+ * @folio: The folio to be mlocked.
  *
- * Called from munlock code. Checks all of the VMAs mapping the page and mlocks
- * the page if any are found. The page will be returned with PG_mlocked cleared
- * if it is not mapped by any locked vmas.
+ * Called from munlock code. Checks all of the VMAs mapping the folio
+ * and mlocks the folio if any are found. The folio will be returned
+ * with the mlocked flag clear if it is not mapped by any locked vmas.
  */
-void page_mlock(struct page *page)
+void folio_mlock(struct folio *folio)
 {
 	struct rmap_walk_control rwc = {
 		.rmap_one = page_mlock_one,
@@ -2054,14 +2055,16 @@ void page_mlock(struct page *page)
 
 	};
 
-	VM_BUG_ON_PAGE(!PageLocked(page) || PageLRU(page), page);
-	VM_BUG_ON_PAGE(PageCompound(page) && PageDoubleMap(page), page);
+	VM_BUG_ON_FOLIO(!folio_test_locked(folio) || folio_test_lru(folio),
+			folio);
+	VM_BUG_ON_FOLIO(folio_test_large(folio) && folio_test_double_map(folio),
+			folio);
 
 	/* Anon THP are only marked as mlocked when singly mapped */
-	if (PageTransCompound(page) && PageAnon(page))
+	if (folio_test_large(folio) && folio_test_anon(folio))
 		return;
 
-	rmap_walk(page, &rwc);
+	rmap_walk(&folio->page, &rwc);
 }
 
 #ifdef CONFIG_DEVICE_PRIVATE
@@ -2290,7 +2293,7 @@ static struct anon_vma *rmap_walk_anon_lock(struct page *page,
  * Find all the mappings of a page using the mapping pointer and the vma chains
  * contained in the anon_vma struct it points to.
  *
- * When called from page_mlock(), the mmap_lock of the mm containing the vma
+ * When called from folio_mlock(), the mmap_lock of the mm containing the vma
  * where the page was found will be held for write. So, we won't recheck
  * vm_flags for that VMA. That should be OK, because that vma shouldn't be
  * LOCKED.
@@ -2343,7 +2346,7 @@ static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
  * Find all the mappings of a page using the mapping pointer and the vma chains
  * contained in the address_space struct it points to.
  *
- * When called from page_mlock(), the mmap_lock of the mm containing the vma
+ * When called from folio_mlock(), the mmap_lock of the mm containing the vma
  * where the page was found will be held for write. So, we won't recheck
  * vm_flags for that VMA. That should be OK, because that vma shouldn't be
  * LOCKED.
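
A usage note on the new entry point (hypothetical caller, not part of
this patch): a munlock-side caller that still holds a struct page would
now convert once and call the folio version directly:

static void example_munlock_path(struct page *page)
{
	struct folio *folio = page_folio(page);

	/* Per the VM_BUG_ONs above: locked, and not on the LRU. */
	folio_mlock(folio);
}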
--
2.34.1