Message-Id: <20220204195852.1751729-44-willy@infradead.org>
Date: Fri, 4 Feb 2022 19:58:20 +0000
From: "Matthew Wilcox (Oracle)" <willy@...radead.org>
To: linux-mm@...ck.org
Cc: "Matthew Wilcox (Oracle)" <willy@...radead.org>,
linux-kernel@...r.kernel.org
Subject: [PATCH 43/75] mm/page_idle: Convert page_idle_clear_pte_refs() to use a folio
The PG_idle and PG_young bits are ignored if they're set on tail
pages, so ensure we're passing a folio around.
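(Aside, not part of the patch: a minimal sketch of the pattern this conversion relies on. page_folio() resolves any subpage, including a tail page of a compound page, to its folio, so the idle/young bits are updated where they are actually honoured. The helper name below is invented purely for illustration.)

#include <linux/mm.h>
#include <linux/page_idle.h>

/* Example only: record a reference through the folio, not the subpage. */
static void example_mark_referenced(struct page *page)
{
	struct folio *folio = page_folio(page);	/* tail page -> its folio */

	folio_clear_idle(folio);	/* idle bit is tracked per folio */
	folio_set_young(folio);		/* so page_referenced() sees it */
}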
Signed-off-by: Matthew Wilcox (Oracle) <willy@...radead.org>
---
mm/page_idle.c | 19 +++++++++++--------
1 file changed, 11 insertions(+), 8 deletions(-)
diff --git a/mm/page_idle.c b/mm/page_idle.c
index 20d35d720872..544814bd9e37 100644
--- a/mm/page_idle.c
+++ b/mm/page_idle.c
@@ -13,6 +13,8 @@
 #include <linux/page_ext.h>
 #include <linux/page_idle.h>
 
+#include "internal.h"
+
 #define BITMAP_CHUNK_SIZE	sizeof(u64)
 #define BITMAP_CHUNK_BITS	(BITMAP_CHUNK_SIZE * BITS_PER_BYTE)
 
@@ -48,6 +50,7 @@ static bool page_idle_clear_pte_refs_one(struct page *page,
 					struct vm_area_struct *vma,
 					unsigned long addr, void *arg)
 {
+	struct folio *folio = page_folio(page);
 	struct page_vma_mapped_walk pvmw = {
 		.vma = vma,
 		.address = addr,
@@ -74,19 +77,20 @@ static bool page_idle_clear_pte_refs_one(struct page *page,
 	}
 
 	if (referenced) {
-		clear_page_idle(page);
+		folio_clear_idle(folio);
 		/*
 		 * We cleared the referenced bit in a mapping to this page. To
 		 * avoid interference with page reclaim, mark it young so that
 		 * page_referenced() will return > 0.
 		 */
-		set_page_young(page);
+		folio_set_young(folio);
 	}
 	return true;
 }
 
 static void page_idle_clear_pte_refs(struct page *page)
 {
+	struct folio *folio = page_folio(page);
 	/*
 	 * Since rwc.arg is unused, rwc is effectively immutable, so we
 	 * can make it static const to save some cycles and stack.
@@ -97,18 +101,17 @@ static void page_idle_clear_pte_refs(struct page *page)
 	};
 	bool need_lock;
 
-	if (!page_mapped(page) ||
-	    !page_rmapping(page))
+	if (!folio_mapped(folio) || !folio_raw_mapping(folio))
 		return;
 
-	need_lock = !PageAnon(page) || PageKsm(page);
-	if (need_lock && !trylock_page(page))
+	need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
+	if (need_lock && !folio_trylock(folio))
 		return;
 
-	rmap_walk(page, (struct rmap_walk_control *)&rwc);
+	rmap_walk(&folio->page, (struct rmap_walk_control *)&rwc);
 
 	if (need_lock)
-		unlock_page(page);
+		folio_unlock(folio);
 }
 
 static ssize_t page_idle_bitmap_read(struct file *file, struct kobject *kobj,
--
2.34.1
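(For context, not part of the patch: page_idle_bitmap_read(), visible in the trailing context above, backs the /sys/kernel/mm/page_idle/bitmap file described in Documentation/admin-guide/mm/idle_page_tracking.rst. A rough userspace sketch of that ABI follows; it assumes root, uses a placeholder start_pfn, and drops most error handling.)

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned long start_pfn = 0x100000;	/* placeholder, multiple of 64 */
	uint64_t chunk = ~0ULL;			/* one bit per page frame */
	int fd = open("/sys/kernel/mm/page_idle/bitmap", O_RDWR);

	if (fd < 0)
		return 1;

	/* Setting bits marks the frames idle (their referenced state is cleared). */
	pwrite(fd, &chunk, sizeof(chunk), start_pfn / 64 * 8);

	/* ...let the workload run, then read back: set bits = still idle... */
	pread(fd, &chunk, sizeof(chunk), start_pfn / 64 * 8);
	printf("idle mask for pfn %lx..: %llx\n", start_pfn,
	       (unsigned long long)chunk);

	close(fd);
	return 0;
}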