Message-Id: <20251128044146.80050-4-jniethe@nvidia.com>
Date: Fri, 28 Nov 2025 15:41:43 +1100
From: Jordan Niethe <jniethe@...dia.com>
To: linux-mm@...ck.org
Cc: balbirs@...dia.com,
matthew.brost@...el.com,
akpm@...ux-foundation.org,
linux-kernel@...r.kernel.org,
dri-devel@...ts.freedesktop.org,
david@...hat.com,
ziy@...dia.com,
apopple@...dia.com,
lorenzo.stoakes@...cle.com,
lyude@...hat.com,
dakr@...nel.org,
airlied@...il.com,
simona@...ll.ch,
rcampbell@...dia.com,
mpenttil@...hat.com,
jgg@...dia.com,
willy@...radead.org
Subject: [RFC PATCH 3/6] mm/page_vma_mapped: Add flags to page_vma_mapped_walk::pfn to track device private PFNs
A future change will remove device private pages from the physical
address space. This will mean that device private pages no longer have
a normal PFN and must be handled separately.
Prepare for this by modifying page_vma_mapped_walk::pfn to contain flags
as well as a PFN. Introduce a PVMW_PFN_DEVICE_PRIVATE flag to indicate
that page_vma_mapped_walk::pfn holds the PFN of a device private page.
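To illustrate the intended encoding (this snippet is only an
illustration, not part of the patch), a normal PFN round-trips through
the new helpers as follows, with the flag bits kept below
PVMW_PFN_SHIFT:

	unsigned long encoded = page_vma_walk_pfn(pfn);	/* pfn << PVMW_PFN_SHIFT */

	if (!(encoded & PVMW_PFN_DEVICE_PRIVATE)) {
		/* Not device private: the shifted value is a normal PFN. */
		struct page *page = page_vma_walk_pfn_to_page(encoded);
	}

Note that this patch only reserves the flag bit and converts existing
users to the encoded form; nothing here sets PVMW_PFN_DEVICE_PRIVATE
yet.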
Signed-off-by: Jordan Niethe <jniethe@...dia.com>
Signed-off-by: Alistair Popple <apopple@...dia.com>
---
include/linux/rmap.h | 26 +++++++++++++++++++++++++-
mm/page_vma_mapped.c | 6 +++---
mm/rmap.c | 4 ++--
mm/vmscan.c | 2 +-
4 files changed, 31 insertions(+), 7 deletions(-)
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index daa92a58585d..79e5c733d9c8 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -939,9 +939,33 @@ struct page_vma_mapped_walk {
unsigned int flags;
};
+/* pfn is a device private offset */
+#define PVMW_PFN_DEVICE_PRIVATE (1UL << 0)
+#define PVMW_PFN_SHIFT 1
+
+static inline unsigned long page_vma_walk_pfn(unsigned long pfn)
+{
+ return (pfn << PVMW_PFN_SHIFT);
+}
+
+static inline unsigned long folio_page_vma_walk_pfn(const struct folio *folio)
+{
+ return page_vma_walk_pfn(folio_pfn(folio));
+}
+
+static inline struct page *page_vma_walk_pfn_to_page(unsigned long pvmw_pfn)
+{
+ return pfn_to_page(pvmw_pfn >> PVMW_PFN_SHIFT);
+}
+
+static inline struct folio *page_vma_walk_pfn_to_folio(unsigned long pvmw_pfn)
+{
+ return page_folio(page_vma_walk_pfn_to_page(pvmw_pfn));
+}
+
#define DEFINE_FOLIO_VMA_WALK(name, _folio, _vma, _address, _flags) \
struct page_vma_mapped_walk name = { \
- .pfn = folio_pfn(_folio), \
+ .pfn = folio_page_vma_walk_pfn(_folio), \
.nr_pages = folio_nr_pages(_folio), \
.pgoff = folio_pgoff(_folio), \
.vma = _vma, \
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index c498a91b6706..9146bd084435 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -133,9 +133,9 @@ static bool check_pte(struct page_vma_mapped_walk *pvmw, unsigned long pte_nr)
pfn = pte_pfn(ptent);
}
- if ((pfn + pte_nr - 1) < pvmw->pfn)
+ if ((pfn + pte_nr - 1) < (pvmw->pfn >> PVMW_PFN_SHIFT))
return false;
- if (pfn > (pvmw->pfn + pvmw->nr_pages - 1))
+ if (pfn > ((pvmw->pfn >> PVMW_PFN_SHIFT) + pvmw->nr_pages - 1))
return false;
return true;
}
@@ -346,7 +346,7 @@ unsigned long page_mapped_in_vma(const struct page *page,
{
const struct folio *folio = page_folio(page);
struct page_vma_mapped_walk pvmw = {
- .pfn = page_to_pfn(page),
+ .pfn = folio_page_vma_walk_pfn(folio),
.nr_pages = 1,
.vma = vma,
.flags = PVMW_SYNC,
diff --git a/mm/rmap.c b/mm/rmap.c
index ac4f783d6ec2..e94500318f92 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1129,7 +1129,7 @@ static bool mapping_wrprotect_range_one(struct folio *folio,
{
struct wrprotect_file_state *state = (struct wrprotect_file_state *)arg;
struct page_vma_mapped_walk pvmw = {
- .pfn = state->pfn,
+ .pfn = page_vma_walk_pfn(state->pfn),
.nr_pages = state->nr_pages,
.pgoff = state->pgoff,
.vma = vma,
@@ -1207,7 +1207,7 @@ int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff,
struct vm_area_struct *vma)
{
struct page_vma_mapped_walk pvmw = {
- .pfn = pfn,
+ .pfn = page_vma_walk_pfn(pfn),
.nr_pages = nr_pages,
.pgoff = pgoff,
.vma = vma,
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b2fc8b626d3d..e07ad830e30a 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -4238,7 +4238,7 @@ bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
pte_t *pte = pvmw->pte;
unsigned long addr = pvmw->address;
struct vm_area_struct *vma = pvmw->vma;
- struct folio *folio = pfn_folio(pvmw->pfn);
+ struct folio *folio = page_vma_walk_pfn_to_folio(pvmw->pfn);
struct mem_cgroup *memcg = folio_memcg(folio);
struct pglist_data *pgdat = folio_pgdat(folio);
struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
--
2.34.1