Message-ID: <c9da3d2ef9fbff693fdfae0114fcff39378b8c03.1739941374.git-series.apopple@nvidia.com>
Date: Wed, 19 Feb 2025 16:04:47 +1100
From: Alistair Popple <apopple@...dia.com>
To: akpm@...ux-foundation.org,
linux-mm@...ck.org
Cc: Alistair Popple <apopple@...dia.com>,
gerald.schaefer@...ux.ibm.com,
dan.j.williams@...el.com,
jgg@...pe.ca,
willy@...radead.org,
david@...hat.com,
linux-kernel@...r.kernel.org,
nvdimm@...ts.linux.dev,
linux-fsdevel@...r.kernel.org,
linux-ext4@...r.kernel.org,
linux-xfs@...r.kernel.org,
jhubbard@...dia.com,
hch@....de,
zhang.lyra@...il.com,
debug@...osinc.com,
bjorn@...nel.org,
balbirs@...dia.com
Subject: [PATCH RFC v2 03/12] mm/pagewalk: Skip dax pages in pagewalk

Previously DAX pages were skipped by the pagewalk code because
pud_special() returned true for them, or vm_normal_page{_pmd}() returned
NULL. Now that DAX pages are refcounted normally neither is the case, so
add explicit checks to skip them.
Signed-off-by: Alistair Popple <apopple@...dia.com>
---
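Note for reviewers (illustration only, not part of the patch): below is a
minimal sketch of how a hypothetical pagewalk pte_entry callback might
apply the same checks this patch adds to folio_walk_start(), assuming the
is_devdax_page()/is_fsdax_page() helpers introduced by this series. The
callback name and body are made up for illustration.

/*
 * Hypothetical example: skip fs-dax and device-dax pages, which are now
 * returned by vm_normal_page() since they are refcounted normally.
 */
static int skip_dax_pte_entry(pte_t *ptep, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	pte_t pte = ptep_get(ptep);
	struct page *page;

	if (!pte_present(pte))
		return 0;

	page = vm_normal_page(walk->vma, addr, pte);
	if (!page || is_devdax_page(page) || is_fsdax_page(page))
		return 0;	/* nothing to do for DAX or special pages */

	/* ... operate on @page here ... */
	return 0;
}
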
include/linux/memremap.h | 11 +++++++++++
mm/pagewalk.c | 12 ++++++++++--
2 files changed, 21 insertions(+), 2 deletions(-)
diff --git a/include/linux/memremap.h b/include/linux/memremap.h
index 4aa1519..54e8b57 100644
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -198,6 +198,17 @@ static inline bool folio_is_fsdax(const struct folio *folio)
return is_fsdax_page(&folio->page);
}
+static inline bool is_devdax_page(const struct page *page)
+{
+ return is_zone_device_page(page) &&
+ page_pgmap(page)->type == MEMORY_DEVICE_GENERIC;
+}
+
+static inline bool folio_is_devdax(const struct folio *folio)
+{
+ return is_devdax_page(&folio->page);
+}
+
#ifdef CONFIG_ZONE_DEVICE
void zone_device_page_init(struct page *page);
void *memremap_pages(struct dev_pagemap *pgmap, int nid);
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index e478777..0dfb9c2 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -884,6 +884,12 @@ struct folio *folio_walk_start(struct folio_walk *fw,
* support PUD mappings in VM_PFNMAP|VM_MIXEDMAP VMAs.
*/
page = pud_page(pud);
+
+ if (is_devdax_page(page)) {
+ spin_unlock(ptl);
+ goto not_found;
+ }
+
goto found;
}
@@ -911,7 +917,8 @@ struct folio *folio_walk_start(struct folio_walk *fw,
goto pte_table;
} else if (pmd_present(pmd)) {
page = vm_normal_page_pmd(vma, addr, pmd);
- if (page) {
+ if (page && !is_devdax_page(page) &&
+ !is_fsdax_page(page)) {
goto found;
} else if ((flags & FW_ZEROPAGE) &&
is_huge_zero_pmd(pmd)) {
@@ -945,7 +952,8 @@ struct folio *folio_walk_start(struct folio_walk *fw,
if (pte_present(pte)) {
page = vm_normal_page(vma, addr, pte);
- if (page)
+ if (page && !is_devdax_page(page) &&
+ !is_fsdax_page(page))
goto found;
if ((flags & FW_ZEROPAGE) &&
is_zero_pfn(pte_pfn(pte))) {
--
git-series 0.9.1