Message-ID: <f3ebda542373feb70ed3e5d83b276a2e8347609f.1734407924.git-series.apopple@nvidia.com>
Date: Tue, 17 Dec 2024 16:13:02 +1100
From: Alistair Popple <apopple@...dia.com>
To: akpm@...ux-foundation.org, dan.j.williams@...el.com, linux-mm@...ck.org
Cc: Alistair Popple <apopple@...dia.com>, lina@...hilina.net,
	zhang.lyra@...il.com, gerald.schaefer@...ux.ibm.com,
	vishal.l.verma@...el.com, dave.jiang@...el.com, logang@...tatee.com,
	bhelgaas@...gle.com, jack@...e.cz, jgg@...pe.ca,
	catalin.marinas@....com, will@...nel.org, mpe@...erman.id.au,
	npiggin@...il.com, dave.hansen@...ux.intel.com, ira.weiny@...el.com,
	willy@...radead.org, djwong@...nel.org, tytso@....edu,
	linmiaohe@...wei.com, david@...hat.com, peterx@...hat.com,
	linux-doc@...r.kernel.org, linux-kernel@...r.kernel.org,
	linux-arm-kernel@...ts.infradead.org, linuxppc-dev@...ts.ozlabs.org,
	nvdimm@...ts.linux.dev, linux-cxl@...r.kernel.org,
	linux-fsdevel@...r.kernel.org, linux-ext4@...r.kernel.org,
	linux-xfs@...r.kernel.org, jhubbard@...dia.com, hch@....de,
	david@...morbit.com
Subject: [PATCH v4 19/25] proc/task_mmu: Ignore ZONE_DEVICE pages

The procfs mmu files such as smaps currently ignore device dax and fs
dax pages because these pages are considered special. To maintain
existing behaviour once these pages are treated as normal pages and
returned from vm_normal_page(), add tests to explicitly skip them.

Signed-off-by: Alistair Popple <apopple@...dia.com>
---
 fs/proc/task_mmu.c | 18 ++++++++++++++----
 1 file changed, 14 insertions(+), 4 deletions(-)

diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 38a5a3e..c9b227a 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -801,6 +801,8 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
 
 	if (pte_present(ptent)) {
 		page = vm_normal_page(vma, addr, ptent);
+		if (page && (is_device_dax_page(page) || is_fsdax_page(page)))
+			page = NULL;
 		young = pte_young(ptent);
 		dirty = pte_dirty(ptent);
 		present = true;
@@ -849,6 +851,8 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
 
 	if (pmd_present(*pmd)) {
 		page = vm_normal_page_pmd(vma, addr, *pmd);
+		if (page && (is_device_dax_page(page) || is_fsdax_page(page)))
+			page = NULL;
 		present = true;
 	} else if (unlikely(thp_migration_supported() && is_swap_pmd(*pmd))) {
 		swp_entry_t entry = pmd_to_swp_entry(*pmd);
@@ -1378,7 +1382,7 @@ static inline bool pte_is_pinned(struct vm_area_struct *vma, unsigned long addr,
 	if (likely(!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags)))
 		return false;
 	folio = vm_normal_folio(vma, addr, pte);
-	if (!folio)
+	if (!folio || folio_is_device_dax(folio) || folio_is_fsdax(folio))
 		return false;
 	return folio_maybe_dma_pinned(folio);
 }
@@ -1703,6 +1707,8 @@ static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
 			frame = pte_pfn(pte);
 		flags |= PM_PRESENT;
 		page = vm_normal_page(vma, addr, pte);
+		if (page && (is_device_dax_page(page) || is_fsdax_page(page)))
+			page = NULL;
 		if (pte_soft_dirty(pte))
 			flags |= PM_SOFT_DIRTY;
 		if (pte_uffd_wp(pte))
@@ -2089,7 +2095,9 @@ static unsigned long pagemap_page_category(struct pagemap_scan_private *p,
 
 	if (p->masks_of_interest & PAGE_IS_FILE) {
 		page = vm_normal_page(vma, addr, pte);
-		if (page && !PageAnon(page))
+		if (page && !PageAnon(page) &&
+		    !is_device_dax_page(page) &&
+		    !is_fsdax_page(page))
 			categories |= PAGE_IS_FILE;
 	}
 
@@ -2151,7 +2159,9 @@ static unsigned long pagemap_thp_category(struct pagemap_scan_private *p,
 
 	if (p->masks_of_interest & PAGE_IS_FILE) {
 		page = vm_normal_page_pmd(vma, addr, pmd);
-		if (page && !PageAnon(page))
+		if (page && !PageAnon(page) &&
+		    !is_device_dax_page(page) &&
+		    !is_fsdax_page(page))
 			categories |= PAGE_IS_FILE;
 	}
 
@@ -2914,7 +2924,7 @@ static struct page *can_gather_numa_stats_pmd(pmd_t pmd,
 		return NULL;
 
 	page = vm_normal_page_pmd(vma, addr, pmd);
-	if (!page)
+	if (!page || is_device_dax_page(page) || is_fsdax_page(page))
 		return NULL;
 
 	if (PageReserved(page))
-- 
git-series 0.9.1
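
[Archive note: the same two-line skip pattern recurs in most hunks above. The sketch
below is for illustration only and is not part of the patch; the helper name is
hypothetical, and it assumes is_device_dax_page() and is_fsdax_page() are available
(presumably provided elsewhere in this series), since the patch open-codes the test
at each call site.]

	/*
	 * Illustrative sketch only -- not from the patch. Factors the
	 * repeated "treat DAX pages as if vm_normal_page() had returned
	 * NULL" test into one place.
	 */
	static inline struct page *skip_dax_page(struct page *page)
	{
		if (page && (is_device_dax_page(page) || is_fsdax_page(page)))
			return NULL;
		return page;
	}

	/* Example use, mirroring the smaps_pte_entry() hunk: */
	/* page = skip_dax_page(vm_normal_page(vma, addr, ptent)); */

Whether the check stays open-coded at each call site, as in the patch, or moves into
a helper like this is purely a readability choice; the resulting behaviour is the same.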