Message-ID: <1709a271-273b-4668-b813-648e5785e4e8@redhat.com>
Date: Tue, 17 Jun 2025 11:49:03 +0200
From: David Hildenbrand <david@...hat.com>
To: Alistair Popple <apopple@...dia.com>, akpm@...ux-foundation.org
Cc: linux-mm@...ck.org, gerald.schaefer@...ux.ibm.com,
dan.j.williams@...el.com, jgg@...pe.ca, willy@...radead.org,
linux-kernel@...r.kernel.org, nvdimm@...ts.linux.dev,
linux-fsdevel@...r.kernel.org, linux-ext4@...r.kernel.org,
linux-xfs@...r.kernel.org, jhubbard@...dia.com, hch@....de,
zhang.lyra@...il.com, debug@...osinc.com, bjorn@...nel.org,
balbirs@...dia.com, lorenzo.stoakes@...cle.com,
linux-arm-kernel@...ts.infradead.org, loongarch@...ts.linux.dev,
linuxppc-dev@...ts.ozlabs.org, linux-riscv@...ts.infradead.org,
linux-cxl@...r.kernel.org, dri-devel@...ts.freedesktop.org, John@...ves.net,
m.szyprowski@...sung.com, Jason Gunthorpe <jgg@...dia.com>
Subject: Re: [PATCH v2 03/14] mm: Convert vmf_insert_mixed() from using
pte_devmap to pte_special
On 16.06.25 13:58, Alistair Popple wrote:
> DAX no longer requires device PTEs as it always has a ZONE_DEVICE page
> associated with the PTE that can be reference counted normally. Other users
> of pte_devmap are drivers that set PFN_DEV when calling vmf_insert_mixed(),
> which ensures vm_normal_page() returns NULL for these entries.
>
> There is no reason to distinguish these pte_devmap users, so in order to
> free up a PTE bit, use pte_special instead for entries created with
> vmf_insert_mixed(). This ensures vm_normal_page() will continue to
> return NULL for these pages.
>
> Architectures that don't support pte_special also don't support pte_devmap
> so those will continue to rely on pfn_valid() to determine if the page can
> be mapped.
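
Just to make the resulting flow easy to follow, here is a minimal
user-space sketch of the behaviour this conversion preserves. The
fake_pte/insert_mixed/has_normal_page names below are simplified
stand-ins for illustration only, not the actual kernel helpers:

#include <stdbool.h>
#include <stdio.h>

/* Toy PTE: only the bits relevant to this discussion. */
struct fake_pte {
	unsigned long pfn;
	bool special;	/* plays the role pte_devmap() played before */
};

/* vmf_insert_mixed() after the patch: always marks the entry special. */
static struct fake_pte insert_mixed(unsigned long pfn)
{
	return (struct fake_pte){ .pfn = pfn, .special = true };
}

/* vm_normal_page(): special entries have no "normal" struct page. */
static bool has_normal_page(struct fake_pte pte)
{
	return !pte.special;
}

int main(void)
{
	struct fake_pte pte = insert_mixed(0x1234);

	/* Drivers keep getting NULL from vm_normal_page() for these. */
	printf("normal page? %s\n", has_normal_page(pte) ? "yes" : "no");
	return 0;
}

So the only functional difference is which PTE bit carries the
information, which is exactly what frees up the devmap bit.
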
>
> Signed-off-by: Alistair Popple <apopple@...dia.com>
> Reviewed-by: Jason Gunthorpe <jgg@...dia.com>
> Reviewed-by: Dan Williams <dan.j.williams@...el.com>
> ---
> mm/hmm.c | 3 ---
> mm/memory.c | 20 ++------------------
> mm/vmscan.c | 2 +-
> 3 files changed, 3 insertions(+), 22 deletions(-)
>
> diff --git a/mm/hmm.c b/mm/hmm.c
> index 5311753..1a3489f 100644
> --- a/mm/hmm.c
> +++ b/mm/hmm.c
> @@ -302,13 +302,10 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
> goto fault;
>
> /*
> - * Bypass devmap pte such as DAX page when all pfn requested
> - * flags(pfn_req_flags) are fulfilled.
> * Since each architecture defines a struct page for the zero page, just
> * fall through and treat it like a normal page.
> */
> if (!vm_normal_page(walk->vma, addr, pte) &&
> - !pte_devmap(pte) &&
> !is_zero_pfn(pte_pfn(pte))) {
> if (hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0)) {
> pte_unmap(ptep);
> diff --git a/mm/memory.c b/mm/memory.c
> index b0cda5a..2c6eda1 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -598,16 +598,6 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
> return NULL;
> if (is_zero_pfn(pfn))
> return NULL;
> - if (pte_devmap(pte))
> - /*
> - * NOTE: New users of ZONE_DEVICE will not set pte_devmap()
> - * and will have refcounts incremented on their struct pages
> - * when they are inserted into PTEs, thus they are safe to
> - * return here. Legacy ZONE_DEVICE pages that set pte_devmap()
> - * do not have refcounts. Example of legacy ZONE_DEVICE is
> - * MEMORY_DEVICE_FS_DAX type in pmem or virtio_fs drivers.
> - */
> - return NULL;
>
> print_bad_pte(vma, addr, pte, NULL);
> return NULL;
> @@ -2483,10 +2473,7 @@ static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
> }
>
> /* Ok, finally just insert the thing.. */
> - if (pfn_t_devmap(pfn))
> - entry = pte_mkdevmap(pfn_t_pte(pfn, prot));
> - else
> - entry = pte_mkspecial(pfn_t_pte(pfn, prot));
> + entry = pte_mkspecial(pfn_t_pte(pfn, prot));
>
> if (mkwrite) {
> entry = pte_mkyoung(entry);
> @@ -2597,8 +2584,6 @@ static bool vm_mixed_ok(struct vm_area_struct *vma, pfn_t pfn, bool mkwrite)
> /* these checks mirror the abort conditions in vm_normal_page */
> if (vma->vm_flags & VM_MIXEDMAP)
> return true;
> - if (pfn_t_devmap(pfn))
> - return true;
> if (pfn_t_special(pfn))
> return true;
> if (is_zero_pfn(pfn_t_to_pfn(pfn)))
> @@ -2630,8 +2615,7 @@ static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma,
> * than insert_pfn). If a zero_pfn were inserted into a VM_MIXEDMAP
> * without pte special, it would there be refcounted as a normal page.
> */
> - if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) &&
> - !pfn_t_devmap(pfn) && pfn_t_valid(pfn)) {
> + if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) && pfn_t_valid(pfn)) {
> struct page *page;
>
> /*
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index a93a1ba..85bf782 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -3424,7 +3424,7 @@ static unsigned long get_pte_pfn(pte_t pte, struct vm_area_struct *vma, unsigned
> if (!pte_present(pte) || is_zero_pfn(pfn))
> return -1;
>
> - if (WARN_ON_ONCE(pte_devmap(pte) || pte_special(pte)))
> + if (WARN_ON_ONCE(pte_special(pte)))
> return -1;
>
> if (!pte_young(pte) && !mm_has_notifiers(vma->vm_mm))
--
Cheers,
David / dhildenb