Message-ID: <22331b7c-93a1-4fa9-b7dd-5c8455dbb9aa@lucifer.local>
Date: Thu, 17 Jul 2025 16:34:19 +0100
From: Lorenzo Stoakes <lorenzo.stoakes@...cle.com>
To: David Hildenbrand <david@...hat.com>
Cc: linux-kernel@...r.kernel.org, linux-mm@...ck.org,
xen-devel@...ts.xenproject.org, linux-fsdevel@...r.kernel.org,
nvdimm@...ts.linux.dev, Andrew Morton <akpm@...ux-foundation.org>,
Juergen Gross <jgross@...e.com>,
Stefano Stabellini <sstabellini@...nel.org>,
Oleksandr Tyshchenko <oleksandr_tyshchenko@...m.com>,
Dan Williams <dan.j.williams@...el.com>,
Matthew Wilcox <willy@...radead.org>, Jan Kara <jack@...e.cz>,
Alexander Viro <viro@...iv.linux.org.uk>,
Christian Brauner <brauner@...nel.org>,
"Liam R. Howlett" <Liam.Howlett@...cle.com>,
Vlastimil Babka <vbabka@...e.cz>, Mike Rapoport <rppt@...nel.org>,
Suren Baghdasaryan <surenb@...gle.com>, Michal Hocko <mhocko@...e.com>,
Zi Yan <ziy@...dia.com>, Baolin Wang <baolin.wang@...ux.alibaba.com>,
Nico Pache <npache@...hat.com>, Ryan Roberts <ryan.roberts@....com>,
Dev Jain <dev.jain@....com>, Barry Song <baohua@...nel.org>,
Jann Horn <jannh@...gle.com>, Pedro Falcato <pfalcato@...e.de>,
Hugh Dickins <hughd@...gle.com>, Oscar Salvador <osalvador@...e.de>,
Lance Yang <lance.yang@...ux.dev>,
Alistair Popple <apopple@...dia.com>
Subject: Re: [PATCH v2 1/9] mm/huge_memory: move more common code into
insert_pmd()

On Thu, Jul 17, 2025 at 01:52:04PM +0200, David Hildenbrand wrote:
> Let's clean it all further up.
>
> No functional change intended.
>
> Reviewed-by: Oscar Salvador <osalvador@...e.de>
> Reviewed-by: Alistair Popple <apopple@...dia.com>
> Signed-off-by: David Hildenbrand <david@...hat.com>

Very nice!

Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@...cle.com>

> ---
> mm/huge_memory.c | 72 ++++++++++++++++--------------------------------
> 1 file changed, 24 insertions(+), 48 deletions(-)
>
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> index fe17b0a157cda..1178760d2eda4 100644
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -1390,15 +1390,25 @@ struct folio_or_pfn {
> bool is_folio;
> };
>
> -static int insert_pmd(struct vm_area_struct *vma, unsigned long addr,
> +static vm_fault_t insert_pmd(struct vm_area_struct *vma, unsigned long addr,
> pmd_t *pmd, struct folio_or_pfn fop, pgprot_t prot,
> - bool write, pgtable_t pgtable)
> + bool write)
> {
> struct mm_struct *mm = vma->vm_mm;
> + pgtable_t pgtable = NULL;
> + spinlock_t *ptl;
> pmd_t entry;
>
> - lockdep_assert_held(pmd_lockptr(mm, pmd));
> + if (addr < vma->vm_start || addr >= vma->vm_end)
> + return VM_FAULT_SIGBUS;
>
> + if (arch_needs_pgtable_deposit()) {
> + pgtable = pte_alloc_one(vma->vm_mm);
> + if (!pgtable)
> + return VM_FAULT_OOM;
> + }
> +
> + ptl = pmd_lock(mm, pmd);
> if (!pmd_none(*pmd)) {
> const unsigned long pfn = fop.is_folio ? folio_pfn(fop.folio) :
> fop.pfn;
> @@ -1406,15 +1416,14 @@ static int insert_pmd(struct vm_area_struct *vma, unsigned long addr,
> if (write) {
> if (pmd_pfn(*pmd) != pfn) {
> WARN_ON_ONCE(!is_huge_zero_pmd(*pmd));
> - return -EEXIST;
> + goto out_unlock;
> }
> entry = pmd_mkyoung(*pmd);
> entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
> if (pmdp_set_access_flags(vma, addr, pmd, entry, 1))
> update_mmu_cache_pmd(vma, addr, pmd);
> }
> -
> - return -EEXIST;
> + goto out_unlock;
> }
>
> if (fop.is_folio) {
> @@ -1435,11 +1444,17 @@ static int insert_pmd(struct vm_area_struct *vma, unsigned long addr,
> if (pgtable) {
> pgtable_trans_huge_deposit(mm, pmd, pgtable);
> mm_inc_nr_ptes(mm);
> + pgtable = NULL;
> }
>
> set_pmd_at(mm, addr, pmd, entry);
> update_mmu_cache_pmd(vma, addr, pmd);
> - return 0;
> +
> +out_unlock:
> + spin_unlock(ptl);
> + if (pgtable)
> + pte_free(mm, pgtable);
> + return VM_FAULT_NOPAGE;
> }
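
As an aside for anyone following along: insert_pmd() now has the usual
"allocate before taking the lock, NULL the pointer once ownership is
transferred, free on the way out if still held" shape. A minimal sketch
of that idiom (illustrative names, not the exact code above):

	static vm_fault_t do_insert(struct mm_struct *mm, pmd_t *pmd)
	{
		pgtable_t pgtable = NULL;
		spinlock_t *ptl;

		/* Allocate up front, while sleeping is still allowed. */
		if (arch_needs_pgtable_deposit()) {
			pgtable = pte_alloc_one(mm);
			if (!pgtable)
				return VM_FAULT_OOM;
		}

		ptl = pmd_lock(mm, pmd);
		if (!pmd_none(*pmd))
			goto out_unlock;	/* we still own pgtable */

		if (pgtable) {
			pgtable_trans_huge_deposit(mm, pmd, pgtable);
			pgtable = NULL;		/* ownership transferred */
		}
		/* ... construct the entry and set_pmd_at() ... */

	out_unlock:
		spin_unlock(ptl);
		if (pgtable)
			pte_free(mm, pgtable);	/* only on the unused path */
		return VM_FAULT_NOPAGE;
	}

Having a single unlock/free exit like this makes it much harder to leak
the preallocated page table on the early-return paths.
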
>
> /**
> @@ -1461,9 +1476,6 @@ vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, unsigned long pfn,
> struct folio_or_pfn fop = {
> .pfn = pfn,
> };
> - pgtable_t pgtable = NULL;
> - spinlock_t *ptl;
> - int error;
>
> /*
> * If we had pmd_special, we could avoid all these restrictions,
> @@ -1475,25 +1487,9 @@ vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, unsigned long pfn,
> (VM_PFNMAP|VM_MIXEDMAP));
> BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
>
> - if (addr < vma->vm_start || addr >= vma->vm_end)
> - return VM_FAULT_SIGBUS;
> -
> - if (arch_needs_pgtable_deposit()) {
> - pgtable = pte_alloc_one(vma->vm_mm);
> - if (!pgtable)
> - return VM_FAULT_OOM;
> - }
> -
> pfnmap_setup_cachemode_pfn(pfn, &pgprot);
>
> - ptl = pmd_lock(vma->vm_mm, vmf->pmd);
> - error = insert_pmd(vma, addr, vmf->pmd, fop, pgprot, write,
> - pgtable);
> - spin_unlock(ptl);
> - if (error && pgtable)
> - pte_free(vma->vm_mm, pgtable);
> -
> - return VM_FAULT_NOPAGE;
> + return insert_pmd(vma, addr, vmf->pmd, fop, pgprot, write);
> }
> EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd);
>
> @@ -1502,35 +1498,15 @@ vm_fault_t vmf_insert_folio_pmd(struct vm_fault *vmf, struct folio *folio,
> {
> struct vm_area_struct *vma = vmf->vma;
> unsigned long addr = vmf->address & PMD_MASK;
> - struct mm_struct *mm = vma->vm_mm;
> struct folio_or_pfn fop = {
> .folio = folio,
> .is_folio = true,
> };
> - spinlock_t *ptl;
> - pgtable_t pgtable = NULL;
> - int error;
> -
> - if (addr < vma->vm_start || addr >= vma->vm_end)
> - return VM_FAULT_SIGBUS;
>
> if (WARN_ON_ONCE(folio_order(folio) != PMD_ORDER))
> return VM_FAULT_SIGBUS;
>
> - if (arch_needs_pgtable_deposit()) {
> - pgtable = pte_alloc_one(vma->vm_mm);
> - if (!pgtable)
> - return VM_FAULT_OOM;
> - }
> -
> - ptl = pmd_lock(mm, vmf->pmd);
> - error = insert_pmd(vma, addr, vmf->pmd, fop, vma->vm_page_prot,
> - write, pgtable);
> - spin_unlock(ptl);
> - if (error && pgtable)
> - pte_free(mm, pgtable);
> -
> - return VM_FAULT_NOPAGE;
> + return insert_pmd(vma, addr, vmf->pmd, fop, vma->vm_page_prot, write);
> }
> EXPORT_SYMBOL_GPL(vmf_insert_folio_pmd);
>
> --
> 2.50.1
>