Message-ID: <400a4584f6f628998a7093aee49d9f86c592754b.1719386613.git-series.apopple@nvidia.com>
Date: Thu, 27 Jun 2024 10:54:26 +1000
From: Alistair Popple <apopple@...dia.com>
To: dan.j.williams@...el.com,
vishal.l.verma@...el.com,
dave.jiang@...el.com,
logang@...tatee.com,
bhelgaas@...gle.com,
jack@...e.cz,
jgg@...pe.ca
Cc: catalin.marinas@....com,
will@...nel.org,
mpe@...erman.id.au,
npiggin@...il.com,
dave.hansen@...ux.intel.com,
ira.weiny@...el.com,
willy@...radead.org,
djwong@...nel.org,
tytso@....edu,
linmiaohe@...wei.com,
david@...hat.com,
peterx@...hat.com,
linux-doc@...r.kernel.org,
linux-kernel@...r.kernel.org,
linux-arm-kernel@...ts.infradead.org,
linuxppc-dev@...ts.ozlabs.org,
nvdimm@...ts.linux.dev,
linux-cxl@...r.kernel.org,
linux-fsdevel@...r.kernel.org,
linux-mm@...ck.org,
linux-ext4@...r.kernel.org,
linux-xfs@...r.kernel.org,
jhubbard@...dia.com,
hch@....de,
david@...morbit.com,
Alistair Popple <apopple@...dia.com>
Subject: [PATCH 11/13] huge_memory: Remove dead vmf_insert_pXd code

Now that DAX is managing page reference counts the same as normal pages,
there are no remaining callers of the vmf_insert_pXd functions, so remove
them.
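
To illustrate the call pattern (this is not part of the change itself): a
DAX huge fault handler is expected to go through the dax_insert_pfn_pmd()
helper introduced earlier in this series rather than the removed
vmf_insert_pfn_pmd(). A minimal sketch, where my_huge_fault() and
my_dax_pfn() are hypothetical names used only for illustration:

	static vm_fault_t my_huge_fault(struct vm_fault *vmf)
	{
		/* hypothetical helper: resolve the faulting offset to a pfn */
		pfn_t pfn = my_dax_pfn(vmf);

		/*
		 * The page behind the pfn is now refcounted like any other
		 * page, so the DAX-aware helper replaces the removed
		 * vmf_insert_pfn_pmd().
		 */
		return dax_insert_pfn_pmd(vmf, pfn, vmf->flags & FAULT_FLAG_WRITE);
	}
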
Signed-off-by: Alistair Popple <apopple@...dia.com>
---
 include/linux/huge_mm.h |   2 --
 mm/huge_memory.c        | 165 ------------------------------------------
 2 files changed, 167 deletions(-)

diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 9207d8e..0fb6bff 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -37,8 +37,6 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 		    pmd_t *pmd, unsigned long addr, pgprot_t newprot,
 		    unsigned long cp_flags);
 
-vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write);
-vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write);
 vm_fault_t dax_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write);
 vm_fault_t dax_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write);
 
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 5191f91..de39af4 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1111,97 +1111,6 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
 	return __do_huge_pmd_anonymous_page(vmf, &folio->page, gfp);
 }
 
-static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
-		pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write,
-		pgtable_t pgtable)
-{
-	struct mm_struct *mm = vma->vm_mm;
-	pmd_t entry;
-	spinlock_t *ptl;
-
-	ptl = pmd_lock(mm, pmd);
-	if (!pmd_none(*pmd)) {
-		if (write) {
-			if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) {
-				WARN_ON_ONCE(!is_huge_zero_pmd(*pmd));
-				goto out_unlock;
-			}
-			entry = pmd_mkyoung(*pmd);
-			entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
-			if (pmdp_set_access_flags(vma, addr, pmd, entry, 1))
-				update_mmu_cache_pmd(vma, addr, pmd);
-		}
-
-		goto out_unlock;
-	}
-
-	entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
-	if (pfn_t_devmap(pfn))
-		entry = pmd_mkdevmap(entry);
-	if (write) {
-		entry = pmd_mkyoung(pmd_mkdirty(entry));
-		entry = maybe_pmd_mkwrite(entry, vma);
-	}
-
-	if (pgtable) {
-		pgtable_trans_huge_deposit(mm, pmd, pgtable);
-		mm_inc_nr_ptes(mm);
-		pgtable = NULL;
-	}
-
-	set_pmd_at(mm, addr, pmd, entry);
-	update_mmu_cache_pmd(vma, addr, pmd);
-
-out_unlock:
-	spin_unlock(ptl);
-	if (pgtable)
-		pte_free(mm, pgtable);
-}
-
-/**
- * vmf_insert_pfn_pmd - insert a pmd size pfn
- * @vmf: Structure describing the fault
- * @pfn: pfn to insert
- * @write: whether it's a write fault
- *
- * Insert a pmd size pfn. See vmf_insert_pfn() for additional info.
- *
- * Return: vm_fault_t value.
- */
-vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write)
-{
-	unsigned long addr = vmf->address & PMD_MASK;
-	struct vm_area_struct *vma = vmf->vma;
-	pgprot_t pgprot = vma->vm_page_prot;
-	pgtable_t pgtable = NULL;
-
-	/*
-	 * If we had pmd_special, we could avoid all these restrictions,
-	 * but we need to be consistent with PTEs and architectures that
-	 * can't support a 'special' bit.
-	 */
-	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
-			!pfn_t_devmap(pfn));
-	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
-						(VM_PFNMAP|VM_MIXEDMAP));
-	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
-
-	if (addr < vma->vm_start || addr >= vma->vm_end)
-		return VM_FAULT_SIGBUS;
-
-	if (arch_needs_pgtable_deposit()) {
-		pgtable = pte_alloc_one(vma->vm_mm);
-		if (!pgtable)
-			return VM_FAULT_OOM;
-	}
-
-	track_pfn_insert(vma, &pgprot, pfn);
-
-	insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, pgtable);
-	return VM_FAULT_NOPAGE;
-}
-EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd);
-
 vm_fault_t dax_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write)
 {
 	struct vm_area_struct *vma = vmf->vma;
@@ -1280,80 +1189,6 @@ static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma)
 	return pud;
 }
 
-static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
-		pud_t *pud, pfn_t pfn, bool write)
-{
-	struct mm_struct *mm = vma->vm_mm;
-	pgprot_t prot = vma->vm_page_prot;
-	pud_t entry;
-	spinlock_t *ptl;
-
-	ptl = pud_lock(mm, pud);
-	if (!pud_none(*pud)) {
-		if (write) {
-			if (pud_pfn(*pud) != pfn_t_to_pfn(pfn)) {
-				WARN_ON_ONCE(!is_huge_zero_pud(*pud));
-				goto out_unlock;
-			}
-			entry = pud_mkyoung(*pud);
-			entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma);
-			if (pudp_set_access_flags(vma, addr, pud, entry, 1))
-				update_mmu_cache_pud(vma, addr, pud);
-		}
-		goto out_unlock;
-	}
-
-	entry = pud_mkhuge(pfn_t_pud(pfn, prot));
-	if (pfn_t_devmap(pfn))
-		entry = pud_mkdevmap(entry);
-	if (write) {
-		entry = pud_mkyoung(pud_mkdirty(entry));
-		entry = maybe_pud_mkwrite(entry, vma);
-	}
-	set_pud_at(mm, addr, pud, entry);
-	update_mmu_cache_pud(vma, addr, pud);
-
-out_unlock:
-	spin_unlock(ptl);
-}
-
-/**
- * vmf_insert_pfn_pud - insert a pud size pfn
- * @vmf: Structure describing the fault
- * @pfn: pfn to insert
- * @write: whether it's a write fault
- *
- * Insert a pud size pfn. See vmf_insert_pfn() for additional info.
- *
- * Return: vm_fault_t value.
- */
-vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write)
-{
-	unsigned long addr = vmf->address & PUD_MASK;
-	struct vm_area_struct *vma = vmf->vma;
-	pgprot_t pgprot = vma->vm_page_prot;
-
-	/*
-	 * If we had pud_special, we could avoid all these restrictions,
-	 * but we need to be consistent with PTEs and architectures that
-	 * can't support a 'special' bit.
-	 */
-	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
-			!pfn_t_devmap(pfn));
-	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
-						(VM_PFNMAP|VM_MIXEDMAP));
-	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
-
-	if (addr < vma->vm_start || addr >= vma->vm_end)
-		return VM_FAULT_SIGBUS;
-
-	track_pfn_insert(vma, &pgprot, pfn);
-
-	insert_pfn_pud(vma, addr, vmf->pud, pfn, write);
-	return VM_FAULT_NOPAGE;
-}
-EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud);
-
 /**
  * dax_insert_pfn_pud - insert a pud size pfn backed by a normal page
  * @vmf: Structure describing the fault
--
git-series 0.9.1