Message-Id: <20200110190313.17144-2-joao.m.martins@oracle.com>
Date: Fri, 10 Jan 2020 19:03:04 +0000
From: Joao Martins <joao.m.martins@...cle.com>
To: linux-nvdimm@...ts.01.org
Cc: Dan Williams <dan.j.williams@...el.com>,
Vishal Verma <vishal.l.verma@...el.com>,
Dave Jiang <dave.jiang@...el.com>,
Ira Weiny <ira.weiny@...el.com>,
Alex Williamson <alex.williamson@...hat.com>,
Cornelia Huck <cohuck@...hat.com>, kvm@...r.kernel.org,
Andrew Morton <akpm@...ux-foundation.org>, linux-mm@...ck.org,
linux-kernel@...r.kernel.org, Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
"H . Peter Anvin" <hpa@...or.com>, x86@...nel.org,
Liran Alon <liran.alon@...cle.com>,
Nikita Leshenko <nikita.leshchenko@...cle.com>,
Barret Rhoden <brho@...gle.com>,
Boris Ostrovsky <boris.ostrovsky@...cle.com>,
Matthew Wilcox <willy@...radead.org>,
Konrad Rzeszutek Wilk <konrad.wilk@...cle.com>
Subject: [PATCH RFC 01/10] mm: Add pmd support for _PAGE_SPECIAL
Currently vmf_insert_pfn_pmd() only works with devmap
mappings and hits a BUG_ON() otherwise. Add support for
special mappings when the pfn_t is marked with PFN_SPECIAL.

Special mappings are not expected to be pinned via GUP,
so return no pages from gup_huge_pmd(), much like
gup_pte_range() already does for special ptes.

This allows a DAX driver to handle 2M hugepages without
struct pages.
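
For illustration, a minimal sketch (not part of this patch; the
foo_*() names are made up) of how a DAX-style driver's huge fault
handler could use this, marking the pfn_t with PFN_SPECIAL instead
of PFN_DEV|PFN_MAP since there are no struct pages behind the range:

  static vm_fault_t foo_huge_fault(struct vm_fault *vmf,
  				   enum page_entry_size pe_size)
  {
  	unsigned long pmd_addr = vmf->address & PMD_MASK;
  	phys_addr_t phys;
  	pfn_t pfn;

  	if (pe_size != PE_SIZE_PMD)
  		return VM_FAULT_FALLBACK;

  	/* 2M-aligned physical range backing the fault (driver specific) */
  	phys = foo_phys_for_fault(vmf->vma, pmd_addr);

  	/* No struct page for this range: special, not devmap */
  	pfn = phys_to_pfn_t(phys, PFN_SPECIAL);

  	return vmf_insert_pfn_pmd(vmf, pfn, vmf->flags & FAULT_FLAG_WRITE);
  }

The flow mirrors what a devmap-backed fault handler does today with
PFN_DEV|PFN_MAP, minus the requirement for a struct page / dev_pagemap.
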
Signed-off-by: Joao Martins <joao.m.martins@...cle.com>
---
 arch/x86/include/asm/pgtable.h | 16 +++++++++++++++-
 mm/gup.c                       |  3 +++
 mm/huge_memory.c               |  7 ++++---
 mm/memory.c                    |  3 ++-
 4 files changed, 24 insertions(+), 5 deletions(-)
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index ad97dc155195..60351c0c15fe 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -255,7 +255,7 @@ static inline int pmd_large(pmd_t pte)
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 static inline int pmd_trans_huge(pmd_t pmd)
 {
-	return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
+	return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP|_PAGE_SPECIAL)) == _PAGE_PSE;
 }
 
 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
@@ -293,6 +293,15 @@ static inline int pgd_devmap(pgd_t pgd)
 {
 	return 0;
 }
+#endif
+
+#ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
+static inline int pmd_special(pmd_t pmd)
+{
+	return !!(pmd_flags(pmd) & _PAGE_SPECIAL);
+}
+#endif
+
 #endif
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
@@ -414,6 +423,11 @@ static inline pmd_t pmd_mkdevmap(pmd_t pmd)
 	return pmd_set_flags(pmd, _PAGE_DEVMAP);
 }
 
+static inline pmd_t pmd_mkspecial(pmd_t pmd)
+{
+	return pmd_set_flags(pmd, _PAGE_SPECIAL);
+}
+
 static inline pmd_t pmd_mkhuge(pmd_t pmd)
 {
 	return pmd_set_flags(pmd, _PAGE_PSE);
diff --git a/mm/gup.c b/mm/gup.c
index 7646bf993b25..ba5f10535392 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -2079,6 +2079,9 @@ static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
 		return __gup_device_huge_pmd(orig, pmdp, addr, end, pages, nr);
 	}
 
+	if (pmd_special(orig))
+		return 0;
+
 	refs = 0;
 	page = pmd_page(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
 	do {
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 41a0fbddc96b..06ad4d6f7477 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -791,6 +791,8 @@ static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
 	entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
 	if (pfn_t_devmap(pfn))
 		entry = pmd_mkdevmap(entry);
+	else if (pfn_t_special(pfn))
+		entry = pmd_mkspecial(entry);
 	if (write) {
 		entry = pmd_mkyoung(pmd_mkdirty(entry));
 		entry = maybe_pmd_mkwrite(entry, vma);
@@ -823,8 +825,7 @@ vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write)
 	 * but we need to be consistent with PTEs and architectures that
 	 * can't support a 'special' bit.
 	 */
-	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
-			!pfn_t_devmap(pfn));
+	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
 	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
 						(VM_PFNMAP|VM_MIXEDMAP));
 	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
@@ -2013,7 +2014,7 @@ spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
 	spinlock_t *ptl;
 	ptl = pmd_lock(vma->vm_mm, pmd);
 	if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) ||
-			pmd_devmap(*pmd)))
+			pmd_devmap(*pmd) || pmd_special(*pmd)))
 		return ptl;
 	spin_unlock(ptl);
 	return NULL;
diff --git a/mm/memory.c b/mm/memory.c
index 45442d9a4f52..cfc3668bddeb 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1165,7 +1165,8 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
 	pmd = pmd_offset(pud, addr);
 	do {
 		next = pmd_addr_end(addr, end);
-		if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
+		if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) ||
+		    pmd_devmap(*pmd) || pmd_special(*pmd)) {
 			if (next - addr != HPAGE_PMD_SIZE)
 				__split_huge_pmd(vma, pmd, addr, false, NULL);
 			else if (zap_huge_pmd(tlb, vma, pmd, addr))
--
2.17.1