Message-Id: <20191211213207.215936-2-brho@google.com>
Date: Wed, 11 Dec 2019 16:32:06 -0500
From: Barret Rhoden <brho@...gle.com>
To: Paolo Bonzini <pbonzini@...hat.com>,
Dan Williams <dan.j.williams@...el.com>,
David Hildenbrand <david@...hat.com>,
Dave Jiang <dave.jiang@...el.com>,
Alexander Duyck <alexander.h.duyck@...ux.intel.com>
Cc: linux-nvdimm@...ts.01.org, x86@...nel.org, kvm@...r.kernel.org,
linux-kernel@...r.kernel.org, jason.zeng@...el.com
Subject: [PATCH v4 1/2] mm: make dev_pagemap_mapping_shift() externally visible

KVM has a use case for determining the size of a dax mapping. The KVM
code has easy access to the address and the mm; hence the change in
parameters.

Signed-off-by: Barret Rhoden <brho@...gle.com>
Reviewed-by: David Hildenbrand <david@...hat.com>
Acked-by: Dan Williams <dan.j.williams@...el.com>
---
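For illustration only (not part of this patch; the helper name is
hypothetical): a caller that already holds a virtual address and the
mm could use the new interface along these lines, assuming kernel
context:

	/*
	 * Hypothetical sketch: return the number of base pages spanned by
	 * the devmap mapping at @addr in @mm, or 0 if @addr is not mapped
	 * by a devmap entry.
	 */
	static unsigned long example_devmap_mapping_pages(unsigned long addr,
							  struct mm_struct *mm)
	{
		unsigned long shift = dev_pagemap_mapping_shift(addr, mm);

		if (!shift)
			return 0;
		return 1UL << (shift - PAGE_SHIFT);
	}

The actual KVM caller in patch 2/2 may differ; this only shows the
intended calling convention.
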
 include/linux/mm.h  |  3 +++
 mm/memory-failure.c | 38 +++-----------------------------------
 mm/util.c           | 34 ++++++++++++++++++++++++++++++++++
 3 files changed, 40 insertions(+), 35 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 8b0ef04b6d15..f88bcc6a3bd1 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -998,6 +998,9 @@ static inline bool is_pci_p2pdma_page(const struct page *page)
#define page_ref_zero_or_close_to_overflow(page) \
((unsigned int) page_ref_count(page) + 127u <= 127u)

+unsigned long dev_pagemap_mapping_shift(unsigned long address,
+ struct mm_struct *mm);
+
static inline void get_page(struct page *page)
{
page = compound_head(page);
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 41c634f45d45..9dc487e73d9b 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -261,40 +261,6 @@ void shake_page(struct page *p, int access)
}
EXPORT_SYMBOL_GPL(shake_page);

-static unsigned long dev_pagemap_mapping_shift(struct page *page,
- struct vm_area_struct *vma)
-{
- unsigned long address = vma_address(page, vma);
- pgd_t *pgd;
- p4d_t *p4d;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
-
- pgd = pgd_offset(vma->vm_mm, address);
- if (!pgd_present(*pgd))
- return 0;
- p4d = p4d_offset(pgd, address);
- if (!p4d_present(*p4d))
- return 0;
- pud = pud_offset(p4d, address);
- if (!pud_present(*pud))
- return 0;
- if (pud_devmap(*pud))
- return PUD_SHIFT;
- pmd = pmd_offset(pud, address);
- if (!pmd_present(*pmd))
- return 0;
- if (pmd_devmap(*pmd))
- return PMD_SHIFT;
- pte = pte_offset_map(pmd, address);
- if (!pte_present(*pte))
- return 0;
- if (pte_devmap(*pte))
- return PAGE_SHIFT;
- return 0;
-}
-
/*
* Failure handling: if we can't find or can't kill a process there's
* not much we can do. We just print a message and ignore otherwise.
@@ -318,7 +284,9 @@ static void add_to_kill(struct task_struct *tsk, struct page *p,

tk->addr = page_address_in_vma(p, vma);
if (is_zone_device_page(p))
- tk->size_shift = dev_pagemap_mapping_shift(p, vma);
+ tk->size_shift =
+ dev_pagemap_mapping_shift(vma_address(p, vma),
+ vma->vm_mm);
else
tk->size_shift = page_shift(compound_head(p));

diff --git a/mm/util.c b/mm/util.c
index 988d11e6c17c..553fbe1692ed 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -911,3 +911,37 @@ int memcmp_pages(struct page *page1, struct page *page2)
kunmap_atomic(addr1);
return ret;
}
+
+unsigned long dev_pagemap_mapping_shift(unsigned long address,
+ struct mm_struct *mm)
+{
+ pgd_t *pgd;
+ p4d_t *p4d;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+
+ pgd = pgd_offset(mm, address);
+ if (!pgd_present(*pgd))
+ return 0;
+ p4d = p4d_offset(pgd, address);
+ if (!p4d_present(*p4d))
+ return 0;
+ pud = pud_offset(p4d, address);
+ if (!pud_present(*pud))
+ return 0;
+ if (pud_devmap(*pud))
+ return PUD_SHIFT;
+ pmd = pmd_offset(pud, address);
+ if (!pmd_present(*pmd))
+ return 0;
+ if (pmd_devmap(*pmd))
+ return PMD_SHIFT;
+ pte = pte_offset_map(pmd, address);
+ if (!pte_present(*pte))
+ return 0;
+ if (pte_devmap(*pte))
+ return PAGE_SHIFT;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dev_pagemap_mapping_shift);
--
2.24.0.525.g8f36a354ae-goog