Message-ID: <20221021163703.3218176-13-jthoughton@google.com>
Date: Fri, 21 Oct 2022 16:36:28 +0000
From: James Houghton <jthoughton@...gle.com>
To: Mike Kravetz <mike.kravetz@...cle.com>,
Muchun Song <songmuchun@...edance.com>,
Peter Xu <peterx@...hat.com>
Cc: David Hildenbrand <david@...hat.com>,
David Rientjes <rientjes@...gle.com>,
Axel Rasmussen <axelrasmussen@...gle.com>,
Mina Almasry <almasrymina@...gle.com>,
"Zach O'Keefe" <zokeefe@...gle.com>,
Manish Mishra <manish.mishra@...anix.com>,
Naoya Horiguchi <naoya.horiguchi@....com>,
"Dr . David Alan Gilbert" <dgilbert@...hat.com>,
"Matthew Wilcox (Oracle)" <willy@...radead.org>,
Vlastimil Babka <vbabka@...e.cz>,
Baolin Wang <baolin.wang@...ux.alibaba.com>,
Miaohe Lin <linmiaohe@...wei.com>,
Yang Shi <shy828301@...il.com>,
Andrew Morton <akpm@...ux-foundation.org>, linux-mm@...ck.org,
linux-kernel@...r.kernel.org,
James Houghton <jthoughton@...gle.com>
Subject: [RFC PATCH v2 12/47] hugetlb: add hugetlb_hgm_walk and hugetlb_walk_step

hugetlb_hgm_walk implements high-granularity page table walks for
HugeTLB. It is safe to call on non-HGM-enabled VMAs; it will return
immediately.
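
To illustrate the intended calling convention, here is a rough sketch (not
part of this patch; it reuses huge_pte_offset(), hugetlb_pte_populate(),
hpage_size_to_level(), and the hugetlb VMA lock helpers as they are used
elsewhere in this series, and it omits the locking needed to actually use
the resulting PTE) of a caller resolving an address down to PAGE_SIZE:

	struct hstate *h = hstate_vma(vma);
	struct hugetlb_pte hpte;
	pte_t *ptep;
	int ret = 0;

	/* Start from the hstate-level PTE for this address, if present. */
	ptep = huge_pte_offset(mm, addr, huge_page_size(h));
	if (ptep) {
		hugetlb_pte_populate(&hpte, ptep, huge_page_shift(h),
				     hpage_size_to_level(huge_page_size(h)));

		hugetlb_vma_lock_read(vma);
		/* Walk as far down as possible, stopping at an empty PTE. */
		ret = hugetlb_hgm_walk(mm, vma, &hpte, addr, PAGE_SIZE,
				       /*stop_at_none=*/true);
		hugetlb_vma_unlock_read(vma);
	}
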
hugetlb_walk_step implements how we step forward in the walk.
Architectures that don't use GENERAL_HUGETLB will need to provide their
own implementation.
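
Continuing the sketch above (still illustrative only: mm, vma, addr, hpte,
and ret are the same hypothetical variables, the hstate level is assumed to
be a 1G PUD mapping, and the entries being stepped through are assumed to
be empty or to point at lower page tables, which is how hugetlb_hgm_walk()
uses this helper), each successful call to hugetlb_walk_step() moves hpte
one level closer to the requested size:

	/* hpte starts at PUD level: hugetlb_pte_size(&hpte) == PUD_SIZE. */
	ret = hugetlb_walk_step(mm, &hpte, addr, PAGE_SIZE);
	/* Now PMD level: hugetlb_pte_size(&hpte) == PMD_SIZE. */
	if (!ret)
		ret = hugetlb_walk_step(mm, &hpte, addr, PAGE_SIZE);
	/* Now PTE level: size == PAGE_SIZE; hpte->ptl is the PTE lock. */
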
Signed-off-by: James Houghton <jthoughton@...gle.com>
---
include/linux/hugetlb.h | 13 +++++
mm/hugetlb.c | 125 ++++++++++++++++++++++++++++++++++++++++
2 files changed, 138 insertions(+)
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 003255b0e40f..4b1548adecde 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -276,6 +276,10 @@ u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);
pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, pud_t *pud);
+int hugetlb_hgm_walk(struct mm_struct *mm, struct vm_area_struct *vma,
+		     struct hugetlb_pte *hpte, unsigned long addr,
+		     unsigned long sz, bool stop_at_none);
+
struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);
extern int sysctl_hugetlb_shm_group;
@@ -288,6 +292,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
pte_t *huge_pte_offset(struct mm_struct *mm,
unsigned long addr, unsigned long sz);
unsigned long hugetlb_mask_last_page(struct hstate *h);
+int hugetlb_walk_step(struct mm_struct *mm, struct hugetlb_pte *hpte,
+		      unsigned long addr, unsigned long sz);
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
@@ -1066,6 +1072,8 @@ void hugetlb_register_node(struct node *node);
void hugetlb_unregister_node(struct node *node);
#endif
+enum hugetlb_level hpage_size_to_level(unsigned long sz);
+
#else /* CONFIG_HUGETLB_PAGE */
struct hstate {};
@@ -1253,6 +1261,11 @@ static inline void hugetlb_register_node(struct node *node)
static inline void hugetlb_unregister_node(struct node *node)
{
}
+
+static inline enum hugetlb_level hpage_size_to_level(unsigned long sz)
+{
+	return HUGETLB_LEVEL_PTE;
+}
#endif /* CONFIG_HUGETLB_PAGE */
#ifdef CONFIG_HUGETLB_HIGH_GRANULARITY_MAPPING
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index e3733388adee..90db59632559 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -95,6 +95,29 @@ static void hugetlb_vma_data_free(struct vm_area_struct *vma);
static int hugetlb_vma_data_alloc(struct vm_area_struct *vma);
static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma);
+/*
+ * hpage_size_to_level() - convert @sz to the corresponding page table level
+ *
+ * @sz must be less than or equal to a valid hugepage size.
+ */
+enum hugetlb_level hpage_size_to_level(unsigned long sz)
+{
+	/*
+	 * We order the conditionals from smallest to largest to pick the
+	 * smallest level when multiple levels have the same size (i.e.,
+	 * when levels are folded).
+	 */
+	if (sz < PMD_SIZE)
+		return HUGETLB_LEVEL_PTE;
+	if (sz < PUD_SIZE)
+		return HUGETLB_LEVEL_PMD;
+	if (sz < P4D_SIZE)
+		return HUGETLB_LEVEL_PUD;
+	if (sz < PGDIR_SIZE)
+		return HUGETLB_LEVEL_P4D;
+	return HUGETLB_LEVEL_PGD;
+}
+
static inline bool subpool_is_free(struct hugepage_subpool *spool)
{
if (spool->count)
@@ -7321,6 +7344,70 @@ bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
}
#endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
+/* hugetlb_hgm_walk - walks a high-granularity HugeTLB page table to resolve
+ * the page table entry for @addr.
+ *
+ * @hpte must always be pointing at an hstate-level PTE (or deeper).
+ *
+ * This function will never walk further if it encounters a PTE of a size
+ * less than or equal to @sz.
+ *
+ * @stop_at_none determines what we do when we encounter an empty PTE. If true,
+ * we stop the walk at that PTE. If false and @sz is less than the current
+ * PTE's size, we make that PTE point to the next level down, walking until
+ * the current PTE's size is equal to @sz.
+ *
+ * If @stop_at_none is true and @sz is PAGE_SIZE, this function will always
+ * succeed, but that does not guarantee that hugetlb_pte_size(hpte) is @sz.
+ *
+ * Return:
+ * -ENOMEM if we couldn't allocate new PTEs.
+ * -EEXIST if the caller wanted to walk further than a migration PTE,
+ * poison PTE, or a PTE marker. The caller needs to manually deal
+ * with this scenario.
+ * -EINVAL if called with invalid arguments (@sz invalid, @hpte not
+ * initialized).
+ * 0 otherwise.
+ *
+ * Even if this function fails, @hpte is guaranteed to always remain
+ * valid.
+ */
+int hugetlb_hgm_walk(struct mm_struct *mm, struct vm_area_struct *vma,
+		     struct hugetlb_pte *hpte, unsigned long addr,
+		     unsigned long sz, bool stop_at_none)
+{
+	int ret = 0;
+	pte_t pte;
+
+	if (WARN_ON_ONCE(sz < PAGE_SIZE))
+		return -EINVAL;
+
+	if (!hugetlb_hgm_enabled(vma)) {
+		if (stop_at_none)
+			return 0;
+		return sz == huge_page_size(hstate_vma(vma)) ? 0 : -EINVAL;
+	}
+
+	hugetlb_vma_assert_locked(vma);
+
+	if (WARN_ON_ONCE(!hpte->ptep))
+		return -EINVAL;
+
+	while (hugetlb_pte_size(hpte) > sz && !ret) {
+		pte = huge_ptep_get(hpte->ptep);
+		if (!pte_present(pte)) {
+			if (stop_at_none)
+				return 0;
+			if (unlikely(!huge_pte_none(pte)))
+				return -EEXIST;
+		} else if (hugetlb_pte_present_leaf(hpte, pte))
+			return 0;
+		ret = hugetlb_walk_step(mm, hpte, addr, sz);
+	}
+
+	return ret;
+}
+
#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, unsigned long sz)
@@ -7388,6 +7475,44 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
return (pte_t *)pmd;
}
+/*
+ * hugetlb_walk_step() - Walk the page table one step to resolve the page
+ * (hugepage or subpage) entry at address @addr.
+ *
+ * @sz is always the final target PTE size (e.g. PAGE_SIZE for the lowest
+ * level PTE).
+ *
+ * @hpte will always remain valid, even if this function fails.
+ */
+int hugetlb_walk_step(struct mm_struct *mm, struct hugetlb_pte *hpte,
+		      unsigned long addr, unsigned long sz)
+{
+	pte_t *ptep;
+	spinlock_t *ptl;
+
+	switch (hpte->level) {
+	case HUGETLB_LEVEL_PUD:
+		ptep = (pte_t *)hugetlb_pmd_alloc(mm, hpte, addr);
+		if (IS_ERR(ptep))
+			return PTR_ERR(ptep);
+		hugetlb_pte_populate(hpte, ptep, PMD_SHIFT, HUGETLB_LEVEL_PMD);
+		break;
+	case HUGETLB_LEVEL_PMD:
+		ptep = hugetlb_pte_alloc(mm, hpte, addr);
+		if (IS_ERR(ptep))
+			return PTR_ERR(ptep);
+		ptl = pte_lockptr(mm, (pmd_t *)hpte->ptep);
+		hugetlb_pte_populate(hpte, ptep, PAGE_SHIFT, HUGETLB_LEVEL_PTE);
+		hpte->ptl = ptl;
+		break;
+	default:
+		WARN_ONCE(1, "%s: got invalid level: %d (shift: %d)\n",
+			  __func__, hpte->level, hpte->shift);
+		return -EINVAL;
+	}
+	return 0;
+}
+
/*
* Return a mask that can be used to update an address to the last huge
* page in a page table page mapping size. Used to skip non-present
--
2.38.0.135.g90850a2211-goog