Message-ID: <20230105101844.1893104-28-jthoughton@google.com>
Date: Thu, 5 Jan 2023 10:18:25 +0000
From: James Houghton <jthoughton@...gle.com>
To: Mike Kravetz <mike.kravetz@...cle.com>,
Muchun Song <songmuchun@...edance.com>,
Peter Xu <peterx@...hat.com>
Cc: David Hildenbrand <david@...hat.com>,
David Rientjes <rientjes@...gle.com>,
Axel Rasmussen <axelrasmussen@...gle.com>,
Mina Almasry <almasrymina@...gle.com>,
"Zach O'Keefe" <zokeefe@...gle.com>,
Manish Mishra <manish.mishra@...anix.com>,
Naoya Horiguchi <naoya.horiguchi@....com>,
"Dr . David Alan Gilbert" <dgilbert@...hat.com>,
"Matthew Wilcox (Oracle)" <willy@...radead.org>,
Vlastimil Babka <vbabka@...e.cz>,
Baolin Wang <baolin.wang@...ux.alibaba.com>,
Miaohe Lin <linmiaohe@...wei.com>,
Yang Shi <shy828301@...il.com>,
Andrew Morton <akpm@...ux-foundation.org>, linux-mm@...ck.org,
linux-kernel@...r.kernel.org,
James Houghton <jthoughton@...gle.com>
Subject: [PATCH 27/46] hugetlb: add HGM support for move_hugetlb_page_tables
This is very similar to the support that was added to
copy_hugetlb_page_range: instead of walking the page table at the
hstate level, we now do a high-granularity walk and advance by
hugetlb_pte_size() at each step. Most of the rest of the code stays
the same.
Signed-off-by: James Houghton <jthoughton@...gle.com>
---
mm/hugetlb.c | 47 +++++++++++++++++++++++++++--------------------
1 file changed, 27 insertions(+), 20 deletions(-)
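For reference (not part of the patch itself), this is roughly how the main
loop of move_hugetlb_page_tables() reads once the hunks below are applied.
hugetlb_full_walk(), hugetlb_full_walk_alloc(), hugetlb_pte_size() and the
struct hugetlb_pte walker state come from earlier patches in this series;
the surrounding setup (mmu notifier range, vma/i_mmap locking, flushing)
is assumed unchanged from the pre-patch function:

	while (old_addr < old_end) {
		if (hugetlb_full_walk(&src_hpte, vma, old_addr)) {
			/* The hstate-level PTE wasn't allocated. */
			old_addr = (old_addr | last_addr_mask) + sz;
			new_addr = (new_addr | last_addr_mask) + sz;
			continue;
		}

		/* Nothing mapped here; skip by the walked PTE's size. */
		if (huge_pte_none(huge_ptep_get(src_hpte.ptep))) {
			old_addr += hugetlb_pte_size(&src_hpte);
			new_addr += hugetlb_pte_size(&src_hpte);
			continue;
		}

		/* PMD sharing is only possible at the hstate level. */
		if (hugetlb_pte_size(&src_hpte) == sz &&
		    huge_pmd_unshare(mm, vma, old_addr, src_hpte.ptep)) {
			shared_pmd = true;
			old_addr = (old_addr | last_addr_mask) + sz;
			new_addr = (new_addr | last_addr_mask) + sz;
			continue;
		}

		/* Allocate a destination PTE of the same granularity. */
		if (hugetlb_full_walk_alloc(&dst_hpte, new_vma, new_addr,
					    hugetlb_pte_size(&src_hpte)))
			break;

		move_hugetlb_pte(vma, old_addr, new_addr, &src_hpte, &dst_hpte);
		old_addr += hugetlb_pte_size(&src_hpte);
		new_addr += hugetlb_pte_size(&src_hpte);
	}

Note that the stride is hugetlb_pte_size(&src_hpte) rather than the hstate
page size, so present mappings are moved at whatever granularity the source
mapping uses, while unallocated hstate-level PTEs are still skipped a full
huge page at a time.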
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 21a5116f509b..582d14a206b5 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5313,16 +5313,16 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
return ret;
}
-static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr,
- unsigned long new_addr, pte_t *src_pte, pte_t *dst_pte)
+static void move_hugetlb_pte(struct vm_area_struct *vma, unsigned long old_addr,
+ unsigned long new_addr, struct hugetlb_pte *src_hpte,
+ struct hugetlb_pte *dst_hpte)
{
- struct hstate *h = hstate_vma(vma);
struct mm_struct *mm = vma->vm_mm;
spinlock_t *src_ptl, *dst_ptl;
pte_t pte;
- dst_ptl = huge_pte_lock(h, mm, dst_pte);
- src_ptl = huge_pte_lockptr(huge_page_shift(h), mm, src_pte);
+ dst_ptl = hugetlb_pte_lock(dst_hpte);
+ src_ptl = hugetlb_pte_lockptr(src_hpte);
/*
* We don't have to worry about the ordering of src and dst ptlocks
@@ -5331,8 +5331,8 @@ static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr,
if (src_ptl != dst_ptl)
spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
- pte = huge_ptep_get_and_clear(mm, old_addr, src_pte);
- set_huge_pte_at(mm, new_addr, dst_pte, pte);
+ pte = huge_ptep_get_and_clear(mm, old_addr, src_hpte->ptep);
+ set_huge_pte_at(mm, new_addr, dst_hpte->ptep, pte);
if (src_ptl != dst_ptl)
spin_unlock(src_ptl);
@@ -5350,9 +5350,9 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
struct mm_struct *mm = vma->vm_mm;
unsigned long old_end = old_addr + len;
unsigned long last_addr_mask;
- pte_t *src_pte, *dst_pte;
struct mmu_notifier_range range;
bool shared_pmd = false;
+ struct hugetlb_pte src_hpte, dst_hpte;
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, old_addr,
old_end);
@@ -5368,28 +5368,35 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
/* Prevent race with file truncation */
hugetlb_vma_lock_write(vma);
i_mmap_lock_write(mapping);
- for (; old_addr < old_end; old_addr += sz, new_addr += sz) {
- src_pte = hugetlb_walk(vma, old_addr, sz);
- if (!src_pte) {
- old_addr |= last_addr_mask;
- new_addr |= last_addr_mask;
+ while (old_addr < old_end) {
+ if (hugetlb_full_walk(&src_hpte, vma, old_addr)) {
+ /* The hstate-level PTE wasn't allocated. */
+ old_addr = (old_addr | last_addr_mask) + sz;
+ new_addr = (new_addr | last_addr_mask) + sz;
continue;
}
- if (huge_pte_none(huge_ptep_get(src_pte)))
+
+ if (huge_pte_none(huge_ptep_get(src_hpte.ptep))) {
+ old_addr += hugetlb_pte_size(&src_hpte);
+ new_addr += hugetlb_pte_size(&src_hpte);
continue;
+ }
- if (huge_pmd_unshare(mm, vma, old_addr, src_pte)) {
+ if (hugetlb_pte_size(&src_hpte) == sz &&
+ huge_pmd_unshare(mm, vma, old_addr, src_hpte.ptep)) {
shared_pmd = true;
- old_addr |= last_addr_mask;
- new_addr |= last_addr_mask;
+ old_addr = (old_addr | last_addr_mask) + sz;
+ new_addr = (new_addr | last_addr_mask) + sz;
continue;
}
- dst_pte = huge_pte_alloc(mm, new_vma, new_addr, sz);
- if (!dst_pte)
+ if (hugetlb_full_walk_alloc(&dst_hpte, new_vma, new_addr,
+ hugetlb_pte_size(&src_hpte)))
break;
- move_huge_pte(vma, old_addr, new_addr, src_pte, dst_pte);
+ move_hugetlb_pte(vma, old_addr, new_addr, &src_hpte, &dst_hpte);
+ old_addr += hugetlb_pte_size(&src_hpte);
+ new_addr += hugetlb_pte_size(&src_hpte);
}
if (shared_pmd)
--
2.39.0.314.g84b9a713c41-goog