Message-Id: <20200928175428.4110504-9-zi.yan@sent.com>
Date: Mon, 28 Sep 2020 13:54:06 -0400
From: Zi Yan <zi.yan@...t.com>
To: linux-mm@...ck.org
Cc: "Kirill A . Shutemov" <kirill.shutemov@...ux.intel.com>,
Roman Gushchin <guro@...com>, Rik van Riel <riel@...riel.com>,
Matthew Wilcox <willy@...radead.org>,
Shakeel Butt <shakeelb@...gle.com>,
Yang Shi <shy828301@...il.com>,
Jason Gunthorpe <jgg@...dia.com>,
Mike Kravetz <mike.kravetz@...cle.com>,
Michal Hocko <mhocko@...e.com>,
David Hildenbrand <david@...hat.com>,
William Kucharski <william.kucharski@...cle.com>,
Andrea Arcangeli <aarcange@...hat.com>,
John Hubbard <jhubbard@...dia.com>,
David Nellans <dnellans@...dia.com>,
linux-kernel@...r.kernel.org, Zi Yan <ziy@...dia.com>
Subject: [RFC PATCH v2 08/30] mm: thp: add PUD THP support for copy_huge_pud.
From: Zi Yan <ziy@...dia.com>
copy_huge_pud() needs to allocate one PMD page table page and 512 PTE
page table pages and deposit them when copying a PUD THP, so that the
page table pages are already available if the huge page is split later.
This is similar to what we do at PUD THP page fault time.
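
For reference, the one-PMD/512-PTE figure follows directly from the
page table geometry. Below is a minimal userspace sketch of the
arithmetic, with the constants hard-coded for x86-64 with 4 KiB pages
rather than taken from the kernel headers:

    #include <stdio.h>

    #define PAGE_SHIFT      12
    #define PMD_SHIFT       21      /* a PMD entry maps 2 MiB */
    #define PUD_SHIFT       30      /* a PUD entry maps 1 GiB */

    #define HPAGE_PMD_ORDER (PMD_SHIFT - PAGE_SHIFT)       /* 9 */
    #define HPAGE_PUD_ORDER (PUD_SHIFT - PAGE_SHIFT)       /* 18 */

    int main(void)
    {
            /*
             * Splitting a PUD THP consumes one PMD page table page;
             * each of that table's entries may be split again later
             * and then needs its own PTE page table page.
             */
            int pte_tables = 1 << (HPAGE_PUD_ORDER - HPAGE_PMD_ORDER);

            printf("PMD tables: 1, PTE tables: %d\n", pte_tables);
            return 0;
    }

This prints "PMD tables: 1, PTE tables: 512", matching the
mm_inc_nr_ptes() loop in the hunk below.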
Signed-off-by: Zi Yan <ziy@...dia.com>
---
mm/huge_memory.c | 36 ++++++++++++++++++++++++++++--------
1 file changed, 28 insertions(+), 8 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 20a3d393d451..ea9fbedcda26 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1264,7 +1264,12 @@ int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
{
spinlock_t *dst_ptl, *src_ptl;
pud_t pud;
- int ret;
+ pmd_t *pmd_pgtable = NULL;
+ int ret = -ENOMEM;
+
+ pmd_pgtable = pmd_alloc_one_page_with_ptes(vma->vm_mm, addr);
+ if (unlikely(!pmd_pgtable))
+ goto out;
dst_ptl = pud_lock(dst_mm, dst_pud);
src_ptl = pud_lockptr(src_mm, src_pud);
@@ -1272,16 +1277,30 @@ int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
ret = -EAGAIN;
pud = *src_pud;
- if (unlikely(!pud_trans_huge(pud) && !pud_devmap(pud)))
- goto out_unlock;
/*
- * When page table lock is held, the huge zero pud should not be
- * under splitting since we don't split the page itself, only pud to
- * a page table.
+ * Only a transparent huge PUD page needs extra page table pages for
+ * a possible huge page split.
*/
- if (is_huge_zero_pud(pud)) {
- /* No huge zero pud yet */
+ if (!pud_trans_huge(pud))
+ pmd_free_page_with_ptes(dst_mm, pmd_pgtable);
+
+ if (unlikely(!pud_trans_huge(pud) && !pud_devmap(pud)))
+ goto out_unlock;
+
+ if (pud_trans_huge(pud)) {
+ struct page *src_page;
+ int i;
+
+ src_page = pud_page(pud);
+ VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
+ get_page(src_page);
+ page_dup_rmap(src_page, true);
+ add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PUD_NR);
+ mm_inc_nr_pmds(dst_mm);
+ for (i = 0; i < (1 << (HPAGE_PUD_ORDER - HPAGE_PMD_ORDER)); i++)
+ mm_inc_nr_ptes(dst_mm);
+ pgtable_trans_huge_pud_deposit(dst_mm, dst_pud, virt_to_page(pmd_pgtable));
}
pudp_set_wrprotect(src_mm, addr, src_pud);
@@ -1292,6 +1311,7 @@ int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
out_unlock:
spin_unlock(src_ptl);
spin_unlock(dst_ptl);
+out:
return ret;
}
--
2.28.0