Message-Id: <20230207035139.272707-6-shiyn.lin@gmail.com>
Date: Tue, 7 Feb 2023 11:51:30 +0800
From: Chih-En Lin <shiyn.lin@...il.com>
To: Andrew Morton <akpm@...ux-foundation.org>,
Qi Zheng <zhengqi.arch@...edance.com>,
David Hildenbrand <david@...hat.com>,
"Matthew Wilcox (Oracle)" <willy@...radead.org>,
Christophe Leroy <christophe.leroy@...roup.eu>,
John Hubbard <jhubbard@...dia.com>,
Nadav Amit <namit@...are.com>, Barry Song <baohua@...nel.org>
Cc: Steven Rostedt <rostedt@...dmis.org>,
Masami Hiramatsu <mhiramat@...nel.org>,
Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...hat.com>,
Arnaldo Carvalho de Melo <acme@...nel.org>,
Mark Rutland <mark.rutland@....com>,
Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
Jiri Olsa <jolsa@...nel.org>,
Namhyung Kim <namhyung@...nel.org>,
Yang Shi <shy828301@...il.com>, Peter Xu <peterx@...hat.com>,
Vlastimil Babka <vbabka@...e.cz>,
"Zach O'Keefe" <zokeefe@...gle.com>,
Yun Zhou <yun.zhou@...driver.com>,
Hugh Dickins <hughd@...gle.com>,
Suren Baghdasaryan <surenb@...gle.com>,
Pasha Tatashin <pasha.tatashin@...een.com>,
Yu Zhao <yuzhao@...gle.com>, Juergen Gross <jgross@...e.com>,
Tong Tiangen <tongtiangen@...wei.com>,
Liu Shixin <liushixin2@...wei.com>,
Anshuman Khandual <anshuman.khandual@....com>,
Li kunyu <kunyu@...china.com>,
Minchan Kim <minchan@...nel.org>,
Miaohe Lin <linmiaohe@...wei.com>,
Gautam Menghani <gautammenghani201@...il.com>,
Catalin Marinas <catalin.marinas@....com>,
Mark Brown <broonie@...nel.org>, Will Deacon <will@...nel.org>,
Vincenzo Frascino <Vincenzo.Frascino@....com>,
Thomas Gleixner <tglx@...utronix.de>,
"Eric W. Biederman" <ebiederm@...ssion.com>,
Andy Lutomirski <luto@...nel.org>,
Sebastian Andrzej Siewior <bigeasy@...utronix.de>,
"Liam R. Howlett" <Liam.Howlett@...cle.com>,
Fenghua Yu <fenghua.yu@...el.com>,
Andrei Vagin <avagin@...il.com>,
Barret Rhoden <brho@...gle.com>,
Michal Hocko <mhocko@...e.com>,
"Jason A. Donenfeld" <Jason@...c4.com>,
Alexey Gladkov <legion@...nel.org>,
linux-kernel@...r.kernel.org, linux-fsdevel@...r.kernel.org,
linux-mm@...ck.org, linux-trace-kernel@...r.kernel.org,
linux-perf-users@...r.kernel.org,
Dinglan Peng <peng301@...due.edu>,
Pedro Fonseca <pfonseca@...due.edu>,
Jim Huang <jserv@...s.ncku.edu.tw>,
Huichun Feng <foxhoundsk.tw@...il.com>,
Chih-En Lin <shiyn.lin@...il.com>
Subject: [PATCH v4 05/14] mm/khugepaged: Break COW PTE before scanning pte
We should not allow THP to collapse a COW-ed PTE table. So, break
COW PTE before collapse_pte_mapped_thp() collapses it into a THP.
Also, break COW PTE before hpage_collapse_scan_pmd() scans the PTEs.
Signed-off-by: Chih-En Lin <shiyn.lin@...il.com>
---
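Note for reviewers: break_cow_pte() is introduced earlier in this
series and is therefore not part of this diff. Below is a minimal
sketch of the contract the callers added here rely on; the body is
illustrative only, not the actual implementation, and the _sketch
suffix marks it as hypothetical:

static inline int break_cow_pte_sketch(struct vm_area_struct *vma,
				       pmd_t *pmd, unsigned long addr)
{
	struct mm_struct *mm = vma->vm_mm;

	/* This mm never shared its PTE tables: nothing to break. */
	if (!test_bit(MMF_COW_PTE, &mm->flags))
		return 0;

	/*
	 * A writable PMD entry means the PTE table is already
	 * exclusively owned, so there is nothing to unshare.  A NULL
	 * @pmd (see the hpage_collapse_scan_file() caller) would be
	 * looked up from @vma and @addr first.
	 */
	if (pmd && pmd_write(*pmd))
		return 0;

	/*
	 * Unshare: allocate a fresh PTE table, copy the shared
	 * entries, drop the reference on the shared table, and make
	 * the PMD entry writable again.  A nonzero return (e.g. on
	 * -ENOMEM) is mapped by the callers to SCAN_COW_PTE.
	 */
	return 0; /* actual unsharing elided */
}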
include/trace/events/huge_memory.h | 1 +
mm/khugepaged.c | 35 +++++++++++++++++++++++++++++-
2 files changed, 35 insertions(+), 1 deletion(-)
diff --git a/include/trace/events/huge_memory.h b/include/trace/events/huge_memory.h
index 3e6fb05852f9..5f2c39f61521 100644
--- a/include/trace/events/huge_memory.h
+++ b/include/trace/events/huge_memory.h
@@ -13,6 +13,7 @@
EM( SCAN_PMD_NULL, "pmd_null") \
EM( SCAN_PMD_NONE, "pmd_none") \
EM( SCAN_PMD_MAPPED, "page_pmd_mapped") \
+ EM( SCAN_COW_PTE, "cowed_pte") \
EM( SCAN_EXCEED_NONE_PTE, "exceed_none_pte") \
EM( SCAN_EXCEED_SWAP_PTE, "exceed_swap_pte") \
EM( SCAN_EXCEED_SHARED_PTE, "exceed_shared_pte") \
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 90acfea40c13..1cddc20318d5 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -31,6 +31,7 @@ enum scan_result {
SCAN_PMD_NULL,
SCAN_PMD_NONE,
SCAN_PMD_MAPPED,
+ SCAN_COW_PTE,
SCAN_EXCEED_NONE_PTE,
SCAN_EXCEED_SWAP_PTE,
SCAN_EXCEED_SHARED_PTE,
@@ -875,7 +876,7 @@ static int find_pmd_or_thp_or_none(struct mm_struct *mm,
return SCAN_PMD_MAPPED;
if (pmd_devmap(pmde))
return SCAN_PMD_NULL;
- if (pmd_bad(pmde))
+ if (pmd_write(pmde) && pmd_bad(pmde))
return SCAN_PMD_NULL;
return SCAN_SUCCEED;
}
@@ -926,6 +927,8 @@ static int __collapse_huge_page_swapin(struct mm_struct *mm,
pte_unmap(vmf.pte);
continue;
}
+ if (break_cow_pte(vma, pmd, address))
+ return SCAN_COW_PTE;
ret = do_swap_page(&vmf);
/*
@@ -1038,6 +1041,9 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
if (result != SCAN_SUCCEED)
goto out_up_write;
+ /* We should have already handled the COW-ed PTE. */
+ VM_WARN_ON(test_bit(MMF_COW_PTE, &mm->flags) && !pmd_write(*pmd));
+
anon_vma_lock_write(vma->anon_vma);
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
@@ -1148,6 +1154,13 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
memset(cc->node_load, 0, sizeof(cc->node_load));
nodes_clear(cc->alloc_nmask);
+
+ /* Break COW PTE before we collapse the pages. */
+ if (break_cow_pte(vma, pmd, address)) {
+ result = SCAN_COW_PTE;
+ goto out;
+ }
+
pte = pte_offset_map_lock(mm, pmd, address, &ptl);
for (_address = address, _pte = pte; _pte < pte + HPAGE_PMD_NR;
_pte++, _address += PAGE_SIZE) {
@@ -1206,6 +1219,10 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
goto out_unmap;
}
+ /*
+ * If we only trigger the COW PTE break, the page is usually
+ * still in the COW mapping, so it may still be shared.
+ */
if (page_mapcount(page) > 1) {
++shared;
if (cc->is_khugepaged &&
@@ -1501,6 +1518,11 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
goto drop_hpage;
}
+ /* We shouldn't let a COW-ed PTE table collapse. */
+ if (break_cow_pte(vma, pmd, haddr))
+ goto drop_hpage;
+ VM_WARN_ON(test_bit(MMF_COW_PTE, &mm->flags) && !pmd_write(*pmd));
+
/*
* We need to lock the mapping so that from here on, only GUP-fast and
* hardware page walks can access the parts of the page tables that
@@ -1706,6 +1728,11 @@ static int retract_page_tables(struct address_space *mapping, pgoff_t pgoff,
result = SCAN_PTE_UFFD_WP;
goto unlock_next;
}
+ if (test_bit(MMF_COW_PTE, &mm->flags) &&
+ !pmd_write(*pmd)) {
+ result = SCAN_COW_PTE;
+ goto unlock_next;
+ }
collapse_and_free_pmd(mm, vma, addr, pmd);
if (!cc->is_khugepaged && is_target)
result = set_huge_pmd(vma, addr, pmd, hpage);
@@ -2143,6 +2170,11 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
swap = 0;
memset(cc->node_load, 0, sizeof(cc->node_load));
nodes_clear(cc->alloc_nmask);
+ if (break_cow_pte(find_vma(mm, addr), NULL, addr)) {
+ result = SCAN_COW_PTE;
+ goto out;
+ }
+
rcu_read_lock();
xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) {
if (xas_retry(&xas, page))
@@ -2213,6 +2245,7 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
}
rcu_read_unlock();
+out:
if (result == SCAN_SUCCEED) {
if (cc->is_khugepaged &&
present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
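The huge_memory.h hunk only adds the enum/string pair for the new
result. For context, this is roughly how the existing EM()/EMe()
table in include/trace/events/huge_memory.h is consumed (illustrative,
unchanged by this patch), so a refused collapse shows up in trace
output as status=cowed_pte:

#undef EM
#undef EMe
#define EM(a, b)	{a, b},
#define EMe(a, b)	{a, b}

	/* In the tracepoint's TP_printk(): */
	__print_symbolic(__entry->status, SCAN_STATUS)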
--
2.34.1