Message-Id: <20230414142341.354556-6-shiyn.lin@gmail.com>
Date: Fri, 14 Apr 2023 22:23:29 +0800
From: Chih-En Lin <shiyn.lin@...il.com>
To: Andrew Morton <akpm@...ux-foundation.org>,
Qi Zheng <zhengqi.arch@...edance.com>,
David Hildenbrand <david@...hat.com>,
"Matthew Wilcox (Oracle)" <willy@...radead.org>,
Christophe Leroy <christophe.leroy@...roup.eu>,
John Hubbard <jhubbard@...dia.com>,
Nadav Amit <namit@...are.com>, Barry Song <baohua@...nel.org>,
Pasha Tatashin <pasha.tatashin@...een.com>
Cc: Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
Dave Hansen <dave.hansen@...ux.intel.com>,
"H. Peter Anvin" <hpa@...or.com>,
Steven Rostedt <rostedt@...dmis.org>,
Masami Hiramatsu <mhiramat@...nel.org>,
Peter Zijlstra <peterz@...radead.org>,
Arnaldo Carvalho de Melo <acme@...nel.org>,
Mark Rutland <mark.rutland@....com>,
Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
Jiri Olsa <jolsa@...nel.org>,
Namhyung Kim <namhyung@...nel.org>,
Ian Rogers <irogers@...gle.com>,
Adrian Hunter <adrian.hunter@...el.com>,
Yu Zhao <yuzhao@...gle.com>,
Steven Barrett <steven@...uorix.net>,
Juergen Gross <jgross@...e.com>, Peter Xu <peterx@...hat.com>,
Kefeng Wang <wangkefeng.wang@...wei.com>,
Tong Tiangen <tongtiangen@...wei.com>,
Christoph Hellwig <hch@...radead.org>,
"Liam R. Howlett" <Liam.Howlett@...cle.com>,
Yang Shi <shy828301@...il.com>,
Vlastimil Babka <vbabka@...e.cz>,
Alex Sierra <alex.sierra@....com>,
Vincent Whitchurch <vincent.whitchurch@...s.com>,
Anshuman Khandual <anshuman.khandual@....com>,
Li kunyu <kunyu@...china.com>,
Liu Shixin <liushixin2@...wei.com>,
Hugh Dickins <hughd@...gle.com>,
Minchan Kim <minchan@...nel.org>,
Joey Gouly <joey.gouly@....com>,
Chih-En Lin <shiyn.lin@...il.com>,
Michal Hocko <mhocko@...e.com>,
Suren Baghdasaryan <surenb@...gle.com>,
"Zach O'Keefe" <zokeefe@...gle.com>,
Gautam Menghani <gautammenghani201@...il.com>,
Catalin Marinas <catalin.marinas@....com>,
Mark Brown <broonie@...nel.org>,
"Eric W. Biederman" <ebiederm@...ssion.com>,
Andrei Vagin <avagin@...il.com>,
Shakeel Butt <shakeelb@...gle.com>,
Daniel Bristot de Oliveira <bristot@...nel.org>,
"Jason A. Donenfeld" <Jason@...c4.com>,
Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
Alexey Gladkov <legion@...nel.org>, x86@...nel.org,
linux-kernel@...r.kernel.org, linux-fsdevel@...r.kernel.org,
linux-mm@...ck.org, linux-trace-kernel@...r.kernel.org,
linux-perf-users@...r.kernel.org,
Dinglan Peng <peng301@...due.edu>,
Pedro Fonseca <pfonseca@...due.edu>,
Jim Huang <jserv@...s.ncku.edu.tw>,
Huichun Feng <foxhoundsk.tw@...il.com>
Subject: [PATCH v5 05/17] mm: Handle COW-ed PTE during zapping
To support zapping for COW-ed PTE tables, we need to zap the entire
PTE table each time instead of partially zapping pages. Therefore,
if the zap range covers the entire PTE table, we can handle the
de-accounting, rmap removal, etc. However, we must not modify the
entries while someone else still holds a reference to the COW-ed PTE
table. Otherwise, if only the zapping process references this COW-ed
PTE table, we simply reuse it and do the normal zapping.
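
For reference, the decision in zap_pte_range() boils down to the
following sketch (cow_pte_count() and break_cow_pte() are introduced
by earlier patches in this series; this is only an illustration of
the logic, without the locking and flushing, not the literal hunk):

	if (test_bit(MMF_COW_PTE, &mm->flags) && !pmd_write(*pmd)) {
		if (!range_in_vma(vma, addr & PMD_MASK,
				  (addr + PMD_SIZE) & PMD_MASK)) {
			/* Zap range does not cover the whole table. */
			break_cow_pte(vma, pmd, addr);
		} else if (cow_pte_count(pmd) == 1) {
			/* Last user: make it writable, zap normally. */
			set_pmd_at(mm, addr, pmd, pmd_mkwrite(*pmd));
		} else {
			/* Still shared: do not touch the entries. */
			pte_details.flags |= ZAP_PTE_IS_SHARED;
		}
	}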
Signed-off-by: Chih-En Lin <shiyn.lin@...il.com>
---
mm/memory.c | 92 ++++++++++++++++++++++++++++++++++++++++++++++++++---
1 file changed, 87 insertions(+), 5 deletions(-)
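
Note: the zap path below leans on two helpers added earlier in this
series; the contract assumed here (paraphrased for review, not the
actual definitions) is:

	/* How many mms currently share the PTE table that @pmd maps. */
	int cow_pte_count(pmd_t *pmd);

	/*
	 * Drop this mm's reference on the shared PTE table. Returns
	 * true if other users remain (the entries must be left
	 * untouched), false if the caller was the last user and may
	 * reuse the table for normal zapping.
	 */
	bool pmd_put_pte(pmd_t *pmd);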
diff --git a/mm/memory.c b/mm/memory.c
index f8a87a0fc382..7908e20f802a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -192,6 +192,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
pmd = pmd_offset(pud, addr);
do {
next = pmd_addr_end(addr, end);
+#ifdef CONFIG_COW_PTE
+ if (test_bit(MMF_COW_PTE, &tlb->mm->flags)) {
+ if (!pmd_none(*pmd) && !pmd_write(*pmd))
+ VM_WARN_ON(cow_pte_count(pmd) != 1);
+ }
+#endif
if (pmd_none_or_clear_bad(pmd))
continue;
free_pte_range(tlb, pmd, addr);
@@ -1656,6 +1662,7 @@ zap_install_uffd_wp_if_needed(struct vm_area_struct *vma,
#define ZAP_PTE_INIT 0x0000
#define ZAP_PTE_FORCE_FLUSH 0x0001
+#define ZAP_PTE_IS_SHARED 0x0002
struct zap_pte_details {
pte_t **pte;
@@ -1681,9 +1688,13 @@ zap_present_pte(struct mmu_gather *tlb, struct vm_area_struct *vma,
if (unlikely(!should_zap_page(details, page)))
return 0;
- ptent = ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm);
+ if (pte_details->flags & ZAP_PTE_IS_SHARED)
+ ptent = ptep_get(pte);
+ else
+ ptent = ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm);
tlb_remove_tlb_entry(tlb, pte, addr);
- zap_install_uffd_wp_if_needed(vma, addr, pte, details, ptent);
+ if (!(pte_details->flags & ZAP_PTE_IS_SHARED))
+ zap_install_uffd_wp_if_needed(vma, addr, pte, details, ptent);
if (unlikely(!page))
return 0;
@@ -1767,8 +1778,10 @@ zap_nopresent_pte(struct mmu_gather *tlb, struct vm_area_struct *vma,
/* We should have covered all the swap entry types */
WARN_ON_ONCE(1);
}
- pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
- zap_install_uffd_wp_if_needed(vma, addr, pte, details, ptent);
+ if (!(pte_details->flags & ZAP_PTE_IS_SHARED)) {
+ pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
+ zap_install_uffd_wp_if_needed(vma, addr, pte, details, ptent);
+ }
}
static unsigned long zap_pte_range(struct mmu_gather *tlb,
@@ -1785,6 +1798,36 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
.flags = ZAP_PTE_INIT,
.pte = &pte,
};
+#ifdef CONFIG_COW_PTE
+ unsigned long orig_addr = addr;
+
+ if (test_bit(MMF_COW_PTE, &mm->flags) && !pmd_write(*pmd)) {
+ if (!range_in_vma(vma, addr & PMD_MASK,
+ (addr + PMD_SIZE) & PMD_MASK)) {
+ /*
+ * We cannot promise that this COW-ed PTE table will also be
+ * zapped with the rest of the VMAs, so break COW PTE here.
+ */
+ break_cow_pte(vma, pmd, addr);
+ } else {
+ /*
+ * Free the batched memory before we handle the
+ * COW-ed PTE.
+ */
+ tlb_flush_mmu(tlb);
+ end = (addr + PMD_SIZE) & PMD_MASK;
+ addr = addr & PMD_MASK;
+ start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
+ if (cow_pte_count(pmd) == 1) {
+ /* Reuse COW-ed PTE */
+ pmd_t new = pmd_mkwrite(*pmd);
+ set_pmd_at(tlb->mm, addr, pmd, new);
+ } else
+ pte_details.flags |= ZAP_PTE_IS_SHARED;
+ pte_unmap_unlock(start_pte, ptl);
+ }
+ }
+#endif
tlb_change_page_size(tlb, PAGE_SIZE);
again:
@@ -1828,7 +1871,16 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
*/
if (pte_details.flags & ZAP_PTE_FORCE_FLUSH) {
pte_details.flags &= ~ZAP_PTE_FORCE_FLUSH;
- tlb_flush_mmu(tlb);
+ /*
+ * With COW-ed PTE, we defer freeing the batched memory until
+ * after we have actually cleared the COW-ed PTE's pmd entry.
+ * Otherwise, if we are the only one still referencing the
+ * COW-ed PTE table after the batched memory is freed, the
+ * page table check will report a bug with anon_map_count != 0
+ * in page_table_check_zero().
+ */
+ if (!(pte_details.flags & ZAP_PTE_IS_SHARED))
+ tlb_flush_mmu(tlb);
}
if (addr != end) {
@@ -1836,6 +1888,36 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
goto again;
}
+#ifdef CONFIG_COW_PTE
+ if (pte_details.flags & ZAP_PTE_IS_SHARED) {
+ start_pte = pte_offset_map_lock(mm, pmd, orig_addr, &ptl);
+ if (!pmd_put_pte(pmd)) {
+ pmd_t new = pmd_mkwrite(*pmd);
+ set_pmd_at(tlb->mm, addr, pmd, new);
+ /*
+ * We are the only one still referencing this PTE table.
+ * Clear the page table check before we free the
+ * batched memory.
+ */
+ page_table_check_pte_clear_range(mm, orig_addr, *pmd);
+ pte_unmap_unlock(start_pte, ptl);
+ /* free the batched memory and flush the TLB. */
+ tlb_flush_mmu(tlb);
+ free_pte_range(tlb, pmd, addr);
+ } else {
+ pmd_clear(pmd);
+ pte_unmap_unlock(start_pte, ptl);
+ mm_dec_nr_ptes(tlb->mm);
+ /*
+ * Someone is still referencing the table,
+ * so just flush the TLB here.
+ */
+ flush_tlb_range(vma, addr & PMD_MASK,
+ (addr + PMD_SIZE) & PMD_MASK);
+ }
+ }
+#endif
+
return addr;
}
--
2.34.1