Message-Id: <20230414142341.354556-5-shiyn.lin@gmail.com>
Date: Fri, 14 Apr 2023 22:23:28 +0800
From: Chih-En Lin <shiyn.lin@...il.com>
To: Andrew Morton <akpm@...ux-foundation.org>,
Qi Zheng <zhengqi.arch@...edance.com>,
David Hildenbrand <david@...hat.com>,
"Matthew Wilcox (Oracle)" <willy@...radead.org>,
Christophe Leroy <christophe.leroy@...roup.eu>,
John Hubbard <jhubbard@...dia.com>,
Nadav Amit <namit@...are.com>, Barry Song <baohua@...nel.org>,
Pasha Tatashin <pasha.tatashin@...een.com>
Cc: Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
Dave Hansen <dave.hansen@...ux.intel.com>,
"H. Peter Anvin" <hpa@...or.com>,
Steven Rostedt <rostedt@...dmis.org>,
Masami Hiramatsu <mhiramat@...nel.org>,
Peter Zijlstra <peterz@...radead.org>,
Arnaldo Carvalho de Melo <acme@...nel.org>,
Mark Rutland <mark.rutland@....com>,
Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
Jiri Olsa <jolsa@...nel.org>,
Namhyung Kim <namhyung@...nel.org>,
Ian Rogers <irogers@...gle.com>,
Adrian Hunter <adrian.hunter@...el.com>,
Yu Zhao <yuzhao@...gle.com>,
Steven Barrett <steven@...uorix.net>,
Juergen Gross <jgross@...e.com>, Peter Xu <peterx@...hat.com>,
Kefeng Wang <wangkefeng.wang@...wei.com>,
Tong Tiangen <tongtiangen@...wei.com>,
Christoph Hellwig <hch@...radead.org>,
"Liam R. Howlett" <Liam.Howlett@...cle.com>,
Yang Shi <shy828301@...il.com>,
Vlastimil Babka <vbabka@...e.cz>,
Alex Sierra <alex.sierra@....com>,
Vincent Whitchurch <vincent.whitchurch@...s.com>,
Anshuman Khandual <anshuman.khandual@....com>,
Li kunyu <kunyu@...china.com>,
Liu Shixin <liushixin2@...wei.com>,
Hugh Dickins <hughd@...gle.com>,
Minchan Kim <minchan@...nel.org>,
Joey Gouly <joey.gouly@....com>,
Chih-En Lin <shiyn.lin@...il.com>,
Michal Hocko <mhocko@...e.com>,
Suren Baghdasaryan <surenb@...gle.com>,
"Zach O'Keefe" <zokeefe@...gle.com>,
Gautam Menghani <gautammenghani201@...il.com>,
Catalin Marinas <catalin.marinas@....com>,
Mark Brown <broonie@...nel.org>,
"Eric W. Biederman" <ebiederm@...ssion.com>,
Andrei Vagin <avagin@...il.com>,
Shakeel Butt <shakeelb@...gle.com>,
Daniel Bristot de Oliveira <bristot@...nel.org>,
"Jason A. Donenfeld" <Jason@...c4.com>,
Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
Alexey Gladkov <legion@...nel.org>, x86@...nel.org,
linux-kernel@...r.kernel.org, linux-fsdevel@...r.kernel.org,
linux-mm@...ck.org, linux-trace-kernel@...r.kernel.org,
linux-perf-users@...r.kernel.org,
Dinglan Peng <peng301@...due.edu>,
Pedro Fonseca <pfonseca@...due.edu>,
Jim Huang <jserv@...s.ncku.edu.tw>,
Huichun Feng <foxhoundsk.tw@...il.com>
Subject: [PATCH v5 04/17] mm: Add break COW PTE fault and helper functions
Add the function handle_cow_pte_fault() to break (unshare) a COW-ed PTE
table on page faults that will modify the PTE table or a mapped page
residing in the COW-ed PTE table (i.e., write, unshare, and file read
faults).

When breaking COW PTE, first check the COW-ed PTE table's refcount to
see whether it can simply be reused. If the COW-ed PTE table cannot be
reused, allocate a new PTE table and duplicate all pte entries from the
COW-ed one. Also, flush the TLB when the write protection of the PTE
changes.

In addition, provide the helper functions break_cow_pte{,_range}() for
other features (remap, THP, migration, swapfile, etc.) to use.
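
For illustration, a caller that is about to modify PTE entries under a
pmd is expected to use the helper roughly as follows (a minimal sketch,
not part of this patch; example_write_path() is a hypothetical wrapper,
and break_cow_pte() is the helper declared in <linux/mm.h> by this
patch):

  static int example_write_path(struct vm_area_struct *vma, pmd_t *pmd,
                                unsigned long addr)
  {
          int ret;

          /*
           * Unshare (or reuse) the COW-ed PTE table before modifying
           * any entries under this pmd.
           */
          ret = break_cow_pte(vma, pmd, addr);
          if (ret)
                  return ret;

          /* ... now safe to take the pte lock and update the entries ... */
          return 0;
  }
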
Signed-off-by: Chih-En Lin <shiyn.lin@...il.com>
---
include/linux/mm.h | 17 +++
include/linux/pgtable.h | 6 +
mm/memory.c | 318 +++++++++++++++++++++++++++++++++++++++-
mm/mmap.c | 4 +
mm/mremap.c | 2 +
mm/swapfile.c | 2 +
6 files changed, 348 insertions(+), 1 deletion(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 828f8a1b1e32..b4c9658ccd28 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2179,6 +2179,23 @@ void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
int generic_error_remove_page(struct address_space *mapping, struct page *page);
+#ifdef CONFIG_COW_PTE
+int break_cow_pte(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr);
+int break_cow_pte_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end);
+#else
+static inline int break_cow_pte(struct vm_area_struct *vma,
+ pmd_t *pmd, unsigned long addr)
+{
+ return 0;
+}
+static inline int break_cow_pte_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ return 0;
+}
+#endif
+
#ifdef CONFIG_MMU
extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
unsigned long address, unsigned int flags,
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index c63cd44777ec..f177a9d48b70 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -1378,6 +1378,12 @@ static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
if (pmd_none(pmdval) || pmd_trans_huge(pmdval) ||
(IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION) && !pmd_present(pmdval)))
return 1;
+ /*
+ * A COW-ed PTE table is write-protected, which can trigger pmd_bad().
+ * To avoid this, return here if the entry is write-protected.
+ */
+ if (!pmd_write(pmdval))
+ return 0;
if (unlikely(pmd_bad(pmdval))) {
pmd_clear_bad(pmd);
return 1;
diff --git a/mm/memory.c b/mm/memory.c
index 3b1c4a7e632c..f8a87a0fc382 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2166,6 +2166,8 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
if (retval)
goto out;
retval = -ENOMEM;
+ if (break_cow_pte(vma, NULL, addr))
+ goto out;
pte = get_locked_pte(vma->vm_mm, addr, &ptl);
if (!pte)
goto out;
@@ -2425,6 +2427,9 @@ static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
pte_t *pte, entry;
spinlock_t *ptl;
+ if (break_cow_pte(vma, NULL, addr))
+ return VM_FAULT_OOM;
+
pte = get_locked_pte(mm, addr, &ptl);
if (!pte)
return VM_FAULT_OOM;
@@ -2802,6 +2807,10 @@ int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
BUG_ON(addr >= end);
pfn -= addr >> PAGE_SHIFT;
pgd = pgd_offset(mm, addr);
+
+ if (break_cow_pte_range(vma, addr, end))
+ return -ENOMEM;
+
flush_cache_range(vma, addr, end);
do {
next = pgd_addr_end(addr, end);
@@ -5192,6 +5201,285 @@ static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud)
return VM_FAULT_FALLBACK;
}
+#ifdef CONFIG_COW_PTE
+/*
+ * Break (unshare) COW PTE
+ *
+ * Since the pte lock is held during all operations on the COW-ed PTE
+ * table, it should be safe to modify its pmd entry as well, provided
+ * it has been ensured that the pmd entry points to a COW-ed PTE table
+ * rather than a huge page or default PTE. Otherwise, we should also
+ * consider holding the pmd lock as we do for the huge page.
+ */
+static vm_fault_t handle_cow_pte_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct mm_struct *mm = vma->vm_mm;
+ pmd_t *pmd = vmf->pmd;
+ unsigned long start, end, addr = vmf->address;
+ struct mmu_notifier_range range;
+ pmd_t new_entry, cowed_entry;
+ pte_t *orig_dst_pte, *orig_src_pte;
+ pte_t *dst_pte, *src_pte;
+ pgtable_t new_pte_table = NULL;
+ spinlock_t *src_ptl;
+ int ret = 0;
+
+ /* Do nothing if the fault doesn't have a PTE table yet. */
+ if (pmd_none(*pmd) || pmd_write(*pmd))
+ return 0;
+ /* COW PTE doesn't handle huge pages. */
+ if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
+ return 0;
+
+ start = addr & PMD_MASK;
+ end = (addr + PMD_SIZE) & PMD_MASK;
+ addr = start;
+
+ mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
+ 0, vma, mm, start, end);
+ /*
+ * Because the address range covers the whole PTE table rather than
+ * only the faulted vma, there may be some mismatches, since the mmu
+ * notifier is only registered for the faulted vma.
+ * Do we really need to care about this kind of mismatch?
+ */
+ mmu_notifier_invalidate_range_start(&range);
+ raw_write_seqcount_begin(&mm->write_protect_seq);
+
+ /*
+ * Fast path: if the faulting task holds the only reference
+ * to this COW-ed PTE table, simply reuse it.
+ */
+ src_pte = pte_offset_map(pmd, addr);
+ src_ptl = pte_lockptr(mm, pmd);
+ spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
+ if (cow_pte_count(pmd) == 1) {
+ pmd_t new = pmd_mkwrite(*pmd);
+ set_pmd_at(mm, addr, pmd, new);
+ pte_unmap_unlock(src_pte, src_ptl);
+ goto flush_tlb;
+ }
+ /* We don't hold the lock when allocating the new PTE. */
+ pte_unmap_unlock(src_pte, src_ptl);
+
+ /*
+ * Slow path. Since the accounting has already been done and the
+ * mapped pages are still shared, we can simply clone the PTE table.
+ */
+
+ /*
+ * Before acquiring the lock, allocate the memory we may need.
+ */
+ new_pte_table = pte_alloc_one(mm);
+ if (unlikely(!new_pte_table)) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /*
+ * To protect the pte table from rmap and page table walks, hold
+ * the COW-ed PTE table's lock until all the operations have been
+ * done, including setting the pmd entry, duplicating, and
+ * decreasing the refcount.
+ */
+ orig_src_pte = src_pte = pte_offset_map(pmd, addr);
+ src_ptl = pte_lockptr(mm, pmd);
+ spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
+
+ /* Before populating the new pte table, store the COW-ed (old) entry. */
+ cowed_entry = READ_ONCE(*pmd);
+
+ /*
+ * Someone else may have broken COW PTE while we were allocating
+ * the pte table. So, check the refcount again.
+ */
+ if (cow_pte_count(&cowed_entry) == 1) {
+ pmd_t new = pmd_mkwrite(*pmd);
+ set_pmd_at(mm, addr, pmd, new);
+ pte_unmap_unlock(src_pte, src_ptl);
+ goto flush_tlb;
+ }
+
+ /*
+ * Only set the new pte table in the pmd entry after all the
+ * duplicating is finished.
+ * We first store the new table in another pmd entry even though we
+ * hold the COW-ed PTE table's lock. This is because, if we cleared
+ * the pmd entry assigned to the COW-ed PTE table, other places (e.g.,
+ * another page fault) might allocate an empty PTE table, leading to
+ * potential issues.
+ */
+ pmd_clear(&new_entry);
+ pmd_populate(mm, &new_entry, new_pte_table);
+ /*
+ * No one other than us can access this new table, so we don't
+ * have to hold the second pte lock.
+ */
+ orig_dst_pte = dst_pte = pte_offset_map(&new_entry, addr);
+
+ arch_enter_lazy_mmu_mode();
+
+ /*
+ * All the pages mapped by the COW-ed PTE table are COW mappings.
+ * We can just copy the entries and leave the rest to
+ * handle_pte_fault().
+ */
+ do {
+ if (pte_none(*src_pte))
+ continue;
+ set_pte_at(mm, addr, dst_pte, *src_pte);
+ } while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
+
+ arch_leave_lazy_mmu_mode();
+
+ pte_unmap(orig_dst_pte);
+
+ /*
+ * Decrease the refcount of the COW-ed PTE table.
+ * On this path, we assume that someone else is still using the
+ * COW-ed PTE table, so a refcount of 1 before the decrement
+ * indicates that something went wrong.
+ */
+ VM_WARN_ON(!pmd_put_pte(&cowed_entry));
+ VM_WARN_ON(!pmd_same(*pmd, cowed_entry));
+
+ /* Now, we can finally install the new PTE table to the pmd entry. */
+ set_pmd_at(mm, start, pmd, new_entry);
+ /*
+ * The new table is installed; clear the new_pte_table variable
+ * so that pte_free() below does not free it.
+ */
+ new_pte_table = NULL;
+ pte_unmap_unlock(orig_src_pte, src_ptl);
+
+flush_tlb:
+ /*
+ * If we changed the protection, flush the TLB.
+ * flush_tlb_range() only uses vma to get the mm, so we don't need
+ * to worry here about the address range not matching the vma.
+ *
+ * Should we flush the TLB while holding the pte lock?
+ */
+ flush_tlb_range(vma, start, end);
+out:
+ raw_write_seqcount_end(&mm->write_protect_seq);
+ mmu_notifier_invalidate_range_end(&range);
+
+ if (new_pte_table)
+ pte_free(mm, new_pte_table);
+
+ return ret;
+}
+
+static inline int __break_cow_pte(struct vm_area_struct *vma, pmd_t *pmd,
+ unsigned long addr)
+{
+ struct vm_fault vmf = {
+ .vma = vma,
+ .address = addr & PAGE_MASK,
+ .pmd = pmd,
+ };
+
+ return handle_cow_pte_fault(&vmf);
+}
+
+/**
+ * break_cow_pte - duplicate/reuse a shared, write-protected (COW-ed) PTE table
+ * @vma: target vma that wants to break COW
+ * @pmd: pmd entry that maps the shared PTE table
+ * @addr: the address that triggered the break COW PTE
+ *
+ * Return: zero on success, < 0 otherwise.
+ *
+ * The address needs to be in the range of the shared and write-protected
+ * PTE table that the pmd entry maps. If pmd is NULL, look up the pmd
+ * from the vma. Duplicate the COW-ed PTE table when others still map
+ * to it; otherwise, reuse the COW-ed PTE table.
+ * If the first attempt fails, wait for some time and try again. If it
+ * fails again, invoke the OOM killer.
+ */
+int break_cow_pte(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr)
+{
+ struct mm_struct *mm;
+ pgd_t *pgd;
+ p4d_t *p4d;
+ pud_t *pud;
+ int ret = 0;
+
+ if (!vma)
+ return -EINVAL;
+ mm = vma->vm_mm;
+
+ if (!test_bit(MMF_COW_PTE, &mm->flags))
+ return 0;
+
+ if (!pmd) {
+ pgd = pgd_offset(mm, addr);
+ if (pgd_none_or_clear_bad(pgd))
+ return 0;
+ p4d = p4d_offset(pgd, addr);
+ if (p4d_none_or_clear_bad(p4d))
+ return 0;
+ pud = pud_offset(p4d, addr);
+ if (pud_none_or_clear_bad(pud))
+ return 0;
+ pmd = pmd_offset(pud, addr);
+ }
+
+ /* We will check the type of pmd entry later. */
+
+ ret = __break_cow_pte(vma, pmd, addr);
+
+ if (unlikely(ret == -ENOMEM)) {
+ unsigned int cow_pte_alloc_sleep_millisecs = 60000;
+
+ schedule_timeout(msecs_to_jiffies(
+ cow_pte_alloc_sleep_millisecs));
+
+ ret = __break_cow_pte(vma, pmd, addr);
+ if (unlikely(ret == -ENOMEM)) {
+ struct oom_control oc = {
+ .gfp_mask = GFP_PGTABLE_USER,
+ };
+
+ mutex_lock(&oom_lock);
+ out_of_memory(&oc);
+ mutex_unlock(&oom_lock);
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * break_cow_pte_range - duplicate/reuse COW-ed PTE tables in a given range
+ * @vma: target vma that wants to break COW
+ * @start: start address of the range to break
+ * @end: end address of the range to break
+ *
+ * Return: zero on success, the number of failures otherwise.
+ */
+int break_cow_pte_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
+{
+ unsigned long addr, next;
+ int nr_failed = 0;
+
+ if (!range_in_vma(vma, start, end))
+ return -EINVAL;
+
+ addr = start;
+ do {
+ next = pmd_addr_end(addr, end);
+ if (break_cow_pte(vma, NULL, addr))
+ nr_failed++;
+ } while (addr = next, addr != end);
+
+ return nr_failed;
+}
+#endif /* CONFIG_COW_PTE */
+
/*
* These routines also need to handle stuff like marking pages dirty
* and/or accessed for architectures that don't do it in hardware (most
@@ -5267,8 +5555,13 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
return do_fault(vmf);
}
- if (!pte_present(vmf->orig_pte))
+ if (!pte_present(vmf->orig_pte)) {
+#ifdef CONFIG_COW_PTE
+ if (test_bit(MMF_COW_PTE, &vmf->vma->vm_mm->flags))
+ handle_cow_pte_fault(vmf);
+#endif
return do_swap_page(vmf);
+ }
if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma))
return do_numa_page(vmf);
@@ -5404,8 +5697,31 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
return 0;
}
}
+#ifdef CONFIG_COW_PTE
+ /*
+ * Duplicate the COW-ed PTE table when the page fault will change
+ * the mapped pages (write or unshare fault) or the COW-ed PTE
+ * table itself (file-mapped read fault, see do_read_fault()).
+ */
+ if ((flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE) ||
+ vma->vm_ops) && test_bit(MMF_COW_PTE, &mm->flags)) {
+ ret = handle_cow_pte_fault(&vmf);
+ if (unlikely(ret == -ENOMEM))
+ return VM_FAULT_OOM;
+ }
+#endif
}
+#ifdef CONFIG_COW_PTE
+ /*
+ * It will definitely break the kernel if the refcount of the PTE
+ * table is higher than 1 and the pmd entry is writable. But we
+ * want to see more information, so just warn here.
+ */
+ if (likely(!pmd_none(*vmf.pmd)))
+ VM_WARN_ON(cow_pte_count(vmf.pmd) > 1 && pmd_write(*vmf.pmd));
+#endif
+
return handle_pte_fault(&vmf);
}
diff --git a/mm/mmap.c b/mm/mmap.c
index ff68a67a2a7c..ac1002e85d88 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2169,6 +2169,10 @@ int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
return err;
}
+ err = break_cow_pte(vma, NULL, addr);
+ if (err)
+ return err;
+
new = vm_area_dup(vma);
if (!new)
return -ENOMEM;
diff --git a/mm/mremap.c b/mm/mremap.c
index 411a85682b58..0668e9ead65a 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -534,6 +534,8 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
old_pmd = get_old_pmd(vma->vm_mm, old_addr);
if (!old_pmd)
continue;
+ /* Is the TLB flushed twice here? */
+ break_cow_pte(vma, old_pmd, old_addr);
new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
if (!new_pmd)
break;
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 2c718f45745f..b7aa880957fd 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1919,6 +1919,8 @@ static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
next = pmd_addr_end(addr, end);
if (pmd_none_or_trans_huge_or_clear_bad(pmd))
continue;
+ if (break_cow_pte(vma, pmd, addr))
+ return -ENOMEM;
ret = unuse_pte_range(vma, pmd, addr, next, type);
if (ret)
return ret;
--
2.34.1