Message-Id: <20221220072743.3039060-5-shiyn.lin@gmail.com>
Date: Tue, 20 Dec 2022 15:27:33 +0800
From: Chih-En Lin <shiyn.lin@...il.com>
To: Andrew Morton <akpm@...ux-foundation.org>,
Qi Zheng <zhengqi.arch@...edance.com>,
David Hildenbrand <david@...hat.com>,
Matthew Wilcox <willy@...radead.org>,
Christophe Leroy <christophe.leroy@...roup.eu>,
John Hubbard <jhubbard@...dia.com>,
Nadav Amit <namit@...are.com>
Cc: linux-kernel@...r.kernel.org, linux-mm@...ck.org,
Steven Rostedt <rostedt@...dmis.org>,
Masami Hiramatsu <mhiramat@...nel.org>,
Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...hat.com>,
Arnaldo Carvalho de Melo <acme@...nel.org>,
Mark Rutland <mark.rutland@....com>,
Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
Jiri Olsa <jolsa@...nel.org>,
Namhyung Kim <namhyung@...nel.org>,
Yang Shi <shy828301@...il.com>, Peter Xu <peterx@...hat.com>,
Zach O'Keefe <zokeefe@...gle.com>,
"Liam R . Howlett" <Liam.Howlett@...cle.com>,
Alex Sierra <alex.sierra@....com>,
Xianting Tian <xianting.tian@...ux.alibaba.com>,
Colin Cross <ccross@...gle.com>,
Suren Baghdasaryan <surenb@...gle.com>,
Barry Song <baohua@...nel.org>,
Pasha Tatashin <pasha.tatashin@...een.com>,
Suleiman Souhlal <suleiman@...gle.com>,
Brian Geffon <bgeffon@...gle.com>, Yu Zhao <yuzhao@...gle.com>,
Tong Tiangen <tongtiangen@...wei.com>,
Liu Shixin <liushixin2@...wei.com>,
Li kunyu <kunyu@...china.com>,
Anshuman Khandual <anshuman.khandual@....com>,
Vlastimil Babka <vbabka@...e.cz>,
Hugh Dickins <hughd@...gle.com>,
Minchan Kim <minchan@...nel.org>,
Miaohe Lin <linmiaohe@...wei.com>,
Gautam Menghani <gautammenghani201@...il.com>,
Catalin Marinas <catalin.marinas@....com>,
Mark Brown <broonie@...nel.org>, Will Deacon <will@...nel.org>,
"Eric W . Biederman" <ebiederm@...ssion.com>,
Thomas Gleixner <tglx@...utronix.de>,
Sebastian Andrzej Siewior <bigeasy@...utronix.de>,
Andy Lutomirski <luto@...nel.org>,
Fenghua Yu <fenghua.yu@...el.com>,
Barret Rhoden <brho@...gle.com>,
Davidlohr Bueso <dave@...olabs.net>,
"Jason A . Donenfeld" <Jason@...c4.com>,
Dinglan Peng <peng301@...due.edu>,
Pedro Fonseca <pfonseca@...due.edu>,
Jim Huang <jserv@...s.ncku.edu.tw>,
Huichun Feng <foxhoundsk.tw@...il.com>,
Chih-En Lin <shiyn.lin@...il.com>
Subject: [PATCH v3 04/14] mm/rmap: Break COW PTE in rmap walking
Some of the features (unmap, migrate, device exclusive, mkclean, etc.)
might modify the PTE entry via rmap. Add a new page vma mapped walk
flag, PVMW_BREAK_COW_PTE, to indicate that the rmap walk should break
COW PTE.
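
For illustration, a minimal sketch of a walker that opts in (the
callback name example_walk_one() is hypothetical and not part of this
series; locking and error handling are elided):

	static bool example_walk_one(struct folio *folio,
			struct vm_area_struct *vma, unsigned long address,
			void *arg)
	{
		/* Ask the walk to break the COW-ed PTE table first. */
		DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address,
				      PVMW_BREAK_COW_PTE);

		while (page_vma_mapped_walk(&pvmw)) {
			/*
			 * The PTE at pvmw.pte now belongs to a page table
			 * exclusive to this mm, so writing to it cannot
			 * affect a table still shared through COW.
			 */
		}
		return true;
	}

With the flag set, page_vma_mapped_walk() calls break_cow_pte() before
map_pte(), so the callback only ever sees PTEs in an exclusive table.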
Signed-off-by: Chih-En Lin <shiyn.lin@...il.com>
---
include/linux/rmap.h | 2 ++
mm/migrate.c | 3 ++-
mm/page_vma_mapped.c | 2 ++
mm/rmap.c | 12 +++++++-----
mm/vmscan.c | 7 ++++++-
5 files changed, 19 insertions(+), 7 deletions(-)
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index bd3504d11b155..d0f07e5519736 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -368,6 +368,8 @@ int make_device_exclusive_range(struct mm_struct *mm, unsigned long start,
#define PVMW_SYNC (1 << 0)
/* Look for migration entries rather than present PTEs */
#define PVMW_MIGRATION (1 << 1)
+/* Break COW-ed PTE during the walk */
+#define PVMW_BREAK_COW_PTE (1 << 2)
struct page_vma_mapped_walk {
unsigned long pfn;
diff --git a/mm/migrate.c b/mm/migrate.c
index dff333593a8ae..a4be7e04c9b09 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -174,7 +174,8 @@ void putback_movable_pages(struct list_head *l)
static bool remove_migration_pte(struct folio *folio,
struct vm_area_struct *vma, unsigned long addr, void *old)
{
- DEFINE_FOLIO_VMA_WALK(pvmw, old, vma, addr, PVMW_SYNC | PVMW_MIGRATION);
+ DEFINE_FOLIO_VMA_WALK(pvmw, old, vma, addr,
+ PVMW_SYNC | PVMW_MIGRATION | PVMW_BREAK_COW_PTE);
while (page_vma_mapped_walk(&pvmw)) {
rmap_t rmap_flags = RMAP_NONE;
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index 93e13fc17d3cb..5dfc9236dc505 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -251,6 +251,8 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
step_forward(pvmw, PMD_SIZE);
continue;
}
+ if (pvmw->flags & PVMW_BREAK_COW_PTE)
+ break_cow_pte(vma, pvmw->pmd, pvmw->address);
if (!map_pte(pvmw))
goto next_pte;
this_pte:
diff --git a/mm/rmap.c b/mm/rmap.c
index 2ec925e5fa6a9..b1b7dcbd498be 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -807,7 +807,8 @@ static bool folio_referenced_one(struct folio *folio,
struct vm_area_struct *vma, unsigned long address, void *arg)
{
struct folio_referenced_arg *pra = arg;
- DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
+ /* It will clear the accessed bit of the PTE, so we should break COW PTE. */
+ DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, PVMW_BREAK_COW_PTE);
int referenced = 0;
while (page_vma_mapped_walk(&pvmw)) {
@@ -1012,7 +1013,8 @@ static int page_vma_mkclean_one(struct page_vma_mapped_walk *pvmw)
static bool page_mkclean_one(struct folio *folio, struct vm_area_struct *vma,
unsigned long address, void *arg)
{
- DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, PVMW_SYNC);
+ DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address,
+ PVMW_SYNC | PVMW_BREAK_COW_PTE);
int *cleaned = arg;
*cleaned += page_vma_mkclean_one(&pvmw);
@@ -1471,7 +1473,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
unsigned long address, void *arg)
{
struct mm_struct *mm = vma->vm_mm;
- DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
+ DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, PVMW_BREAK_COW_PTE);
pte_t pteval;
struct page *subpage;
bool anon_exclusive, ret = true;
@@ -1842,7 +1844,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
unsigned long address, void *arg)
{
struct mm_struct *mm = vma->vm_mm;
- DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
+ DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, PVMW_BREAK_COW_PTE);
pte_t pteval;
struct page *subpage;
bool anon_exclusive, ret = true;
@@ -2195,7 +2197,7 @@ static bool page_make_device_exclusive_one(struct folio *folio,
struct vm_area_struct *vma, unsigned long address, void *priv)
{
struct mm_struct *mm = vma->vm_mm;
- DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
+ DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, PVMW_BREAK_COW_PTE);
struct make_exclusive_args *args = priv;
pte_t pteval;
struct page *subpage;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 026199c047e0e..980d2056adfd1 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1781,6 +1781,10 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
}
}
+ /*
+ * Break COW PTE, since checking the folio's
+ * references might modify the PTE.
+ */
if (!ignore_references)
references = folio_check_references(folio, sc);
@@ -1864,7 +1868,8 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
/*
* The folio is mapped into the page tables of one or more
- * processes. Try to unmap it here.
* processes. Try to unmap it here. Also, since unmapping writes
+ * to the page tables, break COW PTE if they are COW-ed.
*/
if (folio_mapped(folio)) {
enum ttu_flags flags = TTU_BATCH_FLUSH;
--
2.37.3