Message-Id: <20230207035139.272707-5-shiyn.lin@gmail.com>
Date:   Tue,  7 Feb 2023 11:51:29 +0800
From:   Chih-En Lin <shiyn.lin@...il.com>
To:     Andrew Morton <akpm@...ux-foundation.org>,
        Qi Zheng <zhengqi.arch@...edance.com>,
        David Hildenbrand <david@...hat.com>,
        "Matthew Wilcox (Oracle)" <willy@...radead.org>,
        Christophe Leroy <christophe.leroy@...roup.eu>,
        John Hubbard <jhubbard@...dia.com>,
        Nadav Amit <namit@...are.com>, Barry Song <baohua@...nel.org>
Cc:     Steven Rostedt <rostedt@...dmis.org>,
        Masami Hiramatsu <mhiramat@...nel.org>,
        Peter Zijlstra <peterz@...radead.org>,
        Ingo Molnar <mingo@...hat.com>,
        Arnaldo Carvalho de Melo <acme@...nel.org>,
        Mark Rutland <mark.rutland@....com>,
        Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
        Jiri Olsa <jolsa@...nel.org>,
        Namhyung Kim <namhyung@...nel.org>,
        Yang Shi <shy828301@...il.com>, Peter Xu <peterx@...hat.com>,
        Vlastimil Babka <vbabka@...e.cz>,
        "Zach O'Keefe" <zokeefe@...gle.com>,
        Yun Zhou <yun.zhou@...driver.com>,
        Hugh Dickins <hughd@...gle.com>,
        Suren Baghdasaryan <surenb@...gle.com>,
        Pasha Tatashin <pasha.tatashin@...een.com>,
        Yu Zhao <yuzhao@...gle.com>, Juergen Gross <jgross@...e.com>,
        Tong Tiangen <tongtiangen@...wei.com>,
        Liu Shixin <liushixin2@...wei.com>,
        Anshuman Khandual <anshuman.khandual@....com>,
        Li kunyu <kunyu@...china.com>,
        Minchan Kim <minchan@...nel.org>,
        Miaohe Lin <linmiaohe@...wei.com>,
        Gautam Menghani <gautammenghani201@...il.com>,
        Catalin Marinas <catalin.marinas@....com>,
        Mark Brown <broonie@...nel.org>, Will Deacon <will@...nel.org>,
        Vincenzo Frascino <Vincenzo.Frascino@....com>,
        Thomas Gleixner <tglx@...utronix.de>,
        "Eric W. Biederman" <ebiederm@...ssion.com>,
        Andy Lutomirski <luto@...nel.org>,
        Sebastian Andrzej Siewior <bigeasy@...utronix.de>,
        "Liam R. Howlett" <Liam.Howlett@...cle.com>,
        Fenghua Yu <fenghua.yu@...el.com>,
        Andrei Vagin <avagin@...il.com>,
        Barret Rhoden <brho@...gle.com>,
        Michal Hocko <mhocko@...e.com>,
        "Jason A. Donenfeld" <Jason@...c4.com>,
        Alexey Gladkov <legion@...nel.org>,
        linux-kernel@...r.kernel.org, linux-fsdevel@...r.kernel.org,
        linux-mm@...ck.org, linux-trace-kernel@...r.kernel.org,
        linux-perf-users@...r.kernel.org,
        Dinglan Peng <peng301@...due.edu>,
        Pedro Fonseca <pfonseca@...due.edu>,
        Jim Huang <jserv@...s.ncku.edu.tw>,
        Huichun Feng <foxhoundsk.tw@...il.com>,
        Chih-En Lin <shiyn.lin@...il.com>
Subject: [PATCH v4 04/14] mm/rmap: Break COW PTE in rmap walking

Some features (unmap, migrate, device exclusive, mkclean, etc.) may
modify the PTE entry via an rmap walk. Add a new page vma mapped walk
flag, PVMW_BREAK_COW_PTE, to tell the rmap walk to break COW PTEs
before such modifications.
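
For reference, here is a minimal sketch (not part of the diff below) of
how an rmap callback opts in to COW-PTE breaking. It mirrors the
try_to_unmap_one() change in this patch; example_rmap_one() is a
made-up name, and break_cow_pte() is assumed from an earlier patch in
this series:

	static bool example_rmap_one(struct folio *folio,
				     struct vm_area_struct *vma,
				     unsigned long address, void *arg)
	{
		/* Ask page_vma_mapped_walk() to break COW PTEs for us. */
		DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address,
				      PVMW_BREAK_COW_PTE);

		while (page_vma_mapped_walk(&pvmw)) {
			/*
			 * The walk has already run break_cow_pte() on
			 * this PMD (or stopped via not_found()), so the
			 * PTE at pvmw.pte belongs to an exclusive page
			 * table and is safe to write through.
			 */
		}
		return true;
	}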

Signed-off-by: Chih-En Lin <shiyn.lin@...il.com>
---
 include/linux/rmap.h | 2 ++
 mm/migrate.c         | 3 ++-
 mm/page_vma_mapped.c | 4 ++++
 mm/rmap.c            | 9 +++++----
 mm/vmscan.c          | 3 ++-
 5 files changed, 15 insertions(+), 6 deletions(-)

diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index bd3504d11b15..d0f07e551973 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -368,6 +368,8 @@ int make_device_exclusive_range(struct mm_struct *mm, unsigned long start,
 #define PVMW_SYNC		(1 << 0)
 /* Look for migration entries rather than present PTEs */
 #define PVMW_MIGRATION		(1 << 1)
+/* Break COW-ed PTE during the walk */
+#define PVMW_BREAK_COW_PTE	(1 << 2)
 
 struct page_vma_mapped_walk {
 	unsigned long pfn;
diff --git a/mm/migrate.c b/mm/migrate.c
index a4d3fc65085f..04376ce05aa8 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -183,7 +183,8 @@ void putback_movable_pages(struct list_head *l)
 static bool remove_migration_pte(struct folio *folio,
 		struct vm_area_struct *vma, unsigned long addr, void *old)
 {
-	DEFINE_FOLIO_VMA_WALK(pvmw, old, vma, addr, PVMW_SYNC | PVMW_MIGRATION);
+	DEFINE_FOLIO_VMA_WALK(pvmw, old, vma, addr,
+			      PVMW_SYNC | PVMW_MIGRATION | PVMW_BREAK_COW_PTE);
 
 	while (page_vma_mapped_walk(&pvmw)) {
 		rmap_t rmap_flags = RMAP_NONE;
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index 93e13fc17d3c..7b35e85b9964 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -251,6 +251,10 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
 			step_forward(pvmw, PMD_SIZE);
 			continue;
 		}
+		if (pvmw->flags & PVMW_BREAK_COW_PTE) {
+			if (break_cow_pte(vma, pvmw->pmd, pvmw->address))
+				return not_found(pvmw);
+		}
 		if (!map_pte(pvmw))
 			goto next_pte;
 this_pte:
diff --git a/mm/rmap.c b/mm/rmap.c
index b616870a09be..bce97496b1f6 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1012,7 +1012,8 @@ static int page_vma_mkclean_one(struct page_vma_mapped_walk *pvmw)
 static bool page_mkclean_one(struct folio *folio, struct vm_area_struct *vma,
 			     unsigned long address, void *arg)
 {
-	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, PVMW_SYNC);
+	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address,
+			      PVMW_SYNC | PVMW_BREAK_COW_PTE);
 	int *cleaned = arg;
 
 	*cleaned += page_vma_mkclean_one(&pvmw);
@@ -1463,7 +1464,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 		     unsigned long address, void *arg)
 {
 	struct mm_struct *mm = vma->vm_mm;
-	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
+	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, PVMW_BREAK_COW_PTE);
 	pte_t pteval;
 	struct page *subpage;
 	bool anon_exclusive, ret = true;
@@ -1834,7 +1835,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
 		     unsigned long address, void *arg)
 {
 	struct mm_struct *mm = vma->vm_mm;
-	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
+	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, PVMW_BREAK_COW_PTE);
 	pte_t pteval;
 	struct page *subpage;
 	bool anon_exclusive, ret = true;
@@ -2187,7 +2188,7 @@ static bool page_make_device_exclusive_one(struct folio *folio,
 		struct vm_area_struct *vma, unsigned long address, void *priv)
 {
 	struct mm_struct *mm = vma->vm_mm;
-	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
+	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, PVMW_BREAK_COW_PTE);
 	struct make_exclusive_args *args = priv;
 	pte_t pteval;
 	struct page *subpage;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index bf3eedf0209c..15eda32146fd 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1882,7 +1882,8 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
 
 		/*
 		 * The folio is mapped into the page tables of one or more
-		 * processes. Try to unmap it here.
+		 * processes. Try to unmap it here. Also, since unmapping
+		 * writes to the page tables, break COW PTEs beforehand.
 		 */
 		if (folio_mapped(folio)) {
 			enum ttu_flags flags = TTU_BATCH_FLUSH;
-- 
2.34.1
