Date:   Wed,  6 May 2020 17:41:39 -0700
From:   Anthony Yznaga <anthony.yznaga@...cle.com>
To:     linux-mm@...ck.org, linux-kernel@...r.kernel.org
Cc:     willy@...radead.org, corbet@....net, tglx@...utronix.de,
        mingo@...hat.com, bp@...en8.de, x86@...nel.org, hpa@...or.com,
        dave.hansen@...ux.intel.com, luto@...nel.org, peterz@...radead.org,
        rppt@...ux.ibm.com, akpm@...ux-foundation.org, hughd@...gle.com,
        ebiederm@...ssion.com, masahiroy@...nel.org, ardb@...nel.org,
        ndesaulniers@...gle.com, dima@...ovin.in, daniel.kiper@...cle.com,
        nivedita@...m.mit.edu, rafael.j.wysocki@...el.com,
        dan.j.williams@...el.com, zhenzhong.duan@...cle.com,
        jroedel@...e.de, bhe@...hat.com, guro@...com,
        Thomas.Lendacky@....com, andriy.shevchenko@...ux.intel.com,
        keescook@...omium.org, hannes@...xchg.org, minchan@...nel.org,
        mhocko@...nel.org, ying.huang@...el.com,
        yang.shi@...ux.alibaba.com, gustavo@...eddedor.com,
        ziqian.lzq@...fin.com, vdavydov.dev@...il.com,
        jason.zeng@...el.com, kevin.tian@...el.com, zhiyuan.lv@...el.com,
        lei.l.li@...el.com, paul.c.lai@...el.com, ashok.raj@...el.com,
        linux-fsdevel@...r.kernel.org, linux-doc@...r.kernel.org,
        kexec@...ts.infradead.org
Subject: [RFC 13/43] mm: PKRAM: free preserved pages pagetable

After the page ranges in the pagetable have been reserved, the pagetable
is no longer needed.  Rather than free it during early boot by unreserving
page-sized blocks, which can be inefficient when dealing with a large number
of blocks, wait until the page structs have been initialized and free the
pagetable pages as ordinary pages.

Signed-off-by: Anthony Yznaga <anthony.yznaga@...cle.com>
---
 arch/x86/mm/init_64.c |  1 +
 include/linux/pkram.h |  3 ++
 mm/pkram.c            | 11 +++++++
 mm/pkram_pagetable.c  | 82 +++++++++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 97 insertions(+)
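
For readers skimming the diff below, the core idea is that each pagetable
page carried over from the previous kernel is released one order-0 page at
a time once struct pages exist, so no memblock bookkeeping is needed at
that point.  A minimal sketch of that pattern follows; it is illustrative
only and not part of the patch, and example_free_table_page() is a
hypothetical helper name, while virt_to_page() and __free_pages_core()
(declared in mm/internal.h) are existing kernel primitives:

	#include <linux/mm.h>
	#include "internal.h"	/* __free_pages_core() is mm-internal */

	/*
	 * Illustrative sketch, not part of the patch: once the memmap has
	 * been initialized, a pagetable page identified by its kernel
	 * virtual address can be handed straight back to the page
	 * allocator instead of being unreserved block by block.
	 */
	static void __init example_free_table_page(void *table)
	{
		/* Order 0: each pagetable page is a single base page. */
		__free_pages_core(virt_to_page(table), 0);
	}

The patch applies this same pattern at every level of the preserved
pagetable, walking it top-down and freeing each table page after its
entries have been visited.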

diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index ae569ef6bd7d..72662615977b 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -1245,6 +1245,7 @@ void __init mem_init(void)
 	after_bootmem = 1;
 	x86_init.hyper.init_after_bootmem();
 
+	pkram_free_pgt();
 	totalram_pages_add(pkram_reserved_pages);
 	/*
 	 * Must be done after boot memory is put on freelist, because here we
diff --git a/include/linux/pkram.h b/include/linux/pkram.h
index 1b475f6e1598..edc5d8bef9d3 100644
--- a/include/linux/pkram.h
+++ b/include/linux/pkram.h
@@ -39,6 +39,7 @@ struct pkram_pg_state {
 };
 
 void pkram_walk_pgt_rev(struct pkram_pg_state *st, pgd_t *pgd);
+void pkram_free_pgt_walk_pgd(pgd_t *pgd);
 
 int pkram_prepare_save(struct pkram_stream *ps, const char *name,
 		       gfp_t gfp_mask);
@@ -64,9 +65,11 @@ size_t pkram_read(struct pkram_stream *ps, void *buf, size_t count);
 #ifdef CONFIG_PKRAM
 extern unsigned long pkram_reserved_pages;
 void pkram_reserve(void);
+void pkram_free_pgt(void);
 #else
 #define pkram_reserved_pages 0UL
 static inline void pkram_reserve(void) { }
+static inline void pkram_free_pgt(void) { }
 #endif
 
 #endif /* _LINUX_PKRAM_H */
diff --git a/mm/pkram.c b/mm/pkram.c
index 2c323154df76..dd3c89614010 100644
--- a/mm/pkram.c
+++ b/mm/pkram.c
@@ -1227,3 +1227,14 @@ static int __init pkram_reserve_page_ranges(pgd_t *pgd)
 
 	return err;
 }
+
+void pkram_free_pgt(void)
+{
+	if (!pkram_pgd)
+		return;
+
+	pkram_free_pgt_walk_pgd(pkram_pgd);
+
+	__free_pages_core(virt_to_page(pkram_pgd), 0);
+	pkram_pgd = NULL;
+}
diff --git a/mm/pkram_pagetable.c b/mm/pkram_pagetable.c
index d31aa36207ba..7033e9b1c47f 100644
--- a/mm/pkram_pagetable.c
+++ b/mm/pkram_pagetable.c
@@ -3,6 +3,8 @@
 #include <asm/pgtable.h>
 #include <linux/pkram.h>
 
+#include "internal.h"
+
 #define pgd_none(a)  (pgtable_l5_enabled() ? pgd_none(a) : p4d_none(__p4d(pgd_val(a))))
 
 static int note_page_rev(struct pkram_pg_state *st, unsigned long curr_size, bool present)
@@ -167,3 +169,83 @@ void pkram_walk_pgt_rev(struct pkram_pg_state *st, pgd_t *pgd)
 			break;
 	}
 }
+
+static void pkram_free_pgt_walk_pmd(pud_t addr)
+{
+	unsigned long bitmap_pa;
+	struct page *page;
+	pmd_t *start;
+	int i;
+
+	start = (pmd_t *)pud_page_vaddr(addr);
+	for (i = 0; i < PTRS_PER_PMD; i++, start++) {
+		if (!pmd_none(*start)) {
+			bitmap_pa = pte_val(pte_clrhuge(*(pte_t *)start));
+			if (pmd_large(*start) && !bitmap_pa)
+				continue;
+			page = virt_to_page(__va(bitmap_pa));
+			__free_pages_core(page, 0);
+		}
+	}
+}
+
+static void pkram_free_pgt_walk_pud(p4d_t addr)
+{
+	struct page *page;
+	pud_t *start;
+	int i;
+
+	start = (pud_t *)p4d_page_vaddr(addr);
+	for (i = 0; i < PTRS_PER_PUD; i++, start++) {
+		if (!pud_none(*start)) {
+			if (pud_large(*start)) {
+				WARN_ONCE(1, "PKRAM: unexpected pud hugepage\n");
+				continue;
+			}
+			pkram_free_pgt_walk_pmd(*start);
+			page = virt_to_page(__va(pud_val(*start)));
+			__free_pages_core(page, 0);
+		}
+	}
+}
+
+static void pkram_free_pgt_walk_p4d(pgd_t addr)
+{
+	struct page *page;
+	p4d_t *start;
+	int i;
+
+	if (PTRS_PER_P4D == 1)
+		return pkram_free_pgt_walk_pud(__p4d(pgd_val(addr)));
+
+	start = (p4d_t *)pgd_page_vaddr(addr);
+	for (i = 0; i < PTRS_PER_P4D; i++, start++) {
+		if (!p4d_none(*start)) {
+			if (p4d_large(*start)) {
+				WARN_ONCE(1, "PKRAM: unexpected p4d hugepage\n");
+				continue;
+			}
+			pkram_free_pgt_walk_pud(*start);
+			page = virt_to_page(__va(p4d_val(*start)));
+			__free_pages_core(page, 0);
+		}
+	}
+}
+
+/*
+ * Free the pagetable passed from the previous boot.
+ */
+void pkram_free_pgt_walk_pgd(pgd_t *pgd)
+{
+	pgd_t *start = pgd;
+	struct page *page;
+	int i;
+
+	for (i = 0; i < PTRS_PER_PGD; i++, start++) {
+		if (!pgd_none(*start)) {
+			pkram_free_pgt_walk_p4d(*start);
+			page = virt_to_page(__va(pgd_val(*start)));
+			__free_pages_core(page, 0);
+		}
+	}
+}
-- 
2.13.3
