Message-Id: <1491973350-26816-1-git-send-email-hoeun.ryu@gmail.com>
Date:   Wed, 12 Apr 2017 14:01:59 +0900
From:   Hoeun Ryu <hoeun.ryu@...il.com>
To:     Andrew Morton <akpm@...ux-foundation.org>,
        Andrey Ryabinin <aryabinin@...tuozzo.com>,
        Andreas Dilger <adilger@...ger.ca>,
        Vlastimil Babka <vbabka@...e.cz>,
        Michal Hocko <mhocko@...e.com>,
        Chris Wilson <chris@...is-wilson.co.uk>,
        Ingo Molnar <mingo@...nel.org>, zijun_hu <zijun_hu@....com>,
        Matthew Wilcox <mawilcox@...rosoft.com>,
        Thomas Garnier <thgarnie@...gle.com>,
        "Kirill A. Shutemov" <kirill.shutemov@...ux.intel.com>
Cc:     linux-arch@...r.kernel.org, Hoeun Ryu <hoeun.ryu@...il.com>,
        linux-mm@...ck.org, linux-kernel@...r.kernel.org
Subject: [PATCH] mm: add VM_STATIC flag to vmalloc and prevent the areas from being removed

vm_area_add_early()/vm_area_register_early() are used to reserve vmalloc
areas during the boot process, and those virtually mapped areas are never
unmapped. So OR the VM_STATIC flag into those areas' flags in
vmalloc_init() when importing the existing vmlist entries, to prevent the
areas from being removed from the rbtree by accident.
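
For reference, a minimal, hypothetical sketch of how an early boot path
reserves such an area (names are illustrative; the flow mirrors existing
callers like the percpu first-chunk setup, which call
vm_area_register_early() before vmalloc_init() runs):

/* Hypothetical example: reserve a statically mapped area at boot. */
static struct vm_struct early_vm;

void __init reserve_early_area(void)
{
	early_vm.flags = VM_ALLOC;
	early_vm.size  = 16 * PAGE_SIZE;
	/* Picks a virtual range and links early_vm into vmlist. */
	vm_area_register_early(&early_vm, PAGE_SIZE);
	/*
	 * vmalloc_init() later imports this vmlist entry into the rbtree;
	 * with this patch the imported vmap_area is tagged VM_STATIC, so a
	 * stray vfree()/remove_vm_area() on early_vm.addr only WARNs
	 * instead of dropping the area.
	 */
}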

Signed-off-by: Hoeun Ryu <hoeun.ryu@...il.com>
---
 include/linux/vmalloc.h | 1 +
 mm/vmalloc.c            | 9 ++++++---
 2 files changed, 7 insertions(+), 3 deletions(-)

diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 46991ad..3df53fc 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -19,6 +19,7 @@ struct notifier_block;		/* in notifier.h */
 #define VM_UNINITIALIZED	0x00000020	/* vm_struct is not fully initialized */
 #define VM_NO_GUARD		0x00000040      /* don't add guard page */
 #define VM_KASAN		0x00000080      /* has allocated kasan shadow memory */
+#define VM_STATIC		0x00000200      /* area is never unmapped, don't remove it */
 /* bits [20..32] reserved for arch specific ioremap internals */
 
 /*
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 8ef8ea1..fb5049a 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1262,7 +1262,7 @@ void __init vmalloc_init(void)
 	/* Import existing vmlist entries. */
 	for (tmp = vmlist; tmp; tmp = tmp->next) {
 		va = kzalloc(sizeof(struct vmap_area), GFP_NOWAIT);
-		va->flags = VM_VM_AREA;
+		va->flags = VM_VM_AREA | VM_STATIC;
 		va->va_start = (unsigned long)tmp->addr;
 		va->va_end = va->va_start + tmp->size;
 		va->vm = tmp;
@@ -1480,7 +1480,7 @@ struct vm_struct *remove_vm_area(const void *addr)
 	might_sleep();
 
 	va = find_vmap_area((unsigned long)addr);
-	if (va && va->flags & VM_VM_AREA) {
+	if (va && va->flags & VM_VM_AREA && likely(!(va->flags & VM_STATIC))) {
 		struct vm_struct *vm = va->vm;
 
 		spin_lock(&vmap_area_lock);
@@ -1510,7 +1510,7 @@ static void __vunmap(const void *addr, int deallocate_pages)
 
 	area = remove_vm_area(addr);
 	if (unlikely(!area)) {
-		WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
+		WARN(1, KERN_ERR "Trying to vfree() nonexistent or static vm area (%p)\n",
 				addr);
 		return;
 	}
@@ -2708,6 +2708,9 @@ static int s_show(struct seq_file *m, void *p)
 	if (v->phys_addr)
 		seq_printf(m, " phys=%pa", &v->phys_addr);
 
+	if (v->flags & VM_STATIC)
+		seq_puts(m, " static");
+
 	if (v->flags & VM_IOREMAP)
 		seq_puts(m, " ioremap");
 
-- 
2.7.4
