Message-Id: <20190902112028.23773-6-dja@axtens.net>
Date: Mon, 2 Sep 2019 21:20:28 +1000
From: Daniel Axtens <dja@...ens.net>
To: kasan-dev@...glegroups.com, linux-mm@...ck.org, x86@...nel.org,
aryabinin@...tuozzo.com, glider@...gle.com, luto@...nel.org,
linux-kernel@...r.kernel.org, mark.rutland@....com,
dvyukov@...gle.com, christophe.leroy@....fr
Cc: linuxppc-dev@...ts.ozlabs.org, gor@...ux.ibm.com,
Daniel Axtens <dja@...ens.net>
Subject: [PATCH v6 5/5] kasan debug: track pages allocated for vmalloc shadow

Provide the current number of vmalloc shadow pages in
/sys/kernel/debug/kasan_vmalloc/shadow_pages.
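
For example (a quick check; the value shown is illustrative, taken
from the fresh-boot figure in the notes below):

  # cat /sys/kernel/debug/kasan_vmalloc/shadow_pages
  720
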
Signed-off-by: Daniel Axtens <dja@...ens.net>
---
Merging this is probably overkill, but I leave it to the discretion
of the broader community.

On v4 (no dynamic freeing), I saw the following approximate figures
on my test VM:
- fresh boot: 720
- after test_vmalloc: ~14000

With v5 (lazy dynamic freeing):
- boot: ~490-500
- running modprobe test_vmalloc pushes the figure up to sometimes
  as high as ~14000, but it drops back down to ~560 after the test
  ends (see the example session below). I'm not sure where the
  extra ~60 pages come from, but running the test repeatedly
  doesn't cause the number to keep growing, so I don't think we're
  leaking.
- with vmap_stack, spawning tasks pushes the figure up to ~4200,
  then the lazy freeing kicks in and drops it back down to previous
  levels again.
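
For concreteness, a v5 session looks something like this (values
illustrative, matching the approximate figures above; the final
reading assumes the lazy freeing has caught up):

  # cat /sys/kernel/debug/kasan_vmalloc/shadow_pages
  495
  # modprobe test_vmalloc
  # cat /sys/kernel/debug/kasan_vmalloc/shadow_pages
  560
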
---
mm/kasan/common.c | 26 ++++++++++++++++++++++++++
 1 file changed, 26 insertions(+)

diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 0b5141108cdc..fae3cf4ab23a 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -35,6 +35,7 @@
#include <linux/vmalloc.h>
#include <linux/bug.h>
#include <linux/uaccess.h>
+#include <linux/debugfs.h>

#include "kasan.h"
#include "../slab.h"
@@ -748,6 +749,8 @@ core_initcall(kasan_memhotplug_init);
#endif

#ifdef CONFIG_KASAN_VMALLOC
+static u64 vmalloc_shadow_pages;
+
static int kasan_populate_vmalloc_pte(pte_t *ptep, unsigned long addr,
void *unused)
{
@@ -774,6 +777,7 @@ static int kasan_populate_vmalloc_pte(pte_t *ptep, unsigned long addr,
if (likely(pte_none(*ptep))) {
set_pte_at(&init_mm, addr, ptep, pte);
page = 0;
+ vmalloc_shadow_pages++;
}
spin_unlock(&init_mm.page_table_lock);
if (page)
@@ -827,6 +831,7 @@ static int kasan_depopulate_vmalloc_pte(pte_t *ptep, unsigned long addr,
if (likely(!pte_none(*ptep))) {
pte_clear(&init_mm, addr, ptep);
free_page(page);
+ vmalloc_shadow_pages--;
}
spin_unlock(&init_mm.page_table_lock);

@@ -882,4 +887,25 @@ void kasan_release_vmalloc(unsigned long start, unsigned long end,
(unsigned long)(shadow_end - shadow_start),
kasan_depopulate_vmalloc_pte, NULL);
}
+
+static __init int kasan_init_vmalloc_debugfs(void)
+{
+ struct dentry *root, *count;
+
+ root = debugfs_create_dir("kasan_vmalloc", NULL);
+ if (IS_ERR(root)) {
+ if (PTR_ERR(root) == -ENODEV)
+ return 0;
+ return PTR_ERR(root);
+ }
+
+ count = debugfs_create_u64("shadow_pages", 0444, root,
+ &vmalloc_shadow_pages);
+
+ if (IS_ERR(count))
+ return PTR_ERR(count);
+
+ return 0;
+}
+late_initcall(kasan_init_vmalloc_debugfs);
#endif
--
2.20.1