[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20191029042059.28541-6-dja@axtens.net>
Date: Tue, 29 Oct 2019 15:20:59 +1100
From: Daniel Axtens <dja@...ens.net>
To: kasan-dev@...glegroups.com, linux-mm@...ck.org, x86@...nel.org,
aryabinin@...tuozzo.com, glider@...gle.com, luto@...nel.org,
linux-kernel@...r.kernel.org, mark.rutland@....com,
dvyukov@...gle.com, christophe.leroy@....fr
Cc: linuxppc-dev@...ts.ozlabs.org, gor@...ux.ibm.com,
Daniel Axtens <dja@...ens.net>
Subject: [PATCH v10 5/5] kasan debug: track pages allocated for vmalloc shadow
Provide the current number of vmalloc shadow pages in
/sys/kernel/debug/kasan/vmalloc_shadow_pages.
Signed-off-by: Daniel Axtens <dja@...ens.net>
---
v10: rebase on linux-next/master.
v8: rename kasan_vmalloc/shadow_pages -> kasan/vmalloc_shadow_pages
On v4 (no dynamic freeing), I saw the following approximate figures
on my test VM:
- fresh boot: 720
- after test_vmalloc: ~14000
With v5 (lazy dynamic freeing):
- boot: ~490-500
- running modprobe test_vmalloc pushes the figures up to sometimes
as high as ~14000, but they drop down to ~560 after the test ends.
I'm not sure where the extra sixty pages are from, but running the
test repeatedly doesn't cause the number to keep growing, so I don't
think we're leaking.
- with vmap_stack, spawning tasks pushes the figure up to ~4200, then
some clearing kicks in and drops it down to previous levels again.
---
mm/kasan/common.c | 23 +++++++++++++++++++++++
1 file changed, 23 insertions(+)
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 6e7bc5d3fa83..a4b5c64da16f 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -35,6 +35,7 @@
#include <linux/vmalloc.h>
#include <linux/bug.h>
#include <linux/uaccess.h>
+#include <linux/debugfs.h>
#include <asm/tlbflush.h>
@@ -750,6 +751,8 @@ core_initcall(kasan_memhotplug_init);
#endif
#ifdef CONFIG_KASAN_VMALLOC
+static u64 vmalloc_shadow_pages;
+
static int kasan_populate_vmalloc_pte(pte_t *ptep, unsigned long addr,
void *unused)
{
@@ -770,6 +773,7 @@ static int kasan_populate_vmalloc_pte(pte_t *ptep, unsigned long addr,
if (likely(pte_none(*ptep))) {
set_pte_at(&init_mm, addr, ptep, pte);
page = 0;
+ vmalloc_shadow_pages++;
}
spin_unlock(&init_mm.page_table_lock);
if (page)
@@ -858,6 +862,7 @@ static int kasan_depopulate_vmalloc_pte(pte_t *ptep, unsigned long addr,
if (likely(!pte_none(*ptep))) {
pte_clear(&init_mm, addr, ptep);
free_page(page);
+ vmalloc_shadow_pages--;
}
spin_unlock(&init_mm.page_table_lock);
@@ -974,4 +979,22 @@ void kasan_release_vmalloc(unsigned long start, unsigned long end,
(unsigned long)shadow_end);
}
}
+
+static __init int kasan_init_debugfs(void)
+{
+ struct dentry *root;
+
+ root = debugfs_create_dir("kasan", NULL);
+ if (IS_ERR(root)) {
+ if (PTR_ERR(root) == -ENODEV)
+ return 0;
+ return PTR_ERR(root);
+ }
+
+ debugfs_create_u64("vmalloc_shadow_pages", 0444, root,
+ &vmalloc_shadow_pages);
+
+ return 0;
+}
+late_initcall(kasan_init_debugfs);
#endif
--
2.20.1
Powered by blists - more mailing lists