Date:	Tue, 31 Jul 2007 16:25:12 -0700
From:	Christoph Lameter <clameter@....com>
To:	linux-kernel@...r.kernel.org
Cc:	Christoph Lameter <clameter@....com>
Subject: [PATCH 7/7] Simple Performance Counters: SLUB instrumentation

With this patch, SLUB runs a series of performance tests on bootup and prints the results: kmalloc()/kfree(), kmem_cache_alloc()/kmem_cache_free() and kmem_cache_zalloc() are each timed over a range of object sizes, both as a repeated allocate-then-free pass and as back-to-back alloc/free pairs. The tests are compiled in only when SLUB_PERFORMANCE_TEST is defined in mm/slub.c (it is #undef by default, like SLUB_RESILIENCY_TEST).
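
struct pc, pc_start() and pc_stop_printk() come from the simple
performance counters introduced earlier in this series (hence the new
<linux/perf.h> include below). For readers seeing only this patch, a
minimal sketch of the interface it assumes -- the struct layout and the
get_cycles() timing are illustrative guesses, not the actual 1/7 code:

/* Sketch only: the real definitions are added by patch 1/7 of this
 * series in <linux/perf.h>.  The field name and the use of
 * get_cycles() are assumptions made for illustration.
 */
#include <asm/timex.h>		/* cycles_t, get_cycles() */
#include <linux/kernel.h>	/* printk() */

struct pc {
	cycles_t start;		/* cycle counter sampled at pc_start() */
};

static inline void pc_start(struct pc *pc)
{
	pc->start = get_cycles();
}

/* Print the cycles spent since the matching pc_start().  No KERN_
 * level: this continues the line started by the caller's printk().
 */
static inline void pc_stop_printk(struct pc *pc)
{
	printk("%llu cycles",
	       (unsigned long long)(get_cycles() - pc->start));
}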

Signed-off-by: Christoph Lameter <clameter@....com>
---
 mm/slub.c |   97 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++---
 1 files changed, 93 insertions(+), 4 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 6c6d74f..568b16a 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -20,6 +20,7 @@
 #include <linux/mempolicy.h>
 #include <linux/ctype.h>
 #include <linux/kallsyms.h>
+#include <linux/perf.h>
 
 /*
  * Lock order:
@@ -152,6 +153,7 @@ static inline void ClearSlabDebug(struct page *page)
 
 /* Enable to test recovery from slab corruption on boot */
 #undef SLUB_RESILIENCY_TEST
+#undef SLUB_PERFORMANCE_TEST
 
 #if PAGE_SHIFT <= 12
 
@@ -2870,9 +2872,98 @@ static long validate_slab_cache(struct kmem_cache *s)
 	return count;
 }
 
-#ifdef SLUB_RESILIENCY_TEST
 static void resiliency_test(void)
 {
+#ifdef SLUB_PERFORMANCE_TEST
+#define TEST_COUNT 10000
+	int size, i;
+	struct pc x;
+	void **v = kmalloc(TEST_COUNT * sizeof(void *), GFP_KERNEL);
+
+	if (!v)
+		return;
+
+	printk(KERN_INFO "SLUB Performance testing\n");
+	printk(KERN_INFO "========================\n");
+	printk(KERN_INFO "1. Kmalloc: Repeatedly allocate then free test\n");
+	for (size = 8; size <= PAGE_SIZE << 2; size <<= 1) {
+		pc_start(&x);
+		for (i = 0; i < TEST_COUNT; i++)
+			v[i] = kmalloc(size, GFP_KERNEL);
+		printk(KERN_INFO "%d times kmalloc(%d) = ", i, size);
+		pc_stop_printk(&x);
+		pc_start(&x);
+		for (i = 0; i < TEST_COUNT; i++)
+			kfree(v[i]);
+		printk(" kfree() = ");
+		pc_stop_printk(&x);
+		printk("\n");
+	}
+
+	printk(KERN_INFO "2. Kmalloc: alloc/free test\n");
+	for (size = 8; size <= PAGE_SIZE << 2; size <<= 1) {
+		pc_start(&x);
+		for (i = 0; i < TEST_COUNT; i++)
+			kfree(kmalloc(size, GFP_KERNEL));
+		printk(KERN_INFO "%d times kmalloc(%d)/kfree = ", i, size);
+		pc_stop_printk(&x);
+		printk("\n");
+	}
+	printk(KERN_INFO "3. kmem_cache_alloc: Repeatedly allocate then free test\n");
+	for (size = 3; size <= PAGE_SHIFT; size++) {
+		pc_start(&x);
+		for (i = 0; i < TEST_COUNT; i++)
+			v[i] = kmem_cache_alloc(kmalloc_caches + size, GFP_KERNEL);
+		printk(KERN_INFO "%d times kmem_cache_alloc(%d) = ", i, 1 << size);
+		pc_stop_printk(&x);
+		pc_start(&x);
+		for (i = 0; i < TEST_COUNT; i++)
+			kmem_cache_free(kmalloc_caches + size, v[i]);
+		printk(" kmem_cache_free() = ");
+		pc_stop_printk(&x);
+		printk("\n");
+	}
+
+	printk(KERN_INFO "4. kmem_cache_alloc: alloc/free test\n");
+	for (size = 3; size <= PAGE_SHIFT; size++) {
+		pc_start(&x);
+		for (i = 0; i < TEST_COUNT; i++)
+			kmem_cache_free(kmalloc_caches + size,
+				kmem_cache_alloc(kmalloc_caches + size,
+							GFP_KERNEL));
+		printk(KERN_INFO "%d times kmem_cache_alloc(%d)/kmem_cache_free = ", i, 1 << size);
+		pc_stop_printk(&x);
+		printk("\n");
+	}
+	printk(KERN_INFO "5. kmem_cache_zalloc: Repeatedly allocate then free test\n");
+	for (size = 3; size <= PAGE_SHIFT; size++) {
+		pc_start(&x);
+		for (i = 0; i < TEST_COUNT; i++)
+			v[i] = kmem_cache_zalloc(kmalloc_caches + size, GFP_KERNEL);
+		printk(KERN_INFO "%d times kmem_cache_zalloc(%d) = ", i, 1 << size);
+		pc_stop_printk(&x);
+		pc_start(&x);
+		for (i = 0; i < TEST_COUNT; i++)
+			kmem_cache_free(kmalloc_caches + size, v[i]);
+		printk(" kmem_cache_free() = ");
+		pc_stop_printk(&x);
+		printk("\n");
+	}
+
+	printk(KERN_INFO "6. kmem_cache_zalloc: alloc/free test\n");
+	for (size = 3; size <= PAGE_SHIFT; size++) {
+		pc_start(&x);
+		for (i = 0; i < TEST_COUNT; i++)
+			kmem_cache_free(kmalloc_caches + size,
+				kmem_cache_zalloc(kmalloc_caches + size,
+							GFP_KERNEL));
+		printk(KERN_INFO "%d times kmem_cache_zalloc(%d)/kmem_cache_free = ", i, 1 << size);
+		pc_stop_printk(&x);
+		printk("\n");
+	}
+	kfree(v);
+#endif
+#ifdef SLUB_RESILIENCY_TEST
 	u8 *p;
 
 	printk(KERN_ERR "SLUB resiliency testing\n");
@@ -2920,11 +3011,9 @@ static void resiliency_test(void)
 	p[512] = 0xab;
 	printk(KERN_ERR "\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
 	validate_slab_cache(kmalloc_caches + 9);
-}
-#else
-static void resiliency_test(void) {};
 #endif
+}
 
 /*
  * Generate lists of code addresses where slabcache objects are allocated
  * and freed.
-- 
1.5.2.4
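
Note: the test is compiled out by default. As with SLUB_RESILIENCY_TEST,
enabling it means flipping the guard in mm/slub.c from #undef to
#define:

-#undef SLUB_PERFORMANCE_TEST
+#define SLUB_PERFORMANCE_TEST

Each test then prints the iteration count and object size, followed by
whatever pc_stop_printk() reports for the allocation and free phases.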

