Message-ID: <Pine.LNX.4.64.0706112016030.25563@schroedinger.engr.sgi.com>
Date:	Mon, 11 Jun 2007 20:16:31 -0700 (PDT)
From:	Christoph Lameter <clameter@....com>
To:	Håvard Skinnemoen <hskinnemoen@...il.com>
cc:	Haavard Skinnemoen <hskinnemoen@...el.com>,
	Linux Kernel <linux-kernel@...r.kernel.org>,
	David Brownell <david-b@...bell.net>
Subject: Re: kernel BUG at mm/slub.c:3689!

> and I can't do that over VPN. I'll test it first thing in the morning.

Here is a more general fix:



SLUB: minimum alignment fixes

If ARCH_KMALLOC_MIN_ALIGN is set to a value greater than 8 (SLUB's smallest
kmalloc cache) then SLUB may generate duplicate slabs in sysfs (yes again),
because the objects of the small caches are all padded up to the same aligned
size. No arch sets ARCH_KMALLOC_MIN_ALIGN larger than 8 except mips, which
needs a 128-byte cache.

This patch increases the size of the smallest kmalloc cache to
ARCH_KMALLOC_MIN_ALIGN whenever that alignment is greater than 8. The larger
the minimum alignment, the more of the smallest caches are disabled.
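
To illustrate the effect, here is a standalone userspace sketch (not part of
the patch; the KMALLOC_MIN_SIZE of 128 models the mips case and the helper
names are only illustrative): every request at or below the minimum alignment
maps to the same cache index, so the power-of-two caches below it and
kmalloc-96 are never used and need not be created.

#include <stdio.h>

#define KMALLOC_MIN_SIZE 128	/* assumed: mips-style 128 byte minimum */

/* log2 of a power-of-two size, standing in for the kernel's ilog2() */
static int ilog2(unsigned long n)
{
	int i = -1;

	while (n) {
		n >>= 1;
		i++;
	}
	return i;
}

/* simplified version of kmalloc_index() with the patch applied */
static int kmalloc_index(unsigned long size)
{
	int i;
	unsigned long n;

	if (size <= KMALLOC_MIN_SIZE)
		return ilog2(KMALLOC_MIN_SIZE);
	if (size > 64 && size <= 96)
		return 1;
	if (size > 128 && size <= 192)
		return 2;

	/* otherwise round up to the next power of two */
	for (i = ilog2(KMALLOC_MIN_SIZE), n = KMALLOC_MIN_SIZE; n < size; n <<= 1)
		i++;
	return i;
}

/* object size of the cache at a given index (1 -> 96, 2 -> 192) */
static int cache_size(int index)
{
	if (index == 1)
		return 96;
	if (index == 2)
		return 192;
	return 1 << index;
}

int main(void)
{
	unsigned long sizes[] = { 8, 16, 64, 96, 128, 192, 256 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		int index = kmalloc_index(sizes[i]);

		printf("kmalloc(%3lu) -> index %d (kmalloc-%d)\n",
		       sizes[i], index, cache_size(index));
	}
	return 0;
}

With a 128 byte minimum, requests of 8..128 bytes all map to kmalloc-128,
192 maps to kmalloc-192 and 256 to kmalloc-256, which is why
kmem_cache_init() in the patch below skips creating the caches smaller
than KMALLOC_MIN_SIZE.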

Signed-off-by: Christoph Lameter <clameter@....com>

---
 include/linux/slub_def.h |   11 +++++++++--
 mm/slub.c                |   18 ++++++++++--------
 2 files changed, 19 insertions(+), 10 deletions(-)

Index: vps/include/linux/slub_def.h
===================================================================
--- vps.orig/include/linux/slub_def.h	2007-06-11 15:56:37.000000000 -0700
+++ vps/include/linux/slub_def.h	2007-06-11 19:31:15.000000000 -0700
@@ -28,7 +28,7 @@ struct kmem_cache {
 	int size;		/* The size of an object including meta data */
 	int objsize;		/* The size of an object without meta data */
 	int offset;		/* Free pointer offset. */
-	unsigned int order;
+	int order;
 
 	/*
 	 * Avoid an extra cache line for UP, SMP and for the node local to
@@ -56,7 +56,11 @@ struct kmem_cache {
 /*
  * Kmalloc subsystem.
  */
-#define KMALLOC_SHIFT_LOW 3
+#ifdef ARCH_KMALLOC_MIN_ALIGN
+#define KMALLOC_MIN_SIZE max(8, ARCH_KMALLOC_MIN_ALIGN)
+#else
+#define KMALLOC_MIN_SIZE 8
+#endif
 
 /*
  * We keep the general caches in an array of slab caches that are used for
@@ -76,6 +80,9 @@ static inline int kmalloc_index(size_t s
 	if (size > KMALLOC_MAX_SIZE)
 		return -1;
 
+	if (size <= KMALLOC_MIN_SIZE)
+		return ilog2(KMALLOC_MIN_SIZE);
+
 	if (size > 64 && size <= 96)
 		return 1;
 	if (size > 128 && size <= 192)
Index: vps/mm/slub.c
===================================================================
--- vps.orig/mm/slub.c	2007-06-11 15:56:37.000000000 -0700
+++ vps/mm/slub.c	2007-06-11 19:13:41.000000000 -0700
@@ -2193,11 +2193,11 @@ EXPORT_SYMBOL(kmem_cache_destroy);
  *		Kmalloc subsystem
  *******************************************************************/
 
-struct kmem_cache kmalloc_caches[KMALLOC_SHIFT_HIGH + 1] __cacheline_aligned;
+struct kmem_cache kmalloc_caches[ilog2(KMALLOC_MAX_SIZE) + 1] __cacheline_aligned;
 EXPORT_SYMBOL(kmalloc_caches);
 
 #ifdef CONFIG_ZONE_DMA
-static struct kmem_cache *kmalloc_caches_dma[KMALLOC_SHIFT_HIGH + 1];
+static struct kmem_cache *kmalloc_caches_dma[ilog2(KMALLOC_MAX_SIZE) + 1];
 #endif
 
 static int __init setup_slub_min_order(char *str)
@@ -2284,7 +2284,7 @@ static struct kmem_cache *get_slab(size_
 		if (!x)
 			panic("Unable to allocate memory for dma cache\n");
 
-		if (index <= KMALLOC_SHIFT_HIGH)
+		if (index <= ilog2(KMALLOC_MAX_SIZE))
 			realsize = 1 << index;
 		else {
 			if (index == 1)
@@ -2529,19 +2529,21 @@ void __init kmem_cache_init(void)
 	slab_state = PARTIAL;
 
 	/* Caches that are not of the two-to-the-power-of size */
-	create_kmalloc_cache(&kmalloc_caches[1],
+	if (KMALLOC_MIN_SIZE < 96)
+		create_kmalloc_cache(&kmalloc_caches[1],
 				"kmalloc-96", 96, GFP_KERNEL);
-	create_kmalloc_cache(&kmalloc_caches[2],
+	if (KMALLOC_MIN_SIZE < 192)
+		create_kmalloc_cache(&kmalloc_caches[2],
 				"kmalloc-192", 192, GFP_KERNEL);
 
-	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
+	for (i = ilog2(KMALLOC_MIN_SIZE); i <= ilog2(KMALLOC_MAX_SIZE); i++)
 		create_kmalloc_cache(&kmalloc_caches[i],
 			"kmalloc", 1 << i, GFP_KERNEL);
 
 	slab_state = UP;
 
 	/* Provide the correct kmalloc names now that the caches are up */
-	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
+	for (i = ilog2(KMALLOC_MIN_SIZE); i <= ilog2(KMALLOC_MAX_SIZE); i++)
 		kmalloc_caches[i]. name =
 			kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
 
@@ -2554,7 +2556,7 @@ void __init kmem_cache_init(void)
 
 	printk(KERN_INFO "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
 		" Processors=%d, Nodes=%d\n",
-		KMALLOC_SHIFT_HIGH, cache_line_size(),
+		ilog2(KMALLOC_MAX_SIZE), cache_line_size(),
 		slub_min_order, slub_max_order, slub_min_objects,
 		nr_cpu_ids, nr_node_ids);
 }
