Message-ID: <0000013e5b83ec27-03a11e6a-157a-40ac-a65d-0281ee0d40fe-000000@email.amazonses.com>
Date:	Tue, 30 Apr 2013 15:16:12 +0000
From:	Christoph Lameter <cl@...ux.com>
To:	Tetsuo Handa <penguin-kernel@...ove.SAKURA.ne.jp>
cc:	glommer@...allels.com, penberg@...nel.org,
	linux-kernel@...r.kernel.org
Subject: Fix off-by-one error in slab.h

We ran into some strange issues that turned out to be the result of an
off-by-one error in slab.h.

The root of the problem is the confusing treatment of KMALLOC_SHIFT_HIGH:
it names the last supported allocation shift, so the kmalloc cache arrays
must be sized KMALLOC_SHIFT_HIGH + 1 and every loop over them needs a
matching <= bound.

Make KMALLOC_SHIFT_HIGH the first unsupported size instead of the last
supported one. The arrays can then be sized with KMALLOC_SHIFT_HIGH
directly, the loops use <, and the error-prone + 1 arithmetic goes away.
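
To make the convention change concrete, here is a minimal standalone
sketch (not part of the patch; LOW and HIGH are made-up shift bounds for
illustration) contrasting the closed range used before with the half-open
range used after:

	#define LOW  3
	#define HIGH 14				/* hypothetical bounds, not the kernel's */

	static void *caches_closed[HIGH + 1];	/* old: HIGH is the last valid index */
	static void *caches_half_open[HIGH];	/* new: HIGH is the first invalid index */

	static void init_all(void)
	{
		int i;

		/* Old convention: the "+ 1" in the array size and the "<="
		 * in the loop bound must be kept in sync by hand. */
		for (i = LOW; i <= HIGH; i++)
			caches_closed[i] = 0;

		/* New convention: array size and loop bound are the same
		 * expression, so they cannot drift apart. */
		for (i = LOW; i < HIGH; i++)
			caches_half_open[i] = 0;
	}

With the half-open form there is no + 1 left to forget, which is exactly
the class of error this patch removes.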

Signed-off-by: Christoph Lameter <cl@...ux.com>

Index: linux/include/linux/slab.h
===================================================================
--- linux.orig/include/linux/slab.h	2013-04-30 09:54:23.636533564 -0500
+++ linux/include/linux/slab.h	2013-04-30 10:10:35.676932866 -0500
@@ -176,8 +176,8 @@ struct kmem_cache {
  * to do various tricks to work around compiler limitations in order to
  * ensure proper constant folding.
  */
-#define KMALLOC_SHIFT_HIGH	((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
-				(MAX_ORDER + PAGE_SHIFT - 1) : 25)
+#define KMALLOC_SHIFT_HIGH	((MAX_ORDER + PAGE_SHIFT) <= 26 ? \
+				(MAX_ORDER + PAGE_SHIFT) : 26)
 #define KMALLOC_SHIFT_MAX	KMALLOC_SHIFT_HIGH
 #define KMALLOC_SHIFT_LOW	5
 #else
@@ -185,7 +185,7 @@ struct kmem_cache {
  * SLUB allocates up to order 2 pages directly and otherwise
  * passes the request to the page allocator.
  */
-#define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
+#define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 2)
 #define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT)
 #define KMALLOC_SHIFT_LOW	3
 #endif
@@ -193,7 +193,7 @@ struct kmem_cache {
 /* Maximum allocatable size */
 #define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_MAX)
 /* Maximum size for which we actually use a slab cache */
-#define KMALLOC_MAX_CACHE_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
+#define KMALLOC_MAX_CACHE_SIZE	(1UL << (KMALLOC_SHIFT_HIGH - 1))
 /* Maximum order allocatable via the slab allocator */
 #define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_MAX - PAGE_SHIFT)

@@ -206,9 +206,9 @@ struct kmem_cache {
 #define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
 #endif

-extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
+extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH];
 #ifdef CONFIG_ZONE_DMA
-extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
+extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH];
 #endif

 /*
Index: linux/mm/slab_common.c
===================================================================
--- linux.orig/mm/slab_common.c	2013-04-30 09:54:23.636533564 -0500
+++ linux/mm/slab_common.c	2013-04-30 09:54:53.693039252 -0500
@@ -319,11 +319,11 @@ struct kmem_cache *__init create_kmalloc
 	return s;
 }

-struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
+struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH];
 EXPORT_SYMBOL(kmalloc_caches);

 #ifdef CONFIG_ZONE_DMA
-struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
+struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH];
 EXPORT_SYMBOL(kmalloc_dma_caches);
 #endif

@@ -446,7 +446,7 @@ void __init create_kmalloc_caches(unsign
 	if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2])
 		kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, flags);

-	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
+	for (i = KMALLOC_SHIFT_LOW; i < KMALLOC_SHIFT_HIGH; i++)
 		if (!kmalloc_caches[i])
 			kmalloc_caches[i] = create_kmalloc_cache(NULL,
 							1 << i, flags);
@@ -454,7 +454,7 @@ void __init create_kmalloc_caches(unsign
 	/* Kmalloc array is now usable */
 	slab_state = UP;

-	for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
+	for (i = 0; i < KMALLOC_SHIFT_HIGH; i++) {
 		struct kmem_cache *s = kmalloc_caches[i];
 		char *n;

@@ -467,7 +467,7 @@ void __init create_kmalloc_caches(unsign
 	}

 #ifdef CONFIG_ZONE_DMA
-	for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
+	for (i = 0; i < KMALLOC_SHIFT_HIGH; i++) {
 		struct kmem_cache *s = kmalloc_caches[i];

 		if (s) {
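
As a sanity check of the new arithmetic, the following standalone program
evaluates the old and new definitions side by side. PAGE_SHIFT = 12 and
MAX_ORDER = 11 are assumed values matching a common x86-64 configuration,
not something the patch dictates:

	#include <stdio.h>

	#define PAGE_SHIFT 12	/* assumed: 4K pages */
	#define MAX_ORDER  11	/* assumed: default buddy allocator limit */

	/* Old convention: KMALLOC_SHIFT_HIGH is the last supported shift. */
	#define OLD_HIGH ((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
			  (MAX_ORDER + PAGE_SHIFT - 1) : 25)

	/* New convention: KMALLOC_SHIFT_HIGH is the first unsupported shift. */
	#define NEW_HIGH ((MAX_ORDER + PAGE_SHIFT) <= 26 ? \
			  (MAX_ORDER + PAGE_SHIFT) : 26)

	int main(void)
	{
		/* Both conventions describe the same caches: the largest
		 * slab-backed kmalloc allocation stays at 4 MiB and the
		 * cache arrays keep 23 usable slots. */
		printf("old: high=%d max_cache=%lu slots=%d\n",
		       OLD_HIGH, 1UL << OLD_HIGH, OLD_HIGH + 1);
		printf("new: high=%d max_cache=%lu slots=%d\n",
		       NEW_HIGH, 1UL << (NEW_HIGH - 1), NEW_HIGH);
		return 0;
	}

Under these assumptions both lines print a 4194304-byte maximum cache
size, confirming that the patch changes the naming convention but not the
set of supported sizes.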