Date:	Tue, 17 Feb 2009 12:05:07 -0500 (EST)
From:	Christoph Lameter <cl@...ux-foundation.org>
To:	Pekka Enberg <penberg@...helsinki.fi>
cc:	Mel Gorman <mel@....ul.ie>, Nick Piggin <nickpiggin@...oo.com.au>,
	Nick Piggin <npiggin@...e.de>,
	Linux Memory Management List <linux-mm@...ck.org>,
	Linux Kernel Mailing List <linux-kernel@...r.kernel.org>,
	Andrew Morton <akpm@...ux-foundation.org>,
	Lin Ming <ming.m.lin@...el.com>,
	"Zhang, Yanmin" <yanmin_zhang@...ux.intel.com>
Subject: Re: [patch] SLQB slab allocator (try 2)

Well yes, you missed two locations (the kmalloc_caches array has to be
redimensioned), and I was also writing the same patch...

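(Side note, not part of the patch: the general caches are indexed by the
power-of-two bucket of the request, so raising the limit from PAGE_SIZE to
2 * PAGE_SIZE adds one more index and therefore one more array slot. A rough
userspace sketch of that arithmetic, assuming 4k pages and a hypothetical,
simplified stand-in for the real index calculation:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/* Hypothetical, simplified stand-in for SLUB's index calculation:
 * round the request up to a power of two and return its log2.
 * (The real code also special-cases 96 and 192 bytes, ignored here.) */
static int slab_index(unsigned long size)
{
	int i = 0;

	while ((1UL << i) < size)
		i++;
	return i;
}

int main(void)
{
	printf("%lu bytes -> index %d (PAGE_SHIFT + 1 = 13 entries suffice)\n",
	       PAGE_SIZE, slab_index(PAGE_SIZE));		/* 12 */
	printf("%lu bytes -> index %d (needs PAGE_SHIFT + 2 = 14 entries)\n",
	       2 * PAGE_SIZE, slab_index(2 * PAGE_SIZE));	/* 13 */
	return 0;
}
)
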
Here is mine:

Subject: SLUB: Do not pass 8k objects through to the page allocator

Increase the maximum object size in SLUB so that 8k objects are no longer
passed through to the page allocator. The network stack uses 8k objects for
performance-critical operations.

Signed-off-by: Christoph Lameter <cl@...ux-foundation.org>

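(Illustration only, not part of the patch: with the new limit, the size
checks below route an 8k request to the kmalloc caches and only larger
requests to the page allocator. A small userspace sketch of that split,
again assuming 4k pages; route() is just a hypothetical helper mirroring
the "size > SLUB_MAX_SIZE" tests:

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define SLUB_MAX_SIZE	(2 * PAGE_SIZE)		/* 8192 after this patch */

/* Hypothetical helper mirroring the "size > SLUB_MAX_SIZE" checks below. */
static const char *route(unsigned long size)
{
	return size > SLUB_MAX_SIZE ? "kmalloc_large (page allocator)"
				    : "kmalloc cache (SLUB fastpath)";
}

int main(void)
{
	printf("%5d bytes -> %s\n", 8192, route(8192));	/* now stays in SLUB */
	printf("%5d bytes -> %s\n", 8193, route(8193));	/* still page allocator */
	return 0;
}
)
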
Index: linux-2.6/include/linux/slub_def.h
===================================================================
--- linux-2.6.orig/include/linux/slub_def.h	2009-02-17 10:45:51.000000000 -0600
+++ linux-2.6/include/linux/slub_def.h	2009-02-17 11:06:53.000000000 -0600
@@ -121,10 +121,21 @@
 #define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)

 /*
+ * Maximum kmalloc object size handled by SLUB. Larger object allocations
+ * are passed through to the page allocator. The page allocator "fastpath"
+ * is relatively slow so we need this value sufficiently high so that
+ * performance critical objects are allocated through the SLUB fastpath.
+ *
+ * This should be dropped to PAGE_SIZE / 2 once the page allocator
+ * "fastpath" becomes competitive with the slab allocator fastpaths.
+ */
+#define SLUB_MAX_SIZE (2 * PAGE_SIZE)
+
+/*
  * We keep the general caches in an array of slab caches that are used for
  * 2^x bytes of allocations.
  */
-extern struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1];
+extern struct kmem_cache kmalloc_caches[PAGE_SHIFT + 2];

 /*
  * Sorry that the following has to be that ugly but some versions of GCC
@@ -212,7 +223,7 @@
 static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
 	if (__builtin_constant_p(size)) {
-		if (size > PAGE_SIZE)
+		if (size > SLUB_MAX_SIZE)
 			return kmalloc_large(size, flags);

 		if (!(flags & SLUB_DMA)) {
@@ -234,7 +245,7 @@
 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	if (__builtin_constant_p(size) &&
-		size <= PAGE_SIZE && !(flags & SLUB_DMA)) {
+		size <= SLUB_MAX_SIZE && !(flags & SLUB_DMA)) {
 			struct kmem_cache *s = kmalloc_slab(size);

 		if (!s)
Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c	2009-02-17 10:49:47.000000000 -0600
+++ linux-2.6/mm/slub.c	2009-02-17 10:58:14.000000000 -0600
@@ -2475,7 +2475,7 @@
  *		Kmalloc subsystem
  *******************************************************************/

-struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1] __cacheline_aligned;
+struct kmem_cache kmalloc_caches[PAGE_SHIFT + 2] __cacheline_aligned;
 EXPORT_SYMBOL(kmalloc_caches);

 static int __init setup_slub_min_order(char *str)
@@ -2658,7 +2658,7 @@
 {
 	struct kmem_cache *s;

-	if (unlikely(size > PAGE_SIZE))
+	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large(size, flags);

 	s = get_slab(size, flags);
@@ -2686,7 +2686,7 @@
 {
 	struct kmem_cache *s;

-	if (unlikely(size > PAGE_SIZE))
+	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large_node(size, flags, node);

 	s = get_slab(size, flags);
@@ -3223,7 +3223,7 @@
 {
 	struct kmem_cache *s;

-	if (unlikely(size > PAGE_SIZE))
+	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large(size, gfpflags);

 	s = get_slab(size, gfpflags);
@@ -3239,7 +3239,7 @@
 {
 	struct kmem_cache *s;

-	if (unlikely(size > PAGE_SIZE))
+	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large_node(size, gfpflags, node);

 	s = get_slab(size, gfpflags);
