Message-Id: <1337680298-11929-1-git-send-email-glommer@parallels.com>
Date:	Tue, 22 May 2012 13:51:38 +0400
From:	Glauber Costa <glommer@...allels.com>
To:	<linux-kernel@...r.kernel.org>
Cc:	<cgroups@...r.kernel.org>, <linux-mm@...ck.org>,
	Glauber Costa <glommer@...allels.com>,
	Christoph Lameter <cl@...ux.com>,
	Pekka Enberg <penberg@...helsinki.fi>,
	David Rientjes <rientjes@...gle.com>
Subject: [PATCH v2] slab+slob: dup name string

The slub allocator creates a copy of the name string and frees it
later. I would like all cache allocators to behave the same way,
whether that means slab and slob starting to create their own copies
or slub ceasing to do so.

This patch creates copies of the name string for slob and slab,
adopting the slub behavior for all of them.

For slab, we can't duplicate the string before the kmalloc caches are
up. So we do it manually just before the end of the EARLY phase, and
conditionally for the caches created afterwards.
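
For readers outside the diff context, the shape of the change is
roughly the stand-alone sketch below. It is a user-space illustration
only, not kernel code: toy_cache, toy_cache_create(),
toy_cache_destroy() and allocator_is_up are made-up placeholders
standing in for kmem_cache, kmem_cache_create(), kmem_cache_destroy()
and slab_is_available().

/*
 * Minimal user-space sketch of the dup-on-create / free-on-destroy
 * pattern the patch applies to slab and slob.  All names here are
 * illustrative, not kernel definitions.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct toy_cache {
	const char *name;
	size_t size;
};

/*
 * Stand-in for slab_is_available(): in the real patch the early-boot
 * case skips the dup at create time and the names are duplicated
 * later, at the end of kmem_cache_init().
 */
static int allocator_is_up = 1;

static struct toy_cache *toy_cache_create(const char *name, size_t size)
{
	struct toy_cache *c = calloc(1, sizeof(*c));

	if (!c)
		return NULL;

	/* Duplicate the caller's string so the cache owns its own copy. */
	c->name = allocator_is_up ? strdup(name) : name;
	if (!c->name) {
		free(c);
		return NULL;
	}
	c->size = size;
	return c;
}

static void toy_cache_destroy(struct toy_cache *c)
{
	/* Safe once every create path hands the cache an owned copy. */
	free((void *)c->name);
	free(c);
}

int main(void)
{
	struct toy_cache *c = toy_cache_create("demo-cache", 64);

	if (c) {
		printf("created cache '%s' (object size %zu)\n",
		       c->name, c->size);
		toy_cache_destroy(c);
	}
	return 0;
}

Once every create path hands out an owned copy, the destroy path can
unconditionally free the name, which is the consistency the patch is
after.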

[ v2: Also dup string for early caches, requested by David Rientjes ]

Signed-off-by: Glauber Costa <glommer@...allels.com>
CC: Christoph Lameter <cl@...ux.com>
CC: Pekka Enberg <penberg@...helsinki.fi>
CC: David Rientjes <rientjes@...gle.com>
---
 mm/slab.c |   37 +++++++++++++++++++++++++++++++++++--
 mm/slob.c |   12 ++++++++++--
 2 files changed, 45 insertions(+), 4 deletions(-)

diff --git a/mm/slab.c b/mm/slab.c
index e901a36..fe05f8bf 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1676,6 +1676,33 @@ void __init kmem_cache_init(void)
 		}
 	}
 
+	/*
+	 * create a copy of all the name strings for early caches. This is
+	 * so deleting those caches will work in a consistent way. We don't
+	 * expect allocation failures this early in the process, just make sure
+	 * they didn't happen.
+	 */
+	sizes = malloc_sizes;
+
+	while (sizes->cs_size != ULONG_MAX) {
+		struct kmem_cache *cachep;
+
+		cachep = sizes->cs_cachep;
+		if (cachep) {
+			cachep->name = kstrdup(cachep->name, GFP_NOWAIT);
+			BUG_ON(!cachep->name);
+		}
+
+		cachep = sizes->cs_dmacachep;
+		if (cachep) {
+			cachep->name = kstrdup(cachep->name, GFP_NOWAIT);
+			BUG_ON(!cachep->name);
+		}
+		sizes++;
+	}
+
+	cache_cache.name = kstrdup(cache_cache.name, GFP_NOWAIT);
+	BUG_ON(!cache_cache.name);
 	g_cpucache_up = EARLY;
 }
 
@@ -2118,6 +2145,7 @@ static void __kmem_cache_destroy(struct kmem_cache *cachep)
 			kfree(l3);
 		}
 	}
+	kfree(cachep->name);
 	kmem_cache_free(&cache_cache, cachep);
 }
 
@@ -2526,9 +2554,14 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 		BUG_ON(ZERO_OR_NULL_PTR(cachep->slabp_cache));
 	}
 	cachep->ctor = ctor;
-	cachep->name = name;
 
-	if (setup_cpu_cache(cachep, gfp)) {
+	/* Can't do strdup while kmalloc is not up */
+	if (slab_is_available())
+		cachep->name = kstrdup(name, GFP_KERNEL);
+	else
+		cachep->name = name;
+
+	if (!cachep->name || setup_cpu_cache(cachep, gfp)) {
 		__kmem_cache_destroy(cachep);
 		cachep = NULL;
 		goto oops;
diff --git a/mm/slob.c b/mm/slob.c
index 8105be4..8f10d36 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -575,7 +575,12 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 		GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
 
 	if (c) {
-		c->name = name;
+		c->name = kstrdup(name, GFP_KERNEL);
+		if (!c->name) {
+			slob_free(c, sizeof(struct kmem_cache));
+			c = NULL;
+			goto out;
+		}
 		c->size = size;
 		if (flags & SLAB_DESTROY_BY_RCU) {
 			/* leave room for rcu footer at the end of object */
@@ -589,7 +594,9 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 			c->align = ARCH_SLAB_MINALIGN;
 		if (c->align < align)
 			c->align = align;
-	} else if (flags & SLAB_PANIC)
+	}
+out:
+	if (!c && (flags & SLAB_PANIC))
 		panic("Cannot create slab cache %s\n", name);
 
 	kmemleak_alloc(c, sizeof(struct kmem_cache), 1, GFP_KERNEL);
@@ -602,6 +609,7 @@ void kmem_cache_destroy(struct kmem_cache *c)
 	kmemleak_free(c);
 	if (c->flags & SLAB_DESTROY_BY_RCU)
 		rcu_barrier();
+	kfree(c->name);
 	slob_free(c, sizeof(struct kmem_cache));
 }
 EXPORT_SYMBOL(kmem_cache_destroy);
-- 
1.7.7.6
