lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date:	Mon, 25 Apr 2011 20:46:48 +0300
From:	Pekka Enberg <penberg@...nel.org>
To:	linux-kernel@...r.kernel.org
Cc:	Pekka Enberg <penberg@...nel.org>,
	Christoph Lameter <cl@...ux.com>,
	David Rientjes <rientjes@...gle.com>,
	Linus Torvalds <torvalds@...ux-foundation.org>
Subject: [RFC/PATCH 2/2] slub: Don't share struct kmem_cache for shared caches

This patch changes the slab cache sharing in SLUB to only share 'struct
kmem_cache_cpu' which contains the actual list of slabs and object freelists.
We no longer share 'struct kmem_cache' between merged caches so /proc/slabinfo
statistics work as expected:

  Before:

  # cat /proc/slabinfo | wc -l
  104

  After:

  # cat /proc/slabinfo | wc -l
  185

Cc: Christoph Lameter <cl@...ux.com>
Cc: David Rientjes <rientjes@...gle.com>
Cc: Linus Torvalds <torvalds@...ux-foundation.org>
Signed-off-by: Pekka Enberg <penberg@...nel.org>
---
 mm/slub.c |   98 +++++++++++++++++++-----------------------------------------
 1 files changed, 31 insertions(+), 67 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index cb61024..3a8fbca 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2349,6 +2349,11 @@ init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
 #endif
 }
 
+static inline void dup_kmem_cache_cpus(struct kmem_cache *src, struct kmem_cache *dst)
+{
+	dst->cpu_slab = src->cpu_slab;
+}
+
 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
 {
 	BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
@@ -3441,26 +3446,41 @@ static struct kmem_cache *find_mergeable(size_t size,
 struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 		size_t align, unsigned long flags, void (*ctor)(void *))
 {
-	struct kmem_cache *s;
+	struct kmem_cache *s, *parent;
 	char *n;
 
 	if (WARN_ON(!name))
 		return NULL;
 
 	down_write(&slub_lock);
-	s = find_mergeable(size, align, flags, name, ctor);
-	if (s) {
-		s->refcount++;
+	parent = find_mergeable(size, align, flags, name, ctor);
+	if (parent) {
+		n = kstrdup(name, GFP_KERNEL);
+		if (!n)
+			goto err;
+
+		s = kmalloc(kmem_size, GFP_KERNEL);
+		if (!s)
+			goto err_free;
+
+		if (!kmem_cache_open(s, n, size, align, flags, ctor))
+			goto err_free;
+
+		dup_kmem_cache_cpus(parent, s);
+
+		parent->refcount++;
 		/*
 		 * Adjust the object sizes so that we clear
 		 * the complete object on kzalloc.
 		 */
-		s->objsize = max(s->objsize, (int)size);
-		s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
+		parent->objsize = max(parent->objsize, (int)size);
+		parent->inuse = max_t(int, parent->inuse, ALIGN(size, sizeof(void *)));
 
-		if (sysfs_slab_alias(s, name)) {
-			s->refcount--;
-			goto err;
+		list_add(&s->list, &slab_caches);
+		if (sysfs_slab_add(s)) {
+			parent->refcount--;
+			list_del(&s->list);
+			goto err_free;
 		}
 		up_write(&slub_lock);
 		return s;
@@ -4670,68 +4690,17 @@ static const struct kset_uevent_ops slab_uevent_ops = {
 
 static struct kset *slab_kset;
 
-#define ID_STR_LENGTH 64
-
-/* Create a unique string id for a slab cache:
- *
- * Format	:[flags-]size
- */
-static char *create_unique_id(struct kmem_cache *s)
-{
-	char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
-	char *p = name;
-
-	BUG_ON(!name);
-
-	*p++ = ':';
-	/*
-	 * First flags affecting slabcache operations. We will only
-	 * get here for aliasable slabs so we do not need to support
-	 * too many flags. The flags here must cover all flags that
-	 * are matched during merging to guarantee that the id is
-	 * unique.
-	 */
-	if (s->flags & SLAB_CACHE_DMA)
-		*p++ = 'd';
-	if (s->flags & SLAB_RECLAIM_ACCOUNT)
-		*p++ = 'a';
-	if (s->flags & SLAB_DEBUG_FREE)
-		*p++ = 'F';
-	if (!(s->flags & SLAB_NOTRACK))
-		*p++ = 't';
-	if (p != name + 1)
-		*p++ = '-';
-	p += sprintf(p, "%07d", s->size);
-	BUG_ON(p > name + ID_STR_LENGTH - 1);
-	return name;
-}
-
 static int sysfs_slab_add(struct kmem_cache *s)
 {
 	int err;
 	const char *name;
-	int unmergeable;
 
 	if (slab_state < SYSFS)
 		/* Defer until later */
 		return 0;
 
-	unmergeable = slab_unmergeable(s);
-	if (unmergeable) {
-		/*
-		 * Slabcache can never be merged so we can use the name proper.
-		 * This is typically the case for debug situations. In that
-		 * case we can catch duplicate names easily.
-		 */
-		sysfs_remove_link(&slab_kset->kobj, s->name);
-		name = s->name;
-	} else {
-		/*
-		 * Create a unique name for the slab as a target
-		 * for the symlinks.
-		 */
-		name = create_unique_id(s);
-	}
+	sysfs_remove_link(&slab_kset->kobj, s->name);
+	name = s->name;
 
 	s->kobj.kset = slab_kset;
 	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, name);
@@ -4747,11 +4716,6 @@ static int sysfs_slab_add(struct kmem_cache *s)
 		return err;
 	}
 	kobject_uevent(&s->kobj, KOBJ_ADD);
-	if (!unmergeable) {
-		/* Setup first alias */
-		sysfs_slab_alias(s, s->name);
-		kfree(name);
-	}
 	return 0;
 }
 
-- 
1.7.0.4

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ