Message-ID: <Pine.LNX.4.64.0805212125310.22291@sbz-30.cs.Helsinki.FI>
Date:	Wed, 21 May 2008 21:25:56 +0300 (EEST)
From:	Pekka J Enberg <penberg@...helsinki.fi>
To:	linux-kernel@...r.kernel.org
cc:	clameter@....com, mpm@...enic.com, lethal@...ux-sh.org,
	dhowells@...hat.com
Subject: [RFC/PATCH 2/3] SLUB: make ksize() more strict for page allocator
 pass-through

From: Pekka Enberg <penberg@...helsinki.fi>

This patch changes ksize() to be more strict about the objects passed to it. We
now set PageSlab also for objects allocated with the page allocator and use
page->slab to check whether a page is a regular slab page or a pass-through page.

Also move kmalloc_large() out of line, as it is now too big to inline.
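
For illustration only (this sketch is not part of the patch), here is roughly
how ksize() and kfree() interpret the PageSlab / page->slab combination after
this change; the actual code is in the mm/slub.c hunks below:

	/*
	 * Minimal sketch, not part of the patch: every kmalloc()ed page
	 * is now marked PageSlab, and page->slab distinguishes the two
	 * cases. "size = page->slab->size" is simplified; the real
	 * ksize() also accounts for debugging metadata.
	 */
	page = virt_to_head_page(object);
	BUG_ON(!PageSlab(page));		/* all kmalloc memory is marked now */
	if (!page->slab) {
		/* pass-through: handed out by the page allocator directly */
		size = PAGE_SIZE << compound_order(page);
	} else {
		/* regular slab page: page->slab is the backing kmem_cache */
		size = page->slab->size;	/* simplified */
	}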

Cc: Christoph Lameter <clameter@....com>
Cc: Matt Mackall <mpm@...enic.com>
Cc: Paul Mundt <lethal@...ux-sh.org>
Cc: David Howells <dhowells@...hat.com>
Signed-off-by: Pekka Enberg <penberg@...helsinki.fi>
---
 include/linux/slub_def.h |    5 +----
 mm/slub.c                |   34 ++++++++++++++++++++++++++--------
 2 files changed, 27 insertions(+), 12 deletions(-)

diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 71e43a1..542f828 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -201,10 +201,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
 
-static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
-{
-	return (void *)__get_free_pages(flags | __GFP_COMP, get_order(size));
-}
+void *kmalloc_large(size_t, gfp_t);
 
 static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
diff --git a/mm/slub.c b/mm/slub.c
index a505a82..84b9689 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2687,15 +2687,29 @@ void *__kmalloc(size_t size, gfp_t flags)
 }
 EXPORT_SYMBOL(__kmalloc);
 
+void *kmalloc_large(size_t size, gfp_t flags)
+{
+	struct page *page;
+
+	page = alloc_pages(flags | __GFP_COMP, get_order(size));
+	if (!page)
+		return NULL;
+
+	__SetPageSlab(page);
+	return page_address(page);
+}
+EXPORT_SYMBOL(kmalloc_large);
+
 static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 {
 	struct page *page = alloc_pages_node(node, flags | __GFP_COMP,
 						get_order(size));
 
-	if (page)
-		return page_address(page);
-	else
+	if (!page)
 		return NULL;
+
+	__SetPageSlab(page);
+	return page_address(page);
 }
 
 #ifdef CONFIG_NUMA
@@ -2725,12 +2739,12 @@ size_t ksize(const void *object)
 		return 0;
 
 	page = virt_to_head_page(object);
+	BUG_ON(!PageSlab(page));
+	s = page->slab;
 
-	if (unlikely(!PageSlab(page)))
+	if (unlikely(!s))
 		return PAGE_SIZE << compound_order(page);
 
-	s = page->slab;
-
 #ifdef CONFIG_SLUB_DEBUG
 	/*
 	 * Debugging requires use of the padding between object
@@ -2756,6 +2770,7 @@ EXPORT_SYMBOL(ksize);
 
 void kfree(const void *x)
 {
+	struct kmem_cache *s;
 	struct page *page;
 	void *object = (void *)x;
 
@@ -2763,11 +2778,14 @@ void kfree(const void *x)
 		return;
 
 	page = virt_to_head_page(x);
-	if (unlikely(!PageSlab(page))) {
+	BUG_ON(!PageSlab(page));
+	s = page->slab;
+	if (unlikely(!s)) {
+		__ClearPageSlab(page);
 		put_page(page);
 		return;
 	}
-	slab_free(page->slab, page, object, __builtin_return_address(0));
+	slab_free(s, page, object, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(kfree);
 
-- 
1.5.2.5
