Date:	Wed, 21 May 2008 21:26:19 +0300 (EEST)
From:	Pekka J Enberg <penberg@...helsinki.fi>
To:	linux-kernel@...r.kernel.org
cc:	clameter@....com, mpm@...enic.com, lethal@...ux-sh.org,
	dhowells@...hat.com
Subject: [RFC/PATCH 3/3] SLOB: make ksize() more strict for page allocator
 pass-through

From: Pekka Enberg <penberg@...helsinki.fi>

This patch reuses the PG_slab flag to mark SLOB bigblock pages so that we can
check that a pointer passed to ksize() was really allocated by SLOB.
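
For illustration only, a minimal user-space sketch of the same idea follows
(this is not kernel code; the names big_alloc, big_ksize and BIG_MARK are
invented here, and the small header struct merely stands in for the
page->flags / page->private fields that the patch itself uses):

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

struct big_header {
	unsigned long flags;	/* stands in for page->flags */
	size_t size;		/* stands in for page->private */
};

#define BIG_MARK 0x1UL		/* stands in for PG_slab */

static void *big_alloc(size_t size)
{
	struct big_header *h = malloc(sizeof(*h) + size);

	if (!h)
		return NULL;
	h->flags = BIG_MARK;	/* mark the block as ours at alloc time */
	h->size = size;
	return h + 1;
}

static size_t big_ksize(const void *block)
{
	const struct big_header *h = (const struct big_header *)block - 1;

	/* like the patched ksize(): complain if the mark is missing */
	assert(h->flags & BIG_MARK);
	return h->size;
}

int main(void)
{
	void *p = big_alloc(4096);

	printf("%zu\n", big_ksize(p));	/* prints 4096 */
	free((struct big_header *)p - 1);
	return 0;
}

The point, as in the patch below, is that a pointer that was not handed out
by the allocator no longer silently returns whatever happens to sit in
page->private; the missing marker makes the misuse visible.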

Cc: Christoph Lameter <clameter@....com>
Cc: Matt Mackall <mpm@...enic.com>
Cc: Paul Mundt <lethal@...ux-sh.org>
Cc: David Howells <dhowells@...hat.com>
Signed-off-by: Pekka Enberg <penberg@...helsinki.fi>
---
 mm/slob.c |   34 ++++++++++++++++++++++++++++++----
 1 files changed, 30 insertions(+), 4 deletions(-)

diff --git a/mm/slob.c b/mm/slob.c
index a3ad667..4dc077e 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -144,6 +144,24 @@ static inline void clear_slob_page(struct slob_page *sp)
 }
 
 /*
+ * slob_bigpage: true for slob bigblock pages
+ */
+static inline int slob_bigpage(struct page *page)
+{
+	return test_bit(PG_slab, &page->flags);
+}
+
+static inline void set_slob_bigpage(struct page *page)
+{
+	__set_bit(PG_slab, &page->flags);
+}
+
+static inline void clear_slob_bigpage(struct page *page)
+{
+	__clear_bit(PG_slab, &page->flags);
+}
+
+/*
  * slob_page_free: true for pages on free_slob_pages list.
  */
 static inline int slob_page_free(struct slob_page *sp)
@@ -481,6 +499,7 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 			struct page *page;
 			page = virt_to_page(ret);
 			page->private = size;
+			set_slob_bigpage(page);
 		}
 		return ret;
 	}
@@ -499,8 +518,12 @@ void kfree(const void *block)
 		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
 		unsigned int *m = (unsigned int *)(block - align);
 		slob_free(m, *m + align);
-	} else
-		put_page(&sp->page);
+	} else {
+		struct page *page = &sp->page;
+
+		clear_slob_bigpage(page);
+		put_page(page);
+	}
 }
 EXPORT_SYMBOL(kfree);
 
@@ -508,6 +531,7 @@ EXPORT_SYMBOL(kfree);
 size_t ksize(const void *block)
 {
 	struct slob_page *sp;
+	struct page *page;
 
 	BUG_ON(!block);
 	if (unlikely(block == ZERO_SIZE_PTR))
@@ -516,8 +540,10 @@ size_t ksize(const void *block)
 	sp = (struct slob_page *)virt_to_page(block);
 	if (slob_page(sp))
 		return ((slob_t *)block - 1)->units + SLOB_UNIT;
-	else
-		return sp->page.private;
+
+	page = &sp->page;
+	BUG_ON(!slob_bigpage(page));
+	return page->private;
 }
 EXPORT_SYMBOL(ksize);
 
-- 
1.5.2.5

