Message-Id: <1388137619-14741-1-git-send-email-liwanp@linux.vnet.ibm.com>
Date:	Fri, 27 Dec 2013 17:46:59 +0800
From:	Wanpeng Li <liwanp@linux.vnet.ibm.com>
To:	Pekka Enberg <penberg@kernel.org>
Cc:	Christoph Lameter <cl@linux.com>,
	Andrew Morton <akpm@linux-foundation.org>,
	David Rientjes <rientjes@google.com>,
	Joonsoo Kim <iamjoonsoo.kim@lge.com>, linux-mm@kvack.org,
	linux-kernel@vger.kernel.org,
	Wanpeng Li <liwanp@linux.vnet.ibm.com>
Subject: [PATCH] mm/slub: fix accumulation of per cpu partial cache objects

The SLUB per-CPU partial cache is a linked list of partially filled slab
pages used to accelerate object allocation. However, the current code
only accounts for the objects of the first page on the per-CPU partial
list instead of traversing the whole list.
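
For illustration, the following is a minimal, standalone C sketch of the
list walk this patch introduces. The simplified struct page (carrying
only the next, pages and pobjects fields) and the helper
count_partial_list() are assumptions made for this example; the names
mirror mm/slub.c, but this is not kernel code.

	/*
	 * Minimal userspace sketch of the traversal this patch adds.
	 * The struct below is a deliberately reduced stand-in for the
	 * kernel's struct page.
	 */
	#include <stdio.h>

	struct page {
		struct page *next;	/* next page on the per-CPU partial list */
		int pages;		/* pages this entry accounts for */
		int pobjects;		/* objects this entry accounts for */
	};

	/* Walk the whole list instead of reading only the head entry. */
	static void count_partial_list(struct page *page, int *pages,
				       int *objects)
	{
		struct page *p;

		while ((p = page)) {
			page = p->next;	/* advance before accumulating */
			*pages += p->pages;
			*objects += p->pobjects;
		}
	}

	int main(void)
	{
		struct page second = { NULL, 1, 12 };
		struct page first = { &second, 2, 30 };
		int pages = 0, objects = 0;

		count_partial_list(&first, &pages, &objects);
		/* Prints "42 objects across 3 pages". */
		printf("%d objects across %d pages\n", objects, pages);
		return 0;
	}

The while ((p = page)) idiom used in the patch saves the current node
before advancing the head pointer, so each entry's counters can still be
read after the cursor has moved on.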

Signed-off-by: Wanpeng Li <liwanp@...ux.vnet.ibm.com>
---
 mm/slub.c |   32 +++++++++++++++++++++++---------
 1 files changed, 23 insertions(+), 9 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 545a170..799bfdc 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4280,7 +4280,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 			struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab,
 							       cpu);
 			int node;
-			struct page *page;
+			struct page *page, *p;
 
 			page = ACCESS_ONCE(c->page);
 			if (!page)
@@ -4298,8 +4298,9 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 			nodes[node] += x;
 
 			page = ACCESS_ONCE(c->partial);
-			if (page) {
-				x = page->pobjects;
+			while ((p = page)) {
+				page = p->next;
+				x = p->pobjects;
 				total += x;
 				nodes[node] += x;
 			}
@@ -4520,13 +4521,15 @@ static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
 	int pages = 0;
 	int cpu;
 	int len;
+	struct page *p;
 
 	for_each_online_cpu(cpu) {
 		struct page *page = per_cpu_ptr(s->cpu_slab, cpu)->partial;
 
-		if (page) {
-			pages += page->pages;
-			objects += page->pobjects;
+		while ((p = page)) {
+			page = p->next;
+			pages += p->pages;
+			objects += p->pobjects;
 		}
 	}
 
@@ -4535,10 +4538,21 @@ static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
 #ifdef CONFIG_SMP
 	for_each_online_cpu(cpu) {
 		struct page *page = per_cpu_ptr(s->cpu_slab, cpu) ->partial;
+		objects = 0;
+		pages = 0;
+
+		if (!page)
+			continue;
+
+		while ((p = page)) {
+			page = p->next;
+			pages += p->pages;
+			objects += p->pobjects;
+		}
 
-		if (page && len < PAGE_SIZE - 20)
-			len += sprintf(buf + len, " C%d=%d(%d)", cpu,
-				page->pobjects, page->pages);
+		if (len < PAGE_SIZE - 20)
+			len += sprintf(buf + len, " C%d=%d(%d)", cpu,
+				objects, pages);
 	}
 #endif
 	return len + sprintf(buf + len, "\n");
-- 
1.7.7.6

