lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date:	Mon,  5 Jan 2015 10:37:28 +0900
From:	Joonsoo Kim <iamjoonsoo.kim@....com>
To:	Andrew Morton <akpm@...ux-foundation.org>
Cc:	Christoph Lameter <cl@...ux.com>,
	Pekka Enberg <penberg@...nel.org>,
	David Rientjes <rientjes@...gle.com>,
	Joonsoo Kim <iamjoonsoo.kim@....com>, linux-mm@...ck.org,
	linux-kernel@...r.kernel.org,
	Jesper Dangaard Brouer <brouer@...hat.com>
Subject: [PATCH 3/6] mm/slab: clean-up __ac_get_obj() to prepare future changes

This patch is a clean-up and preparation for optimizing the
allocation fastpath. Until now, SLAB has handled allocation requests
with irqs disabled. But, to improve performance, irqs will not be
disabled at first in the allocation fastpath. This requires changes to
the interface and assumptions of __ac_get_obj(). The object will be
passed to __ac_get_obj() rather than __ac_get_obj() directly accessing
the object in the array cache, and irqs will not be disabled on entry.

To handle this future situation, this patch changes the interface and
name of the function to make it suitable for future use. The main
purpose of this function -- exchanging a pfmemalloc object for a
non-pfmemalloc object when the caller is not a legitimate user of such
memory -- is unchanged.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@....com>
---
 mm/slab.c |   91 +++++++++++++++++++++++++++++++++++--------------------------
 1 file changed, 52 insertions(+), 39 deletions(-)

diff --git a/mm/slab.c b/mm/slab.c
index 9aa58fc..62cd5c6 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -720,50 +720,62 @@ out:
 	spin_unlock_irqrestore(&n->list_lock, flags);
 }
 
-static void *__ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac,
-						gfp_t flags, bool force_refill)
+static void *get_obj_from_pfmemalloc_obj(struct kmem_cache *cachep,
+				struct array_cache *ac, void *objp,
+				gfp_t flags, bool force_refill)
 {
 	int i;
-	void *objp = ac->entry[--ac->avail];
+	struct kmem_cache_node *n;
+	LIST_HEAD(list);
+	int node;
 
-	/* Ensure the caller is allowed to use objects from PFMEMALLOC slab */
-	if (unlikely(is_obj_pfmemalloc(objp))) {
-		struct kmem_cache_node *n;
+	BUG_ON(ac->avail >= ac->limit);
+	BUG_ON(objp != ac->entry[ac->avail]);
 
-		if (gfp_pfmemalloc_allowed(flags)) {
-			clear_obj_pfmemalloc(&objp);
-			return objp;
-		}
+	if (gfp_pfmemalloc_allowed(flags)) {
+		clear_obj_pfmemalloc(&objp);
+		return objp;
+	}
 
-		/* The caller cannot use PFMEMALLOC objects, find another one */
-		for (i = 0; i < ac->avail; i++) {
-			/* If a !PFMEMALLOC object is found, swap them */
-			if (!is_obj_pfmemalloc(ac->entry[i])) {
-				objp = ac->entry[i];
-				ac->entry[i] = ac->entry[ac->avail];
-				ac->entry[ac->avail] = objp;
-				return objp;
-			}
-		}
+	/* The caller cannot use PFMEMALLOC objects, find another one */
+	for (i = 0; i < ac->avail; i++) {
+		if (is_obj_pfmemalloc(ac->entry[i]))
+			continue;
 
-		/*
-		 * If there are empty slabs on the slabs_free list and we are
-		 * being forced to refill the cache, mark this one !pfmemalloc.
-		 */
-		n = get_node(cachep, numa_mem_id());
-		if (!list_empty(&n->slabs_free) && force_refill) {
-			struct page *page = virt_to_head_page(objp);
-			ClearPageSlabPfmemalloc(page);
-			clear_obj_pfmemalloc(&objp);
-			recheck_pfmemalloc_active(cachep, ac);
-			return objp;
-		}
+		/* !PFMEMALLOC object is found, swap them */
+		objp = ac->entry[i];
+		ac->entry[i] = ac->entry[ac->avail];
+		ac->entry[ac->avail] = objp;
 
-		/* No !PFMEMALLOC objects available */
-		ac->avail++;
-		objp = NULL;
+		return objp;
 	}
 
+	/*
+	 * If there are empty slabs on the slabs_free list and we are
+	 * being forced to refill the cache, mark this one !pfmemalloc.
+	 */
+	node = numa_mem_id();
+	n = get_node(cachep, node);
+	if (!list_empty(&n->slabs_free) && force_refill) {
+		struct page *page = virt_to_head_page(objp);
+
+		ClearPageSlabPfmemalloc(page);
+		clear_obj_pfmemalloc(&objp);
+		recheck_pfmemalloc_active(cachep, ac);
+
+		return objp;
+	}
+
+	/* No !PFMEMALLOC objects available */
+	if (ac->avail < ac->limit)
+		ac->entry[ac->avail++] = objp;
+	else {
+		spin_lock(&n->list_lock);
+		free_block(cachep, &objp, 1, node, &list);
+		spin_unlock(&n->list_lock);
+	}
+	objp = NULL;
+
 	return objp;
 }
 
@@ -772,10 +784,11 @@ static inline void *ac_get_obj(struct kmem_cache *cachep,
 {
 	void *objp;
 
-	if (unlikely(sk_memalloc_socks()))
-		objp = __ac_get_obj(cachep, ac, flags, force_refill);
-	else
-		objp = ac->entry[--ac->avail];
+	objp = ac->entry[--ac->avail];
+	if (unlikely(sk_memalloc_socks()) && is_obj_pfmemalloc(objp)) {
+		objp = get_obj_from_pfmemalloc_obj(cachep, ac, objp,
+						flags, force_refill);
+	}
 
 	return objp;
 }
-- 
1.7.9.5

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ