lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [day] [month] [year] [list]
Message-ID: <alpine.DEB.2.00.1010141000190.18275@melkki.cs.helsinki.fi>
Date:	Thu, 14 Oct 2010 10:01:23 +0300 (EEST)
From:	Pekka J Enberg <penberg@...helsinki.fi>
To:	cl@...ux.com, rientjes@...gle.com
cc:	linux-kernel@...r.kernel.org
Subject: [RFC/PATCH] slub: Drop slab lock for partial list handling

From: Pekka Enberg <penberg@...nel.org>

There's no need to hold the 'page' slab lock for the partial list handling
functions. A page is bound to a node, so 'page->lru' is always protected by
n->list_lock.

Cc: Christoph Lameter <cl@...ux.com>
Cc: David Rientjes <rientjes@...gle.com>
Signed-off-by: Pekka Enberg <penberg@...nel.org>
---
David, assuming this actually works, does it help with netperf TCP_RR? The 
patch is on top of my slab/next branch.

  mm/slub.c |   26 +++++++++++++++-----------
  1 files changed, 15 insertions(+), 11 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 8fd5401..f41126f 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1452,8 +1452,11 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)

  	__ClearPageSlubFrozen(page);
  	if (page->inuse) {
+		void *prior = page->freelist;

-		if (page->freelist) {
+		slab_unlock(page);
+
+		if (prior) {
  			add_partial(n, page, tail);
  			stat(s, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
  		} else {
@@ -1461,8 +1464,8 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
  			if (kmem_cache_debug(s) && (s->flags & SLAB_STORE_USER))
  				add_full(n, page);
  		}
-		slab_unlock(page);
  	} else {
+		slab_unlock(page);
  		stat(s, DEACTIVATE_EMPTY);
  		if (n->nr_partial < s->min_partial) {
  			/*
@@ -1476,9 +1479,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
  			 * the partial list.
  			 */
  			add_partial(n, page, 1);
-			slab_unlock(page);
  		} else {
-			slab_unlock(page);
  			stat(s, FREE_SLAB);
  			discard_slab(s, page);
  		}
@@ -1831,13 +1832,16 @@ checks_ok:
  	page->inuse--;

  	if (unlikely(PageSlubFrozen(page))) {
+		slab_unlock(page);
  		stat(s, FREE_FROZEN);
-		goto out_unlock;
+		goto out;
  	}

  	if (unlikely(!page->inuse))
  		goto slab_empty;

+	slab_unlock(page);
+
  	/*
  	 * Objects left in the slab. If it was not on the partial list before
  	 * then add it.
@@ -1846,12 +1850,11 @@ checks_ok:
  		add_partial(get_node(s, page_to_nid(page)), page, 1);
  		stat(s, FREE_ADD_PARTIAL);
  	}
-
-out_unlock:
-	slab_unlock(page);
+out:
  	return;

  slab_empty:
+	slab_unlock(page);
  	if (prior) {
  		/*
  		 * Slab still on the partial list.
@@ -1859,14 +1862,15 @@ slab_empty:
  		remove_partial(s, page);
  		stat(s, FREE_REMOVE_PARTIAL);
  	}
-	slab_unlock(page);
  	stat(s, FREE_SLAB);
  	discard_slab(s, page);
  	return;

  debug:
-	if (!free_debug_processing(s, page, x, addr))
-		goto out_unlock;
+	if (!free_debug_processing(s, page, x, addr)) {
+		slab_unlock(page);
+		goto out;
+	}
  	goto checks_ok;
  }

-- 
1.7.1

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ