Date:   Sun, 29 May 2022 16:15:34 +0800
From:   Rongwei Wang <rongwei.wang@...ux.alibaba.com>
To:     akpm@...ux-foundation.org, vbabka@...e.cz,
        roman.gushchin@...ux.dev, iamjoonsoo.kim@....com,
        rientjes@...gle.com, penberg@...nel.org, cl@...ux.com
Cc:     linux-mm@...ck.org, linux-kernel@...r.kernel.org
Subject: [PATCH 2/3] mm/slub: improve consistency of nr_slabs count

Currently, discard_slab() can change the nr_slabs count
without holding the node's list_lock. This can cause
spurious error messages to be printed when scanning the
node's partial or full list, e.g. when validating all
slabs, because the count is observed in an inconsistent
state.

Fix this by removing discard_slab() and calling
dec_slabs_node() before the node's list_lock is released.
dec_slabs_node() and free_slab() are now called
separately, keeping the nr_slabs count consistent with
the lists it describes.
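
As an illustrative sketch, the pattern applied at each
call site is:

	spin_lock_irqsave(&n->list_lock, flags);
	/* list manipulation that relies on nr_slabs */
	dec_slabs_node(s, slab_nid(slab), slab->objects);
	spin_unlock_irqrestore(&n->list_lock, flags);
	free_slab(s, slab);	/* no list_lock needed */

free_slab() itself does not touch the node lists, so it
can safely be called after the lock is dropped, keeping
the critical section short.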

Signed-off-by: Rongwei Wang <rongwei.wang@...ux.alibaba.com>
---
 mm/slub.c | 26 ++++++++++++++------------
 1 file changed, 14 insertions(+), 12 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 310e56d99116..bffb95bbb0ee 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2039,12 +2039,6 @@ static void free_slab(struct kmem_cache *s, struct slab *slab)
 		__free_slab(s, slab);
 }
 
-static void discard_slab(struct kmem_cache *s, struct slab *slab)
-{
-	dec_slabs_node(s, slab_nid(slab), slab->objects);
-	free_slab(s, slab);
-}
-
 /*
  * Management of partially allocated slabs.
  */
@@ -2413,6 +2407,7 @@ static void deactivate_slab(struct kmem_cache *s, struct slab *slab,
 
 	if (!new.inuse && n->nr_partial >= s->min_partial) {
 		mode = M_FREE;
+		spin_lock_irqsave(&n->list_lock, flags);
 	} else if (new.freelist) {
 		mode = M_PARTIAL;
 		/*
@@ -2437,7 +2432,7 @@ static void deactivate_slab(struct kmem_cache *s, struct slab *slab,
 				old.freelist, old.counters,
 				new.freelist, new.counters,
 				"unfreezing slab")) {
-		if (mode == M_PARTIAL || mode == M_FULL)
+		if (mode != M_FULL_NOLIST)
 			spin_unlock_irqrestore(&n->list_lock, flags);
 		goto redo;
 	}
@@ -2449,7 +2444,10 @@ static void deactivate_slab(struct kmem_cache *s, struct slab *slab,
 		stat(s, tail);
 	} else if (mode == M_FREE) {
 		stat(s, DEACTIVATE_EMPTY);
-		discard_slab(s, slab);
+		dec_slabs_node(s, slab_nid(slab), slab->objects);
+		spin_unlock_irqrestore(&n->list_lock, flags);
+
+		free_slab(s, slab);
 		stat(s, FREE_SLAB);
 	} else if (mode == M_FULL) {
 		add_full(s, n, slab);
@@ -2502,6 +2500,7 @@ static void __unfreeze_partials(struct kmem_cache *s, struct slab *partial_slab)
 		if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) {
 			slab->next = slab_to_discard;
 			slab_to_discard = slab;
+			dec_slabs_node(s, slab_nid(slab), slab->objects);
 		} else {
 			add_partial(n, slab, DEACTIVATE_TO_TAIL);
 			stat(s, FREE_ADD_PARTIAL);
@@ -2516,7 +2515,7 @@ static void __unfreeze_partials(struct kmem_cache *s, struct slab *partial_slab)
 		slab_to_discard = slab_to_discard->next;
 
 		stat(s, DEACTIVATE_EMPTY);
-		discard_slab(s, slab);
+		free_slab(s, slab);
 		stat(s, FREE_SLAB);
 	}
 }
@@ -3404,9 +3403,10 @@ static void __slab_free(struct kmem_cache *s, struct slab *slab,
 		remove_full(s, n, slab);
 	}
 
+	dec_slabs_node(s, slab_nid(slab), slab->objects);
 	spin_unlock_irqrestore(&n->list_lock, flags);
 	stat(s, FREE_SLAB);
-	discard_slab(s, slab);
+	free_slab(s, slab);
 }
 
 /*
@@ -4265,6 +4265,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 		if (!slab->inuse) {
 			remove_partial(n, slab);
 			list_add(&slab->slab_list, &discard);
+			dec_slabs_node(s, slab_nid(slab), slab->objects);
 		} else {
 			list_slab_objects(s, slab,
 			  "Objects remaining in %s on __kmem_cache_shutdown()");
@@ -4273,7 +4274,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 	spin_unlock_irq(&n->list_lock);
 
 	list_for_each_entry_safe(slab, h, &discard, slab_list)
-		discard_slab(s, slab);
+		free_slab(s, slab);
 }
 
 bool __kmem_cache_empty(struct kmem_cache *s)
@@ -4595,6 +4596,7 @@ static int __kmem_cache_do_shrink(struct kmem_cache *s)
 			if (free == slab->objects) {
 				list_move(&slab->slab_list, &discard);
 				n->nr_partial--;
+				dec_slabs_node(s, slab_nid(slab), slab->objects);
 			} else if (free <= SHRINK_PROMOTE_MAX)
 				list_move(&slab->slab_list, promote + free - 1);
 		}
@@ -4610,7 +4612,7 @@ static int __kmem_cache_do_shrink(struct kmem_cache *s)
 
 		/* Release empty slabs */
 		list_for_each_entry_safe(slab, t, &discard, slab_list)
-			discard_slab(s, slab);
+			free_slab(s, slab);
 
 		if (slabs_node(s, node))
 			ret = 1;
-- 
2.27.0
