Message-ID: <fbf59e73-8b27-56a8-d863-cfe40457f4df@suse.cz>
Date: Tue, 10 Aug 2021 11:03:02 +0200
From: Vlastimil Babka <vbabka@...e.cz>
To: Qian Cai <quic_qiancai@...cinc.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Christoph Lameter <cl@...ux.com>,
David Rientjes <rientjes@...gle.com>,
Pekka Enberg <penberg@...nel.org>,
Joonsoo Kim <iamjoonsoo.kim@....com>
Cc: linux-mm@...ck.org, linux-kernel@...r.kernel.org,
Mike Galbraith <efault@....de>,
Sebastian Andrzej Siewior <bigeasy@...utronix.de>,
Thomas Gleixner <tglx@...utronix.de>,
Mel Gorman <mgorman@...hsingularity.net>,
Jesper Dangaard Brouer <brouer@...hat.com>,
Jann Horn <jannh@...gle.com>
Subject: Re: [PATCH v4 29/35] mm: slub: Move flush_cpu_slab() invocations
__free_slab() invocations out of IRQ context
On 8/9/21 3:41 PM, Qian Cai wrote:
>>
>> +static DEFINE_MUTEX(flush_lock);
>> +static DEFINE_PER_CPU(struct slub_flush_work, slub_flush);
>> +
>> static void flush_all(struct kmem_cache *s)
>> {
>> - on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1);
>> + struct slub_flush_work *sfw;
>> + unsigned int cpu;
>> +
>> + mutex_lock(&flush_lock);
>
> Vlastimil, taking the lock here could trigger a warning during memory offline/online due to the locking order:
>
> slab_mutex -> flush_lock
>
> [ 91.374541] WARNING: possible circular locking dependency detected
> [ 91.381411] 5.14.0-rc5-next-20210809+ #84 Not tainted
> [ 91.387149] ------------------------------------------------------
> [ 91.394016] lsbug/1523 is trying to acquire lock:
> [ 91.399406] ffff800018e76530 (flush_lock){+.+.}-{3:3}, at: flush_all+0x50/0x1c8
> [ 91.407425]
> but task is already holding lock:
> [ 91.414638] ffff800018e48468 (slab_mutex){+.+.}-{3:3}, at: slab_memory_callback+0x44/0x280
> [ 91.423603]
> which lock already depends on the new lock.
>
OK, managed to reproduce in qemu and this fixes it for me on top of
next-20210809. Could you test as well, as your testing might be more
comprehensive? I will format it as a fixup for the proper patch in the series then.
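For the record, the dependency cycle as I understand it is roughly this (a
sketch of the lock ordering, not the exact call chains):

  memory offline path:  cpu_hotplug_lock -> slab_mutex -> flush_lock
  flush_all():          flush_lock -> cpu_hotplug_lock (via cpus_read_lock())

Taking cpus_read_lock() outside flush_lock, and skipping it entirely on the
offline path where it is already held, removes the second edge and with it
the cycle.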
----8<----
From 7ce71c7f9455e8b96dc1b728ea566b6ef5e424e4 Mon Sep 17 00:00:00 2001
From: Vlastimil Babka <vbabka@...e.cz>
Date: Tue, 10 Aug 2021 10:58:07 +0200
Subject: [PATCH] mm, slub: fix memory offline lockdep splat

Reverse the order of taking flush_lock and cpus_read_lock() to prevent a
lockdep splat. In slab_mem_going_offline_callback() we already have
cpus_read_lock() held, so make sure it's not taken again.

Signed-off-by: Vlastimil Babka <vbabka@...e.cz>
---
 mm/slub.c | 27 ++++++++++++++++++++-------
 1 file changed, 20 insertions(+), 7 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 88a6c3ed2751..073cdd4b020f 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2640,13 +2640,13 @@ static bool has_cpu_slab(int cpu, struct kmem_cache *s)
 static DEFINE_MUTEX(flush_lock);
 static DEFINE_PER_CPU(struct slub_flush_work, slub_flush);
 
-static void flush_all(struct kmem_cache *s)
+static void flush_all_cpus_locked(struct kmem_cache *s)
 {
 	struct slub_flush_work *sfw;
 	unsigned int cpu;
 
+	lockdep_assert_cpus_held();
 	mutex_lock(&flush_lock);
-	cpus_read_lock();
 
 	for_each_online_cpu(cpu) {
 		sfw = &per_cpu(slub_flush, cpu);
@@ -2667,10 +2667,16 @@ static void flush_all(struct kmem_cache *s)
 		flush_work(&sfw->work);
 	}
 
-	cpus_read_unlock();
 	mutex_unlock(&flush_lock);
 }
 
+static void flush_all(struct kmem_cache *s)
+{
+	cpus_read_lock();
+	flush_all_cpus_locked(s);
+	cpus_read_unlock();
+}
+
 /*
  * Use the cpu notifier to insure that the cpu slabs are flushed when
  * necessary.
@@ -4516,7 +4522,7 @@ EXPORT_SYMBOL(kfree);
  * being allocated from last increasing the chance that the last objects
  * are freed in them.
  */
-int __kmem_cache_shrink(struct kmem_cache *s)
+int __kmem_cache_do_shrink(struct kmem_cache *s)
 {
 	int node;
 	int i;
@@ -4528,7 +4534,6 @@ int __kmem_cache_shrink(struct kmem_cache *s)
 	unsigned long flags;
 	int ret = 0;
 
-	flush_all(s);
 	for_each_kmem_cache_node(s, node, n) {
 		INIT_LIST_HEAD(&discard);
 		for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
@@ -4578,13 +4583,21 @@ int __kmem_cache_shrink(struct kmem_cache *s)
 	return ret;
 }
 
+int __kmem_cache_shrink(struct kmem_cache *s)
+{
+	flush_all(s);
+	return __kmem_cache_do_shrink(s);
+}
+
 static int slab_mem_going_offline_callback(void *arg)
 {
 	struct kmem_cache *s;
 
 	mutex_lock(&slab_mutex);
-	list_for_each_entry(s, &slab_caches, list)
-		__kmem_cache_shrink(s);
+	list_for_each_entry(s, &slab_caches, list) {
+		flush_all_cpus_locked(s);
+		__kmem_cache_do_shrink(s);
+	}
 	mutex_unlock(&slab_mutex);
 
 	return 0;
--
2.32.0
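To spell out the resulting lock ordering after the fixup (a summary of the
patch above, not additional code):

  flush_all():                        cpu_hotplug_lock -> flush_lock
  slab_mem_going_offline_callback():  cpu_hotplug_lock (held by the caller)
                                      -> slab_mutex -> flush_lock

cpu_hotplug_lock now consistently nests outside both slab_mutex and
flush_lock, so the flush_lock -> cpu_hotplug_lock edge that closed the
cycle is gone.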