Message-ID: <87ilcw9gaz.ffs@tglx>
Date: Sat, 13 May 2023 21:34:12 +0200
From: Thomas Gleixner <tglx@...utronix.de>
To: Ido Schimmel <idosch@...dia.com>
Cc: syzbot <syzbot+3384541342de0ca933f1@...kaller.appspotmail.com>,
linux-kernel@...r.kernel.org, syzkaller-bugs@...glegroups.com,
Peter Zijlstra <peterz@...radead.org>
Subject: Re: [syzbot] [kernel?] possible deadlock in __hrtimer_run_queues
Ido!
On Sat, May 13 2023 at 12:37, Ido Schimmel wrote:
> Thomas, I applied the fix on top of net-next (which includes
> 0af462f19e63). I'm getting the splat below during boot and then the
> systems hangs.
Because I'm a moron. I got the same splat while testing, fixed it on the
test machine and then did not sync it back before sending...
Updated fix which also replaces GFP_ATOMIC with __GFP_HIGH to prevent
the allocator from waking up the swap daemons, which causes yet another
lock inversion issue.
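For context (and note that the bit values below are placeholders, not
the real gfp_types.h constants): on current kernels GFP_ATOMIC is
defined as __GFP_HIGH | __GFP_KSWAPD_RECLAIM, so switching to bare
__GFP_HIGH keeps access to the atomic reserves but no longer implies
the kswapd wakeup. A minimal standalone illustration:

#include <stdio.h>

/* Placeholder bit values; only the composition mirrors gfp_types.h. */
#define __GFP_HIGH		0x1u	/* may dip into atomic reserves */
#define __GFP_KSWAPD_RECLAIM	0x2u	/* wake kswapd when memory is low */
#define __GFP_NORETRY		0x4u
#define __GFP_NOWARN		0x8u

#define GFP_ATOMIC	(__GFP_HIGH | __GFP_KSWAPD_RECLAIM)

int main(void)
{
	unsigned int old_mask = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
	unsigned int new_mask = __GFP_HIGH | __GFP_NORETRY | __GFP_NOWARN;

	printf("old fill_pool() mask wakes kswapd: %s\n",
	       old_mask & __GFP_KSWAPD_RECLAIM ? "yes" : "no");
	printf("new fill_pool() mask wakes kswapd: %s\n",
	       new_mask & __GFP_KSWAPD_RECLAIM ? "yes" : "no");
	return 0;
}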
Thanks for testing!
tglx
---
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -124,9 +124,9 @@ static const char *obj_states[ODEBUG_STA
[ODEBUG_STATE_NOTAVAILABLE] = "not available",
};
-static void fill_pool(void)
+static void debug_objects_fill_pool(void)
{
- gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
+ gfp_t gfp = __GFP_HIGH | __GFP_NORETRY | __GFP_NOWARN;
struct debug_obj *obj;
unsigned long flags;
@@ -157,6 +157,13 @@ static void fill_pool(void)
raw_spin_unlock_irqrestore(&pool_lock, flags);
}
+ /*
+ * On RT enabled kernels the pool refill must happen in preemptible
+ * context:
+ */
+ if (IS_ENABLED(CONFIG_PREEMPT_RT) && !preemptible())
+ return;
+
if (unlikely(!obj_cache))
return;
@@ -587,16 +594,6 @@ static struct debug_obj *lookup_object_o
return NULL;
}
-static void debug_objects_fill_pool(void)
-{
- /*
- * On RT enabled kernels the pool refill must happen in preemptible
- * context:
- */
- if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible())
- fill_pool();
-}
-
static void
__debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
{
@@ -690,13 +687,16 @@ int debug_object_activate(void *addr, co
if (!debug_objects_enabled)
return 0;
- debug_objects_fill_pool();
-
db = get_bucket((unsigned long) addr);
-
raw_spin_lock_irqsave(&db->lock, flags);
+ obj = lookup_object(addr, db);
+ if (!obj) {
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+ debug_objects_fill_pool();
+ raw_spin_lock_irqsave(&db->lock, flags);
+ obj = lookup_object_or_alloc(addr, db, descr, false, true);
+ }
- obj = lookup_object_or_alloc(addr, db, descr, false, true);
if (likely(!IS_ERR_OR_NULL(obj))) {
bool print_object = false;
@@ -901,13 +901,17 @@ void debug_object_assert_init(void *addr
if (!debug_objects_enabled)
return;
- debug_objects_fill_pool();
-
db = get_bucket((unsigned long) addr);
-
raw_spin_lock_irqsave(&db->lock, flags);
- obj = lookup_object_or_alloc(addr, db, descr, false, true);
+ obj = lookup_object(addr, db);
+ if (!obj) {
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+ debug_objects_fill_pool();
+ raw_spin_lock_irqsave(&db->lock, flags);
+ obj = lookup_object_or_alloc(addr, db, descr, false, true);
+ }
raw_spin_unlock_irqrestore(&db->lock, flags);
+
if (likely(!IS_ERR_OR_NULL(obj)))
return;
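For anyone reading along, a condensed sketch of what the activate and
assert_init paths end up doing with the change (function name made up,
error handling and the tracking logic elided): only when the object is
not yet tracked do they drop the bucket lock, refill the pool and retry
via lookup_object_or_alloc():

static void tracked_lookup_sketch(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_bucket *db = get_bucket((unsigned long) addr);
	struct debug_obj *obj;
	unsigned long flags;

	raw_spin_lock_irqsave(&db->lock, flags);
	obj = lookup_object(addr, db);
	if (!obj) {
		/*
		 * Refill the pool without db->lock held. On RT enabled
		 * kernels debug_objects_fill_pool() additionally bails
		 * out when the context is not preemptible.
		 */
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_objects_fill_pool();
		raw_spin_lock_irqsave(&db->lock, flags);
		obj = lookup_object_or_alloc(addr, db, descr, false, true);
	}
	/* ... existing handling of obj continues as before ... */
	raw_spin_unlock_irqrestore(&db->lock, flags);
}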