Message-ID: <ZnyS8rH8ZNirufcl@Boquns-Mac-mini.home>
Date: Wed, 26 Jun 2024 15:15:14 -0700
From: Boqun Feng <boqun.feng@...il.com>
To: "Paul E. McKenney" <paulmck@...nel.org>
Cc: Carlos Llamas <cmllamas@...gle.com>,
Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...hat.com>, Will Deacon <will@...nel.org>,
Waiman Long <longman@...hat.com>,
Bart Van Assche <bvanassche@....org>,
Andrew Morton <akpm@...ux-foundation.org>,
John Stultz <jstultz@...gle.com>,
Joel Fernandes <joel@...lfernandes.org>,
linux-kernel@...r.kernel.org, kernel-team@...roid.com,
Zhiguo Niu <zhiguo.niu@...soc.com>, stable@...r.kernel.org,
Xuewen Yan <xuewen.yan@...soc.com>, Ingo Molnar <mingo@...nel.org>
Subject: Re: [PATCH v4][RESEND x4] lockdep: fix deadlock issue between
lockdep and rcu
On Tue, Jun 25, 2024 at 07:38:15AM -0700, Paul E. McKenney wrote:
> On Thu, Jun 20, 2024 at 10:54:34PM +0000, Carlos Llamas wrote:
> > From: Zhiguo Niu <zhiguo.niu@...soc.com>
> >
> > There is a deadlock scenario between lockdep and rcu when the
> > rcu nocb feature is enabled, as shown by the following call stacks:
>
> I have pulled this into -rcu for further review and testing.
>
> If someone else (for example, the lockdep folks) would like to take this:
>
> Acked-by: Paul E. McKenney <paulmck@...nel.org>
>
FWIW, I added this patch and another one [1] to my tree:
git://git.kernel.org/pub/scm/linux/kernel/git/boqun/linux.git lockdep
(based on today's tip/locking/core)
I figure I have time to handle lockdep-only patches, which shouldn't
be many. So Ingo & Peter, I'm happy to help: if you need me to pick up
lockdep patches and send a PR against tip/master or tip/locking/core,
please let me know.
Regards,
Boqun
[1]: https://lore.kernel.org/all/20240528120008.403511-2-thorsten.blum@toblux.com/
> > rcuop/x
> > -000|queued_spin_lock_slowpath(lock = 0xFFFFFF817F2A8A80, val = ?)
> > -001|queued_spin_lock(inline) // try to hold nocb_gp_lock
> > -001|do_raw_spin_lock(lock = 0xFFFFFF817F2A8A80)
> > -002|__raw_spin_lock_irqsave(inline)
> > -002|_raw_spin_lock_irqsave(lock = 0xFFFFFF817F2A8A80)
> > -003|wake_nocb_gp_defer(inline)
> > -003|__call_rcu_nocb_wake(rdp = 0xFFFFFF817F30B680)
> > -004|__call_rcu_common(inline)
> > -004|call_rcu(head = 0xFFFFFFC082EECC28, func = ?)
> > -005|call_rcu_zapped(inline)
> > -005|free_zapped_rcu(ch = ?)// hold graph lock
> > -006|rcu_do_batch(rdp = 0xFFFFFF817F245680)
> > -007|nocb_cb_wait(inline)
> > -007|rcu_nocb_cb_kthread(arg = 0xFFFFFF817F245680)
> > -008|kthread(_create = 0xFFFFFF80803122C0)
> > -009|ret_from_fork(asm)
> >
> > rcuop/y
> > -000|queued_spin_lock_slowpath(lock = 0xFFFFFFC08291BBC8, val = 0)
> > -001|queued_spin_lock()
> > -001|lockdep_lock()
> > -001|graph_lock() // try to hold graph lock
> > -002|lookup_chain_cache_add()
> > -002|validate_chain()
> > -003|lock_acquire
> > -004|_raw_spin_lock_irqsave(lock = 0xFFFFFF817F211D80)
> > -005|lock_timer_base(inline)
> > -006|mod_timer(inline)
> > -006|wake_nocb_gp_defer(inline)// hold nocb_gp_lock
> > -006|__call_rcu_nocb_wake(rdp = 0xFFFFFF817F2A8680)
> > -007|__call_rcu_common(inline)
> > -007|call_rcu(head = 0xFFFFFFC0822E0B58, func = ?)
> > -008|call_rcu_hurry(inline)
> > -008|rcu_sync_call(inline)
> > -008|rcu_sync_func(rhp = 0xFFFFFFC0822E0B58)
> > -009|rcu_do_batch(rdp = 0xFFFFFF817F266680)
> > -010|nocb_cb_wait(inline)
> > -010|rcu_nocb_cb_kthread(arg = 0xFFFFFF817F266680)
> > -011|kthread(_create = 0xFFFFFF8080363740)
> > -012|ret_from_fork(asm)
> >
> > rcuop/x and rcuop/y are rcu nocb threads with the same nocb gp
> > thread, so they contend for the same nocb_gp_lock: rcuop/x holds the
> > graph lock and waits for nocb_gp_lock, while rcuop/y holds
> > nocb_gp_lock and waits for the graph lock (an ABBA deadlock). This
> > patch releases the graph lock before lockdep calls call_rcu().
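> >
> > In outline, the fix converts "queue the RCU callback while holding
> > the graph lock" into "decide under the lock, queue after unlock".
> > A minimal sketch of the before/after pattern (simplified from the
> > diff below, not the exact code):
> >
> >	/* Before: call_rcu() runs with the graph lock held and can
> >	 * recurse into wake_nocb_gp_defer() -> nocb_gp_lock. */
> >	lockdep_lock();
> >	call_rcu_zapped(pf);
> >	lockdep_unlock();
> >
> >	/* After: only mark the pending free under the lock... */
> >	lockdep_lock();
> >	need_callback = prepare_call_rcu_zapped(pf);
> >	lockdep_unlock();
> >	/* ...and queue the callback with no lockdep lock held. */
> >	if (need_callback)
> >		call_rcu(&delayed_free.rcu_head, free_zapped_rcu);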
> >
> > Fixes: a0b0fd53e1e6 ("locking/lockdep: Free lock classes that are no longer in use")
> > Cc: stable@...r.kernel.org
> > Cc: Boqun Feng <boqun.feng@...il.com>
> > Cc: Waiman Long <longman@...hat.com>
> > Cc: Carlos Llamas <cmllamas@...gle.com>
> > Cc: Bart Van Assche <bvanassche@....org>
> > Signed-off-by: Zhiguo Niu <zhiguo.niu@...soc.com>
> > Signed-off-by: Xuewen Yan <xuewen.yan@...soc.com>
> > Reviewed-by: Boqun Feng <boqun.feng@...il.com>
> > Reviewed-by: Waiman Long <longman@...hat.com>
> > Reviewed-by: Carlos Llamas <cmllamas@...gle.com>
> > Reviewed-by: Bart Van Assche <bvanassche@....org>
> > Signed-off-by: Carlos Llamas <cmllamas@...gle.com>
> > ---
> > kernel/locking/lockdep.c | 48 ++++++++++++++++++++++++++--------------
> > 1 file changed, 32 insertions(+), 16 deletions(-)
> >
> > diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
> > index 151bd3de5936..3468d8230e5f 100644
> > --- a/kernel/locking/lockdep.c
> > +++ b/kernel/locking/lockdep.c
> > @@ -6184,25 +6184,27 @@ static struct pending_free *get_pending_free(void)
> > static void free_zapped_rcu(struct rcu_head *cb);
> >
> > /*
> > - * Schedule an RCU callback if no RCU callback is pending. Must be called with
> > - * the graph lock held.
> > - */
> > -static void call_rcu_zapped(struct pending_free *pf)
> > + * See if we need to queue an RCU callback; must be called with
> > + * the lockdep lock held. Returns false if either there is no
> > + * pending free or the callback is already scheduled.
> > + * Otherwise, a call_rcu() must follow this function call.
> > + */
> > +static bool prepare_call_rcu_zapped(struct pending_free *pf)
> > {
> > WARN_ON_ONCE(inside_selftest());
> >
> > if (list_empty(&pf->zapped))
> > - return;
> > + return false;
> >
> > if (delayed_free.scheduled)
> > - return;
> > + return false;
> >
> > delayed_free.scheduled = true;
> >
> > WARN_ON_ONCE(delayed_free.pf + delayed_free.index != pf);
> > delayed_free.index ^= 1;
> >
> > - call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
> > + return true;
> > }
> >
> > /* The caller must hold the graph lock. May be called from RCU context. */
> > @@ -6228,6 +6230,7 @@ static void free_zapped_rcu(struct rcu_head *ch)
> > {
> > struct pending_free *pf;
> > unsigned long flags;
> > + bool need_callback;
> >
> > if (WARN_ON_ONCE(ch != &delayed_free.rcu_head))
> > return;
> > @@ -6239,14 +6242,18 @@ static void free_zapped_rcu(struct rcu_head *ch)
> > pf = delayed_free.pf + (delayed_free.index ^ 1);
> > __free_zapped_classes(pf);
> > delayed_free.scheduled = false;
> > + need_callback =
> > + prepare_call_rcu_zapped(delayed_free.pf + delayed_free.index);
> > + lockdep_unlock();
> > + raw_local_irq_restore(flags);
> >
> > /*
> > - * If there's anything on the open list, close and start a new callback.
> > - */
> > - call_rcu_zapped(delayed_free.pf + delayed_free.index);
> > + * If there is a pending free and its callback has not been
> > + * scheduled, queue an RCU callback.
> > + */
> > + if (need_callback)
> > + call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
> >
> > - lockdep_unlock();
> > - raw_local_irq_restore(flags);
> > }
> >
> > /*
> > @@ -6286,6 +6293,7 @@ static void lockdep_free_key_range_reg(void *start, unsigned long size)
> > {
> > struct pending_free *pf;
> > unsigned long flags;
> > + bool need_callback;
> >
> > init_data_structures_once();
> >
> > @@ -6293,10 +6301,11 @@ static void lockdep_free_key_range_reg(void *start, unsigned long size)
> > lockdep_lock();
> > pf = get_pending_free();
> > __lockdep_free_key_range(pf, start, size);
> > - call_rcu_zapped(pf);
> > + need_callback = prepare_call_rcu_zapped(pf);
> > lockdep_unlock();
> > raw_local_irq_restore(flags);
> > -
> > + if (need_callback)
> > + call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
> > /*
> > * Wait for any possible iterators from look_up_lock_class() to pass
> > * before continuing to free the memory they refer to.
> > @@ -6390,6 +6399,7 @@ static void lockdep_reset_lock_reg(struct lockdep_map *lock)
> > struct pending_free *pf;
> > unsigned long flags;
> > int locked;
> > + bool need_callback = false;
> >
> > raw_local_irq_save(flags);
> > locked = graph_lock();
> > @@ -6398,11 +6408,13 @@ static void lockdep_reset_lock_reg(struct lockdep_map *lock)
> >
> > pf = get_pending_free();
> > __lockdep_reset_lock(pf, lock);
> > - call_rcu_zapped(pf);
> > + need_callback = prepare_call_rcu_zapped(pf);
> >
> > graph_unlock();
> > out_irq:
> > raw_local_irq_restore(flags);
> > + if (need_callback)
> > + call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
> > }
> >
> > /*
> > @@ -6446,6 +6458,7 @@ void lockdep_unregister_key(struct lock_class_key *key)
> > struct pending_free *pf;
> > unsigned long flags;
> > bool found = false;
> > + bool need_callback = false;
> >
> > might_sleep();
> >
> > @@ -6466,11 +6479,14 @@ void lockdep_unregister_key(struct lock_class_key *key)
> > if (found) {
> > pf = get_pending_free();
> > __lockdep_free_key_range(pf, key, 1);
> > - call_rcu_zapped(pf);
> > + need_callback = prepare_call_rcu_zapped(pf);
> > }
> > lockdep_unlock();
> > raw_local_irq_restore(flags);
> >
> > + if (need_callback)
> > + call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
> > +
> > /* Wait until is_dynamic_key() has finished accessing k->hash_entry. */
> > synchronize_rcu();
> > }
> > --
> > 2.45.2.741.gdbec12cfda-goog
> >