Message-ID: <CAJuCfpE+qHpOVU9JS+CEVKgdJMgZB8jG3KKq=AMFNZXCgvE69Q@mail.gmail.com>
Date: Mon, 27 Apr 2020 11:47:50 -0700
From: Suren Baghdasaryan <surenb@...gle.com>
To: Shakeel Butt <shakeelb@...gle.com>
Cc: Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...hat.com>,
Andrew Morton <akpm@...ux-foundation.org>, will@...nel.org,
Johannes Weiner <hannes@...xchg.org>,
LKML <linux-kernel@...r.kernel.org>,
kernel-team <kernel-team@...roid.com>
Subject: Re: lockdep warning about possible circular dependency in PSI
On Sun, Apr 26, 2020 at 5:00 PM Suren Baghdasaryan <surenb@...gle.com> wrote:
>
> On Fri, Apr 24, 2020 at 1:54 PM Shakeel Butt <shakeelb@...gle.com> wrote:
> >
> > On Fri, Apr 24, 2020 at 10:52 AM Suren Baghdasaryan <surenb@...gle.com> wrote:
> > >
> > > On Fri, Apr 24, 2020 at 10:40 AM Peter Zijlstra <peterz@...radead.org> wrote:
> > > >
> > > > On Fri, Apr 24, 2020 at 09:34:42AM -0700, Suren Baghdasaryan wrote:
> > > > > Sorry to bother you again, folks. Any suggestions on how to silence
> > > > > this lockdep warning, which I believe to be a false positive?
> > > > >
> > > > > On Wed, Apr 15, 2020 at 4:01 PM Suren Baghdasaryan <surenb@...gle.com> wrote:
> > > > > >
> > > > > > I received a report about a possible circular locking dependency
> > > > > > warning generated from the PSI polling code. I think we are
> > > > > > protected from this scenario by the poll_scheduled atomic, but I
> > > > > > wanted to double-check, and I'm looking for advice on how to
> > > > > > annotate this case to fix the lockdep warning. I copied the
> > > > > > detailed information at the end of this email, but the short
> > > > > > story is this:
> > > > > >
> > > > > > "WARNING: possible circular locking dependency detected" is generated
> > > > > > with CONFIG_PSI and CONFIG_LOCKDEP enabled. The dependency chain it
> > > > > > describes is:
> > > > > >
> > > > > > #0
> > > > > > kthread_delayed_work_timer_fn()
> > > > > > |
> > > > > > worker->lock
> > > > > > |
> > > > > > try_to_wake_up()
> > > > > > |
> > > > > > p->pi_lock
> > > > > >
> > > > > > #1
> > > > > > sched_fork()
> > > > > > |
> > > > > > p->pi_lock
> > > > > > |
> > > > > > task_fork_fair()
> > > > > > |
> > > > > > rq->lock
> > > > > >
> > > > > > #2
> > > > > > psi_memstall_enter()
> > > > > > |
> > > > > > rq->lock
> > > > > > |
> > > > > > kthread_queue_delayed_work()
> > > > > > |
> > > > > > worker->lock
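> > > > > >
> > > > > > To make the cycle concrete, here is a minimal user-space sketch
> > > > > > of the same three orderings (illustrative only; pthread mutexes
> > > > > > stand in for worker->lock, p->pi_lock and rq->lock, and none of
> > > > > > this is kernel code):
> > > > > >
> > > > > > #include <pthread.h>
> > > > > > #include <stdio.h>
> > > > > >
> > > > > > static pthread_mutex_t worker_lock = PTHREAD_MUTEX_INITIALIZER;
> > > > > > static pthread_mutex_t pi_lock = PTHREAD_MUTEX_INITIALIZER;
> > > > > > static pthread_mutex_t rq_lock = PTHREAD_MUTEX_INITIALIZER;
> > > > > >
> > > > > > /* #0: timer fn holds worker->lock, wake-up takes p->pi_lock */
> > > > > > static void path0(void)
> > > > > > {
> > > > > >         pthread_mutex_lock(&worker_lock);
> > > > > >         pthread_mutex_lock(&pi_lock);
> > > > > >         pthread_mutex_unlock(&pi_lock);
> > > > > >         pthread_mutex_unlock(&worker_lock);
> > > > > > }
> > > > > >
> > > > > > /* #1: sched_fork() holds p->pi_lock, task_fork_fair() takes
> > > > > >  * rq->lock */
> > > > > > static void path1(void)
> > > > > > {
> > > > > >         pthread_mutex_lock(&pi_lock);
> > > > > >         pthread_mutex_lock(&rq_lock);
> > > > > >         pthread_mutex_unlock(&rq_lock);
> > > > > >         pthread_mutex_unlock(&pi_lock);
> > > > > > }
> > > > > >
> > > > > > /* #2: psi_memstall_enter() holds rq->lock, queueing the delayed
> > > > > >  * work takes worker->lock, which closes the cycle */
> > > > > > static void path2(void)
> > > > > > {
> > > > > >         pthread_mutex_lock(&rq_lock);
> > > > > >         pthread_mutex_lock(&worker_lock);
> > > > > >         pthread_mutex_unlock(&worker_lock);
> > > > > >         pthread_mutex_unlock(&rq_lock);
> > > > > > }
> > > > > >
> > > > > > int main(void)
> > > > > > {
> > > > > >         /* Run serially this never deadlocks, the same way the
> > > > > >          * poll_scheduled atomic may keep the kernel paths from
> > > > > >          * overlapping, but an order-based checker like lockdep
> > > > > >          * still sees the A -> B -> C -> A cycle. */
> > > > > >         path0();
> > > > > >         path1();
> > > > > >         path2();
> > > > > >         puts("all three orderings ran without deadlocking");
> > > > > >         return 0;
> > > > > > }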
> > > >
> > > > Irrespective of it actually being a deadlock or not, it is fairly
> > > > fragile. Ideally we'd fix #2, we really should minimize the number of
> > > > locks nested under rq->lock.
> > > >
> > > > That said, here's the easy fix, which breaks #0.
> > > >
> > >
> > > Thanks for the suggestion, Peter. Let me digest this, and I'll post
> > > a patch with your Suggested-by.
> > > Cheers!
> > >
> >
> > I tested this on my simple repro, and the patch fixes the lockdep splat.
> >
> > You can add
> > Tested-by: Shakeel Butt <shakeelb@...gle.com>
> >
>
> Thanks Shakeel! Will do.
>
> > > > ---
> > > > diff --git a/kernel/kthread.c b/kernel/kthread.c
> > > > index bfbfa481be3a..b443bba7dd21 100644
> > > > --- a/kernel/kthread.c
> > > > +++ b/kernel/kthread.c
> > > > @@ -806,14 +806,15 @@ static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
> > > > /* insert @work before @pos in @worker */
> > > > static void kthread_insert_work(struct kthread_worker *worker,
> > > > struct kthread_work *work,
> > > > - struct list_head *pos)
> > > > + struct list_head *pos,
> > > > + struct wake_q_head *wake_q)
> > > > {
> > > > kthread_insert_work_sanity_check(worker, work);
> > > >
> > > > list_add_tail(&work->node, pos);
> > > > work->worker = worker;
> > > > if (!worker->current_work && likely(worker->task))
> > > > - wake_up_process(worker->task);
> > > > + wake_q_add(wake_q, worker->task);
> > > > }
> > > >
> > > > /**
> > > > @@ -831,15 +832,19 @@ static void kthread_insert_work(struct kthread_worker *worker,
> > > > bool kthread_queue_work(struct kthread_worker *worker,
> > > > struct kthread_work *work)
> > > > {
> > > > - bool ret = false;
> > > > + DEFINE_WAKE_Q(wake_q);
> > > > unsigned long flags;
> > > > + bool ret = false;
> > > >
> > > > raw_spin_lock_irqsave(&worker->lock, flags);
> > > > if (!queuing_blocked(worker, work)) {
> > > > - kthread_insert_work(worker, work, &worker->work_list);
> > > > + kthread_insert_work(worker, work, &worker->work_list, &wake_q);
> > > > ret = true;
> > > > }
> > > > raw_spin_unlock_irqrestore(&worker->lock, flags);
> > > > +
> > > > + wake_up_q(&wake_q);
> > > > +
> > > > return ret;
> > > > }
> > > > EXPORT_SYMBOL_GPL(kthread_queue_work);
> > > > @@ -857,6 +862,7 @@ void kthread_delayed_work_timer_fn(struct timer_list *t)
> > > > struct kthread_delayed_work *dwork = from_timer(dwork, t, timer);
> > > > struct kthread_work *work = &dwork->work;
> > > > struct kthread_worker *worker = work->worker;
> > > > + DEFINE_WAKE_Q(wake_q);
> > > > unsigned long flags;
> > > >
> > > > /*
> > > > @@ -873,15 +879,18 @@ void kthread_delayed_work_timer_fn(struct timer_list *t)
> > > > /* Move the work from worker->delayed_work_list. */
> > > > WARN_ON_ONCE(list_empty(&work->node));
> > > > list_del_init(&work->node);
> > > > - kthread_insert_work(worker, work, &worker->work_list);
> > > > + kthread_insert_work(worker, work, &worker->work_list, &wake_q);
> > > >
> > > > raw_spin_unlock_irqrestore(&worker->lock, flags);
> > > > +
> > > > + wake_up_q(&wake_q);
> > > > }
> > > > EXPORT_SYMBOL(kthread_delayed_work_timer_fn);
> > > >
> > > > static void __kthread_queue_delayed_work(struct kthread_worker *worker,
> > > > struct kthread_delayed_work *dwork,
> > > > - unsigned long delay)
> > > > + unsigned long delay,
> > > > + struct wake_q_head *wake_q)
> > > > {
> > > > struct timer_list *timer = &dwork->timer;
> > > > struct kthread_work *work = &dwork->work;
> > > > @@ -895,7 +904,7 @@ static void __kthread_queue_delayed_work(struct kthread_worker *worker,
> > > > * on that there's no such delay when @delay is 0.
> > > > */
> > > > if (!delay) {
> > > > - kthread_insert_work(worker, work, &worker->work_list);
> > > > + kthread_insert_work(worker, work, &worker->work_list, wake_q);
> > > > return;
> > > > }
> > > >
> > > > @@ -928,17 +937,21 @@ bool kthread_queue_delayed_work(struct kthread_worker *worker,
> > > > unsigned long delay)
> > > > {
> > > > struct kthread_work *work = &dwork->work;
> > > > + DEFINE_WAKE_Q(wake_q);
> > > > unsigned long flags;
> > > > bool ret = false;
> > > >
> > > > raw_spin_lock_irqsave(&worker->lock, flags);
> > > >
> > > > if (!queuing_blocked(worker, work)) {
> > > > - __kthread_queue_delayed_work(worker, dwork, delay);
> > > > + __kthread_queue_delayed_work(worker, dwork, delay, &wake_q);
> > > > ret = true;
> > > > }
> > > >
> > > > raw_spin_unlock_irqrestore(&worker->lock, flags);
> > > > +
> > > > + wake_up_q(&wake_q);
> > > > +
> > > > return ret;
> > > > }
> > > > EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
> > > > @@ -967,6 +980,7 @@ void kthread_flush_work(struct kthread_work *work)
> > > > KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
> > > > COMPLETION_INITIALIZER_ONSTACK(fwork.done),
> > > > };
> > > > + DEFINE_WAKE_Q(wake_q);
> > > > struct kthread_worker *worker;
> > > > bool noop = false;
> > > >
> > > > @@ -979,15 +993,17 @@ void kthread_flush_work(struct kthread_work *work)
> > > > WARN_ON_ONCE(work->worker != worker);
> > > >
> > > > if (!list_empty(&work->node))
> > > > - kthread_insert_work(worker, &fwork.work, work->node.next);
> > > > + kthread_insert_work(worker, &fwork.work, work->node.next, &wake_q);
> > > > else if (worker->current_work == work)
> > > > kthread_insert_work(worker, &fwork.work,
> > > > - worker->work_list.next);
> > > > + worker->work_list.next, &wake_q);
> > > > else
> > > > noop = true;
> > > >
> > > > raw_spin_unlock_irq(&worker->lock);
> > > >
> > > > + wake_up_q(&wake_q);
> > > > +
> > > > if (!noop)
> > > > wait_for_completion(&fwork.done);
> > > > }
> > > > @@ -1065,6 +1081,7 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker,
> > > > unsigned long delay)
> > > > {
> > > > struct kthread_work *work = &dwork->work;
> > > > + DEFINE_WAKE_Q(wake_q);
> > > > unsigned long flags;
> > > > int ret = false;
> > > >
> > > > @@ -1083,9 +1100,12 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker,
> > > >
> > > > ret = __kthread_cancel_work(work, true, &flags);
> > > > fast_queue:
> > > > - __kthread_queue_delayed_work(worker, dwork, delay);
> > > > + __kthread_queue_delayed_work(worker, dwork, delay, &wake_q);
> > > > out:
> > > > raw_spin_unlock_irqrestore(&worker->lock, flags);
> > > > +
> > > > + wake_up_q(&wake_q);
> > > > +
> > > > return ret;
> > > > }
> > > > EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
> > > >
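For anyone skimming the patch: the idiom it applies is to record the
wake-up target on a local wake_q while worker->lock is held and issue
the actual wake-up only after the lock is dropped, so try_to_wake_up()
(and with it p->pi_lock) is never nested under worker->lock. Here is a
rough user-space sketch of the same idea, with pthread primitives
standing in for the kernel's wake_q machinery (not kernel code):

#include <pthread.h>
#include <stdio.h>

#define MAX_WAKE 8

/* Wake-up targets collected while a lock is held. */
struct wake_q {
        pthread_cond_t *pending[MAX_WAKE];
        int n;
};

static void wake_q_add(struct wake_q *q, pthread_cond_t *c)
{
        if (q->n < MAX_WAKE)
                q->pending[q->n++] = c;
}

static void wake_up_q(struct wake_q *q)
{
        for (int i = 0; i < q->n; i++)
                pthread_cond_signal(q->pending[i]);
        q->n = 0;
}

struct worker {
        pthread_mutex_t lock;
        pthread_cond_t more_work;
        int work_count;
};

static void queue_work(struct worker *w)
{
        struct wake_q q = { .n = 0 };

        pthread_mutex_lock(&w->lock);
        w->work_count++;               /* "insert" the work item */
        wake_q_add(&q, &w->more_work); /* record, do not wake yet */
        pthread_mutex_unlock(&w->lock);

        /* Wake with no locks held: nothing can nest under w->lock
         * via the wake-up path any more, which is what breaks #0. */
        wake_up_q(&q);
}

int main(void)
{
        struct worker w = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .more_work = PTHREAD_COND_INITIALIZER,
        };

        queue_work(&w);
        printf("queued %d work item(s)\n", w.work_count);
        return 0;
}
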
The patch is posted at https://lkml.org/lkml/2020/4/27/985. I had to
include linux/sched/wake_q.h and fix a long line, but other than that
it's unchanged.
Thanks!