Message-ID: <CACT4Y+YxQ-v4Ym7QOwsa_Rm+iE-eauLq_W-rq6g=jsQCBYPdrQ@mail.gmail.com>
Date: Mon, 28 Sep 2020 10:03:19 +0200
From: Dmitry Vyukov <dvyukov@...gle.com>
To: Boqun Feng <boqun.feng@...il.com>
Cc: LKML <linux-kernel@...r.kernel.org>,
Waiman Long <longman@...hat.com>, Qian Cai <cai@...hat.com>,
Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...hat.com>, Will Deacon <will@...nel.org>
Subject: Re: [PATCH] lockdep: Optimize the memory usage of circular queue
On Thu, Sep 24, 2020 at 5:13 PM Boqun Feng <boqun.feng@...il.com> wrote:
>
> Ping ;-)
>
> Regards,
> Boqun
Hi Boqun,
Peter says this may also fix this issue:
https://syzkaller.appspot.com/bug?extid=62ebe501c1ce9a91f68c
Please add the following tag to the patch so that the bug report will
be closed on merge:
Reported-by: syzbot+62ebe501c1ce9a91f68c@...kaller.appspotmail.com
> On Thu, Sep 17, 2020 at 04:01:50PM +0800, Boqun Feng wrote:
> > Qian Cai reported a BFS_EQUEUEFULL warning [1] after the read-recursive
> > deadlock detection was merged into the tip tree recently. Unlike the
> > previous lockdep graph search, which iterates every lock class (every
> > node in the graph) exactly once, the graph search for read-recursive
> > deadlock detection needs to iterate every lock dependency (every edge
> > in the graph) once. As a result, the maximum memory cost of the
> > circular queue changes from O(V), where V is the number of lock classes
> > (nodes or vertices) in the graph, to O(E), where E is the number of
> > lock dependencies (edges), because every lock class or dependency gets
> > enqueued once during the BFS. Therefore we hit the BFS_EQUEUEFULL case.
> >
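> > For illustration, the pre-patch loop (condensed from the removed
> > lines in the diff below, details elided) consumes one circular-queue
> > slot per dependency list entry, i.e. per edge:
> >
> >         while ((lock = __cq_dequeue(cq))) {
> >                 ...
> >                 list_for_each_entry_rcu(entry, head, entry) {
> >                         ...
> >                         if (__cq_enqueue(cq, entry)) { /* one slot per edge */
> >                                 ret = BFS_EQUEUEFULL;
> >                                 goto exit;
> >                         }
> >                 }
> >         }
> >
> > In the worst case, all E edges can thus sit in the queue at once.
> >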
> > However, we don't actually need to enqueue all dependencies for the
> > BFS: every time we enqueue a dependency, we almost always enqueue all
> > the other dependencies in the same dependency list as well ("almost"
> > because we currently check before enqueueing, so a dependency that
> > fails the check stage is not enqueued; we could, however, always do
> > the check and the enqueue in the reverse order). Based on this, we can
> > enqueue only the first dependency of a dependency list, and every time
> > we want to fetch a new dependency to work on, we can either (as
> > sketched below):
> >
> > 1) fetch the dependency next to the current dependency in the
> > dependency list
> > or
> > 2) if the dependency in 1) doesn't exist, fetch the dependency from
> > the queue.
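> >
> > A condensed sketch of the resulting loop (the full diff follows):
> >
> >         while (lock || (lock = __cq_dequeue(cq))) {
> >                 ...
> >                 /* Enqueue only the head of the dependency list. */
> >                 entry = list_first_or_null_rcu(head, struct lock_list, entry);
> >                 if (entry && __cq_enqueue(cq, entry))
> >                         return BFS_EQUEUEFULL;
> >                 ...
> > next:
> >                 /* Fetch the sibling dependency, or fall back to the queue. */
> >                 if (lock->parent) {
> >                         head = get_dep_list(lock->parent, offset);
> >                         lock = list_next_or_null_rcu(head, &lock->entry,
> >                                                      struct lock_list, entry);
> >                 } else {
> >                         lock = NULL;
> >                 }
> >         }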
> >
> > With this approach, the "max bfs queue depth" for an x86_64_defconfig +
> > lockdep and selftest config kernel decreases from:
> >
> > max bfs queue depth: 201
> >
> > to (after applying this patch)
> >
> > max bfs queue depth: 61
> >
> > While I'm at it, clean up the code logic a little (e.g. return
> > directly rather than setting a "ret" value and jumping to the "exit"
> > label).
> >
> > [1]: https://lore.kernel.org/lkml/17343f6f7f2438fc376125384133c5ba70c2a681.camel@redhat.com/
> >
> > Reported-by: Qian Cai <cai@...hat.com>
> > Signed-off-by: Boqun Feng <boqun.feng@...il.com>
> > ---
> > kernel/locking/lockdep.c | 108 ++++++++++++++++++++++++---------------
> > 1 file changed, 67 insertions(+), 41 deletions(-)
> >
> > diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
> > index cccf4bc759c6..761c2327e9cf 100644
> > --- a/kernel/locking/lockdep.c
> > +++ b/kernel/locking/lockdep.c
> > @@ -1640,35 +1640,22 @@ static enum bfs_result __bfs(struct lock_list *source_entry,
> > int offset)
> > {
> > struct lock_list *entry;
> > - struct lock_list *lock;
> > + struct lock_list *lock = NULL;
> > struct list_head *head;
> > struct circular_queue *cq = &lock_cq;
> > - enum bfs_result ret = BFS_RNOMATCH;
> >
> > lockdep_assert_locked();
> >
> > - if (match(source_entry, data)) {
> > - *target_entry = source_entry;
> > - ret = BFS_RMATCH;
> > - goto exit;
> > - }
> > -
> > - head = get_dep_list(source_entry, offset);
> > - if (list_empty(head))
> > - goto exit;
> > -
> > __cq_init(cq);
> > __cq_enqueue(cq, source_entry);
> >
> > - while ((lock = __cq_dequeue(cq))) {
> > - bool prev_only_xr;
> > -
> > - if (!lock->class) {
> > - ret = BFS_EINVALIDNODE;
> > - goto exit;
> > - }
> > + while (lock || (lock = __cq_dequeue(cq))) {
> > + if (!lock->class)
> > + return BFS_EINVALIDNODE;
> >
> > /*
> > + * Step 1: check whether we have already finished with this one.
> > + *
> > * If we have visited all the dependencies from this @lock to
> > * others (iow, if we have visited all lock_list entries in
> > * @lock->class->locks_{after,before}) we skip, otherwise go
> > @@ -1676,17 +1663,17 @@ static enum bfs_result __bfs(struct lock_list *source_entry,
> > * list accessed.
> > */
> > if (lock_accessed(lock))
> > - continue;
> > + goto next;
> > else
> > mark_lock_accessed(lock);
> >
> > - head = get_dep_list(lock, offset);
> > -
> > - prev_only_xr = lock->only_xr;
> > -
> > - list_for_each_entry_rcu(entry, head, entry) {
> > - unsigned int cq_depth;
> > - u8 dep = entry->dep;
> > + /*
> > + * Step 2: check whether the previous dependency and this one
> > + * form a strong dependency path.
> > + */
> > + if (lock->parent) { /* Parent exists, check prev dependency */
> > + u8 dep = lock->dep;
> > + bool prev_only_xr = lock->parent->only_xr;
> >
> > /*
> > * Mask out all -(S*)-> if we only have *R in previous
> > @@ -1698,29 +1685,68 @@ static enum bfs_result __bfs(struct lock_list *source_entry,
> >
> > /* If nothing left, we skip */
> > if (!dep)
> > - continue;
> > + goto next;
> >
> > /* If there are only -(*R)-> left, set that for the next step */
> > - entry->only_xr = !(dep & (DEP_SN_MASK | DEP_EN_MASK));
> > + lock->only_xr = !(dep & (DEP_SN_MASK | DEP_EN_MASK));
> > + }
> >
> > - visit_lock_entry(entry, lock);
> > - if (match(entry, data)) {
> > - *target_entry = entry;
> > - ret = BFS_RMATCH;
> > - goto exit;
> > - }
> > + /*
> > + * Step 3: we haven't visited this and there is a strong
> > + * dependency path to this, so check with @match.
> > + */
> > + if (match(lock, data)) {
> > + *target_entry = lock;
> > + return BFS_RMATCH;
> > + }
> > +
> > + /*
> > + * Step 4: if no match, expand the path by adding the
> > + * forward or backward dependencies in the search
> > + *
> > + * Note we only enqueue the first of the list into the queue,
> > + * because a sibling dependency can always be found from a given
> > + * one (see label 'next'); as a result, queue space is saved.
> > + */
> > + head = get_dep_list(lock, offset);
> > + entry = list_first_or_null_rcu(head, struct lock_list, entry);
> > + if (entry) {
> > + unsigned int cq_depth;
> > +
> > + if (__cq_enqueue(cq, entry))
> > + return BFS_EQUEUEFULL;
> >
> > - if (__cq_enqueue(cq, entry)) {
> > - ret = BFS_EQUEUEFULL;
> > - goto exit;
> > - }
> > cq_depth = __cq_get_elem_count(cq);
> > if (max_bfs_queue_depth < cq_depth)
> > max_bfs_queue_depth = cq_depth;
> > }
> > +
> > + /*
> > + * Update ->parent, so that when @entry is iterated, we know
> > + * its previous dependency.
> > + */
> > + list_for_each_entry_rcu(entry, head, entry)
> > + visit_lock_entry(entry, lock);
> > +next:
> > + /*
> > + * Step 5: fetch the next dependency to process.
> > + *
> > + * If there is a previous dependency, we fetch the sibling
> > + * dependency from the dep list of the previous dependency.
> > + *
> > + * Otherwise set @lock to NULL to fetch the next entry from
> > + * queue.
> > + */
> > + if (lock->parent) {
> > + head = get_dep_list(lock->parent, offset);
> > + lock = list_next_or_null_rcu(head, &lock->entry,
> > + struct lock_list, entry);
> > + } else {
> > + lock = NULL;
> > + }
> > }
> > -exit:
> > - return ret;
> > +
> > + return BFS_RNOMATCH;
> > }
> >
> > static inline enum bfs_result
> > --
> > 2.28.0
> >
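For anyone who wants to experiment with the queueing idea outside the
kernel, here is a minimal standalone sketch (illustrative names only,
not lockdep code): a BFS over an adjacency-list graph that enqueues
only the head of each list and walks siblings in place, tracking the
maximum queue depth reached:

        #include <stdio.h>

        #define MAX_NODES  8
        #define QUEUE_SIZE 16

        struct edge {
                int to;                 /* target node of this dependency */
                struct edge *next;      /* sibling in the same list       */
        };

        static struct edge *adj[MAX_NODES];     /* per-node dependency lists */
        static int accessed[MAX_NODES];         /* visited markers           */

        static struct edge *queue[QUEUE_SIZE];
        static int qhead, qtail, qmax;

        static void enqueue(struct edge *e)
        {
                queue[qtail++ % QUEUE_SIZE] = e;
                if (qtail - qhead > qmax)
                        qmax = qtail - qhead;   /* track max queue depth */
        }

        static struct edge *dequeue(void)
        {
                return qhead == qtail ? NULL : queue[qhead++ % QUEUE_SIZE];
        }

        static void bfs(int src)
        {
                struct edge start = { src, NULL };
                struct edge *e = &start;

                /* Work on @e if set, otherwise fall back to the queue. */
                while (e || (e = dequeue())) {
                        if (!accessed[e->to]) {
                                accessed[e->to] = 1;
                                if (adj[e->to])
                                        enqueue(adj[e->to]); /* head only */
                        }
                        e = e->next;    /* sibling, or NULL -> queue */
                }
        }

        int main(void)
        {
                static struct edge e01 = { 1, NULL };
                static struct edge e02 = { 2, &e01 };
                static struct edge e13 = { 3, NULL };
                static struct edge e23 = { 3, NULL };

                adj[0] = &e02;  /* 0 -> 2, then sibling 0 -> 1 */
                adj[1] = &e13;  /* 1 -> 3                      */
                adj[2] = &e23;  /* 2 -> 3                      */

                bfs(0);
                printf("max queue depth: %d\n", qmax);
                return 0;
        }

On this toy graph the program prints "max queue depth: 2".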