[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <YNSMjZmuuuphg+aa@boqun-archlinux>
Date: Thu, 24 Jun 2021 21:45:49 +0800
From: Boqun Feng <boqun.feng@...il.com>
To: Xiongwei Song <sxwjean@...com>
Cc: peterz@...radead.org, mingo@...hat.com, will@...nel.org,
longman@...hat.com, linux-kernel@...r.kernel.org,
Xiongwei Song <sxwjean@...il.com>
Subject: Re: [PATCH v2 3/3] locking/lockdep: Print possible warning after
counting deps
On Fri, Jun 18, 2021 at 10:55:34PM +0800, Xiongwei Song wrote:
> From: Xiongwei Song <sxwjean@...il.com>
>
> The graph walk might hit an error when counting dependencies. Once the
> return value is negative, print a warning to remind users.
>
> However, lockdep_unlock() would be called twice if we call print_bfs_bug()
> directly in __lockdep_count_*_deps(), so as the suggestion from Boqun:
> "
> Here print_bfs_bug() will eventually call debug_locks_off_graph_unlock()
> to release the graph lock, and the caller (lockdep_count_forward_deps())
> will also call graph_unlock() afterwards, and that means we unlock
> *twice* if a BFS error happens... although in that case, lockdep should
> stop working so messing up with the graph lock may not hurt anything,
> but still, I don't think we want to do that.
>
> So probably you can open-code __lockdep_count_forward_deps() into
> lockdep_count_forward_deps(), and call print_bfs_bug() or
> graph_unlock() accordingly. The body of __lockdep_count_forward_deps()
> is really small, so I think it's OK to open-code it into its caller.
> "
> we put the code in __lockdep_count_*_deps() into lockdep_count_*_deps().
>
> Suggested-by: Waiman Long <longman@...hat.com>
> Suggested-by: Boqun Feng <boqun.feng@...il.com>
> Signed-off-by: Xiongwei Song <sxwjean@...il.com>
Reviewed-by: Boqun Feng <boqun.feng@...il.com>
Thanks!
Regards,
Boqun
> ---
> kernel/locking/lockdep.c | 45 +++++++++++++++++++---------------------
> 1 file changed, 21 insertions(+), 24 deletions(-)
>
> diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
> index cb94097014d8..c29453b1df50 100644
> --- a/kernel/locking/lockdep.c
> +++ b/kernel/locking/lockdep.c
> @@ -2024,55 +2024,52 @@ static bool noop_count(struct lock_list *entry, void *data)
> return false;
> }
>
> -static unsigned long __lockdep_count_forward_deps(struct lock_list *this)
> -{
> - unsigned long count = 0;
> - struct lock_list *target_entry;
> -
> - __bfs_forwards(this, (void *)&count, noop_count, NULL, &target_entry);
> -
> - return count;
> -}
> unsigned long lockdep_count_forward_deps(struct lock_class *class)
> {
> - unsigned long ret, flags;
> + unsigned long count = 0, flags;
> struct lock_list this;
> + struct lock_list *target_entry;
> + enum bfs_result result;
>
> __bfs_init_root(&this, class);
>
> raw_local_irq_save(flags);
> lockdep_lock();
> - ret = __lockdep_count_forward_deps(&this);
> - lockdep_unlock();
> - raw_local_irq_restore(flags);
>
> - return ret;
> -}
> + result = __bfs_forwards(&this, (void *)&count, noop_count, NULL, &target_entry);
>
> -static unsigned long __lockdep_count_backward_deps(struct lock_list *this)
> -{
> - unsigned long count = 0;
> - struct lock_list *target_entry;
> + if (bfs_error(result))
> + print_bfs_bug(result);
> + else
> + lockdep_unlock();
>
> - __bfs_backwards(this, (void *)&count, noop_count, NULL, &target_entry);
> + raw_local_irq_restore(flags);
>
> return count;
> }
>
> unsigned long lockdep_count_backward_deps(struct lock_class *class)
> {
> - unsigned long ret, flags;
> + unsigned long count = 0, flags;
> struct lock_list this;
> + struct lock_list *target_entry;
> + enum bfs_result result;
>
> __bfs_init_root(&this, class);
>
> raw_local_irq_save(flags);
> lockdep_lock();
> - ret = __lockdep_count_backward_deps(&this);
> - lockdep_unlock();
> +
> + result = __bfs_backwards(&this, (void *)&count, noop_count, NULL, &target_entry);
> +
> + if (bfs_error(result))
> + print_bfs_bug(result);
> + else
> + lockdep_unlock();
> +
> raw_local_irq_restore(flags);
>
> - return ret;
> + return count;
> }
>
> /*
> --
> 2.30.2
>
Powered by blists - more mailing lists