Message-ID: <20170825085245.GF3858@X58A-UD3R>
Date: Fri, 25 Aug 2017 17:52:46 +0900
From: Byungchul Park <byungchul.park@....com>
To: tj@...nel.org, johannes.berg@...el.com
Cc: peterz@...radead.org, mingo@...nel.org, tglx@...utronix.de,
linux-kernel@...r.kernel.org, kernel-team@....com, oleg@...hat.com,
david@...morbit.com
Subject: Re: [RFC] workqueue: remove manual lockdep uses to detect deadlocks
On Fri, Aug 25, 2017 at 05:41:03PM +0900, Byungchul Park wrote:
> Hello all,
>
> This is _RFC_.
>
> I would like to request comments on whether this is conceptually
> reasonable. If so, I will rework it more carefully and resend.
>
> Could you let me know your opinions about this?
+cc oleg@...hat.com
+cc david@...morbit.com
> ----->8-----
> From 448360c343477fff63df766544eec4620657a59e Mon Sep 17 00:00:00 2001
> From: Byungchul Park <byungchul.park@....com>
> Date: Fri, 25 Aug 2017 17:35:07 +0900
> Subject: [RFC] workqueue: remove manual lockdep uses to detect deadlocks
>
> We introduced the commit cited below to detect deadlocks between the
> wait_for_completion() in flush_{workqueue,work}() and other locks,
> using manual lockdep annotations. Now that LOCKDEP_COMPLETIONS exists,
> the same deadlocks are detected automatically through the completions
> themselves, so the manual annotations are no longer needed. Remove
> them.
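>
> For illustration, here is a minimal sketch of the class of deadlock
> these annotations catch. Every name below (the mutex, the work item,
> the functions) is hypothetical and only meant to show the pattern:
>
>         #include <linux/mutex.h>
>         #include <linux/workqueue.h>
>
>         static DEFINE_MUTEX(a);
>
>         /* the work function takes A */
>         static void my_work_fn(struct work_struct *work)
>         {
>                 mutex_lock(&a);
>                 mutex_unlock(&a);
>         }
>         static DECLARE_WORK(my_work, my_work_fn);
>
>         static void some_path(void)
>         {
>                 schedule_work(&my_work);
>                 mutex_lock(&a);         /* A is held... */
>                 flush_work(&my_work);   /* ...while waiting for the work:
>                                          * my_work_fn() can never take A,
>                                          * so the flush never returns.
>                                          */
>                 mutex_unlock(&a);
>         }
>
> With the manual annotations, flush_work() acquires work->lockdep_map
> while A is held and the worker acquires it around my_work_fn(), so
> lockdep reports the A -> work -> A cycle even when the deadlock does
> not actually trigger. LOCKDEP_COMPLETIONS is expected to catch the
> same cycle through the wait_for_completion() that flush_work()
> performs on its flush barrier.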
>
> commit 4e6045f134784f4b158b3c0f7a282b04bd816887
> workqueue: debug flushing deadlocks with lockdep
>
> Signed-off-by: Byungchul Park <byungchul.park@....com>
> ---
> include/linux/workqueue.h | 43 -------------------------------------------
> kernel/workqueue.c        | 38 --------------------------------------
> 2 files changed, 81 deletions(-)
>
> diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
> index db6dc9d..91d0e14 100644
> --- a/include/linux/workqueue.h
> +++ b/include/linux/workqueue.h
> @@ -8,7 +8,6 @@
> #include <linux/timer.h>
> #include <linux/linkage.h>
> #include <linux/bitops.h>
> -#include <linux/lockdep.h>
> #include <linux/threads.h>
> #include <linux/atomic.h>
> #include <linux/cpumask.h>
> @@ -101,9 +100,6 @@ struct work_struct {
> atomic_long_t data;
> struct list_head entry;
> work_func_t func;
> -#ifdef CONFIG_LOCKDEP
> - struct lockdep_map lockdep_map;
> -#endif
> };
>
> #define WORK_DATA_INIT() ATOMIC_LONG_INIT((unsigned long)WORK_STRUCT_NO_POOL)
> @@ -154,23 +150,10 @@ struct execute_work {
> struct work_struct work;
> };
>
> -#ifdef CONFIG_LOCKDEP
> -/*
> - * NB: because we have to copy the lockdep_map, setting _key
> - * here is required, otherwise it could get initialised to the
> - * copy of the lockdep_map!
> - */
> -#define __WORK_INIT_LOCKDEP_MAP(n, k) \
> - .lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
> -#else
> -#define __WORK_INIT_LOCKDEP_MAP(n, k)
> -#endif
> -
> #define __WORK_INITIALIZER(n, f) { \
> .data = WORK_DATA_STATIC_INIT(), \
> .entry = { &(n).entry, &(n).entry }, \
> .func = (f), \
> - __WORK_INIT_LOCKDEP_MAP(#n, &(n)) \
> }
>
> #define __DELAYED_WORK_INITIALIZER(n, f, tflags) { \
> @@ -211,26 +194,13 @@ static inline void destroy_delayed_work_on_stack(struct delayed_work *work) { }
> * assignment of the work data initializer allows the compiler
> * to generate better code.
> */
> -#ifdef CONFIG_LOCKDEP
> #define __INIT_WORK(_work, _func, _onstack) \
> do { \
> - static struct lock_class_key __key; \
> - \
> __init_work((_work), _onstack); \
> (_work)->data = (atomic_long_t) WORK_DATA_INIT(); \
> - lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0); \
> INIT_LIST_HEAD(&(_work)->entry); \
> (_work)->func = (_func); \
> } while (0)
> -#else
> -#define __INIT_WORK(_work, _func, _onstack) \
> - do { \
> - __init_work((_work), _onstack); \
> - (_work)->data = (atomic_long_t) WORK_DATA_INIT(); \
> - INIT_LIST_HEAD(&(_work)->entry); \
> - (_work)->func = (_func); \
> - } while (0)
> -#endif
>
> #define INIT_WORK(_work, _func) \
> __INIT_WORK((_work), (_func), 0)
> @@ -392,22 +362,9 @@ enum {
> * RETURNS:
> * Pointer to the allocated workqueue on success, %NULL on failure.
> */
> -#ifdef CONFIG_LOCKDEP
> -#define alloc_workqueue(fmt, flags, max_active, args...) \
> -({ \
> - static struct lock_class_key __key; \
> - const char *__lock_name; \
> - \
> - __lock_name = #fmt#args; \
> - \
> - __alloc_workqueue_key((fmt), (flags), (max_active), \
> - &__key, __lock_name, ##args); \
> -})
> -#else
> #define alloc_workqueue(fmt, flags, max_active, args...) \
> __alloc_workqueue_key((fmt), (flags), (max_active), \
> NULL, NULL, ##args)
> -#endif
>
> /**
> * alloc_ordered_workqueue - allocate an ordered workqueue
> diff --git a/kernel/workqueue.c b/kernel/workqueue.c
> index f128b3b..87d4bc2 100644
> --- a/kernel/workqueue.c
> +++ b/kernel/workqueue.c
> @@ -40,7 +40,6 @@
> #include <linux/freezer.h>
> #include <linux/kallsyms.h>
> #include <linux/debug_locks.h>
> -#include <linux/lockdep.h>
> #include <linux/idr.h>
> #include <linux/jhash.h>
> #include <linux/hashtable.h>
> @@ -260,9 +259,6 @@ struct workqueue_struct {
> #ifdef CONFIG_SYSFS
> struct wq_device *wq_dev; /* I: for sysfs interface */
> #endif
> -#ifdef CONFIG_LOCKDEP
> - struct lockdep_map lockdep_map;
> -#endif
> char name[WQ_NAME_LEN]; /* I: workqueue name */
>
> /*
> @@ -2024,18 +2020,7 @@ static void process_one_work(struct worker *worker, struct work_struct *work)
> bool cpu_intensive = pwq->wq->flags & WQ_CPU_INTENSIVE;
> int work_color;
> struct worker *collision;
> -#ifdef CONFIG_LOCKDEP
> - /*
> - * It is permissible to free the struct work_struct from
> - * inside the function that is called from it, this we need to
> - * take into account for lockdep too. To avoid bogus "held
> - * lock freed" warnings as well as problems when looking into
> - * work->lockdep_map, make a copy and use that here.
> - */
> - struct lockdep_map lockdep_map;
>
> - lockdep_copy_map(&lockdep_map, &work->lockdep_map);
> -#endif
> /* ensure we're on the correct CPU */
> WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
> raw_smp_processor_id() != pool->cpu);
> @@ -2091,8 +2076,6 @@ static void process_one_work(struct worker *worker, struct work_struct *work)
>
> spin_unlock_irq(&pool->lock);
>
> - lock_map_acquire_read(&pwq->wq->lockdep_map);
> - lock_map_acquire(&lockdep_map);
> crossrelease_hist_start(XHLOCK_PROC);
> trace_workqueue_execute_start(work);
> worker->current_func(work);
> @@ -2102,8 +2085,6 @@ static void process_one_work(struct worker *worker, struct work_struct *work)
> */
> trace_workqueue_execute_end(work);
> crossrelease_hist_end(XHLOCK_PROC);
> - lock_map_release(&lockdep_map);
> - lock_map_release(&pwq->wq->lockdep_map);
>
> if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
> pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n"
> @@ -2598,9 +2579,6 @@ void flush_workqueue(struct workqueue_struct *wq)
> if (WARN_ON(!wq_online))
> return;
>
> - lock_map_acquire(&wq->lockdep_map);
> - lock_map_release(&wq->lockdep_map);
> -
> mutex_lock(&wq->mutex);
>
> /*
> @@ -2825,18 +2803,6 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
> insert_wq_barrier(pwq, barr, work, worker);
> spin_unlock_irq(&pool->lock);
>
> - /*
> - * If @max_active is 1 or rescuer is in use, flushing another work
> - * item on the same workqueue may lead to deadlock. Make sure the
> - * flusher is not running on the same workqueue by verifying write
> - * access.
> - */
> - if (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer)
> - lock_map_acquire(&pwq->wq->lockdep_map);
> - else
> - lock_map_acquire_read(&pwq->wq->lockdep_map);
> - lock_map_release(&pwq->wq->lockdep_map);
> -
> return true;
> already_gone:
> spin_unlock_irq(&pool->lock);
> @@ -2861,9 +2827,6 @@ bool flush_work(struct work_struct *work)
> if (WARN_ON(!wq_online))
> return false;
>
> - lock_map_acquire(&work->lockdep_map);
> - lock_map_release(&work->lockdep_map);
> -
> if (start_flush_work(work, &barr)) {
> wait_for_completion(&barr.done);
> destroy_work_on_stack(&barr.work);
> @@ -3996,7 +3959,6 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
> INIT_LIST_HEAD(&wq->flusher_overflow);
> INIT_LIST_HEAD(&wq->maydays);
>
> - lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
> INIT_LIST_HEAD(&wq->list);
>
> if (alloc_and_link_pwqs(wq) < 0)
> --
> 1.9.1
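
For completeness: the start_flush_work() hunk above also drops the
special handling for max_active == 1 and rescuer-backed workqueues. The
pattern that case guarded against is a work item flushing another work
item on the same ordered workqueue; roughly, with all names below
hypothetical:

        #include <linux/workqueue.h>

        static struct workqueue_struct *wq;
        static struct work_struct w1, w2;

        static void w2_fn(struct work_struct *work) { }

        static void w1_fn(struct work_struct *work)
        {
                /*
                 * w2 sits on the same max_active == 1 workqueue, so it
                 * cannot start until w1_fn() returns: this wait never
                 * finishes.
                 */
                flush_work(&w2);
        }

        static void setup(void)
        {
                wq = alloc_workqueue("x", 0, 1);  /* max_active == 1 */
                INIT_WORK(&w1, w1_fn);
                INIT_WORK(&w2, w2_fn);
                queue_work(wq, &w1);
                queue_work(wq, &w2);
        }

The manual annotations caught this by write-acquiring wq->lockdep_map in
start_flush_work() while process_one_work() read-acquires it around each
work function; the premise of this patch is that LOCKDEP_COMPLETIONS
detects the same dependency through the barrier completion that
flush_work() waits on.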