Message-Id: <a14c878bc8a8f14d35e1b03b0c07220368e0114f.1597634369.git.zhaoqianli@xiaomi.com>
Date: Mon, 17 Aug 2020 12:48:56 +0800
From: Qianli Zhao <zhaoqianligood@...il.com>
To: tglx@...utronix.de, axboe@...nel.dk, akpm@...ux-foundation.org,
Felix.Kuehling@....com
Cc: john.stultz@...aro.org, sboyd@...nel.org,
ben.dooks@...ethink.co.uk, bfields@...hat.com, cl@...k-chips.com,
linux-kernel@...r.kernel.org, zhaoqianli@...omi.com
Subject: [PATCH v4] kthread: Add debugobject support
From: Qianli Zhao <zhaoqianli@...omi.com>
Add debugobject support to track the lifetime of kthread_work objects,
so that reinitializing or freeing an active object can be detected.
Add kthread_init_work_onstack()/kthread_init_delayed_work_onstack() to
support kthread_work objects that live on the stack.

If a kthread_work that is still activated (queued) is reinitialized,
the worker's delayed_work_list/work_list gets corrupted. With this
config enabled, there is a chance to fix up such errors, or at least
to warn about the wrong use of a kthread_work.
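For example, a minimal, hypothetical sketch of the misuse this option
catches (worker and example_fn are placeholders, not from this report):

  static struct kthread_work example_work;

  static void example_fn(struct kthread_work *work)
  {
          /* ... */
  }

  static void trigger_bug(struct kthread_worker *worker)
  {
          kthread_init_work(&example_work, example_fn);
          kthread_queue_work(worker, &example_work);
          /*
           * BUG: if example_work is still on the worker's list, the
           * second init below wipes its list node and corrupts
           * work_list/delayed_work_list.
           */
          kthread_init_work(&example_work, example_fn);
          kthread_queue_work(worker, &example_work);
  }

With CONFIG_DEBUG_OBJECTS_KTHREAD_WORK enabled, the second init is
caught and fixed up (fixup_init cancels the still-active work first).
Without it, the corruption only shows up later, e.g.: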
[30858.395766] list_del corruption. next->prev should be ffffffe388ebbf88, but was ffffffe388ebb588
[30858.395788] WARNING: CPU: 2 PID: 387 at /home/work/data/codes/build_home/kernel/msm-4.19/lib/list_debug.c:56 __list_del_entry_valid+0xc8/0xd0
...
[30858.395906] Call trace:
[30858.395909] __list_del_entry_valid+0xc8/0xd0
[30858.395912] __kthread_cancel_work_sync+0x98/0x138
[30858.395915] kthread_cancel_delayed_work_sync+0x10/0x20
[30858.395917] sde_encoder_resource_control+0xe8/0x12c0
[30858.395920] sde_encoder_prepare_for_kickoff+0x5dc/0x2008
[30858.395923] sde_crtc_commit_kickoff+0x280/0x890
[30858.395925] sde_kms_commit+0x16c/0x278
[30858.395928] complete_commit+0x3c4/0x760
[30858.395931] _msm_drm_commit_work_cb+0xec/0x1e0
[30858.395933] kthread_worker_fn+0xf8/0x190
[30858.395935] kthread+0x118/0x128
[30858.395938] ret_from_fork+0x10/0x18
crash> struct kthread_worker.delayed_work_list 0xffffffe3893925f0
[ffffffe389392620] delayed_work_list = {
next = 0xffffffe388ebbf88,
prev = 0xffffffe388ebb588
}
crash> list 0xffffffe388ebbf88
ffffffe388ebbf88
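The new on-stack helpers pair an init with an explicit destroy before
the stack frame goes away, which is the same pattern
kthread_flush_work()/kthread_flush_worker() adopt below. A sketch
(sync_fn and do_sync are hypothetical):

  static void sync_fn(struct kthread_work *work)
  {
          /* ... */
  }

  static void do_sync(struct kthread_worker *worker)
  {
          struct kthread_work work;

          kthread_init_work_onstack(&work, sync_fn);
          kthread_queue_work(worker, &work);
          kthread_flush_work(&work);
          /* Tell debugobjects the object dies with this stack frame. */
          destroy_kwork_on_stack(&work);
  }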
Signed-off-by: Qianli Zhao <zhaoqianli@...omi.com>
---
V4:
- Changelog update
- Add comment for KWORK_ENTRY_STATIC
- Code format modification
- Check worker availability early in kthread_flush_work
V3:
- Changelog update
- Add fixup_assert_init support
- Move debug_kwork_activate/debug_kwork_deactivate before the list operations
- Name the Kconfig option CONFIG_DEBUG_OBJECTS_KTHREAD_WORK
- Use kthread_init_work_onstack/destroy_kwork_on_stack when a kthread_work is used on the stack
- Call __init_kwork before clearing the kthread_work in kthread_init_work
---
include/linux/kthread.h | 29 ++++++++++-
include/linux/poison.h | 4 ++
kernel/kthread.c | 148 +++++++++++++++++++++++++++++++++++++++++++++++---
lib/Kconfig.debug | 10 ++++
4 files changed, 185 insertions(+), 6 deletions(-)
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index 65b81e0..706302b 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -108,6 +108,16 @@ struct kthread_delayed_work {
struct timer_list timer;
};
+#ifdef CONFIG_DEBUG_OBJECTS_KTHREAD_WORK
+extern void __init_kwork(struct kthread_work *kwork, int onstack);
+extern void destroy_kwork_on_stack(struct kthread_work *kwork);
+extern void destroy_delayed_kwork_on_stack(struct kthread_delayed_work *kdwork);
+#else
+static inline void __init_kwork(struct kthread_work *kwork, int onstack) { }
+static inline void destroy_kwork_on_stack(struct kthread_work *kwork) { }
+static inline void destroy_delayed_kwork_on_stack(struct kthread_delayed_work *kdwork) { }
+#endif
+
#define KTHREAD_WORKER_INIT(worker) { \
.lock = __RAW_SPIN_LOCK_UNLOCKED((worker).lock), \
.work_list = LIST_HEAD_INIT((worker).work_list), \
@@ -115,7 +126,7 @@ struct kthread_delayed_work {
}
#define KTHREAD_WORK_INIT(work, fn) { \
- .node = LIST_HEAD_INIT((work).node), \
+ .node = { .next = KWORK_ENTRY_STATIC }, \
.func = (fn), \
}
@@ -159,6 +170,15 @@ extern void __kthread_init_worker(struct kthread_worker *worker,
#define kthread_init_work(work, fn) \
do { \
+ __init_kwork(work, 0); \
+ memset((work), 0, sizeof(struct kthread_work)); \
+ INIT_LIST_HEAD(&(work)->node); \
+ (work)->func = (fn); \
+ } while (0)
+
+#define kthread_init_work_onstack(work, fn) \
+ do { \
+ __init_kwork(work, 1); \
memset((work), 0, sizeof(struct kthread_work)); \
INIT_LIST_HEAD(&(work)->node); \
(work)->func = (fn); \
@@ -172,6 +192,14 @@ extern void __kthread_init_worker(struct kthread_worker *worker,
TIMER_IRQSAFE); \
} while (0)
+#define kthread_init_delayed_work_onstack(dwork, fn) \
+ do { \
+ kthread_init_work_onstack(&(dwork)->work, (fn)); \
+ __init_timer_on_stack(&(dwork)->timer, \
+ kthread_delayed_work_timer_fn, \
+ TIMER_IRQSAFE); \
+ } while (0)
+
int kthread_worker_fn(void *worker_ptr);
__printf(2, 3)
diff --git a/include/linux/poison.h b/include/linux/poison.h
index df34330..d9095ca 100644
--- a/include/linux/poison.h
+++ b/include/linux/poison.h
@@ -86,4 +86,8 @@
/********** security/ **********/
#define KEY_DESTROY 0xbd
+/********** kernel/kthread **********/
+/* Indicate a static kthread_work initializer for the object debugging code. */
+#define KWORK_ENTRY_STATIC ((void *) 0x600 + POISON_POINTER_DELTA)
+
#endif
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 132f84a..ff73218 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -67,6 +67,131 @@ enum KTHREAD_BITS {
KTHREAD_SHOULD_PARK,
};
+#ifdef CONFIG_DEBUG_OBJECTS_KTHREAD_WORK
+static struct debug_obj_descr kwork_debug_descr;
+
+static void *kwork_debug_hint(void *addr)
+{
+ return ((struct kthread_work *) addr)->func;
+}
+
+static bool kwork_is_static_object(void *addr)
+{
+ struct kthread_work *kwork = addr;
+
+ return (kwork->node.prev == NULL &&
+ kwork->node.next == KWORK_ENTRY_STATIC);
+}
+
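+/*
+ * fixup_init is called when:
+ * - an active object is initialized
+ */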
+static bool kwork_fixup_init(void *addr, enum debug_obj_state state)
+{
+ struct kthread_work *kwork = addr;
+
+ switch (state) {
+ case ODEBUG_STATE_ACTIVE:
+ kthread_cancel_work_sync(kwork);
+ debug_object_init(kwork, &kwork_debug_descr);
+ return true;
+ default:
+ return false;
+ }
+}
+
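+/*
+ * fixup_free is called when:
+ * - an active object is freed
+ */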
+static bool kwork_fixup_free(void *addr, enum debug_obj_state state)
+{
+ struct kthread_work *kwork = addr;
+
+ switch (state) {
+ case ODEBUG_STATE_ACTIVE:
+ kthread_cancel_work_sync(kwork);
+ debug_object_free(kwork, &kwork_debug_descr);
+ return true;
+ default:
+ return false;
+ }
+}
+
+static void stub_kthread_work(struct kthread_work *unused)
+{
+ WARN_ON(1);
+}
+
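+/*
+ * fixup_assert_init is called when:
+ * - an untracked/uninit-ed object is found
+ */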
+static bool kwork_fixup_assert_init(void *addr, enum debug_obj_state state)
+{
+ struct kthread_work *kwork = addr;
+
+ switch (state) {
+ case ODEBUG_STATE_NOTAVAILABLE:
+ kthread_init_work(kwork, stub_kthread_work);
+ return true;
+ default:
+ return false;
+ }
+}
+
+static struct debug_obj_descr kwork_debug_descr = {
+ .name = "kthread_work",
+ .debug_hint = kwork_debug_hint,
+ .is_static_object = kwork_is_static_object,
+ .fixup_init = kwork_fixup_init,
+ .fixup_free = kwork_fixup_free,
+ .fixup_assert_init = kwork_fixup_assert_init,
+};
+
+static inline void debug_kwork_activate(struct kthread_work *kwork)
+{
+ debug_object_activate(kwork, &kwork_debug_descr);
+}
+
+static inline void debug_kwork_deactivate(struct kthread_work *kwork)
+{
+ debug_object_deactivate(kwork, &kwork_debug_descr);
+}
+
+static inline void debug_kwork_assert_init(struct kthread_work *kwork)
+{
+ debug_object_assert_init(kwork, &kwork_debug_descr);
+}
+
+void __init_kwork(struct kthread_work *kwork, int onstack)
+{
+ if (onstack)
+ debug_object_init_on_stack(kwork, &kwork_debug_descr);
+ else
+ debug_object_init(kwork, &kwork_debug_descr);
+}
+EXPORT_SYMBOL_GPL(__init_kwork);
+
+void destroy_kwork_on_stack(struct kthread_work *kwork)
+{
+ debug_object_free(kwork, &kwork_debug_descr);
+}
+EXPORT_SYMBOL_GPL(destroy_kwork_on_stack);
+
+void destroy_delayed_kwork_on_stack(struct kthread_delayed_work *kdwork)
+{
+ destroy_timer_on_stack(&kdwork->timer);
+ debug_object_free(&kdwork->work, &kwork_debug_descr);
+}
+EXPORT_SYMBOL_GPL(destroy_delayed_kwork_on_stack);
+#else
+static inline void debug_kwork_activate(struct kthread_work *kwork) { }
+static inline void debug_kwork_deactivate(struct kthread_work *kwork) { }
+static inline void debug_kwork_assert_init(struct kthread_work *kwork) { }
+#endif
+
static inline void set_kthread_struct(void *kthread)
{
/*
@@ -697,6 +809,7 @@ int kthread_worker_fn(void *worker_ptr)
if (!list_empty(&worker->work_list)) {
work = list_first_entry(&worker->work_list,
struct kthread_work, node);
+ debug_kwork_deactivate(work);
list_del_init(&work->node);
}
worker->current_work = work;
@@ -833,6 +946,7 @@ static void kthread_insert_work(struct kthread_worker *worker,
{
kthread_insert_work_sanity_check(worker, work);
+ debug_kwork_activate(work);
list_add_tail(&work->node, pos);
work->worker = worker;
if (!worker->current_work && likely(worker->task))
@@ -857,6 +971,7 @@ bool kthread_queue_work(struct kthread_worker *worker,
bool ret = false;
unsigned long flags;
+ debug_kwork_assert_init(work);
raw_spin_lock_irqsave(&worker->lock, flags);
if (!queuing_blocked(worker, work)) {
kthread_insert_work(worker, work, &worker->work_list);
@@ -895,6 +1010,7 @@ void kthread_delayed_work_timer_fn(struct timer_list *t)
/* Move the work from worker->delayed_work_list. */
WARN_ON_ONCE(list_empty(&work->node));
+ debug_kwork_deactivate(work);
list_del_init(&work->node);
kthread_insert_work(worker, work, &worker->work_list);
@@ -925,6 +1041,7 @@ static void __kthread_queue_delayed_work(struct kthread_worker *worker,
/* Be paranoid and try to detect possible races already now. */
kthread_insert_work_sanity_check(worker, work);
+ debug_kwork_activate(work);
list_add(&work->node, &worker->delayed_work_list);
work->worker = worker;
timer->expires = jiffies + delay;
@@ -954,6 +1071,7 @@ bool kthread_queue_delayed_work(struct kthread_worker *worker,
unsigned long flags;
bool ret = false;
+ debug_kwork_assert_init(work);
raw_spin_lock_irqsave(&worker->lock, flags);
if (!queuing_blocked(worker, work)) {
@@ -987,16 +1105,17 @@ static void kthread_flush_work_fn(struct kthread_work *work)
void kthread_flush_work(struct kthread_work *work)
{
struct kthread_flush_work fwork = {
- KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
- COMPLETION_INITIALIZER_ONSTACK(fwork.done),
+ .done = COMPLETION_INITIALIZER_ONSTACK(fwork.done),
};
struct kthread_worker *worker;
bool noop = false;
+ debug_kwork_assert_init(work);
worker = work->worker;
if (!worker)
return;
+ kthread_init_work_onstack(&fwork.work, kthread_flush_work_fn);
raw_spin_lock_irq(&worker->lock);
/* Work must not be used with >1 worker, see kthread_queue_work(). */
WARN_ON_ONCE(work->worker != worker);
@@ -1013,6 +1132,7 @@ void kthread_flush_work(struct kthread_work *work)
if (!noop)
wait_for_completion(&fwork.done);
+ destroy_kwork_on_stack(&fwork.work);
}
EXPORT_SYMBOL_GPL(kthread_flush_work);
@@ -1053,6 +1173,7 @@ static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork,
* be from worker->work_list or from worker->delayed_work_list.
*/
if (!list_empty(&work->node)) {
+ debug_kwork_deactivate(work);
list_del_init(&work->node);
return true;
}
@@ -1091,6 +1212,7 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker,
unsigned long flags;
int ret = false;
+ debug_kwork_assert_init(work);
raw_spin_lock_irqsave(&worker->lock, flags);
/* Do not bother with canceling when never queued. */
@@ -1115,10 +1237,12 @@ EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
{
- struct kthread_worker *worker = work->worker;
+ struct kthread_worker *worker;
unsigned long flags;
int ret = false;
+ debug_kwork_assert_init(work);
+ worker = work->worker;
if (!worker)
goto out;
@@ -1194,12 +1318,13 @@ EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);
void kthread_flush_worker(struct kthread_worker *worker)
{
struct kthread_flush_work fwork = {
- KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
- COMPLETION_INITIALIZER_ONSTACK(fwork.done),
+ .done = COMPLETION_INITIALIZER_ONSTACK(fwork.done),
};
+ kthread_init_work_onstack(&fwork.work, kthread_flush_work_fn);
kthread_queue_work(worker, &fwork.work);
wait_for_completion(&fwork.done);
+ destroy_kwork_on_stack(&fwork.work);
}
EXPORT_SYMBOL_GPL(kthread_flush_worker);
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 9ad9210..71d6696 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -540,6 +540,16 @@ config DEBUG_OBJECTS_WORK
work queue routines to track the life time of work objects and
validate the work operations.
+config DEBUG_OBJECTS_KTHREAD_WORK
+ bool "Debug kthread work objects"
+ depends on DEBUG_OBJECTS
+ help
+ If you say Y here, additional code will be inserted into the
+ kthread routines to track the life time of kthread_work objects
+ and validate the kthread_work operations.
+
+ If unsure, say N.
+
config DEBUG_OBJECTS_RCU_HEAD
bool "Debug RCU callbacks objects"
depends on DEBUG_OBJECTS
--
2.7.4