Message-Id: <20190321075725.14054-8-duyuyang@gmail.com>
Date: Thu, 21 Mar 2019 15:57:14 +0800
From: Yuyang Du <duyuyang@...il.com>
To: peterz@...radead.org, will.deacon@....com, mingo@...nel.org
Cc: bvanassche@....org, ming.lei@...hat.com,
linux-kernel@...r.kernel.org, joe@...ches.com,
Yuyang Du <duyuyang@...il.com>
Subject: [PATCH v3 07/18] locking/lockdep: Use lockdep_init_task for task initialization consistently
Although there is a lockdep_init_task() that does nothing, lockdep
initializes a task's lockdep fields by assigning them directly, and does
so inconsistently across call sites. Fix this by doing the initialization
in lockdep_init_task() and using it consistently.
Signed-off-by: Yuyang Du <duyuyang@...il.com>
---
include/linux/lockdep.h | 7 ++++++-
init/init_task.c | 2 ++
kernel/fork.c | 3 ---
kernel/locking/lockdep.c | 11 ++++++++---
4 files changed, 16 insertions(+), 7 deletions(-)
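
Note (not for the commit message): below is a minimal standalone sketch of
the consolidation this patch performs. The mock task_struct, the seed
values and main() are illustrative only and are not kernel code; it merely
shows the three per-task lockdep fields being reset through one helper,
the way copy_process() and lockdep_reset() now do after this change.

	#include <assert.h>
	#include <stdio.h>

	/* Mock of only the three lockdep fields this patch touches. */
	struct task_struct {
		int lockdep_depth;
		unsigned long long curr_chain_key;
		unsigned int lockdep_recursion;
	};

	/* Single initializer, mirroring the new lockdep_init_task(). */
	static void lockdep_init_task(struct task_struct *task)
	{
		task->lockdep_depth = 0;	/* no locks held yet */
		task->curr_chain_key = 0;
		task->lockdep_recursion = 0;
	}

	int main(void)
	{
		/* Pretend this task inherited stale lockdep state. */
		struct task_struct child = {
			.lockdep_depth = 3,
			.curr_chain_key = 0xdeadbeefULL,
			.lockdep_recursion = 1,
		};

		/* Both the fork path and lockdep_reset() funnel through
		 * one helper instead of open-coding the assignments. */
		lockdep_init_task(&child);

		assert(child.lockdep_depth == 0);
		assert(child.curr_chain_key == 0);
		assert(child.lockdep_recursion == 0);
		printf("lockdep task state reset via one helper\n");
		return 0;
	}

Funnelling every caller through one helper keeps the set of fields that
get reset in sync if lockdep's per-task state grows later.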
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 37706ad..267087e 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -282,6 +282,8 @@ struct held_lock {
extern asmlinkage void lockdep_sys_exit(void);
extern void lockdep_set_selftest_task(struct task_struct *task);
+extern inline void lockdep_init_task(struct task_struct *task);
+
extern void lockdep_off(void);
extern void lockdep_on(void);
@@ -406,6 +408,10 @@ static inline void lock_set_subclass(struct lockdep_map *lock,
#else /* !CONFIG_LOCKDEP */
+static inline void lockdep_init_task(struct task_struct *task)
+{
+}
+
static inline void lockdep_off(void)
{
}
@@ -498,7 +504,6 @@ enum xhlock_context_t {
{ .name = (_name), .key = (void *)(_key), }
static inline void lockdep_invariant_state(bool force) {}
-static inline void lockdep_init_task(struct task_struct *task) {}
static inline void lockdep_free_task(struct task_struct *task) {}
#ifdef CONFIG_LOCK_STAT
diff --git a/init/init_task.c b/init/init_task.c
index 46dbf54..9460878 100644
--- a/init/init_task.c
+++ b/init/init_task.c
@@ -165,6 +165,8 @@ struct task_struct init_task
.softirqs_enabled = 1,
#endif
#ifdef CONFIG_LOCKDEP
+ .lockdep_depth = 0, /* no locks held yet */
+ .curr_chain_key = 0,
.lockdep_recursion = 0,
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
diff --git a/kernel/fork.c b/kernel/fork.c
index 77059b2..c0d2000 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1869,9 +1869,6 @@ static __latent_entropy struct task_struct *copy_process(
p->pagefault_disabled = 0;
#ifdef CONFIG_LOCKDEP
- p->lockdep_depth = 0; /* no locks held yet */
- p->curr_chain_key = 0;
- p->lockdep_recursion = 0;
lockdep_init_task(p);
#endif
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 05c31d6..ef59651 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -358,6 +358,13 @@ static inline u64 iterate_chain_key(u64 key, u32 idx)
return k0 | (u64)k1 << 32;
}
+inline void lockdep_init_task(struct task_struct *task)
+{
+ task->lockdep_depth = 0; /* no locks held yet */
+ task->curr_chain_key = 0;
+ task->lockdep_recursion = 0;
+}
+
void lockdep_off(void)
{
current->lockdep_recursion++;
@@ -4496,9 +4503,7 @@ void lockdep_reset(void)
int i;
raw_local_irq_save(flags);
- current->curr_chain_key = 0;
- current->lockdep_depth = 0;
- current->lockdep_recursion = 0;
+ lockdep_init_task(current);
memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock));
nr_hardirq_chains = 0;
nr_softirq_chains = 0;
--
1.8.3.1