Message-Id: <20190318085733.3143-9-duyuyang@gmail.com>
Date: Mon, 18 Mar 2019 16:57:22 +0800
From: Yuyang Du <duyuyang@...il.com>
To: peterz@...radead.org, will.deacon@....com, mingo@...nel.org
Cc: bvanassche@....org, ming.lei@...hat.com,
linux-kernel@...r.kernel.org, Yuyang Du <duyuyang@...il.com>
Subject: [PATCH v2 08/19] locking/lockdep: Use lockdep_init_task for task initialization consistently
There is a lockdep_init_task(), but it does nothing; lockdep instead initializes a task's lockdep fields with open-coded assignments, and does so inconsistently across call sites. Fix this by moving the field initialization into lockdep_init_task() and calling it consistently. The statically initialized init_task is the one exception: a static initializer cannot call the helper, so it gets the equivalent explicit field values instead.
Signed-off-by: Yuyang Du <duyuyang@...il.com>
---
include/linux/lockdep.h | 7 ++++++-
init/init_task.c | 2 ++
kernel/fork.c | 3 ---
kernel/locking/lockdep.c | 11 ++++++++---
4 files changed, 16 insertions(+), 7 deletions(-)
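
Not part of the patch itself: below is a minimal, self-contained user-space sketch of the consolidation the diff performs, with a stand-in struct task reduced to the three lockdep bookkeeping fields touched here. One helper owns the reset of those fields, and both the fork path and the reset path call it, which is the pattern this patch establishes for task_struct.

/* Sketch only; struct task and the two caller functions are stand-ins,
 * not kernel code. */
#include <stdio.h>

struct task {                          /* stand-in for task_struct */
	int lockdep_depth;
	unsigned long curr_chain_key;
	unsigned int lockdep_recursion;
};

static inline void lockdep_init_task(struct task *task)
{
	task->lockdep_depth = 0;       /* no locks held yet */
	task->curr_chain_key = 0;
	task->lockdep_recursion = 0;
}

static void copy_process(struct task *child)   /* fork path */
{
	lockdep_init_task(child);      /* was three open-coded assignments */
}

static void lockdep_reset(struct task *cur)    /* debug reset path */
{
	lockdep_init_task(cur);        /* same helper, same semantics */
}

int main(void)
{
	struct task t = { .lockdep_depth = 3, .curr_chain_key = 42,
			  .lockdep_recursion = 1 };

	copy_process(&t);
	printf("depth=%d key=%lu recursion=%u\n",
	       t.lockdep_depth, t.curr_chain_key, t.lockdep_recursion);
	lockdep_reset(&t);
	return 0;
}
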
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 1258a62..49b928f 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -280,6 +280,8 @@ struct held_lock {
extern asmlinkage void lockdep_sys_exit(void);
extern void lockdep_set_selftest_task(struct task_struct *task);
+extern inline void lockdep_init_task(struct task_struct *task);
+
extern void lockdep_off(void);
extern void lockdep_on(void);
@@ -404,6 +406,10 @@ static inline void lock_set_subclass(struct lockdep_map *lock,
#else /* !CONFIG_LOCKDEP */
+static inline void lockdep_init_task(struct task_struct *task)
+{
+}
+
static inline void lockdep_off(void)
{
}
@@ -496,7 +502,6 @@ enum xhlock_context_t {
{ .name = (_name), .key = (void *)(_key), }
static inline void lockdep_invariant_state(bool force) {}
-static inline void lockdep_init_task(struct task_struct *task) {}
static inline void lockdep_free_task(struct task_struct *task) {}
#ifdef CONFIG_LOCK_STAT
diff --git a/init/init_task.c b/init/init_task.c
index 46dbf54..9460878 100644
--- a/init/init_task.c
+++ b/init/init_task.c
@@ -165,6 +165,8 @@ struct task_struct init_task
.softirqs_enabled = 1,
#endif
#ifdef CONFIG_LOCKDEP
+ .lockdep_depth = 0, /* no locks held yet */
+ .curr_chain_key = 0,
.lockdep_recursion = 0,
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
diff --git a/kernel/fork.c b/kernel/fork.c
index 77059b2..c0d2000 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1869,9 +1869,6 @@ static __latent_entropy struct task_struct *copy_process(
p->pagefault_disabled = 0;
#ifdef CONFIG_LOCKDEP
- p->lockdep_depth = 0; /* no locks held yet */
- p->curr_chain_key = 0;
- p->lockdep_recursion = 0;
lockdep_init_task(p);
#endif
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 3df0a5e..737fe0a 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -358,6 +358,13 @@ static inline u64 iterate_chain_key(u64 key, u32 idx)
return k0 | (u64)k1 << 32;
}
+inline void lockdep_init_task(struct task_struct *task)
+{
+ task->lockdep_depth = 0; /* no locks held yet */
+ task->curr_chain_key = 0;
+ task->lockdep_recursion = 0;
+}
+
void lockdep_off(void)
{
current->lockdep_recursion++;
@@ -4492,9 +4499,7 @@ void lockdep_reset(void)
int i;
raw_local_irq_save(flags);
- current->curr_chain_key = 0;
- current->lockdep_depth = 0;
- current->lockdep_recursion = 0;
+ lockdep_init_task(current);
memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock));
nr_hardirq_chains = 0;
nr_softirq_chains = 0;
--
1.8.3.1