[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20170713081442.GA439@worktop>
Date: Thu, 13 Jul 2017 10:14:42 +0200
From: Peter Zijlstra <peterz@...radead.org>
To: Byungchul Park <byungchul.park@....com>
Cc: mingo@...nel.org, tglx@...utronix.de, walken@...gle.com,
boqun.feng@...il.com, kirill@...temov.name,
linux-kernel@...r.kernel.org, linux-mm@...ck.org,
akpm@...ux-foundation.org, willy@...radead.org, npiggin@...il.com,
kernel-team@....com
Subject: Re: [PATCH v7 06/16] lockdep: Detect and handle hist_lock ring
buffer overwrite
On Thu, Jul 13, 2017 at 11:07:45AM +0900, Byungchul Park wrote:
> Does my approach have problems, rewinding to 'original idx' on exit and
> deciding whether overwrite or not? I think, this way, no need to do the
> drastic work. Or.. does my one get more overhead in usual case?
So I think that invalidating just the one entry doesn't work; the moment
you fill that up the iteration in commit_xhlocks() will again use the
next one etc.. even though you wanted it not to.
So we need to wipe the _entire_ history.
So I _think_ the below should work, but it's not been near a compiler.
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -822,6 +822,7 @@ struct task_struct {
unsigned int xhlock_idx_soft; /* For restoring at softirq exit */
unsigned int xhlock_idx_hard; /* For restoring at hardirq exit */
unsigned int xhlock_idx_hist; /* For restoring at history boundaries */
+ unsigned int xhlock_idx_max;
#endif
#ifdef CONFIG_UBSAN
unsigned int in_ubsan;
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -4746,6 +4746,14 @@ EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious
static atomic_t cross_gen_id; /* Can be wrapped */
/*
+ * make xhlock_valid() false.
+ */
+static inline void invalidate_xhlock(struct hist_lock *xhlock)
+{
+ xhlock->hlock.instance = NULL;
+}
+
+/*
* Lock history stacks; we have 3 nested lock history stacks:
*
* Hard IRQ
@@ -4764,28 +4772,58 @@ static atomic_t cross_gen_id; /* Can be
* MAX_XHLOCKS_NR ? Possibly re-introduce hist_gen_id ?
*/
-void crossrelease_hardirq_start(void)
+static inline void __crossrelease_start(unsigned int *stamp)
{
if (current->xhlocks)
- current->xhlock_idx_hard = current->xhlock_idx;
+ *stamp = current->xhlock_idx;
+}
+
+static void __crossrelease_end(unsigned int *stamp)
+{
+ int i;
+
+ if (!current->xhlocks)
+ return;
+
+ current->xhlock_idx = *stamp;
+
+ /*
+ * If we rewind past the tail; all of history is lost.
+ */
+ if ((current->xhlock_idx_max - *stamp) < MAX_XHLOCKS_NR)
+ return;
+
+ /*
+ * Invalidate the entire history..
+ */
+ for (i = 0; i < MAX_XHLOCKS_NR; i++)
+ invalidate_xhlock(&xhlock(i));
+
+ current->xhlock_idx = 0;
+ current->xhlock_idx_hard = 0;
+ current->xhlock_idx_soft = 0;
+ current->xhlock_idx_hist = 0;
+ current->xhlock_idx_max = 0;
+}
+
+void crossrelease_hardirq_start(void)
+{
+ __crossrelease_start(&current->xhlock_idx_hard);
}
void crossrelease_hardirq_end(void)
{
- if (current->xhlocks)
- current->xhlock_idx = current->xhlock_idx_hard;
+ __crossrelease_end(&current->xhlock_idx_hard);
}
void crossrelease_softirq_start(void)
{
- if (current->xhlocks)
- current->xhlock_idx_soft = current->xhlock_idx;
+ __crossrelease_start(&current->xhlock_idx_soft);
}
void crossrelease_softirq_end(void)
{
- if (current->xhlocks)
- current->xhlock_idx = current->xhlock_idx_soft;
+ __crossrelease_end(&current->xhlock_idx_soft);
}
/*
@@ -4806,14 +4844,12 @@ void crossrelease_softirq_end(void)
*/
void crossrelease_hist_start(void)
{
- if (current->xhlocks)
- current->xhlock_idx_hist = current->xhlock_idx;
+ __crossrelease_start(&current->xhlock_idx_hist);
}
void crossrelease_hist_end(void)
{
- if (current->xhlocks)
- current->xhlock_idx = current->xhlock_idx_hist;
+ __crossrelease_end(&current->xhlock_idx_hist);
}
static int cross_lock(struct lockdep_map *lock)
@@ -4880,6 +4916,9 @@ static void add_xhlock(struct held_lock
unsigned int idx = ++current->xhlock_idx;
struct hist_lock *xhlock = &xhlock(idx);
+ if ((int)(current->xhlock_idx_max - idx) < 0)
+ current->xhlock_idx_max = idx;
+
#ifdef CONFIG_DEBUG_LOCKDEP
/*
* This can be done locklessly because they are all task-local
Powered by blists - more mailing lists