Message-Id: <1428375350-9213-16-git-send-email-Waiman.Long@hp.com>
Date: Mon, 6 Apr 2015 22:55:50 -0400
From: Waiman Long <Waiman.Long@...com>
To: Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>,
"H. Peter Anvin" <hpa@...or.com>,
Peter Zijlstra <peterz@...radead.org>
Cc: linux-arch@...r.kernel.org, x86@...nel.org,
linux-kernel@...r.kernel.org,
virtualization@...ts.linux-foundation.org,
xen-devel@...ts.xenproject.org, kvm@...r.kernel.org,
Paolo Bonzini <paolo.bonzini@...il.com>,
Konrad Rzeszutek Wilk <konrad.wilk@...cle.com>,
Boris Ostrovsky <boris.ostrovsky@...cle.com>,
"Paul E. McKenney" <paulmck@...ux.vnet.ibm.com>,
Rik van Riel <riel@...hat.com>,
Linus Torvalds <torvalds@...ux-foundation.org>,
Raghavendra K T <raghavendra.kt@...ux.vnet.ibm.com>,
David Vrabel <david.vrabel@...rix.com>,
Oleg Nesterov <oleg@...hat.com>,
Daniel J Blueman <daniel@...ascale.com>,
Scott J Norton <scott.norton@...com>,
Douglas Hatch <doug.hatch@...com>,
Waiman Long <Waiman.Long@...com>
Subject: [PATCH v15 15/15] pvqspinlock: Add debug code to check for PV lock hash sanity
The current PV lock hash table code panics the system if pv_hash_find()
cannot find the desired hash bucket. However, there is no check for more
than one entry hashed for the same lock, which should never happen.

This patch adds a pv_hash_check_duplicate() function to perform that
check. It is enabled only when CONFIG_DEBUG_SPINLOCK is defined because
of the performance overhead it introduces.
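
For reference, the hash table walked by the new check has roughly the
following shape. This is only a sketch inferred from the fields used in
the hunk below (hb->lock, hb->node, pn->cpu, pv_lock_hash,
pv_lock_hash_bits); the actual definitions live in earlier patches of
this series and may differ in detail:

/* Rough shape of the PV lock hash table, inferred from usage only */
struct pv_node {
	struct mcs_spinlock	mcs;	/* MCS queue node (assumed layout) */
	int			cpu;	/* CPU number of the waiting vCPU */
};

struct pv_hash_bucket {
	struct qspinlock	*lock;	/* lock hashed into this bucket */
	struct pv_node		*node;	/* node of the vCPU waiting on it */
};

/* (1 << pv_lock_hash_bits) buckets, sized at boot time */
static struct pv_hash_bucket *pv_lock_hash;
static unsigned int pv_lock_hash_bits;
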
Signed-off-by: Waiman Long <Waiman.Long@...com>
---
kernel/locking/qspinlock_paravirt.h | 58 +++++++++++++++++++++++++++++++++++
1 files changed, 58 insertions(+), 0 deletions(-)
diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h
index a9fe10d..4d39c8b 100644
--- a/kernel/locking/qspinlock_paravirt.h
+++ b/kernel/locking/qspinlock_paravirt.h
@@ -107,6 +107,63 @@ static inline u32 hash_align(u32 hash)
}
/*
+ * Hash table debugging code
+ */
+#ifdef CONFIG_DEBUG_SPINLOCK
+
+#define _NODE_IDX(pn) ((((unsigned long)pn) & (SMP_CACHE_BYTES - 1)) /\
+ sizeof(struct mcs_spinlock))
+/*
+ * Check if there are additional hash buckets with the same lock, which
+ * should not happen.
+ */
+static inline void pv_hash_check_duplicate(struct qspinlock *lock)
+{
+ struct pv_hash_bucket *hb, *end, *hb1 = NULL;
+ int count = 0, used = 0;
+
+ end = &pv_lock_hash[1 << pv_lock_hash_bits];
+ for (hb = pv_lock_hash; hb < end; hb++) {
+ struct qspinlock *l = READ_ONCE(hb->lock);
+ struct pv_node *pn;
+
+ if (l)
+ used++;
+ if (l != lock)
+ continue;
+ if (++count == 1) {
+ hb1 = hb;
+ continue;
+ }
+ WARN_ON(count == 2);
+ if (hb1) {
+ pn = READ_ONCE(hb1->node);
+ printk(KERN_ERR "PV lock hash error: duplicated entry "
+ "#%d - hash %ld, node %ld, cpu %d\n", 1,
+ hb1 - pv_lock_hash, _NODE_IDX(pn),
+ pn ? pn->cpu : -1);
+ hb1 = NULL;
+ }
+ pn = READ_ONCE(hb->node);
+ printk(KERN_ERR "PV lock hash error: duplicated entry #%d - "
+ "hash %ld, node %ld, cpu %d\n", count, hb - pv_lock_hash,
+ _NODE_IDX(pn), pn ? pn->cpu : -1);
+ }
+ /*
+ * Warn if more than half of the buckets are used
+ */
+ if (used > (1 << (pv_lock_hash_bits - 1)))
+ printk(KERN_WARNING "PV lock hash warning: "
+ "%d hash entries used!\n", used);
+}
+
+#else /* CONFIG_DEBUG_SPINLOCK */
+
+static inline void pv_hash_check_duplicate(struct qspinlock *lock) {}
+
+#endif /* CONFIG_DEBUG_SPINLOCK */
+
+/*
* Set up an entry in the lock hash table
* This is not inlined to reduce size of generated code as it is included
* twice and is used only in the slowest path of handling CPU halting.
@@ -141,6 +198,7 @@ pv_hash(struct qspinlock *lock, struct pv_node *node)
}
done:
+ pv_hash_check_duplicate(lock);
return &hb->lock;
}
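
As a side note, the _NODE_IDX() macro added above only recovers a node's
slot index within its cache line. The following standalone user-space
illustration shows the arithmetic; it assumes a 64-byte cache line and a
simplified 16-byte mcs_spinlock stand-in (the real sizes depend on the
kernel configuration):

#include <stdio.h>

#define SMP_CACHE_BYTES	64		/* assumed cache-line size */

struct mcs_spinlock {			/* simplified stand-in, 16 bytes on LP64 */
	void *next;
	int locked;
	int count;
};

#define _NODE_IDX(pn)	((((unsigned long)pn) & (SMP_CACHE_BYTES - 1)) / \
			 sizeof(struct mcs_spinlock))

int main(void)
{
	/* Four MCS nodes packed into one cache-line-aligned array */
	static struct mcs_spinlock nodes[4] __attribute__((aligned(64)));
	int i;

	for (i = 0; i < 4; i++)
		printf("node %d -> _NODE_IDX = %lu\n", i,
		       (unsigned long)_NODE_IDX(&nodes[i]));
	return 0;	/* prints indices 0..3 */
}
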
--
1.7.1