Message-Id: <1483466430-8028-8-git-send-email-longman@redhat.com>
Date:   Tue,  3 Jan 2017 13:00:30 -0500
From:   Waiman Long <longman@...hat.com>
To:     Peter Zijlstra <peterz@...radead.org>,
        Ingo Molnar <mingo@...hat.com>,
        Thomas Gleixner <tglx@...utronix.de>,
        "H. Peter Anvin" <hpa@...or.com>
Cc:     linux-kernel@...r.kernel.org, Waiman Long <longman@...hat.com>
Subject: [RFC PATCH 7/7] locking/rtqspinlock: Enable collection of event counts

This patch enables the collection of event counts in the slowpath of the
realtime queued spinlocks. The following events are tracked when
CONFIG_QUEUED_LOCK_STAT is defined (see the read-back sketch after the
list):

 - # of interrupt context RT spinnings
 - # of task context RT spinnings
 - # of nested spinlock RT spinnings
 - # of unqueue operations due to RT priority
 - # of unqueue operations due to need_resched()
 - # of voluntary CPU yieldings
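
Not part of this patch, but for illustration: on a kernel built with
CONFIG_QUEUED_LOCK_STAT=y, the counts can be read back from the
<debugfs>/qlockstat directory (the files are mode 0400, so read them as
root). A minimal user-space sketch, assuming debugfs is mounted at
/sys/kernel/debug:

  #include <stdio.h>

  int main(void)
  {
	static const char * const names[] = {
		"rt_resched", "rt_spin_irq", "rt_spin_nest",
		"rt_spin_task", "rt_unqueue_prio", "rt_unqueue_sched",
	};
	char path[96], buf[64];
	unsigned int i;

	for (i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
		FILE *f;

		/* Each debugfs file reports one counter value. */
		snprintf(path, sizeof(path),
			 "/sys/kernel/debug/qlockstat/%s", names[i]);
		f = fopen(path, "r");
		if (!f)
			continue;	/* counter not present on this kernel */
		if (fgets(buf, sizeof(buf), f))
			printf("%-16s %s", names[i], buf);
		fclose(f);
	}
	return 0;
  }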

Signed-off-by: Waiman Long <longman@...hat.com>
---
 arch/x86/Kconfig                | 16 ++++----
 kernel/locking/qspinlock_rt.h   | 15 +++++++-
 kernel/locking/qspinlock_stat.h | 81 ++++++++++++++++++++++++++++++++++++-----
 3 files changed, 92 insertions(+), 20 deletions(-)
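
Note (illustration only, not part of the diff): each event is kept as a
per-cpu counter and summed up on the debugfs read side. A rough sketch of
that summation over the qstats[] per-cpu array defined in
qspinlock_stat.h; qstat_sum() is a made-up name, and the exact loop in
the existing read path is not shown in the hunks below:

  /*
   * Sketch: fold one per-cpu event counter into a single value, the way
   * the debugfs read path does (hypothetical helper, for clarity only).
   */
  static u64 qstat_sum(enum qlock_stats id)
  {
	u64 sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += per_cpu(qstats[id], cpu);
	return sum;
  }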

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 7a97b31..e0dc3c8 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -694,6 +694,14 @@ config SCHED_OMIT_FRAME_POINTER
 
 	  If in doubt, say "Y".
 
+config QUEUED_LOCK_STAT
+	bool "Queued spinlock statistics"
+	depends on (PARAVIRT_SPINLOCKS || REALTIME_QUEUED_SPINLOCKS) && DEBUG_FS
+	---help---
+	  Enable the collection of statistical data on the slowpath
+	  behavior of paravirtualized or realtime queued spinlocks
+	  and report it on debugfs.
+
 menuconfig HYPERVISOR_GUEST
 	bool "Linux guest support"
 	---help---
@@ -734,14 +742,6 @@ config PARAVIRT_SPINLOCKS
 
 	  If you are unsure how to answer this question, answer Y.
 
-config QUEUED_LOCK_STAT
-	bool "Paravirt queued spinlock statistics"
-	depends on PARAVIRT_SPINLOCKS && DEBUG_FS
-	---help---
-	  Enable the collection of statistical data on the slowpath
-	  behavior of paravirtualized queued spinlocks and report
-	  them on debugfs.
-
 source "arch/x86/xen/Kconfig"
 
 config KVM_GUEST
diff --git a/kernel/locking/qspinlock_rt.h b/kernel/locking/qspinlock_rt.h
index 18ec1f8..f6f8498 100644
--- a/kernel/locking/qspinlock_rt.h
+++ b/kernel/locking/qspinlock_rt.h
@@ -55,6 +55,7 @@
  * inner lock, finish up its work, release the locks and reenable preemption.
  */
 #include <linux/sched.h>
+#include "qspinlock_stat.h"
 
 #ifndef MAX
 #define MAX(a, b)	(((a) >= (b)) ? (a) : (b))
@@ -157,6 +158,7 @@ static bool __rt_spin_trylock(struct qspinlock *lock,
 	if (!(prio = rt_task_priority(task, min_prio)))
 		return false;
 
+	qstat_inc_either(qstat_rt_spin_task, qstat_rt_spin_irq, task);
 
 	/*
 	 * Spin on the lock and try to set its priority into the pending byte.
@@ -237,6 +239,7 @@ static bool __rt_spin_trylock(struct qspinlock *lock,
 			if (pdprio == mypdprio)
 				cmpxchg_relaxed(&l->pending, pdprio, 0);
 		}
+		qstat_inc(qstat_rt_resched, true);
 		schedule_preempt_disabled();
 	}
 	return true;
@@ -349,6 +352,9 @@ static bool rt_wait_node_or_unqueue(struct qspinlock *lock,
 	return false;
 
 unqueue:
+	qstat_inc_either(qstat_rt_unqueue_sched, qstat_rt_unqueue_prio,
+			 need_resched());
+
 	/*
 	 * Step - A  -- stabilize @prev
 	 *
@@ -406,8 +412,10 @@ static bool rt_wait_node_or_unqueue(struct qspinlock *lock,
 	/*
 	 * Yield the CPU if needed by another task with the right condition.
 	 */
-	if (rt_should_resched())
+	if (rt_should_resched()) {
+		qstat_inc(qstat_rt_resched, true);
 		schedule_preempt_disabled();
+	}
 
 	return true;	/* Need to retry RT spinning */
 }
@@ -486,8 +494,10 @@ static u32 rt_spin_lock_or_retry(struct qspinlock *lock,
 	/*
 	 * Yield the CPU if needed by another task with the right condition.
 	 */
-	if (retry && rt_should_resched())
+	if (retry && rt_should_resched()) {
+		qstat_inc(qstat_rt_resched, true);
 		schedule_preempt_disabled();
+	}
 
 	return retry ? RT_RETRY : 1;
 }
@@ -514,6 +524,7 @@ void __lockfunc _rt_raw_spin_lock_nested(raw_spinlock_t *lock, int subclass,
 				  _RET_IP_);
 	}
 #endif
+	qstat_inc(qstat_rt_spin_nest, true);
 	__acquire(lock);
 	__rt_spin_trylock(&lock->raw_lock,
 			  outerlock ? &outerlock->raw_lock : NULL, 1);
diff --git a/kernel/locking/qspinlock_stat.h b/kernel/locking/qspinlock_stat.h
index e852be4..d212c71 100644
--- a/kernel/locking/qspinlock_stat.h
+++ b/kernel/locking/qspinlock_stat.h
@@ -14,9 +14,10 @@
 
 /*
  * When queued spinlock statistical counters are enabled, the following
- * debugfs files will be created for reporting the counter values:
+ * debugfs files will be created under the <debugfs>/qlockstat directory
+ * for reporting the counter values:
  *
- * <debugfs>/qlockstat/
+ * PV qspinlock specific files:
  *   pv_hash_hops	- average # of hops per hashing operation
  *   pv_kick_unlock	- # of vCPU kicks issued at unlock time
  *   pv_kick_wake	- # of vCPU kicks used for computing pv_latency_wake
@@ -30,6 +31,14 @@
  *   pv_wait_head	- # of vCPU wait's at the queue head
  *   pv_wait_node	- # of vCPU wait's at a non-head queue node
  *
+ * RT qspinlock specific files:
+ *   rt_resched		- # of voluntary CPU yieldings
+ *   rt_spin_irq	- # of interrupt context RT spinnings
+ *   rt_spin_nest	- # of nested spinlock RT spinnings
+ *   rt_spin_task	- # of task context RT spinnings
+ *   rt_unqueue_prio	- # of unqueue operations due to RT priority
+ *   rt_unqueue_sched	- # of unqueue operations due to need_resched()
+ *
  * Writing to the "reset_counters" file will reset all the above counter
  * values.
  *
@@ -40,12 +49,11 @@
  *
  * There may be slight difference between pv_kick_wake and pv_kick_unlock.
  */
+#ifndef __KERNEL_LOCKING_QSPINLOCK_STAT_H
+#define __KERNEL_LOCKING_QSPINLOCK_STAT_H
+
 enum qlock_stats {
-	qstat_pv_hash_hops,
-	qstat_pv_kick_unlock,
-	qstat_pv_kick_wake,
-	qstat_pv_latency_kick,
-	qstat_pv_latency_wake,
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
 	qstat_pv_lock_slowpath,
 	qstat_pv_lock_stealing,
 	qstat_pv_spurious_wakeup,
@@ -53,6 +61,26 @@ enum qlock_stats {
 	qstat_pv_wait_early,
 	qstat_pv_wait_head,
 	qstat_pv_wait_node,
+#endif
+	/*
+	 * These enums are needed to avoid compilation errors even though
+	 * they are not used when CONFIG_PARAVIRT_SPINLOCKS isn't defined.
+	 */
+	qstat_pv_hash_hops,
+	qstat_pv_latency_kick,
+	qstat_pv_latency_wake,
+	qstat_pv_kick_unlock,
+	qstat_pv_kick_wake,
+
+#ifdef CONFIG_REALTIME_QUEUED_SPINLOCKS
+	qstat_rt_resched,
+	qstat_rt_spin_irq,
+	qstat_rt_spin_nest,
+	qstat_rt_spin_task,
+	qstat_rt_unqueue_prio,
+	qstat_rt_unqueue_sched,
+#endif
+
 	qstat_num,	/* Total number of statistical counters */
 	qstat_reset_cnts = qstat_num,
 };
@@ -66,6 +94,7 @@ enum qlock_stats {
 #include <linux/fs.h>
 
 static const char * const qstat_names[qstat_num + 1] = {
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
 	[qstat_pv_hash_hops]	   = "pv_hash_hops",
 	[qstat_pv_kick_unlock]     = "pv_kick_unlock",
 	[qstat_pv_kick_wake]       = "pv_kick_wake",
@@ -78,6 +107,17 @@ enum qlock_stats {
 	[qstat_pv_wait_early]      = "pv_wait_early",
 	[qstat_pv_wait_head]       = "pv_wait_head",
 	[qstat_pv_wait_node]       = "pv_wait_node",
+#endif
+
+#ifdef CONFIG_REALTIME_QUEUED_SPINLOCKS
+	[qstat_rt_resched]         = "rt_resched",
+	[qstat_rt_spin_irq]        = "rt_spin_irq",
+	[qstat_rt_spin_nest]       = "rt_spin_nest",
+	[qstat_rt_spin_task]       = "rt_spin_task",
+	[qstat_rt_unqueue_prio]    = "rt_unqueue_prio",
+	[qstat_rt_unqueue_sched]   = "rt_unqueue_sched",
+#endif
+
 	[qstat_reset_cnts]         = "reset_counters",
 };
 
@@ -85,7 +125,9 @@ enum qlock_stats {
  * Per-cpu counters
  */
 static DEFINE_PER_CPU(unsigned long, qstats[qstat_num]);
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
 static DEFINE_PER_CPU(u64, pv_kick_time);
+#endif
 
 /*
  * Function to read and return the qlock statistical counter values
@@ -214,8 +256,8 @@ static int __init init_qspinlock_stat(void)
 	 * performance.
 	 */
 	for (i = 0; i < qstat_num; i++)
-		if (!debugfs_create_file(qstat_names[i], 0400, d_qstat,
-					 (void *)(long)i, &fops_qstat))
+		if (qstat_names[i] && !debugfs_create_file(qstat_names[i],
+				0400, d_qstat, (void *)(long)i, &fops_qstat))
 			goto fail_undo;
 
 	if (!debugfs_create_file(qstat_names[qstat_reset_cnts], 0200, d_qstat,
@@ -232,7 +274,7 @@ static int __init init_qspinlock_stat(void)
 fs_initcall(init_qspinlock_stat);
 
 /*
- * Increment the PV qspinlock statistical counters
+ * Increment the qspinlock statistical counter.
  */
 static inline void qstat_inc(enum qlock_stats stat, bool cond)
 {
@@ -241,6 +283,20 @@ static inline void qstat_inc(enum qlock_stats stat, bool cond)
 }
 
 /*
+ * Increment either one of the qspinlock statistical counters depending
+ * on the given condition.
+ */
+static inline void qstat_inc_either(enum qlock_stats true_stat,
+				    enum qlock_stats false_stat, bool cond)
+{
+	if (cond)
+		this_cpu_inc(qstats[true_stat]);
+	else
+		this_cpu_inc(qstats[false_stat]);
+}
+
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+/*
  * PV hash hop count
  */
 static inline void qstat_hop(int hopcnt)
@@ -279,9 +335,14 @@ static inline void __pv_wait(u8 *ptr, u8 val)
 #define pv_kick(c)	__pv_kick(c)
 #define pv_wait(p, v)	__pv_wait(p, v)
 
+#endif /* CONFIG_PARAVIRT_SPINLOCKS */
+
 #else /* CONFIG_QUEUED_LOCK_STAT */
 
 static inline void qstat_inc(enum qlock_stats stat, bool cond)	{ }
+static inline void qstat_inc_either(enum qlock_stats true_stat,
+		   enum qlock_stats false_stat, bool cond)	{ }
 static inline void qstat_hop(int hopcnt)			{ }
 
 #endif /* CONFIG_QUEUED_LOCK_STAT */
+#endif /* __KERNEL_LOCKING_QSPINLOCK_STAT_H */
-- 
1.8.3.1
