Date:	Mon, 23 Apr 2007 11:42:30 -0400
From:	Steven Rostedt <rostedt@...dmis.org>
To:	LKML <linux-kernel@...r.kernel.org>
Cc:	Ingo Molnar <mingo@...e.hu>,
	Peter Zijlstra <a.p.zijlstra@...llo.nl>,
	Thomas Gleixner <tglx@...utronix.de>
Subject: [PATCH -rt] trace cpu idle enter and exit

When tracing both hardirqs-off and preemption-off latencies, we get
ridiculous latency times caused by cpu_idle.

What is happening is that the cpu_idle code on i386 and x86_64 runs
with either interrupts off or preemption off.  It can do this because
it calls __schedule() directly instead of schedule().

The loop looks something like this:

/* start with interrupts on but preemption off */
while (1) {
     idle();               /* wait for work; interrupts may be on here */
     local_irq_disable();
     preempt_enable();     /* preemption back on, irqs still off */
     __schedule();         /* schedule with interrupts disabled */
     preempt_disable();
     local_irq_enable();
}

So when the system goes idle, that whole time is recorded as a latency.

This patch adds calls to trace_preempt_enter_idle() and
trace_preempt_exit_idle() around the idle section of the loop.  While
the idle flag is set, the latency tracer treats the preempt count as
zero, so the time spent idle is not recorded as preemption-off latency.
The mwait path also gains a trace_hardirqs_on() before __sti_mwait(),
since that re-enables interrupts directly.
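
As an aside, the gating is easy to see in isolation.  Below is a
standalone userland sketch (not part of the patch): the per-CPU
variable and preempt_count() are stood in for by plain globals, but
the macro mirrors the one added to kernel/latency_trace.c.

/*
 * Standalone sketch of the gating mechanism (not kernel code).
 * A flag set around the idle section makes the tracer see a zero
 * preempt count, so idle time is never accounted as latency.
 */
#include <stdio.h>

static int trace_cpu_idle;         /* stands in for the per-CPU variable */
static int preempt_count_sim = 1;  /* pretend preemption is disabled */

#define preempt_count() (preempt_count_sim)
/* mirrors the patched irqs_off_preempt_count() macro */
#define irqs_off_preempt_count() (!trace_cpu_idle && preempt_count())

static void trace_preempt_enter_idle(void) { trace_cpu_idle = 1; }
static void trace_preempt_exit_idle(void)  { trace_cpu_idle = 0; }

int main(void)
{
	printf("before idle: counted=%d\n", irqs_off_preempt_count());
	trace_preempt_enter_idle();
	printf("while idle:  counted=%d\n", irqs_off_preempt_count());
	trace_preempt_exit_idle();
	printf("after idle:  counted=%d\n", irqs_off_preempt_count());
	return 0;
}

Compiled and run, this prints counted=1, 0, 1: while the flag is set,
the tracer sees a zero preempt count and records nothing.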

Signed-off-by: Steven Rostedt <rostedt@...dmis.org>

Index: linux-2.6.21-rc6-rt0/arch/x86_64/kernel/process.c
===================================================================
--- linux-2.6.21-rc6-rt0.orig/arch/x86_64/kernel/process.c
+++ linux-2.6.21-rc6-rt0/arch/x86_64/kernel/process.c
@@ -231,9 +231,11 @@ void cpu_idle (void)
 
 		tick_nohz_restart_sched_tick();
 		local_irq_disable();
+		trace_preempt_exit_idle();
 		__preempt_enable_no_resched();
 		__schedule();
 		preempt_disable();
+		trace_preempt_enter_idle();
 		local_irq_enable();
 	}
 }
@@ -264,9 +266,10 @@ static void mwait_idle(void)
 	if (!need_resched() && !need_resched_delayed()) {
 		__monitor((void *)&current_thread_info()->flags, 0, 0);
 		smp_mb();
-		if (!need_resched() && !need_resched_delayed())
+		if (!need_resched() && !need_resched_delayed()) {
+			trace_hardirqs_on();
 			__sti_mwait(0, 0);
-		else
+		} else
 			local_irq_enable();
 	} else {
 		local_irq_enable();
Index: linux-2.6.21-rc6-rt0/include/linux/irqflags.h
===================================================================
--- linux-2.6.21-rc6-rt0.orig/include/linux/irqflags.h
+++ linux-2.6.21-rc6-rt0/include/linux/irqflags.h
@@ -22,6 +22,13 @@
   extern void trace_hardirqs_off(void);
   extern void trace_softirqs_on(unsigned long ip);
   extern void trace_softirqs_off(unsigned long ip);
+# ifdef CONFIG_CRITICAL_PREEMPT_TIMING
+   extern void trace_preempt_enter_idle(void);
+   extern void trace_preempt_exit_idle(void);
+# else
+#  define trace_preempt_enter_idle()	do { } while (0)
+#  define trace_preempt_exit_idle()	do { } while (0)
+# endif
 # define trace_hardirq_context(p)	((p)->hardirq_context)
 # define trace_softirq_context(p)	((p)->softirq_context)
 # define trace_hardirqs_enabled(p)	((p)->hardirqs_enabled)
@@ -32,6 +39,8 @@
 # define trace_softirq_exit()	do { current->softirq_context--; } while (0)
 # define INIT_TRACE_IRQFLAGS	.softirqs_enabled = 1,
 #else
+# define trace_preempt_enter_idle()	do { } while (0)
+# define trace_preempt_exit_idle()	do { } while (0)
 # define trace_hardirqs_on()		do { } while (0)
 # define trace_hardirqs_off()		do { } while (0)
 # define trace_softirqs_on(ip)		do { } while (0)
Index: linux-2.6.21-rc6-rt0/kernel/latency_trace.c
===================================================================
--- linux-2.6.21-rc6-rt0.orig/kernel/latency_trace.c
+++ linux-2.6.21-rc6-rt0/kernel/latency_trace.c
@@ -84,7 +84,8 @@ static inline int DEBUG_WARN_ON(int cond
 
 #ifdef CONFIG_CRITICAL_IRQSOFF_TIMING
 # ifdef CONFIG_CRITICAL_PREEMPT_TIMING
-#  define irqs_off_preempt_count() preempt_count()
+   static DEFINE_PER_CPU(int, trace_cpu_idle);
+#  define irqs_off_preempt_count() (!__get_cpu_var(trace_cpu_idle) && preempt_count())
 # else
 #  define irqs_off_preempt_count() 0
 # endif
@@ -2153,6 +2154,20 @@ void notrace unmask_preempt_count(unsign
 }
 EXPORT_SYMBOL(unmask_preempt_count);
 
+#ifdef CONFIG_CRITICAL_PREEMPT_TIMING
+
+/* Some archs do their cpu_idle with preemption on. Don't measure it */
+void notrace trace_preempt_enter_idle(void)
+{
+	__get_cpu_var(trace_cpu_idle) = 1;
+}
+
+void notrace trace_preempt_exit_idle(void)
+{
+	__get_cpu_var(trace_cpu_idle) = 0;
+}
+
+#endif /* CONFIG_CRITICAL_PREEMPT_TIMING */
 
 #endif
 
Index: linux-2.6.21-rc6-rt0/arch/i386/kernel/process.c
===================================================================
--- linux-2.6.21-rc6-rt0.orig/arch/i386/kernel/process.c
+++ linux-2.6.21-rc6-rt0/arch/i386/kernel/process.c
@@ -196,10 +196,12 @@ void cpu_idle(void)
 			idle();
 		}
 		local_irq_disable();
+		trace_preempt_exit_idle();
 		tick_nohz_restart_sched_tick();
 		__preempt_enable_no_resched();
 		__schedule();
 		preempt_disable();
+		trace_preempt_enter_idle();
 		local_irq_enable();
 	}
 }

