Date:	Fri, 27 Jun 2008 18:34:12 -0300
From:	Glauber Costa <gcosta@...hat.com>
To:	linux-kernel@...r.kernel.org
Cc:	tglx@...utronix.de, mingo@...e.hu, x86@...nel.org
Subject: [PATCH 05/39] integrate delay functions

delay_32.c and delay_64.c are now identical, so they are merged into a single delay.c.

Signed-off-by: Glauber Costa <gcosta@...hat.com>
---
 arch/x86/lib/Makefile   |    2 +-
 arch/x86/lib/delay.c    |  137 ++++++++++++++++++++++++++++++++++++++++++++++
 arch/x86/lib/delay_32.c |  138 -----------------------------------------------
 arch/x86/lib/delay_64.c |  128 -------------------------------------------
 4 files changed, 138 insertions(+), 267 deletions(-)
 create mode 100644 arch/x86/lib/delay.c
 delete mode 100644 arch/x86/lib/delay_32.c
 delete mode 100644 arch/x86/lib/delay_64.c
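
Note (not part of the patch itself): the fixed-point conversion done by
__const_udelay() is easier to follow in plain C. The sketch below is an
illustrative userspace program, not kernel code; the helper name
usecs_to_loops and the loops_per_jiffy/HZ values are made up for the example.
0x10c7 is 2^32/10^6 rounded up, and the "mull %%edx" in the patch keeps only
the high 32 bits of the 64-bit product, so the result is roughly
usecs * loops_per_jiffy * HZ / 10^6 delay-loop iterations. The xloops *= 4
and (HZ/4) split in the kernel version appears to be there to keep the 32-bit
multiplier loops_per_jiffy * HZ from overflowing; the sketch sidesteps that
by doing the whole computation in 64 bits.

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: mirrors the scaling in __const_udelay()/__udelay(). */
static uint64_t usecs_to_loops(uint64_t usecs, uint64_t loops_per_jiffy,
			       uint64_t hz)
{
	uint64_t xloops = usecs * 0x10c7;	/* 2^32 / 10^6, rounded up */

	/* high 32 bits of xloops * (lpj * HZ), plus one to round up */
	return ((xloops * (loops_per_jiffy * hz)) >> 32) + 1;
}

int main(void)
{
	/* made-up calibration: lpj = 4000000 at HZ = 250 -> ~1e9 loops/s */
	printf("10us -> %llu loops\n",
	       (unsigned long long)usecs_to_loops(10, 4000000, 250));
	return 0;
}

With those example numbers, 10us comes out to about 10000 loops, i.e. roughly
1e9 delay-loop iterations per second, which matches the intent of udelay().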

diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index 84aa288..700c2c3 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -4,7 +4,7 @@
 
 obj-$(CONFIG_SMP) := msr-on-cpu.o
 
-lib-y := delay_$(BITS).o
+lib-y := delay.o
 lib-y += thunk_$(BITS).o
 lib-y += usercopy_$(BITS).o getuser_$(BITS).o putuser_$(BITS).o
 lib-y += memcpy_$(BITS).o
diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c
new file mode 100644
index 0000000..f456860
--- /dev/null
+++ b/arch/x86/lib/delay.c
@@ -0,0 +1,137 @@
+/*
+ *	Precise Delay Loops for i386
+ *
+ *	Copyright (C) 1993 Linus Torvalds
+ *	Copyright (C) 1997 Martin Mares <mj@...ey.karlin.mff.cuni.cz>
+ *	Copyright (C) 2008 Jiri Hladky <hladky _dot_ jiri _at_ gmail _dot_ com>
+ *
+ *	The __delay function must _NOT_ be inlined as its execution time
+ *	depends wildly on alignment on many x86 processors. The additional
+ *	jump magic is needed to get the timing stable on all the CPU's
+ *	we have to worry about.
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/timex.h>
+#include <linux/preempt.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+
+#include <asm/processor.h>
+#include <asm/delay.h>
+#include <asm/timer.h>
+
+#ifdef CONFIG_SMP
+# include <asm/smp.h>
+#endif
+
+/* simple loop based delay: */
+static void delay_loop(unsigned long loops)
+{
+	asm volatile(
+		"	test %0,%0	\n"
+		"	jz 3f		\n"
+		"	jmp 1f		\n"
+
+		".align 16		\n"
+		"1:	jmp 2f		\n"
+
+		".align 16		\n"
+		"2:	dec %0		\n"
+		"	jnz 2b		\n"
+		"3:	dec %0		\n"
+
+		: /* we don't need output */
+		:"a" (loops)
+	);
+}
+
+/* TSC based delay: */
+static void delay_tsc(unsigned long loops)
+{
+	unsigned long bclock, now;
+	int cpu;
+
+	preempt_disable();
+	cpu = smp_processor_id();
+	rdtscl(bclock);
+	for (;;) {
+		rdtscl(now);
+		if ((now - bclock) >= loops)
+			break;
+
+		/* Allow RT tasks to run */
+		preempt_enable();
+		rep_nop();
+		preempt_disable();
+
+		/*
+		 * It is possible that we moved to another CPU, and
+		 * since TSC's are per-cpu we need to calculate
+		 * that. The delay must guarantee that we wait "at
+		 * least" the amount of time. Being moved to another
+		 * CPU could make the wait longer but we just need to
+		 * make sure we waited long enough. Rebalance the
+		 * counter for this CPU.
+		 */
+		if (unlikely(cpu != smp_processor_id())) {
+			loops -= (now - bclock);
+			cpu = smp_processor_id();
+			rdtscl(bclock);
+		}
+	}
+	preempt_enable();
+}
+
+/*
+ * Since we calibrate only once at boot, this
+ * function should be set once at boot and not changed
+ */
+static void (*delay_fn)(unsigned long) = delay_loop;
+
+void use_tsc_delay(void)
+{
+	delay_fn = delay_tsc;
+}
+
+int __devinit read_current_timer(unsigned long *timer_val)
+{
+	if (delay_fn == delay_tsc) {
+		rdtscll(*timer_val);
+		return 0;
+	}
+	return -1;
+}
+
+void __delay(unsigned long loops)
+{
+	delay_fn(loops);
+}
+EXPORT_SYMBOL(__delay);
+
+inline void __const_udelay(unsigned long xloops)
+{
+	int d0;
+
+	xloops *= 4;
+	asm("mull %%edx"
+		:"=d" (xloops), "=&a" (d0)
+		:"1" (xloops), "0"
+		(cpu_data(raw_smp_processor_id()).loops_per_jiffy * (HZ/4)));
+
+	__delay(++xloops);
+}
+EXPORT_SYMBOL(__const_udelay);
+
+void __udelay(unsigned long usecs)
+{
+	__const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */
+}
+EXPORT_SYMBOL(__udelay);
+
+void __ndelay(unsigned long nsecs)
+{
+	__const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */
+}
+EXPORT_SYMBOL(__ndelay);
diff --git a/arch/x86/lib/delay_32.c b/arch/x86/lib/delay_32.c
deleted file mode 100644
index 0b659a3..0000000
--- a/arch/x86/lib/delay_32.c
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- *	Precise Delay Loops for i386
- *
- *	Copyright (C) 1993 Linus Torvalds
- *	Copyright (C) 1997 Martin Mares <mj@...ey.karlin.mff.cuni.cz>
- *	Copyright (C) 2008 Jiri Hladky <hladky _dot_ jiri _at_ gmail _dot_ com>
- *
- *	The __delay function must _NOT_ be inlined as its execution time
- *	depends wildly on alignment on many x86 processors. The additional
- *	jump magic is needed to get the timing stable on all the CPU's
- *	we have to worry about.
- */
-
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/timex.h>
-#include <linux/preempt.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-
-#include <asm/processor.h>
-#include <asm/delay.h>
-#include <asm/timer.h>
-
-#ifdef CONFIG_SMP
-# include <asm/smp.h>
-#endif
-
-/* simple loop based delay: */
-static void delay_loop(unsigned long loops)
-{
-	__asm__ __volatile__(
-		"	test %0,%0	\n"
-		"	jz 3f		\n"
-		"	jmp 1f		\n"
-
-		".align 16		\n"
-		"1:	jmp 2f		\n"
-
-		".align 16		\n"
-		"2:	dec %0		\n"
-		"	jnz 2b		\n"
-		"3:	dec %0		\n"
-
-		: /* we don't need output */
-		:"a" (loops)
-	);
-}
-
-/* TSC based delay: */
-static void delay_tsc(unsigned long loops)
-{
-	unsigned long bclock, now;
-	int cpu;
-
-	preempt_disable();
-	cpu = smp_processor_id();
-	rdtscl(bclock);
-	for (;;) {
-		rdtscl(now);
-		if ((now - bclock) >= loops)
-			break;
-
-		/* Allow RT tasks to run */
-		preempt_enable();
-		rep_nop();
-		preempt_disable();
-
-		/*
-		 * It is possible that we moved to another CPU, and
-		 * since TSC's are per-cpu we need to calculate
-		 * that. The delay must guarantee that we wait "at
-		 * least" the amount of time. Being moved to another
-		 * CPU could make the wait longer but we just need to
-		 * make sure we waited long enough. Rebalance the
-		 * counter for this CPU.
-		 */
-		if (unlikely(cpu != smp_processor_id())) {
-			loops -= (now - bclock);
-			cpu = smp_processor_id();
-			rdtscl(bclock);
-		}
-	}
-	preempt_enable();
-}
-
-/*
- * Since we calibrate only once at boot, this
- * function should be set once at boot and not changed
- */
-static void (*delay_fn)(unsigned long) = delay_loop;
-
-void use_tsc_delay(void)
-{
-	delay_fn = delay_tsc;
-}
-
-int __devinit read_current_timer(unsigned long *timer_val)
-{
-	if (delay_fn == delay_tsc) {
-		rdtscll(*timer_val);
-		return 0;
-	}
-	return -1;
-}
-
-void __delay(unsigned long loops)
-{
-	delay_fn(loops);
-}
-
-inline void __const_udelay(unsigned long xloops)
-{
-	int d0;
-
-	xloops *= 4;
-	__asm__("mull %%edx"
-		:"=d" (xloops), "=&a" (d0)
-		:"1" (xloops), "0"
-		(cpu_data(raw_smp_processor_id()).loops_per_jiffy * (HZ/4)));
-
-	__delay(++xloops);
-}
-
-void __udelay(unsigned long usecs)
-{
-	__const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */
-}
-
-void __ndelay(unsigned long nsecs)
-{
-	__const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */
-}
-
-EXPORT_SYMBOL(__delay);
-EXPORT_SYMBOL(__const_udelay);
-EXPORT_SYMBOL(__udelay);
-EXPORT_SYMBOL(__ndelay);
diff --git a/arch/x86/lib/delay_64.c b/arch/x86/lib/delay_64.c
deleted file mode 100644
index ff3dfec..0000000
--- a/arch/x86/lib/delay_64.c
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- *	Precise Delay Loops for x86-64
- *
- *	Copyright (C) 1993 Linus Torvalds
- *	Copyright (C) 1997 Martin Mares <mj@...ey.karlin.mff.cuni.cz>
- *
- *	The __delay function must _NOT_ be inlined as its execution time
- *	depends wildly on alignment on many x86 processors. 
- */
-
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/timex.h>
-#include <linux/preempt.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-
-#include <asm/delay.h>
-#include <asm/msr.h>
-
-#ifdef CONFIG_SMP
-#include <asm/smp.h>
-#endif
-
-/* simple loop based delay: */
-static void delay_loop(unsigned long loops)
-{
-	asm volatile(
-		"	test %0,%0	\n"
-		"	jz 3f		\n"
-		"	jmp 1f		\n"
-
-		".align 16		\n"
-		"1:	jmp 2f		\n"
-
-		".align 16		\n"
-		"2:	dec %0		\n"
-		"	jnz 2b		\n"
-		"3:	dec %0		\n"
-
-		: /* we don't need output */
-		:"a" (loops)
-	);
-}
-
-static void delay_tsc(unsigned long loops)
-{
-	unsigned bclock, now;
-	int cpu;
-
-	preempt_disable();
-	cpu = smp_processor_id();
-	rdtscl(bclock);
-	for (;;) {
-		rdtscl(now);
-		if ((now - bclock) >= loops)
-			break;
-
-		/* Allow RT tasks to run */
-		preempt_enable();
-		rep_nop();
-		preempt_disable();
-
-		/*
-		 * It is possible that we moved to another CPU, and
-		 * since TSC's are per-cpu we need to calculate
-		 * that. The delay must guarantee that we wait "at
-		 * least" the amount of time. Being moved to another
-		 * CPU could make the wait longer but we just need to
-		 * make sure we waited long enough. Rebalance the
-		 * counter for this CPU.
-		 */
-		if (unlikely(cpu != smp_processor_id())) {
-			loops -= (now - bclock);
-			cpu = smp_processor_id();
-			rdtscl(bclock);
-		}
-	}
-	preempt_enable();
-}
-
-static void (*delay_fn)(unsigned long) = delay_loop;
-
-void use_tsc_delay(void)
-{
-	delay_fn = delay_tsc;
-}
-
-int __devinit read_current_timer(unsigned long *timer_value)
-{
-	if (delay_fn == delay_tsc) {
-		rdtscll(*timer_value);
-		return 0;
-	}
-	return -1;
-}
-
-void __delay(unsigned long loops)
-{
-	delay_fn(loops);
-}
-EXPORT_SYMBOL(__delay);
-
-inline void __const_udelay(unsigned long xloops)
-{
-	int d0;
-	xloops *= 4;
-	__asm__("mull %%edx"
-		:"=d" (xloops), "=&a" (d0)
-		:"1" (xloops), "0"
-		(cpu_data(raw_smp_processor_id()).loops_per_jiffy * (HZ/4)));
-
-	__delay(++xloops);
-}
-
-EXPORT_SYMBOL(__const_udelay);
-
-void __udelay(unsigned long usecs)
-{
-	__const_udelay(usecs * 0x000010c7);  /* 2**32 / 1000000 (rounded up) */
-}
-EXPORT_SYMBOL(__udelay);
-
-void __ndelay(unsigned long nsecs)
-{
-	__const_udelay(nsecs * 0x00005);  /* 2**32 / 1000000000 (rounded up) */
-}
-EXPORT_SYMBOL(__ndelay);
-- 
1.5.5.1
