Message-ID: <158886080725.8414.13575285074483659672.tip-bot2@tip-bot2>
Date: Thu, 07 May 2020 14:13:27 -0000
From: "tip-bot2 for Thomas Gleixner" <tip-bot2@...utronix.de>
To: linux-tip-commits@...r.kernel.org
Cc: Thomas Gleixner <tglx@...utronix.de>,
Kyung Min Park <kyung.min.park@...el.com>,
x86 <x86@...nel.org>, LKML <linux-kernel@...r.kernel.org>
Subject: [tip: x86/timers] x86/delay: Preparatory code cleanup

The following commit has been merged into the x86/timers branch of tip:

Commit-ID: e8824890249355656968d8846908a313fe231f11
Gitweb: https://git.kernel.org/tip/e8824890249355656968d8846908a313fe231f11
Author: Thomas Gleixner <tglx@...utronix.de>
AuthorDate: Fri, 24 Apr 2020 12:37:54 -07:00
Committer: Thomas Gleixner <tglx@...utronix.de>
CommitterDate: Thu, 07 May 2020 16:06:19 +02:00

x86/delay: Preparatory code cleanup

The naming conventions in the delay code are confusing at best.

All delay variants use a loops argument and/or variable which originates
from the original delay_loop() implementation. But all variants except
delay_loop() are based on TSC cycles.

Rename the argument to cycles and make it type u64 to avoid the weird
expansions to u64 in the functions.

Rename MWAITX_MAX_LOOPS to MWAITX_MAX_WAIT_CYCLES for the same reason
and fixup the comment of delay_mwaitx() as well.

Mark the delay_fn function pointer __ro_after_init and fixup the comment
for it.

No functional change and preparation for the upcoming TPAUSE based delay
variant.
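
As a rough standalone illustration of the pattern this converges on
(plain userspace C; the *_demo names are made up and this is not the
kernel code in the diff below): every delay backend takes a u64 cycle
count, and a single function pointer, written once during init and
treated as read-only afterwards, selects the backend.

/*
 * Minimal standalone sketch (not kernel code): all *_demo names are
 * made up for illustration.  Every delay backend takes a u64 cycle
 * count, and one function pointer -- written exactly once during
 * "init", analogous to __ro_after_init -- selects the backend.
 * Build with: cc -O2 -o delay-demo delay-demo.c
 */
#include <stdint.h>
#include <stdio.h>

static void delay_loop_demo(uint64_t cycles)
{
	/* Stand-in for the simple busy-loop backend. */
	for (volatile uint64_t i = 0; i < cycles; i++)
		;
}

static void delay_tsc_demo(uint64_t cycles)
{
	/* Stand-in for a timestamp-counter based backend. */
	(void)cycles;
}

/* Selected once at "boot", never changed afterwards. */
static void (*delay_fn_demo)(uint64_t) = delay_loop_demo;

static void use_tsc_delay_demo(void)
{
	if (delay_fn_demo == delay_loop_demo)
		delay_fn_demo = delay_tsc_demo;
}

int main(void)
{
	use_tsc_delay_demo();	/* one-time selection during init */
	delay_fn_demo(1000);	/* every caller passes u64 cycles */
	puts("delay backend invoked");
	return 0;
}

The benefit mirrored from the patch is that no backend has to widen an
unsigned long argument to u64 internally; the kernel equivalent of the
"written once" property is __ro_after_init on delay_fn, as the diff
below shows.
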
[ Kyung Min Park: Added __init to use_tsc_delay() ]
Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
Signed-off-by: Kyung Min Park <kyung.min.park@...el.com>
Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
Link: https://lkml.kernel.org/r/1587757076-30337-2-git-send-email-kyung.min.park@intel.com
---
arch/x86/include/asm/delay.h | 3 +-
arch/x86/include/asm/mwait.h | 2 +-
arch/x86/lib/delay.c | 45 ++++++++++++++++++-----------------
3 files changed, 27 insertions(+), 23 deletions(-)
diff --git a/arch/x86/include/asm/delay.h b/arch/x86/include/asm/delay.h
index de9e784..9aa38de 100644
--- a/arch/x86/include/asm/delay.h
+++ b/arch/x86/include/asm/delay.h
@@ -3,8 +3,9 @@
#define _ASM_X86_DELAY_H
#include <asm-generic/delay.h>
+#include <linux/init.h>
-void use_tsc_delay(void);
+void __init use_tsc_delay(void);
void use_mwaitx_delay(void);
#endif /* _ASM_X86_DELAY_H */
diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h
index b809f11..a43b35b 100644
--- a/arch/x86/include/asm/mwait.h
+++ b/arch/x86/include/asm/mwait.h
@@ -20,7 +20,7 @@
#define MWAIT_ECX_INTERRUPT_BREAK 0x1
#define MWAITX_ECX_TIMER_ENABLE BIT(1)
-#define MWAITX_MAX_LOOPS ((u32)-1)
+#define MWAITX_MAX_WAIT_CYCLES UINT_MAX
#define MWAITX_DISABLE_CSTATES 0xf0
u32 get_umwait_control_msr(void);
diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c
index c126571..887d52d 100644
--- a/arch/x86/lib/delay.c
+++ b/arch/x86/lib/delay.c
@@ -27,9 +27,19 @@
# include <asm/smp.h>
#endif
+static void delay_loop(u64 __loops);
+
+/*
+ * Calibration and selection of the delay mechanism happens only once
+ * during boot.
+ */
+static void (*delay_fn)(u64) __ro_after_init = delay_loop;
+
/* simple loop based delay: */
-static void delay_loop(unsigned long loops)
+static void delay_loop(u64 __loops)
{
+ unsigned long loops = (unsigned long)__loops;
+
asm volatile(
" test %0,%0 \n"
" jz 3f \n"
@@ -49,9 +59,9 @@ static void delay_loop(unsigned long loops)
}
/* TSC based delay: */
-static void delay_tsc(unsigned long __loops)
+static void delay_tsc(u64 cycles)
{
- u64 bclock, now, loops = __loops;
+ u64 bclock, now;
int cpu;
preempt_disable();
@@ -59,7 +69,7 @@ static void delay_tsc(unsigned long __loops)
bclock = rdtsc_ordered();
for (;;) {
now = rdtsc_ordered();
- if ((now - bclock) >= loops)
+ if ((now - bclock) >= cycles)
break;
/* Allow RT tasks to run */
@@ -77,7 +87,7 @@ static void delay_tsc(unsigned long __loops)
* counter for this CPU.
*/
if (unlikely(cpu != smp_processor_id())) {
- loops -= (now - bclock);
+ cycles -= (now - bclock);
cpu = smp_processor_id();
bclock = rdtsc_ordered();
}
@@ -87,24 +97,24 @@ static void delay_tsc(unsigned long __loops)
/*
* On some AMD platforms, MWAITX has a configurable 32-bit timer, that
- * counts with TSC frequency. The input value is the loop of the
- * counter, it will exit when the timer expires.
+ * counts with TSC frequency. The input value is the number of TSC cycles
+ * to wait. MWAITX will also exit when the timer expires.
*/
-static void delay_mwaitx(unsigned long __loops)
+static void delay_mwaitx(u64 cycles)
{
- u64 start, end, delay, loops = __loops;
+ u64 start, end, delay;
/*
* Timer value of 0 causes MWAITX to wait indefinitely, unless there
* is a store on the memory monitored by MONITORX.
*/
- if (loops == 0)
+ if (!cycles)
return;
start = rdtsc_ordered();
for (;;) {
- delay = min_t(u64, MWAITX_MAX_LOOPS, loops);
+ delay = min_t(u64, MWAITX_MAX_WAIT_CYCLES, cycles);
/*
* Use cpu_tss_rw as a cacheline-aligned, seldomly
@@ -121,22 +131,15 @@ static void delay_mwaitx(unsigned long __loops)
end = rdtsc_ordered();
- if (loops <= end - start)
+ if (cycles <= end - start)
break;
- loops -= end - start;
-
+ cycles -= end - start;
start = end;
}
}
-/*
- * Since we calibrate only once at boot, this
- * function should be set once at boot and not changed
- */
-static void (*delay_fn)(unsigned long) = delay_loop;
-
-void use_tsc_delay(void)
+void __init use_tsc_delay(void)
{
if (delay_fn == delay_loop)
delay_fn = delay_tsc;
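
The updated delay_mwaitx() comment above describes a 32-bit timer that
counts TSC cycles, so a u64 cycle count has to be consumed in chunks of
at most MWAITX_MAX_WAIT_CYCLES (UINT_MAX) cycles. Below is a rough
standalone sketch of that chunking logic; read_cycles() and
wait_up_to() are made-up stand-ins for rdtsc_ordered() and the
MONITORX/MWAITX sequence so the example can run in plain userspace.

/*
 * Standalone sketch of the chunking idea in delay_mwaitx() above.
 * read_cycles() and wait_up_to() are made-up stand-ins; the fake
 * clock only exists so the example runs outside the kernel.
 */
#include <limits.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t fake_clock;

static uint64_t read_cycles(void)
{
	return fake_clock;
}

static void wait_up_to(uint32_t max_cycles)
{
	/* Pretend the wait completed, waking up a little late. */
	fake_clock += (uint64_t)max_cycles + 3;
}

static void delay_chunked(uint64_t cycles)
{
	uint64_t start, end;

	if (!cycles)	/* a zero timer value would mean "wait forever" */
		return;

	start = read_cycles();
	for (;;) {
		/* Never program more than the 32-bit timer can hold. */
		uint32_t chunk = cycles > UINT_MAX ? UINT_MAX : (uint32_t)cycles;

		wait_up_to(chunk);

		end = read_cycles();
		if (cycles <= end - start)
			break;
		/* Subtract what actually elapsed, not the requested chunk. */
		cycles -= end - start;
		start = end;
	}
}

int main(void)
{
	delay_chunked(10ULL * UINT_MAX);
	printf("waited %llu fake cycles\n",
	       (unsigned long long)read_cycles());
	return 0;
}

The detail carried over from the kernel code is that each round
subtracts the time that actually elapsed rather than the requested
chunk, since the wait may not last exactly the programmed number of
cycles.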