[<prev] [next>] [thread-next>] [day] [month] [year] [list]
Message-ID: <fdf96605-a4a0-049b-51c9-1e68cc2a9b93@supermicro.com>
Date: Fri, 13 Jul 2018 12:19:08 -0700
From: patrickg <patrickg@...ermicro.com>
To: <len.brown@...el.com>, <linux-kernel@...r.kernel.org>
CC: <mingo@...nel.org>, <alek.du@...el.com>, <arjan@...ux.intel.com>,
<feng.tang@...el.com>
Subject: [RFC] x86, tsc: Add kcmdline args for skipping tsc calibration
sequences
This RFC patch is intended to allow the user to bypass the CPUID, MSR and quick PIT calibration methods, should they desire to.
The current behavior in mainline x86 tsc is to attempt calibration in the order listed above, returning as soon as one method succeeds. However, there are certain BIOS/hardware designs for overclocking that cause the TSC to change along with the max core clock; simply trusting those calibration methods will lead to the TSC running 'faster' than expected and, eventually, TSC instability.
I only know of a use-case for skipping CPUID calibration; however, I included arguments for skipping the other methods as well, so that all of them are covered in the long run rather than just the one use-case.
I also included some log messages; in the end they are probably not strictly necessary, but they could be useful from a debugging standpoint to confirm that the flags are actually being applied.
---
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 74392d9..5a07d12 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -47,6 +47,13 @@ static DEFINE_STATIC_KEY_FALSE(__use_tsc);
int tsc_clocksource_reliable;
/*
 * TSC calibration sequence disablement, set via the "tsc_calibrate="
 * kernel command line parameter (see setup_tsc_calibration_order()).
 *
 * Only referenced from within this file, so keep them static. Globals
 * live in .bss and are implicitly zero-initialized; do not initialize
 * them to 0 explicitly (checkpatch flags that as an error).
 */
static int calibrate_cpuid_khz_disabled;
static int calibrate_msr_disabled;
static int calibrate_quick_disabled;

static u32 art_to_tsc_numerator;
static u32 art_to_tsc_denominator;
static u64 art_to_tsc_offset;
@@ -281,6 +288,32 @@ static int __init tsc_setup(char *str)
__setup("tsc=", tsc_setup);
+static int __init setup_tsc_calibration_order(char *str)
+{
+ if (!str)
+ return -EINVAL;
+
+ while (*str) {
+ if (!strncmp(str, "nocpuid", 7)) {
+ calibrate_cpuid_khz_disabled = 1;
+ pr_info("TSC CPUID khz calibrate disabled\n");
+ } else if (!strncmp(str, "nomsr", 5)) {
+ calibrate_msr_disabled = 1;
+ pr_info("TSC msr calibrate disabled\n");
+ } else if (!strncmp(str, "noquick", 7)) {
+ calibrate_quick_disabled = 1;
+ pr_info("TSC quick calibrate disabled\n");
+ }
+
+ str += strcspn(str, ",");
+ while (*str == ',')
+ str++;
+ }
+ return 1;
+}
+
+__setup("tsc_calibrate=", setup_tsc_calibration_order);
+
#define MAX_RETRIES 5
#define SMI_TRESHOLD 50000
@@ -675,19 +708,25 @@ unsigned long native_calibrate_cpu(void)
unsigned long flags, latch, ms, fast_calibrate;
int hpet = is_hpet_enabled(), i, loopmin;
- fast_calibrate = cpu_khz_from_cpuid();
- if (fast_calibrate)
- return fast_calibrate;
+ if (!calibrate_cpuid_khz_disabled) {
+ fast_calibrate = cpu_khz_from_cpuid();
+ if (fast_calibrate)
+ return fast_calibrate;
+ }
- fast_calibrate = cpu_khz_from_msr();
- if (fast_calibrate)
- return fast_calibrate;
+ if (!calibrate_msr_disabled) {
+ fast_calibrate = cpu_khz_from_msr();
+ if (fast_calibrate)
+ return fast_calibrate;
+ }
- local_irq_save(flags);
- fast_calibrate = quick_pit_calibrate();
- local_irq_restore(flags);
- if (fast_calibrate)
- return fast_calibrate;
+ if (!calibrate_quick_disabled) {
+ local_irq_save(flags);
+ fast_calibrate = quick_pit_calibrate();
+ local_irq_restore(flags);
+ if (fast_calibrate)
+ return fast_calibrate;
+ }
/*
* Run 5 calibration loops to get the lowest frequency value
---
Powered by blists - more mailing lists