Message-ID: <f718c935-d5b4-111d-abe1-e7a61d08237b@supermicro.com>
Date: Thu, 25 Oct 2018 12:13:26 -0700
From: patrickg <patrickg@...ermicro.com>
To: Prarit Bhargava <prarit@...hat.com>,
"Brown, Len" <len.brown@...el.com>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>
CC: "mingo@...nel.org" <mingo@...nel.org>,
"Du, Alek" <alek.du@...el.com>,
"arjan@...ux.intel.com" <arjan@...ux.intel.com>,
"Tang, Feng" <feng.tang@...el.com>
Subject: Re: [RFC] x86, tsc: Add kcmdline args for skipping tsc calibration sequences

Sorry for the delay; lkml folder sorting gone wrong.

On 10/25/18 11:01 AM, Prarit Bhargava wrote:
> Patrick can you reply back with the entire patch

Yes; watch the editor bork it even more than it originally did, though.

---Copypasta of first RFC patch---
---
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 74392d9..5a07d12 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -47,6 +47,13 @@ static DEFINE_STATIC_KEY_FALSE(__use_tsc);
 
 int tsc_clocksource_reliable;
 
+/*
+ * TSC calibration sequence disablement
+ */
+int calibrate_cpuid_khz_disabled = 0;
+int calibrate_msr_disabled = 0;
+int calibrate_quick_disabled = 0;
+
 static u32 art_to_tsc_numerator;
 static u32 art_to_tsc_denominator;
 static u64 art_to_tsc_offset;
@@ -281,6 +288,32 @@ static int __init tsc_setup(char *str)
__setup("tsc=", tsc_setup);
+static int __init setup_tsc_calibration_order(char *str)
+{
+ if (!str)
+ return -EINVAL;
+
+ while (*str) {
+ if (!strncmp(str, "nocpuid", 7)) {
+ calibrate_cpuid_khz_disabled = 1;
+ pr_info("TSC CPUID khz calibrate disabled\n");
+ } else if (!strncmp(str, "nomsr", 5)) {
+ calibrate_msr_disabled = 1;
+ pr_info("TSC msr calibrate disabled\n");
+ } else if (!strncmp(str, "noquick", 7)) {
+ calibrate_quick_disabled = 1;
+ pr_info("TSC quick calibrate disabled\n");
+ }
+
+ str += strcspn(str, ",");
+ while (*str == ',')
+ str++;
+ }
+ return 1;
+}
+
+__setup("tsc_calibrate=", setup_tsc_calibration_order);
+
#define MAX_RETRIES 5
#define SMI_TRESHOLD 50000
@@ -675,19 +708,25 @@ unsigned long native_calibrate_cpu(void)
 	unsigned long flags, latch, ms, fast_calibrate;
 	int hpet = is_hpet_enabled(), i, loopmin;
 
-	fast_calibrate = cpu_khz_from_cpuid();
-	if (fast_calibrate)
-		return fast_calibrate;
+	if (!calibrate_cpuid_khz_disabled) {
+		fast_calibrate = cpu_khz_from_cpuid();
+		if (fast_calibrate)
+			return fast_calibrate;
+	}
 
-	fast_calibrate = cpu_khz_from_msr();
-	if (fast_calibrate)
-		return fast_calibrate;
+	if (!calibrate_msr_disabled) {
+		fast_calibrate = cpu_khz_from_msr();
+		if (fast_calibrate)
+			return fast_calibrate;
+	}
 
-	local_irq_save(flags);
-	fast_calibrate = quick_pit_calibrate();
-	local_irq_restore(flags);
-	if (fast_calibrate)
-		return fast_calibrate;
+	if (!calibrate_quick_disabled) {
+		local_irq_save(flags);
+		fast_calibrate = quick_pit_calibrate();
+		local_irq_restore(flags);
+		if (fast_calibrate)
+			return fast_calibrate;
+	}
 
 	/*
 	 * Run 5 calibration loops to get the lowest frequency value
---
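
For reference, the new parameter takes a comma-separated list on the kernel command line. To give a made-up example, booting with

    tsc_calibrate=nocpuid,nomsr

should, going by the code above, make native_calibrate_cpu() skip the cpu_khz_from_cpuid() and cpu_khz_from_msr() fast paths and fall through to quick_pit_calibrate() and then the PIT/HPET calibration loops; "noquick" is the third accepted token. That's my reading of the patch as posted, not a tested claim for every platform.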