Message-ID: <tip-03821f451d2d2d7599061244734245be139014ea@git.kernel.org>
Date: Thu, 19 Jul 2018 15:34:28 -0700
From: tip-bot for Pavel Tatashin <tipbot@...or.com>
To: linux-tip-commits@...r.kernel.org
Cc: linux-kernel@...r.kernel.org, mingo@...nel.org, tglx@...utronix.de,
hpa@...or.com, pasha.tatashin@...cle.com
Subject: [tip:x86/timers] x86/tsc: Split native_calibrate_cpu() into early
and late parts
Commit-ID: 03821f451d2d2d7599061244734245be139014ea
Gitweb: https://git.kernel.org/tip/03821f451d2d2d7599061244734245be139014ea
Author: Pavel Tatashin <pasha.tatashin@...cle.com>
AuthorDate: Thu, 19 Jul 2018 16:55:44 -0400
Committer: Thomas Gleixner <tglx@...utronix.de>
CommitDate: Fri, 20 Jul 2018 00:02:44 +0200
x86/tsc: Split native_calibrate_cpu() into early and late parts
During early boot, the TSC and CPU frequency can be calibrated using the MSR, CPUID, and quick PIT calibration methods. The other methods (PIT/HPET/PMTIMER) become available only after ACPI is initialized.

Split native_calibrate_cpu() into an early and a late part so they can be called separately during early and late TSC calibration.
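[Editorial note, not part of the patch: the resulting fallback order can be seen in a minimal, self-contained C sketch. The stub return values and the standalone main() are illustrative assumptions only; the real helpers live in arch/x86/kernel/tsc.c, and the real native_calibrate_cpu_early() runs quick_pit_calibrate() with interrupts disabled, which this sketch omits.]

    /*
     * Sketch of the calibration fallback order after this patch, with the
     * kernel helpers stubbed out.  Return values are made up for illustration.
     */
    #include <stdio.h>

    static unsigned long cpu_khz_from_cpuid(void)  { return 0; }        /* fast, may fail */
    static unsigned long cpu_khz_from_msr(void)    { return 0; }        /* fast, may fail */
    static unsigned long quick_pit_calibrate(void) { return 2400000; }  /* fast, may fail */
    static unsigned long pit_hpet_ptimer_calibrate_cpu(void)
    {
            return 2399987;                         /* slow, needs ACPI */
    }

    /* Early path: only the methods usable before ACPI is initialized. */
    static unsigned long native_calibrate_cpu_early(void)
    {
            unsigned long khz = cpu_khz_from_cpuid();

            if (!khz)
                    khz = cpu_khz_from_msr();
            if (!khz)
                    khz = quick_pit_calibrate();
            return khz;
    }

    /* Late path: retry the early methods, then fall back to PIT/HPET/PMTIMER. */
    static unsigned long native_calibrate_cpu(void)
    {
            unsigned long khz = native_calibrate_cpu_early();

            if (!khz)
                    khz = pit_hpet_ptimer_calibrate_cpu();
            return khz;
    }

    int main(void)
    {
            printf("early: %lu kHz, late: %lu kHz\n",
                   native_calibrate_cpu_early(), native_calibrate_cpu());
            return 0;
    }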
Signed-off-by: Pavel Tatashin <pasha.tatashin@...cle.com>
Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
Cc: steven.sistare@...cle.com
Cc: daniel.m.jordan@...cle.com
Cc: linux@...linux.org.uk
Cc: schwidefsky@...ibm.com
Cc: heiko.carstens@...ibm.com
Cc: john.stultz@...aro.org
Cc: sboyd@...eaurora.org
Cc: hpa@...or.com
Cc: douly.fnst@...fujitsu.com
Cc: peterz@...radead.org
Cc: prarit@...hat.com
Cc: feng.tang@...el.com
Cc: pmladek@...e.com
Cc: gnomes@...rguk.ukuu.org.uk
Cc: linux-s390@...r.kernel.org
Cc: boris.ostrovsky@...cle.com
Cc: jgross@...e.com
Cc: pbonzini@...hat.com
Link: https://lkml.kernel.org/r/20180719205545.16512-26-pasha.tatashin@oracle.com
---
arch/x86/include/asm/tsc.h | 1 +
arch/x86/kernel/tsc.c | 54 ++++++++++++++++++++++++++++++----------------
2 files changed, 37 insertions(+), 18 deletions(-)
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index c4368ff73652..88140e4f2292 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -40,6 +40,7 @@ extern int unsynchronized_tsc(void);
extern int check_tsc_unstable(void);
extern void mark_tsc_async_resets(char *reason);
extern unsigned long native_calibrate_cpu(void);
+extern unsigned long native_calibrate_cpu_early(void);
extern unsigned long native_calibrate_tsc(void);
extern unsigned long long native_sched_clock_from_tsc(u64 tsc);
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 9277ae9b68b3..60586779b02c 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -680,30 +680,17 @@ static unsigned long cpu_khz_from_cpuid(void)
return eax_base_mhz * 1000;
}
-/**
- * native_calibrate_cpu - calibrate the cpu on boot
+/*
+ * calibrate cpu using pit, hpet, and ptimer methods. They are available
+ * later in boot after acpi is initialized.
*/
-unsigned long native_calibrate_cpu(void)
+static unsigned long pit_hpet_ptimer_calibrate_cpu(void)
{
u64 tsc1, tsc2, delta, ref1, ref2;
unsigned long tsc_pit_min = ULONG_MAX, tsc_ref_min = ULONG_MAX;
- unsigned long flags, latch, ms, fast_calibrate;
+ unsigned long flags, latch, ms;
int hpet = is_hpet_enabled(), i, loopmin;
- fast_calibrate = cpu_khz_from_cpuid();
- if (fast_calibrate)
- return fast_calibrate;
-
- fast_calibrate = cpu_khz_from_msr();
- if (fast_calibrate)
- return fast_calibrate;
-
- local_irq_save(flags);
- fast_calibrate = quick_pit_calibrate();
- local_irq_restore(flags);
- if (fast_calibrate)
- return fast_calibrate;
-
/*
* Run 5 calibration loops to get the lowest frequency value
* (the best estimate). We use two different calibration modes
@@ -846,6 +833,37 @@ unsigned long native_calibrate_cpu(void)
return tsc_pit_min;
}
+/**
+ * native_calibrate_cpu_early - can calibrate the cpu early in boot
+ */
+unsigned long native_calibrate_cpu_early(void)
+{
+ unsigned long flags, fast_calibrate = cpu_khz_from_cpuid();
+
+ if (!fast_calibrate)
+ fast_calibrate = cpu_khz_from_msr();
+ if (!fast_calibrate) {
+ local_irq_save(flags);
+ fast_calibrate = quick_pit_calibrate();
+ local_irq_restore(flags);
+ }
+ return fast_calibrate;
+}
+
+
+/**
+ * native_calibrate_cpu - calibrate the cpu
+ */
+unsigned long native_calibrate_cpu(void)
+{
+ unsigned long tsc_freq = native_calibrate_cpu_early();
+
+ if (!tsc_freq)
+ tsc_freq = pit_hpet_ptimer_calibrate_cpu();
+
+ return tsc_freq;
+}
+
void recalibrate_cpu_khz(void)
{
#ifndef CONFIG_SMP