Message-Id: <8f93706d9e645e949e24d24c095d2f6beb5cda0f.1434087075.git.luto@kernel.org>
Date: Fri, 12 Jun 2015 16:41:35 -0700
From: Andy Lutomirski <luto@...nel.org>
To: x86@...nel.org
Cc: Borislav Petkov <bp@...e.de>,
Peter Zijlstra <peterz@...radead.org>,
John Stultz <john.stultz@...aro.org>,
linux-kernel@...r.kernel.org, Len Brown <lenb@...nel.org>,
Huang Rui <ray.huang@....com>,
Denys Vlasenko <dvlasenk@...hat.com>,
Andy Lutomirski <luto@...nel.org>
Subject: [PATCH 03/17] x86/tsc: Replace rdtscll with native_read_tsc
rdtscll() was a wrapper around native_read_tsc(). Unwrap it.
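
The conversion is purely mechanical. For illustration only, a minimal
userspace sketch of the before/after call style (the inline-asm
native_read_tsc() below is a stand-in, not the kernel's definition in
arch/x86/include/asm/msr.h):

	#include <stdint.h>
	#include <stdio.h>

	/* Stand-in for the kernel's native_read_tsc(): read the TSC via RDTSC. */
	static inline uint64_t native_read_tsc(void)
	{
	        uint32_t lo, hi;
	        asm volatile("rdtsc" : "=a" (lo), "=d" (hi));
	        return ((uint64_t)hi << 32) | lo;
	}

	/* The old macro merely hid the assignment: */
	#define rdtscll(val) ((val) = native_read_tsc())

	int main(void)
	{
	        uint64_t before, after;

	        rdtscll(before);                /* old style */
	        after = native_read_tsc();      /* new style, as in this patch */

	        printf("tsc delta: %llu\n", (unsigned long long)(after - before));
	        return 0;
	}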
Signed-off-by: Andy Lutomirski <luto@...nel.org>
---
arch/x86/boot/compressed/aslr.c | 2 +-
arch/x86/include/asm/msr.h | 3 ---
arch/x86/include/asm/paravirt.h | 2 --
arch/x86/include/asm/tsc.h | 5 +----
arch/x86/kernel/apb_timer.c | 4 ++--
arch/x86/kernel/apic/apic.c | 8 ++++----
arch/x86/kernel/cpu/mcheck/mce.c | 4 ++--
arch/x86/kernel/espfix_64.c | 2 +-
arch/x86/kernel/hpet.c | 4 ++--
arch/x86/kernel/trace_clock.c | 2 +-
arch/x86/kernel/tsc.c | 4 ++--
arch/x86/kvm/vmx.c | 2 +-
arch/x86/lib/delay.c | 2 +-
drivers/thermal/intel_powerclamp.c | 4 ++--
tools/power/cpupower/debug/kernel/cpufreq-test_tsc.c | 4 ++--
15 files changed, 22 insertions(+), 30 deletions(-)
diff --git a/arch/x86/boot/compressed/aslr.c b/arch/x86/boot/compressed/aslr.c
index d7b1f655b3ef..ea33236190b1 100644
--- a/arch/x86/boot/compressed/aslr.c
+++ b/arch/x86/boot/compressed/aslr.c
@@ -82,7 +82,7 @@ static unsigned long get_random_long(void)
if (has_cpuflag(X86_FEATURE_TSC)) {
debug_putstr(" RDTSC");
- rdtscll(raw);
+ raw = native_read_tsc();
random ^= raw;
use_i8254 = false;
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 88711470af7f..8826780c80dd 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -181,9 +181,6 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
#define rdtscl(low) \
((low) = (u32)native_read_tsc())
-#define rdtscll(val) \
- ((val) = native_read_tsc())
-
#define rdpmc(counter, low, high) \
do { \
u64 _l = native_read_pmc((counter)); \
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index d143bfad45d7..5dd49e3f472e 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -185,8 +185,6 @@ do { \
low = (int)_l; \
} while (0)
-#define rdtscll(val) (val = paravirt_read_tsc())
-
static inline unsigned long long paravirt_sched_clock(void)
{
return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index 3da1cc1218ac..b4883902948b 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -21,15 +21,12 @@ extern void disable_TSC(void);
static inline cycles_t get_cycles(void)
{
- unsigned long long ret = 0;
-
#ifndef CONFIG_X86_TSC
if (!cpu_has_tsc)
return 0;
#endif
- rdtscll(ret);
- return ret;
+ return native_read_tsc();
}
extern void tsc_init(void);
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c
index 9fe111cc50f8..25efa534c4e4 100644
--- a/arch/x86/kernel/apb_timer.c
+++ b/arch/x86/kernel/apb_timer.c
@@ -263,7 +263,7 @@ static int apbt_clocksource_register(void)
/* Verify whether apbt counter works */
t1 = dw_apb_clocksource_read(clocksource_apbt);
- rdtscll(start);
+ start = native_read_tsc();
/*
* We don't know the TSC frequency yet, but waiting for
@@ -273,7 +273,7 @@ static int apbt_clocksource_register(void)
*/
do {
rep_nop();
- rdtscll(now);
+ now = native_read_tsc();
} while ((now - start) < 200000UL);
/* APBT is the only always on clocksource, it has to work! */
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index dcb52850a28f..51af1ed1ae2e 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -457,7 +457,7 @@ static int lapic_next_deadline(unsigned long delta,
{
u64 tsc;
- rdtscll(tsc);
+ tsc = native_read_tsc();
wrmsrl(MSR_IA32_TSC_DEADLINE, tsc + (((u64) delta) * TSC_DIVISOR));
return 0;
}
@@ -592,7 +592,7 @@ static void __init lapic_cal_handler(struct clock_event_device *dev)
unsigned long pm = acpi_pm_read_early();
if (cpu_has_tsc)
- rdtscll(tsc);
+ tsc = native_read_tsc();
switch (lapic_cal_loops++) {
case 0:
@@ -1209,7 +1209,7 @@ void setup_local_APIC(void)
long long max_loops = cpu_khz ? cpu_khz : 1000000;
if (cpu_has_tsc)
- rdtscll(tsc);
+ tsc = native_read_tsc();
if (disable_apic) {
disable_ioapic_support();
@@ -1293,7 +1293,7 @@ void setup_local_APIC(void)
}
if (queued) {
if (cpu_has_tsc && cpu_khz) {
- rdtscll(ntsc);
+ ntsc = native_read_tsc();
max_loops = (cpu_khz << 10) - (ntsc - tsc);
} else
max_loops--;
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index df919ff103c3..a5283d2d0094 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -125,7 +125,7 @@ void mce_setup(struct mce *m)
{
memset(m, 0, sizeof(struct mce));
m->cpu = m->extcpu = smp_processor_id();
- rdtscll(m->tsc);
+ m->tsc = native_read_tsc();
/* We hope get_seconds stays lockless */
m->time = get_seconds();
m->cpuvendor = boot_cpu_data.x86_vendor;
@@ -1784,7 +1784,7 @@ static void collect_tscs(void *data)
{
unsigned long *cpu_tsc = (unsigned long *)data;
- rdtscll(cpu_tsc[smp_processor_id()]);
+ cpu_tsc[smp_processor_id()] = native_read_tsc();
}
static int mce_apei_read_done;
diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
index f5d0730e7b08..334a2a9c034d 100644
--- a/arch/x86/kernel/espfix_64.c
+++ b/arch/x86/kernel/espfix_64.c
@@ -110,7 +110,7 @@ static void init_espfix_random(void)
*/
if (!arch_get_random_long(&rand)) {
/* The constant is an arbitrary large prime */
- rdtscll(rand);
+ rand = native_read_tsc();
rand *= 0xc345c6b72fd16123UL;
}
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index e2449cf38b06..ccf677cd9adc 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -734,7 +734,7 @@ static int hpet_clocksource_register(void)
/* Verify whether hpet counter works */
t1 = hpet_readl(HPET_COUNTER);
- rdtscll(start);
+ start = native_read_tsc();
/*
* We don't know the TSC frequency yet, but waiting for
@@ -744,7 +744,7 @@ static int hpet_clocksource_register(void)
*/
do {
rep_nop();
- rdtscll(now);
+ now = native_read_tsc();
} while ((now - start) < 200000UL);
if (t1 == hpet_readl(HPET_COUNTER)) {
diff --git a/arch/x86/kernel/trace_clock.c b/arch/x86/kernel/trace_clock.c
index 25b993729f9b..bd8f4d41bd56 100644
--- a/arch/x86/kernel/trace_clock.c
+++ b/arch/x86/kernel/trace_clock.c
@@ -15,7 +15,7 @@ u64 notrace trace_clock_x86_tsc(void)
u64 ret;
rdtsc_barrier();
- rdtscll(ret);
+ ret = native_read_tsc();
return ret;
}
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index e7710cd7ba00..e66f5dcaeb63 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -248,7 +248,7 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
data = cyc2ns_write_begin(cpu);
- rdtscll(tsc_now);
+ tsc_now = native_read_tsc();
ns_now = cycles_2_ns(tsc_now);
/*
@@ -290,7 +290,7 @@ u64 native_sched_clock(void)
}
/* read the Time Stamp Counter: */
- rdtscll(tsc_now);
+ tsc_now = native_read_tsc();
/* return the value in ns */
return cycles_2_ns(tsc_now);
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index e11dd59398f1..fcff42100948 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2236,7 +2236,7 @@ static u64 guest_read_tsc(void)
{
u64 host_tsc, tsc_offset;
- rdtscll(host_tsc);
+ host_tsc = native_read_tsc();
tsc_offset = vmcs_read64(TSC_OFFSET);
return host_tsc + tsc_offset;
}
diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c
index 39d6a3db0b96..9a52ad0c0758 100644
--- a/arch/x86/lib/delay.c
+++ b/arch/x86/lib/delay.c
@@ -100,7 +100,7 @@ void use_tsc_delay(void)
int read_current_timer(unsigned long *timer_val)
{
if (delay_fn == delay_tsc) {
- rdtscll(*timer_val);
+ *timer_val = native_read_tsc();
return 0;
}
return -1;
diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
index 725718e97a0b..933c5e599d1d 100644
--- a/drivers/thermal/intel_powerclamp.c
+++ b/drivers/thermal/intel_powerclamp.c
@@ -340,7 +340,7 @@ static bool powerclamp_adjust_controls(unsigned int target_ratio,
/* check result for the last window */
msr_now = pkg_state_counter();
- rdtscll(tsc_now);
+ tsc_now = native_read_tsc();
/* calculate pkg cstate vs tsc ratio */
if (!msr_last || !tsc_last)
@@ -482,7 +482,7 @@ static void poll_pkg_cstate(struct work_struct *dummy)
u64 val64;
msr_now = pkg_state_counter();
- rdtscll(tsc_now);
+ tsc_now = native_read_tsc();
jiffies_now = jiffies;
/* calculate pkg cstate vs tsc ratio */
diff --git a/tools/power/cpupower/debug/kernel/cpufreq-test_tsc.c b/tools/power/cpupower/debug/kernel/cpufreq-test_tsc.c
index 5224ee5b392d..f02b0c0bff9b 100644
--- a/tools/power/cpupower/debug/kernel/cpufreq-test_tsc.c
+++ b/tools/power/cpupower/debug/kernel/cpufreq-test_tsc.c
@@ -81,11 +81,11 @@ static int __init cpufreq_test_tsc(void)
printk(KERN_DEBUG "start--> \n");
then = read_pmtmr();
- rdtscll(then_tsc);
+ then_tsc = native_read_tsc();
for (i=0;i<20;i++) {
mdelay(100);
now = read_pmtmr();
- rdtscll(now_tsc);
+ now_tsc = native_read_tsc();
diff = (now - then) & 0xFFFFFF;
diff_tsc = now_tsc - then_tsc;
printk(KERN_DEBUG "t1: %08u t2: %08u diff_pmtmr: %08u diff_tsc: %016llu\n", then, now, diff, diff_tsc);
--
2.4.2