Message-Id: <1426811369-24565-2-git-send-email-andi@firstfloor.org>
Date: Thu, 19 Mar 2015 17:29:27 -0700
From: Andi Kleen <andi@...stfloor.org>
To: x86@...nel.org
Cc: linux-kernel@...r.kernel.org, peterz@...radead.org,
Andi Kleen <ak@...ux.intel.com>
Subject: [PATCH 1/3] x86: Move msr accesses out of line
From: Andi Kleen <ak@...ux.intel.com>

To add trace points to MSR accesses we need to include
linux/tracepoint.h. Unfortunately this causes hellish include loops
when combined with the MSR inlines in asm/msr.h, which is included
all over the tree. I tried to fix several of them, but eventually
gave up.
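
For illustration, once the accessor lives in arch/x86/lib/msr.c the
tracepoint from the next patch becomes a plain C call. A rough sketch,
assuming a read_msr trace event that the next patch (not this one)
defines, and reusing the DECLARE_ARGS/EAX_EDX_* helpers from asm/msr.h:

    /* Sketch only: trace_read_msr is an assumed event name */
    unsigned long long native_read_msr(unsigned int msr)
    {
            DECLARE_ARGS(val, low, high);

            asm volatile("rdmsr" : EAX_EDX_RET(val, low, high) : "c" (msr));
            trace_read_msr(msr, EAX_EDX_VAL(val, low, high));
            return EAX_EDX_VAL(val, low, high);
    }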

This patch moves the MSR functions out of line. An MSR access
typically takes 40-100 cycles or even longer, while a function call
costs only a few cycles, so the additional call is not significant.

Kernel text size is essentially neutral (24 bytes of text):

   text	   data	    bss	    dec	    hex	filename
11852945 1671656 1822720 15347321 ea2e79 vmlinux-no-msr
11852969 1671656 1822720 15347345 ea2e91 vmlinux-msr

As requested, some benchmarking of the difference versus inline MSR
access (including the trace points from the next patch); a rough
userspace sketch of such a measurement loop follows the numbers
below. The absolute differences are fairly low: 6-8 cycles for out of
line + trace point, or 6-7% on Haswell. On Avoton the percentages are
higher because the base costs are lower, but the absolute cycle
deltas are just as small and in the same range.

I think it's reasonable to spend 6-8 cycles per call for much better
debuggability. In fact, looking at the traces has already exposed a
number of opportunities to optimize away unnecessary accesses, which
should give much larger gains.

haswell:
    wrmsr: 136 cycles out of line vs. 128 cycles inline  (6%)
    rdmsr:  90 cycles out of line vs.  84 cycles inline  (7%)
avoton:
    wrmsr:  68 cycles out of line vs.  54 cycles inline  (20%)
    rdmsr:  60 cycles out of line vs.  44 cycles inline  (26%)
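
A minimal userspace sketch of this kind of cycle-counting loop (it
goes through the msr driver, so it measures syscall plus rdmsr and is
only useful for relative comparisons; MSR 0x10, the TSC, is an
arbitrary safe read target; run as root with the msr module loaded):

    #include <fcntl.h>
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <x86intrin.h>          /* __rdtsc() */

    #define MSR_IA32_TSC    0x10
    #define ITERATIONS      100000

    int main(void)
    {
            uint64_t val, start, end;
            int i, fd = open("/dev/cpu/0/msr", O_RDONLY);

            if (fd < 0) {
                    perror("open /dev/cpu/0/msr");
                    return 1;
            }
            /* One checked read up front, then time the hot loop */
            if (pread(fd, &val, sizeof(val), MSR_IA32_TSC) != sizeof(val)) {
                    perror("rdmsr via /dev/cpu/0/msr");
                    return 1;
            }
            start = __rdtsc();
            for (i = 0; i < ITERATIONS; i++)
                    pread(fd, &val, sizeof(val), MSR_IA32_TSC);
            end = __rdtsc();
            printf("%" PRIu64 " cycles/read (incl. syscall overhead)\n",
                   (end - start) / ITERATIONS);
            close(fd);
            return 0;
    }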

Signed-off-by: Andi Kleen <ak@...ux.intel.com>
---
arch/x86/include/asm/msr.h | 51 ++++----------------------------------------
arch/x86/lib/msr.c | 53 ++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 57 insertions(+), 47 deletions(-)
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index de36f22..99d6864 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -57,53 +57,10 @@ static inline unsigned long long native_read_tscp(unsigned int *aux)
#define EAX_EDX_RET(val, low, high) "=A" (val)
#endif
-static inline unsigned long long native_read_msr(unsigned int msr)
-{
- DECLARE_ARGS(val, low, high);
-
- asm volatile("rdmsr" : EAX_EDX_RET(val, low, high) : "c" (msr));
- return EAX_EDX_VAL(val, low, high);
-}
-
-static inline unsigned long long native_read_msr_safe(unsigned int msr,
- int *err)
-{
- DECLARE_ARGS(val, low, high);
-
- asm volatile("2: rdmsr ; xor %[err],%[err]\n"
- "1:\n\t"
- ".section .fixup,\"ax\"\n\t"
- "3: mov %[fault],%[err] ; jmp 1b\n\t"
- ".previous\n\t"
- _ASM_EXTABLE(2b, 3b)
- : [err] "=r" (*err), EAX_EDX_RET(val, low, high)
- : "c" (msr), [fault] "i" (-EIO));
- return EAX_EDX_VAL(val, low, high);
-}
-
-static inline void native_write_msr(unsigned int msr,
- unsigned low, unsigned high)
-{
- asm volatile("wrmsr" : : "c" (msr), "a"(low), "d" (high) : "memory");
-}
-
-/* Can be uninlined because referenced by paravirt */
-notrace static inline int native_write_msr_safe(unsigned int msr,
- unsigned low, unsigned high)
-{
- int err;
- asm volatile("2: wrmsr ; xor %[err],%[err]\n"
- "1:\n\t"
- ".section .fixup,\"ax\"\n\t"
- "3: mov %[fault],%[err] ; jmp 1b\n\t"
- ".previous\n\t"
- _ASM_EXTABLE(2b, 3b)
- : [err] "=a" (err)
- : "c" (msr), "0" (low), "d" (high),
- [fault] "i" (-EIO)
- : "memory");
- return err;
-}
+extern unsigned long long native_read_msr(unsigned int msr);
+extern unsigned long long native_read_msr_safe(unsigned int msr, int *err);
+extern void native_write_msr(unsigned int msr, unsigned low, unsigned high);
+extern int native_write_msr_safe(unsigned int msr, unsigned low, unsigned high);
extern unsigned long long native_read_tsc(void);
diff --git a/arch/x86/lib/msr.c b/arch/x86/lib/msr.c
index 4362373..7eed044 100644
--- a/arch/x86/lib/msr.c
+++ b/arch/x86/lib/msr.c
@@ -108,3 +108,56 @@ int msr_clear_bit(u32 msr, u8 bit)
{
return __flip_bit(msr, bit, false);
}
+
+unsigned long long native_read_msr(unsigned int msr)
+{
+ DECLARE_ARGS(val, low, high);
+
+ asm volatile("rdmsr" : EAX_EDX_RET(val, low, high) : "c" (msr));
+ return EAX_EDX_VAL(val, low, high);
+}
+EXPORT_SYMBOL(native_read_msr);
+
+unsigned long long native_read_msr_safe(unsigned int msr,
+ int *err)
+{
+ DECLARE_ARGS(val, low, high);
+
+ asm volatile("2: rdmsr ; xor %[err],%[err]\n"
+ "1:\n\t"
+ ".section .fixup,\"ax\"\n\t"
+ "3: mov %[fault],%[err] ; jmp 1b\n\t"
+ ".previous\n\t"
+ _ASM_EXTABLE(2b, 3b)
+ : [err] "=r" (*err), EAX_EDX_RET(val, low, high)
+ : "c" (msr), [fault] "i" (-EIO));
+ return EAX_EDX_VAL(val, low, high);
+}
+EXPORT_SYMBOL(native_read_msr_safe);
+
+void native_write_msr(unsigned int msr,
+ unsigned low, unsigned high)
+{
+ asm volatile("wrmsr" : : "c" (msr), "a"(low), "d" (high) : "memory");
+}
+EXPORT_SYMBOL(native_write_msr);
+
+/* Can be uninlined because referenced by paravirt */
+notrace int native_write_msr_safe(unsigned int msr,
+ unsigned low, unsigned high)
+{
+ int err;
+
+ asm volatile("2: wrmsr ; xor %[err],%[err]\n"
+ "1:\n\t"
+ ".section .fixup,\"ax\"\n\t"
+ "3: mov %[fault],%[err] ; jmp 1b\n\t"
+ ".previous\n\t"
+ _ASM_EXTABLE(2b, 3b)
+ : [err] "=a" (err)
+ : "c" (msr), "0" (low), "d" (high),
+ [fault] "i" (-EIO)
+ : "memory");
+ return err;
+}
+EXPORT_SYMBOL(native_write_msr_safe);
--
1.9.3