[PATCH] x86_64: print out MSRs

Add a "show_msr=" kernel command line option that dumps a set of MSR
ranges as each CPU is brought up: show_msr=1 prints them for the BSP
only, show_msr=32 for all 32 CPUs.

Signed-off-by: Yinghai Lu
---
 arch/x86/kernel/cpu/common_64.c |   46 ++++++++++++++++++++++++++++++++++++++++
 include/asm-x86/msr.h           |   23 ++++++++++++++++++++
 2 files changed, 69 insertions(+)

Index: linux-2.6/arch/x86/kernel/cpu/common_64.c
===================================================================
--- linux-2.6.orig/arch/x86/kernel/cpu/common_64.c
+++ linux-2.6/arch/x86/kernel/cpu/common_64.c
@@ -430,6 +430,49 @@ static __init int setup_noclflush(char *
 }
 __setup("noclflush", setup_noclflush);
 
+struct msr_range {
+	unsigned	min;
+	unsigned	max;
+};
+
+static struct msr_range msr_range_array[] __cpuinitdata = {
+	{ 0x00000000, 0x00000418},
+	{ 0xc0000000, 0xc000040b},
+	{ 0xc0010000, 0xc0010142},
+	{ 0xc0011000, 0xc001103b},
+};
+
+static void __cpuinit print_cpu_msr(void)
+{
+	unsigned index;
+	u64 val;
+	int i;
+	unsigned index_min, index_max;
+
+	for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) {
+		index_min = msr_range_array[i].min;
+		index_max = msr_range_array[i].max;
+		for (index = index_min; index < index_max; index++) {
+			if (rdmsrl_amd_safe(index, &val))
+				continue;
+			printk(KERN_INFO " MSR%08x: %016llx\n", index, val);
+		}
+	}
+}
+
+static int show_msr __cpuinitdata;
+static __init int setup_show_msr(char *arg)
+{
+	int num;
+
+	get_option(&arg, &num);
+
+	if (num > 0)
+		show_msr = num;
+	return 1;
+}
+__setup("show_msr=", setup_show_msr);
+
 void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
 {
 	if (c->x86_model_id[0])
@@ -439,6 +482,9 @@ void __cpuinit print_cpu_info(struct cpu
 		printk(KERN_CONT " stepping %02x\n", c->x86_mask);
 	else
 		printk(KERN_CONT "\n");
+
+	if (c->cpu_index < show_msr)
+		print_cpu_msr();
 }
 
 static __init int setup_disablecpuid(char *arg)
Index: linux-2.6/include/asm-x86/msr.h
===================================================================
--- linux-2.6.orig/include/asm-x86/msr.h
+++ linux-2.6/include/asm-x86/msr.h
@@ -63,6 +63,22 @@ static inline unsigned long long native_
 	return EAX_EDX_VAL(val, low, high);
 }
 
+static inline unsigned long long native_read_msr_amd_safe(unsigned int msr,
+							   int *err)
+{
+	DECLARE_ARGS(val, low, high);
+
+	asm volatile("2: rdmsr ; xor %[err],%[err]\n"
+		     "1:\n\t"
+		     ".section .fixup,\"ax\"\n\t"
+		     "3: mov %[fault],%[err] ; jmp 1b\n\t"
+		     ".previous\n\t"
+		     _ASM_EXTABLE(2b, 3b)
+		     : [err] "=r" (*err), EAX_EDX_RET(val, low, high)
+		     : "c" (msr), "D" (0x9c5a203a), [fault] "i" (-EFAULT));
+	return EAX_EDX_VAL(val, low, high);
+}
+
 static inline void native_write_msr(unsigned int msr,
 				    unsigned low, unsigned high)
 {
@@ -158,6 +174,13 @@ static inline int rdmsrl_safe(unsigned m
 	*p = native_read_msr_safe(msr, &err);
 	return err;
 }
+static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
+{
+	int err;
+
+	*p = native_read_msr_amd_safe(msr, &err);
+	return err;
+}
 
 #define rdtscl(low)						\
 	((low) = (u32)native_read_tsc())
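
Not part of the patch, but for cross-checking the dmesg output from user
space, here is a minimal sketch that walks the first of the same MSR
ranges through the generic msr character device. It assumes
CONFIG_X86_MSR (msr.ko) and root; /dev/cpu/0/msr corresponds to the BSP,
i.e. what show_msr=1 prints. Note that the generic device issues a plain
RDMSR without the 0x9c5a203a passcode that native_read_msr_amd_safe()
loads into %edi, so some of the AMD-specific ranges may fail with EIO
here even though the patched kernel can dump them.

/* user-space sketch: read MSRs 0x00000000 - 0x00000417 via the msr driver */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint64_t val;
	unsigned int index;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);	/* cpu 0 = BSP */

	if (fd < 0) {
		perror("open /dev/cpu/0/msr (msr.ko loaded?)");
		return 1;
	}
	for (index = 0x00000000; index < 0x00000418; index++) {
		/* the pread offset selects the MSR; unimplemented ones return EIO */
		if (pread(fd, &val, sizeof(val), index) != sizeof(val))
			continue;
		printf(" MSR%08x: %016llx\n", index, (unsigned long long)val);
	}
	close(fd);
	return 0;
}

The in-kernel path needs the _safe fixup variant because RDMSR on an
unimplemented MSR raises #GP: the _ASM_EXTABLE entry redirects the fault
to the fixup stub, which stores -EFAULT in *err instead of oopsing, and
print_cpu_msr() simply skips such indices.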