lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date:	Sat, 22 Aug 2009 18:37:50 +0200
From:	Borislav Petkov <petkovbb@...glemail.com>
To:	"H. Peter Anvin" <hpa@...or.com>
Cc:	Ingo Molnar <mingo@...e.hu>, mingo@...hat.com,
	linux-kernel@...r.kernel.org, kjwinchester@...il.com,
	tglx@...utronix.de, borislav.petkov@....com,
	linux-tip-commits@...r.kernel.org
Subject: Re: [tip:x86/urgent] x86, AMD: Disable wrongly set
 X86_FEATURE_LAHF_LM CPUID bit

On Fri, Aug 21, 2009 at 10:40:18AM -0700, H. Peter Anvin wrote:
> If we're going to modify the paravirt_crap for yet another MSR
> operation, then let's at least make it something like "write_msr_edi"
> and have it take a parameter for the EDI value.  Perhaps we should let
> it set EBX and ESI as well (and the same for the read operation, even
> though it already exists.)

Ok, how about making all rdmsr* variants indirectly call into a
native_read_msr_safe_reg() helper which takes edi and esi as additional
arguments? There was no room for ebx since the largest paravirt call
macro is PVOP_CALL4, which takes only 4 args, and I am too lazy, of
course, to implement a PVOP_CALL5 thing :o)

The wrmsr variants could be done similarly.

Suggestions, comments?

--
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 48ad9d2..2fec363 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -55,8 +55,10 @@ static inline unsigned long long native_read_msr(unsigned int msr)
 	return EAX_EDX_VAL(val, low, high);
 }
 
-static inline unsigned long long native_read_msr_safe(unsigned int msr,
-						      int *err)
+static inline unsigned long long native_read_msr_safe_reg(unsigned int msr,
+							  int *err,
+							  unsigned int edi,
+							  unsigned int esi)
 {
 	DECLARE_ARGS(val, low, high);
 
@@ -67,24 +69,15 @@ static inline unsigned long long native_read_msr_safe(unsigned int msr,
 		     ".previous\n\t"
 		     _ASM_EXTABLE(2b, 3b)
 		     : [err] "=r" (*err), EAX_EDX_RET(val, low, high)
-		     : "c" (msr), [fault] "i" (-EFAULT));
+		     : "c" (msr), [fault] "i" (-EFAULT), "D" (edi), "S" (esi));
+
 	return EAX_EDX_VAL(val, low, high);
 }
 
-static inline unsigned long long native_read_msr_amd_safe(unsigned int msr,
+static inline unsigned long long native_read_msr_safe(unsigned int msr,
 						      int *err)
 {
-	DECLARE_ARGS(val, low, high);
-
-	asm volatile("2: rdmsr ; xor %0,%0\n"
-		     "1:\n\t"
-		     ".section .fixup,\"ax\"\n\t"
-		     "3:  mov %3,%0 ; jmp 1b\n\t"
-		     ".previous\n\t"
-		     _ASM_EXTABLE(2b, 3b)
-		     : "=r" (*err), EAX_EDX_RET(val, low, high)
-		     : "c" (msr), "D" (0x9c5a203a), "i" (-EFAULT));
-	return EAX_EDX_VAL(val, low, high);
+	return native_read_msr_safe_reg(msr, err, 0, 0);
 }
 
 static inline void native_write_msr(unsigned int msr,
@@ -181,11 +174,12 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
 	*p = native_read_msr_safe(msr, &err);
 	return err;
 }
+
 static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
 {
 	int err;
 
-	*p = native_read_msr_amd_safe(msr, &err);
+	*p = native_read_msr_safe_reg(msr, &err, 0x9c5a203a, 0);
 	return err;
 }
 
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 4fb37c8..d49e1ee 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -166,8 +166,9 @@ struct pv_cpu_ops {
 
 	/* MSR, PMC and TSR operations.
 	   err = 0/-EFAULT.  wrmsr returns 0/-EFAULT. */
-	u64 (*read_msr_amd)(unsigned int msr, int *err);
 	u64 (*read_msr)(unsigned int msr, int *err);
+	u64 (*read_msr_reg)(unsigned int msr, int *err, unsigned edi,
+			    unsigned esi);
 	int (*write_msr)(unsigned int msr, unsigned low, unsigned high);
 
 	u64 (*read_tsc)(void);
@@ -820,9 +821,10 @@ static inline u64 paravirt_read_msr(unsigned msr, int *err)
 {
 	return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
 }
-static inline u64 paravirt_read_msr_amd(unsigned msr, int *err)
+static inline u64 paravirt_read_msr_reg(unsigned msr, int *err, unsigned edi,
+					unsigned esi)
 {
-	return PVOP_CALL2(u64, pv_cpu_ops.read_msr_amd, msr, err);
+	return PVOP_CALL4(u64, pv_cpu_ops.read_msr_reg, msr, err, edi, esi);
 }
 static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
 {
@@ -873,7 +875,7 @@ static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
 {
 	int err;
 
-	*p = paravirt_read_msr_amd(msr, &err);
+	*p = paravirt_read_msr_reg(msr, &err, 0x9c5a203a, 0);
 	return err;
 }
 
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 70ec9b9..ecac58d 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -362,7 +362,7 @@ struct pv_cpu_ops pv_cpu_ops = {
 #endif
 	.wbinvd = native_wbinvd,
 	.read_msr = native_read_msr_safe,
-	.read_msr_amd = native_read_msr_amd_safe,
+	.read_msr_reg = native_read_msr_safe_reg,
 	.write_msr = native_write_msr_safe,
 	.read_tsc = native_read_tsc,
 	.read_pmc = native_read_pmc,

-- 
Regards/Gruss,
    Boris.
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ