Message-Id: <26567b216aae70e795938f4b567eace5a0eb90ba.1459605520.git.luto@kernel.org>
Date: Sat, 2 Apr 2016 07:01:37 -0700
From: Andy Lutomirski <luto@...nel.org>
To: X86 ML <x86@...nel.org>
Cc: Paolo Bonzini <pbonzini@...hat.com>,
Peter Zijlstra <peterz@...radead.org>,
KVM list <kvm@...r.kernel.org>,
Arjan van de Ven <arjan@...ux.intel.com>,
xen-devel <Xen-devel@...ts.xen.org>,
linux-kernel@...r.kernel.org,
Linus Torvalds <torvalds@...ux-foundation.org>,
Andrew Morton <akpm@...ux-foundation.org>,
Borislav Petkov <bp@...en8.de>,
Andy Lutomirski <luto@...nel.org>
Subject: [PATCH v5 6/9] x86/msr: Carry on after a non-"safe" MSR access fails

This demotes an OOPS and likely panic due to a failed non-"safe" MSR
access to a WARN_ONCE and, for RDMSR, a return value of zero.

To be clear, this type of failure should *not* happen. This patch
exists to minimize the chance of nasty undebuggable failures
happening when a CONFIG_PARAVIRT=y bug in the non-"safe" MSR helpers
gets fixed.

Signed-off-by: Andy Lutomirski <luto@...nel.org>
---
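Illustration only, not part of the commit: with this change, a stray
read of a non-existent MSR through the non-"safe" helpers should warn
once and return zero rather than oops.  A minimal sketch as a throwaway
test module; the module name and the MSR index 0x12345678 are made up,
and it assumes the access reaches native_read_msr():

/*
 * Hypothetical demo module, not part of this patch.  Reading a bogus
 * MSR through the non-"safe" helper should now WARN once and yield 0.
 */
#include <linux/module.h>
#include <asm/msr.h>

static int __init bogus_msr_demo_init(void)
{
	u64 val;

	/*
	 * Expected: a one-time "unchecked MSR access error: RDMSR from
	 * 0x12345678" warning in dmesg, and val == 0, instead of an OOPS.
	 */
	rdmsrl(0x12345678, val);
	pr_info("bogus rdmsrl() returned %llu\n", val);

	return 0;
}
module_init(bogus_msr_demo_init);

MODULE_LICENSE("GPL");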
 arch/x86/include/asm/msr.h | 10 ++++++++--
 arch/x86/mm/extable.c      | 27 +++++++++++++++++++++++++++
 2 files changed, 35 insertions(+), 2 deletions(-)

diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 93fb7c1cffda..1487054a1a70 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -92,7 +92,10 @@ static inline unsigned long long native_read_msr(unsigned int msr)
{
DECLARE_ARGS(val, low, high);

- asm volatile("rdmsr" : EAX_EDX_RET(val, low, high) : "c" (msr));
+ asm volatile("1: rdmsr\n"
+ "2:\n"
+ _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_rdmsr_unsafe)
+ : EAX_EDX_RET(val, low, high) : "c" (msr));
if (msr_tracepoint_active(__tracepoint_read_msr))
do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), 0);
return EAX_EDX_VAL(val, low, high);
@@ -119,7 +122,10 @@ static inline unsigned long long native_read_msr_safe(unsigned int msr,
static inline void native_write_msr(unsigned int msr,
unsigned low, unsigned high)
{
- asm volatile("wrmsr" : : "c" (msr), "a"(low), "d" (high) : "memory");
+ asm volatile("1: wrmsr\n"
+ "2:\n"
+ _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_wrmsr_unsafe)
+ : : "c" (msr), "a"(low), "d" (high) : "memory");
if (msr_tracepoint_active(__tracepoint_read_msr))
do_trace_write_msr(msr, ((u64)high << 32 | low), 0);
}
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
index 50dfe438bd91..98b5f45d9d79 100644
--- a/arch/x86/mm/extable.c
+++ b/arch/x86/mm/extable.c
@@ -50,6 +50,33 @@ bool ex_handler_ext(const struct exception_table_entry *fixup,
}
EXPORT_SYMBOL(ex_handler_ext);

+bool ex_handler_rdmsr_unsafe(const struct exception_table_entry *fixup,
+ struct pt_regs *regs, int trapnr)
+{
+ WARN_ONCE(1, "unchecked MSR access error: RDMSR from 0x%x\n",
+ (unsigned int)regs->cx);
+
+ /* Pretend that the read succeeded and returned 0. */
+ regs->ip = ex_fixup_addr(fixup);
+ regs->ax = 0;
+ regs->dx = 0;
+ return true;
+}
+EXPORT_SYMBOL(ex_handler_rdmsr_unsafe);
+
+bool ex_handler_wrmsr_unsafe(const struct exception_table_entry *fixup,
+ struct pt_regs *regs, int trapnr)
+{
+ WARN_ONCE(1, "unchecked MSR access error: WRMSR to 0x%x (tried to write 0x%08x%08x)\n",
+ (unsigned int)regs->cx,
+ (unsigned int)regs->dx, (unsigned int)regs->ax);
+
+ /* Pretend that the write succeeded. */
+ regs->ip = ex_fixup_addr(fixup);
+ return true;
+}
+EXPORT_SYMBOL(ex_handler_wrmsr_unsafe);
+
bool ex_has_fault_handler(unsigned long ip)
{
const struct exception_table_entry *e;
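Also not part of the patch: a rough sketch of the generic pattern the
two handlers above follow, for anyone who has not seen the new extable
handler machinery yet.  "rdpmc" is only a stand-in for some instruction
that may #GP, and the wrapper/handler names are made up;
_ASM_EXTABLE_HANDLE, the handler signature, and ex_fixup_addr() are the
ones used in the diff (ex_fixup_addr() is local to
arch/x86/mm/extable.c, so a real handler would live there):

/* Sketch only: hypothetical handler, same shape as the two above. */
bool ex_handler_rdpmc_unsafe(const struct exception_table_entry *fixup,
			     struct pt_regs *regs, int trapnr)
{
	WARN_ONCE(1, "rdpmc for counter 0x%x faulted\n",
		  (unsigned int)regs->cx);

	/* Skip the faulting instruction and fake a result of zero. */
	regs->ip = ex_fixup_addr(fixup);	/* i.e. the "2:" label below */
	regs->ax = 0;
	regs->dx = 0;
	return true;				/* fault handled, no oops */
}

/* Use site: tag the faulting instruction with the custom handler. */
static inline u64 rdpmc_or_zero(unsigned int counter)
{
	unsigned int low, high;

	asm volatile("1: rdpmc\n"
		     "2:\n"
		     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_rdpmc_unsafe)
		     : "=a" (low), "=d" (high) : "c" (counter));

	return (u64)high << 32 | low;
}

The point is that the fixup policy lives with the handler rather than
at the call site, which is why the msr.h hunks only need the two extra
labels and the extable annotation.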
--
2.5.5