[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <a48af224500470a43261b3e865415dcf3a539082.1464129798.git.luto@kernel.org>
Date: Tue, 24 May 2016 15:48:39 -0700
From: Andy Lutomirski <luto@...nel.org>
To: x86@...nel.org
Cc: linux-kernel@...r.kernel.org, Borislav Petkov <bp@...en8.de>,
Kees Cook <keescook@...omium.org>,
Brian Gerst <brgerst@...il.com>,
Andy Lutomirski <luto@...nel.org>
Subject: [PATCH 2/7] x86/extable: Pass error_code and an extra unsigned long to exhandlers
Exception handlers might want to know the error code and, for some
exceptions, some other auxiliary info. Pass in the error code and
an 'extra' parameter. For page faults, 'extra' is cr2.
The kprobe code is incomprehensible to me. For kprobe fixups, just
pass zeroes.
Signed-off-by: Andy Lutomirski <luto@...nel.org>
---
arch/x86/include/asm/uaccess.h | 3 ++-
arch/x86/kernel/cpu/mcheck/mce.c | 2 +-
arch/x86/kernel/kprobes/core.c | 6 ++++--
arch/x86/kernel/traps.c | 6 +++---
arch/x86/mm/extable.c | 31 ++++++++++++++++++++-----------
arch/x86/mm/fault.c | 2 +-
6 files changed, 31 insertions(+), 19 deletions(-)
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index d794fd1f582f..5b65b2110167 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -108,7 +108,8 @@ struct exception_table_entry {
#define ARCH_HAS_RELATIVE_EXTABLE
-extern int fixup_exception(struct pt_regs *regs, int trapnr);
+extern int fixup_exception(struct pt_regs *regs, int trapnr,
+ unsigned long error_code, unsigned long extra);
extern bool ex_has_fault_handler(unsigned long ip);
extern void early_fixup_exception(struct pt_regs *regs, int trapnr);
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index f0c921b03e42..e4321f167947 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -1175,7 +1175,7 @@ out:
local_irq_disable();
ist_end_non_atomic();
} else {
- if (!fixup_exception(regs, X86_TRAP_MC))
+ if (!fixup_exception(regs, X86_TRAP_MC, 0, 0))
mce_panic("Failed kernel mode recovery", &m, NULL);
}
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 38cf7a741250..4ec4de0d79f7 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -988,9 +988,11 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
/*
* In case the user-specified fault handler returned
- * zero, try to fix up.
+ * zero, try to fix up. (This is called via die notifiers,
+ * and die notifiers are a mess. Just pass zero for the
+ * error_code and extra info.)
*/
- if (fixup_exception(regs, trapnr))
+ if (fixup_exception(regs, trapnr, 0, 0))
return 1;
/*
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index d1590486204a..563b72912cbe 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -186,7 +186,7 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
}
if (!user_mode(regs)) {
- if (!fixup_exception(regs, trapnr)) {
+ if (!fixup_exception(regs, trapnr, error_code, 0)) {
tsk->thread.error_code = error_code;
tsk->thread.trap_nr = trapnr;
die(str, regs, error_code);
@@ -438,7 +438,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
tsk = current;
if (!user_mode(regs)) {
- if (fixup_exception(regs, X86_TRAP_GP))
+ if (fixup_exception(regs, X86_TRAP_GP, error_code, 0))
return;
tsk->thread.error_code = error_code;
@@ -742,7 +742,7 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr)
cond_local_irq_enable(regs);
if (!user_mode(regs)) {
- if (!fixup_exception(regs, trapnr)) {
+ if (!fixup_exception(regs, trapnr, error_code, 0)) {
task->thread.error_code = error_code;
task->thread.trap_nr = trapnr;
die(str, regs, error_code);
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
index 4bb53b89f3c5..c1a25aca0365 100644
--- a/arch/x86/mm/extable.c
+++ b/arch/x86/mm/extable.c
@@ -3,7 +3,8 @@
#include <asm/traps.h>
typedef bool (*ex_handler_t)(const struct exception_table_entry *,
- struct pt_regs *, int);
+ struct pt_regs *, int,
+ unsigned long, unsigned long);
static inline unsigned long
ex_fixup_addr(const struct exception_table_entry *x)
@@ -17,7 +18,8 @@ ex_fixup_handler(const struct exception_table_entry *x)
}
bool ex_handler_default(const struct exception_table_entry *fixup,
- struct pt_regs *regs, int trapnr)
+ struct pt_regs *regs, int trapnr,
+ unsigned long error_code, unsigned long extra)
{
regs->ip = ex_fixup_addr(fixup);
return true;
@@ -25,7 +27,8 @@ bool ex_handler_default(const struct exception_table_entry *fixup,
EXPORT_SYMBOL(ex_handler_default);
bool ex_handler_fault(const struct exception_table_entry *fixup,
- struct pt_regs *regs, int trapnr)
+ struct pt_regs *regs, int trapnr,
+ unsigned long error_code, unsigned long extra)
{
regs->ip = ex_fixup_addr(fixup);
regs->ax = trapnr;
@@ -34,7 +37,8 @@ bool ex_handler_fault(const struct exception_table_entry *fixup,
EXPORT_SYMBOL_GPL(ex_handler_fault);
bool ex_handler_ext(const struct exception_table_entry *fixup,
- struct pt_regs *regs, int trapnr)
+ struct pt_regs *regs, int trapnr,
+ unsigned long error_code, unsigned long extra)
{
/* Special hack for uaccess_err */
current_thread_info()->uaccess_err = 1;
@@ -44,7 +48,8 @@ bool ex_handler_ext(const struct exception_table_entry *fixup,
EXPORT_SYMBOL(ex_handler_ext);
bool ex_handler_rdmsr_unsafe(const struct exception_table_entry *fixup,
- struct pt_regs *regs, int trapnr)
+ struct pt_regs *regs, int trapnr,
+ unsigned long error_code, unsigned long extra)
{
WARN_ONCE(1, "unchecked MSR access error: RDMSR from 0x%x\n",
(unsigned int)regs->cx);
@@ -58,7 +63,8 @@ bool ex_handler_rdmsr_unsafe(const struct exception_table_entry *fixup,
EXPORT_SYMBOL(ex_handler_rdmsr_unsafe);
bool ex_handler_wrmsr_unsafe(const struct exception_table_entry *fixup,
- struct pt_regs *regs, int trapnr)
+ struct pt_regs *regs, int trapnr,
+ unsigned long error_code, unsigned long extra)
{
WARN_ONCE(1, "unchecked MSR access error: WRMSR to 0x%x (tried to write 0x%08x%08x)\n",
(unsigned int)regs->cx,
@@ -71,12 +77,13 @@ bool ex_handler_wrmsr_unsafe(const struct exception_table_entry *fixup,
EXPORT_SYMBOL(ex_handler_wrmsr_unsafe);
bool ex_handler_clear_fs(const struct exception_table_entry *fixup,
- struct pt_regs *regs, int trapnr)
+ struct pt_regs *regs, int trapnr,
+ unsigned long error_code, unsigned long extra)
{
if (static_cpu_has(X86_BUG_NULL_SEG))
asm volatile ("mov %0, %%fs" : : "rm" (__USER_DS));
asm volatile ("mov %0, %%fs" : : "rm" (0));
- return ex_handler_default(fixup, regs, trapnr);
+ return ex_handler_default(fixup, regs, trapnr, error_code, extra);
}
EXPORT_SYMBOL(ex_handler_clear_fs);
@@ -93,7 +100,8 @@ bool ex_has_fault_handler(unsigned long ip)
return handler == ex_handler_fault;
}
-int fixup_exception(struct pt_regs *regs, int trapnr)
+int fixup_exception(struct pt_regs *regs, int trapnr,
+ unsigned long error_code, unsigned long extra)
{
const struct exception_table_entry *e;
ex_handler_t handler;
@@ -117,7 +125,7 @@ int fixup_exception(struct pt_regs *regs, int trapnr)
return 0;
handler = ex_fixup_handler(e);
- return handler(e, regs, trapnr);
+ return handler(e, regs, trapnr, error_code, extra);
}
extern unsigned int early_recursion_flag;
@@ -149,7 +157,8 @@ void __init early_fixup_exception(struct pt_regs *regs, int trapnr)
* Keep in mind that not all vectors actually get here. Early
* page faults, for example, are special.
*/
- if (fixup_exception(regs, trapnr))
+ if (fixup_exception(regs, trapnr, regs->orig_ax,
+ (regs->orig_ax == X86_TRAP_PF ? read_cr2() : 0)))
return;
fail:
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 5ce1ed02f7e8..3de8dc66fd5c 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -722,7 +722,7 @@ no_context(struct pt_regs *regs, unsigned long error_code,
struct vm_area_struct *vma = NULL;
/* Are we prepared to handle this kernel fault? */
- if (fixup_exception(regs, X86_TRAP_PF)) {
+ if (fixup_exception(regs, X86_TRAP_PF, error_code, address)) {
/*
* Any interrupt that takes a fault gets the fixup. This makes
* the below recursive fault logic only apply to a faults from
--
2.5.5
Powered by blists - more mailing lists