Date:   Wed, 07 Feb 2018 19:14:43 +0300
From:   Kirill Tkhai <ktkhai@...tuozzo.com>
To:     tglx@...utronix.de, mingo@...hat.com, hpa@...or.com,
        aryabinin@...tuozzo.com, glider@...gle.com, dvyukov@...gle.com,
        luto@...nel.org, bp@...en8.de, jpoimboe@...hat.com,
        dave.hansen@...ux.intel.com, jgross@...e.com,
        kirill.shutemov@...ux.intel.com, keescook@...omium.org,
        minipli@...glemail.com, gregkh@...uxfoundation.org,
        kstewart@...uxfoundation.org, linux-kernel@...r.kernel.org,
        kasan-dev@...glegroups.com, linux-mm@...ck.org
Subject: [PATCH RFC] x86: KASAN: Sanitize unauthorized irq stack access

Sometimes the irq stack gets corrupted while an innocent callback
function is executing. This can happen because of buggy drivers'
irq handlers that access the wrong memory on the irq stack.

This patch aims to catch such situations by adding checks for
unauthorized irq stack accesses.

Every time we enter an interrupt, we check irq_count and allow
irq stack usage. After the last nested irq handler has exited,
we prohibit the access again.

I made the x86_unpoison_irq_stack() and x86_poison_irq_stack()
calls unconditional, because making them conditional would require
changing the order of incl PER_CPU_VAR(irq_count) and
UNWIND_HINT_REGS(), and I'm not sure that is legitimate to do.
So, irq_count is checked in x86_unpoison_irq_stack() instead.

Signed-off-by: Kirill Tkhai <ktkhai@...tuozzo.com>
---
 arch/x86/entry/entry_64.S        |    6 ++++++
 arch/x86/include/asm/processor.h |    6 ++++++
 arch/x86/kernel/irq_64.c         |   13 +++++++++++++
 include/linux/kasan.h            |    3 +++
 mm/kasan/kasan.c                 |   16 ++++++++++++++++
 5 files changed, 44 insertions(+)

diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 741d9877b357..1e9d69de2528 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -485,6 +485,9 @@ END(irq_entries_start)
  * The invariant is that, if irq_count != -1, then the IRQ stack is in use.
  */
 .macro ENTER_IRQ_STACK regs=1 old_rsp
+#ifdef CONFIG_KASAN
+	call	x86_unpoison_irq_stack
+#endif
 	DEBUG_ENTRY_ASSERT_IRQS_OFF
 	movq	%rsp, \old_rsp
 
@@ -552,6 +555,9 @@ END(irq_entries_start)
 	 */
 
 	decl	PER_CPU_VAR(irq_count)
+#ifdef CONFIG_KASAN
+	call	x86_poison_irq_stack
+#endif
 .endm
 
 /*
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 793bae7e7ce3..4353e3a85b0b 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -404,6 +404,12 @@ union irq_stack_union {
 	};
 };
 
+#define KASAN_IRQ_STACK_SIZE \
+	(sizeof(union irq_stack_union) - \
+		(offsetof(union irq_stack_union, stack_canary) + 8))
+
+#define percpu_irq_stack_addr() this_cpu_ptr(irq_stack_union.irq_stack)
+
 DECLARE_PER_CPU_FIRST(union irq_stack_union, irq_stack_union) __visible;
 DECLARE_INIT_PER_CPU(irq_stack_union);
 
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index d86e344f5b3d..ad78f4b3f0b5 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -77,3 +77,16 @@ bool handle_irq(struct irq_desc *desc, struct pt_regs *regs)
 	generic_handle_irq_desc(desc);
 	return true;
 }
+
+#ifdef CONFIG_KASAN
+void __visible x86_poison_irq_stack(void)
+{
+	if (this_cpu_read(irq_count) == -1)
+		kasan_poison_irq_stack();
+}
+void __visible x86_unpoison_irq_stack(void)
+{
+	if (this_cpu_read(irq_count) == -1)
+		kasan_unpoison_irq_stack();
+}
+#endif
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index adc13474a53b..cb433f1bf178 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -40,6 +40,9 @@ void kasan_unpoison_shadow(const void *address, size_t size);
 void kasan_unpoison_task_stack(struct task_struct *task);
 void kasan_unpoison_stack_above_sp_to(const void *watermark);
 
+void kasan_poison_irq_stack(void);
+void kasan_unpoison_irq_stack(void);
+
 void kasan_alloc_pages(struct page *page, unsigned int order);
 void kasan_free_pages(struct page *page, unsigned int order);
 
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index 0d9d9d268f32..9bc150c87205 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -412,6 +412,22 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object)
 			KASAN_KMALLOC_REDZONE);
 }
 
+#ifdef KASAN_IRQ_STACK_SIZE
+void kasan_poison_irq_stack(void)
+{
+	void *stack = percpu_irq_stack_addr();
+
+	kasan_poison_shadow(stack, KASAN_IRQ_STACK_SIZE, KASAN_GLOBAL_REDZONE);
+}
+
+void kasan_unpoison_irq_stack(void)
+{
+	void *stack = percpu_irq_stack_addr();
+
+	kasan_unpoison_shadow(stack, KASAN_IRQ_STACK_SIZE);
+}
+#endif /* KASAN_IRQ_STACK_SIZE */
+
 static inline int in_irqentry_text(unsigned long ptr)
 {
 	return (ptr >= (unsigned long)&__irqentry_text_start &&
