Message-ID: <ZwNwXF2MqPpHvzqW@liu>
Date: Mon, 7 Oct 2024 13:23:40 +0800
From: Melon Liu <melon1335@....com>
To: linux@...linux.org.uk, lecopzer.chen@...iatek.com,
	linus.walleij@...aro.org
Cc: linux-arm-kernel@...ts.infradead.org, kasan-dev@...glegroups.com,
	linux-kernel@...r.kernel.org, stable@...r.kernel.org
Subject: [PATCH] ARM/mm: Fix stack recursion caused by KASAN

When accessing the KASAN shadow area corresponding to a task stack in
vmalloc space, stack recursion occurs if the area's page tables are
unpopulated: the data-abort handler is itself instrumented, so its own
shadow accesses fault on the same unmapped area again.
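
For reference, the shadow lookup performed by each instrumented access
is essentially the following (as in kasan_mem_to_shadow() in
include/linux/kasan.h, shown here slightly simplified):

  /* Each shadow byte covers 2^KASAN_SHADOW_SCALE_SHIFT bytes of memory. */
  static inline void *kasan_mem_to_shadow(const void *addr)
  {
          return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
                  + KASAN_SHADOW_OFFSET;
  }

With CONFIG_KASAN_VMALLOC, the shadow of the vmalloc area lives in a
region whose kernel PGD entries must be copied into each task's mm,
just like the PGD entries of the vmalloc area itself.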

Call trace:
 ...
 __dabt_svc+0x4c/0x80
 __asan_load4+0x30/0x88
 do_translation_fault+0x2c/0x110
 do_DataAbort+0x4c/0xec
 __dabt_svc+0x4c/0x80
 __asan_load4+0x30/0x88
 do_translation_fault+0x2c/0x110
 do_DataAbort+0x4c/0xec
 __dabt_svc+0x4c/0x80
 sched_setscheduler_nocheck+0x60/0x158
 kthread+0xec/0x198
 ret_from_fork+0x14/0x28

Fix this by syncing the kernel PGD entries covering the KASAN shadow of
the vmalloc area, in addition to those covering the vmalloc area itself,
whenever __check_vmalloc_seq() runs.

Fixes: 565cbaad83d ("ARM: 9202/1: kasan: support CONFIG_KASAN_VMALLOC")
Cc: <stable@...r.kernel.org>
Signed-off-by: Melon Liu <melon1335@....org>
---
 arch/arm/mm/ioremap.c | 23 +++++++++++++++++++----
 1 file changed, 19 insertions(+), 4 deletions(-)

diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 794cfea9f..f952b0b0f 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -115,16 +115,31 @@ int ioremap_page(unsigned long virt, unsigned long phys,
 }
 EXPORT_SYMBOL(ioremap_page);
 
+static inline void sync_pgds(struct mm_struct *mm, unsigned long start,
+			     unsigned long end)
+{
+	end = ALIGN(end, PGDIR_SIZE);
+	memcpy(pgd_offset(mm, start), pgd_offset_k(start),
+	       sizeof(pgd_t) * (pgd_index(end) - pgd_index(start)));
+}
+
+static inline void sync_vmalloc_pgds(struct mm_struct *mm)
+{
+	sync_pgds(mm, VMALLOC_START, VMALLOC_END);
+	if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
+		sync_pgds(mm, (unsigned long)kasan_mem_to_shadow(
+					(void *)VMALLOC_START),
+			      (unsigned long)kasan_mem_to_shadow(
+					(void *)VMALLOC_END));
+}
+
 void __check_vmalloc_seq(struct mm_struct *mm)
 {
 	int seq;
 
 	do {
 		seq = atomic_read(&init_mm.context.vmalloc_seq);
-		memcpy(pgd_offset(mm, VMALLOC_START),
-		       pgd_offset_k(VMALLOC_START),
-		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
-					pgd_index(VMALLOC_START)));
+		sync_vmalloc_pgds(mm);
 		/*
 		 * Use a store-release so that other CPUs that observe the
 		 * counter's new value are guaranteed to see the results of the
-- 
2.43.0

