Date:   Fri, 16 Sep 2022 06:59:57 -0700
From:   Kees Cook <keescook@...omium.org>
To:     Matthew Wilcox <willy@...radead.org>
Cc:     Kees Cook <keescook@...omium.org>,
        Uladzislau Rezki <urezki@...il.com>,
        Andrew Morton <akpm@...ux-foundation.org>,
        Yu Zhao <yuzhao@...gle.com>, dev@...-flo.net,
        linux-mm@...ck.org, linux-hardening@...r.kernel.org,
        Peter Zijlstra <peterz@...radead.org>,
        Ingo Molnar <mingo@...hat.com>, linux-kernel@...r.kernel.org,
        x86@...nel.org, linux-perf-users@...r.kernel.org,
        linux-arch@...r.kernel.org
Subject: [PATCH 3/3] usercopy: Add find_vmap_area_try() to avoid deadlocks

The check_object_size() checks under CONFIG_HARDENED_USERCOPY need to be
more defensive when running from interrupt context. find_vmap_area() takes
vmap_area_lock unconditionally, which can deadlock if an interrupt arrives
while the lock is already held. Instead, use a best-effort lookup of vmap
areas when in interrupt context: try to take the lock, and give up on the
check if it cannot be taken.
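
For reference, this is a plain trylock-based best-effort lookup. A minimal
userspace sketch of the same pattern (hypothetical names: table_lock stands
in for vmap_area_lock, lookup_locked() for __find_vmap_area()):

	#include <pthread.h>
	#include <stddef.h>
	#include <stdio.h>

	static pthread_spinlock_t table_lock;	/* stand-in for vmap_area_lock */

	struct entry { unsigned long addr; };

	/* stand-in for the vmap area tree */
	static struct entry table_entry = { 0x1000 };

	/* Stand-in for __find_vmap_area(); caller must hold table_lock. */
	static struct entry *lookup_locked(unsigned long addr)
	{
		return (addr == table_entry.addr) ? &table_entry : NULL;
	}

	/* Best-effort lookup: may return NULL even if the entry exists. */
	static struct entry *lookup_try(unsigned long addr)
	{
		struct entry *e = NULL;

		if (pthread_spin_trylock(&table_lock) == 0) {
			e = lookup_locked(addr);
			pthread_spin_unlock(&table_lock);
		}
		return e;
	}

	int main(void)
	{
		pthread_spin_init(&table_lock, PTHREAD_PROCESS_PRIVATE);
		printf("found: %p\n", (void *)lookup_try(0x1000));
		pthread_spin_destroy(&table_lock);
		return 0;
	}

A NULL return from the "try" variant only means "could not check right now",
so callers must treat it as "unknown" rather than "not present".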

Suggested-by: Matthew Wilcox <willy@...radead.org>
Link: https://lore.kernel.org/linux-mm/YyQ2CSdIJdvQPSPO@casper.infradead.org
Cc: Andrew Morton <akpm@...ux-foundation.org>
Cc: Yu Zhao <yuzhao@...gle.com>
Cc: dev@...-flo.net
Cc: linux-mm@...ck.org
Cc: linux-hardening@...r.kernel.org
Signed-off-by: Kees Cook <keescook@...omium.org>
---
 include/linux/vmalloc.h |  1 +
 mm/usercopy.c           | 11 ++++++++++-
 mm/vmalloc.c            | 11 +++++++++++
 3 files changed, 22 insertions(+), 1 deletion(-)

diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 096d48aa3437..c8a00f181a11 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -216,6 +216,7 @@ void free_vm_area(struct vm_struct *area);
 extern struct vm_struct *remove_vm_area(const void *addr);
 extern struct vm_struct *find_vm_area(const void *addr);
 struct vmap_area *find_vmap_area(unsigned long addr);
+struct vmap_area *find_vmap_area_try(unsigned long addr);
 
 static inline bool is_vm_area_hugepages(const void *addr)
 {
diff --git a/mm/usercopy.c b/mm/usercopy.c
index c1ee15a98633..4a371099ac64 100644
--- a/mm/usercopy.c
+++ b/mm/usercopy.c
@@ -173,7 +173,16 @@ static inline void check_heap_object(const void *ptr, unsigned long n,
 	}
 
 	if (is_vmalloc_addr(ptr)) {
-		struct vmap_area *area = find_vmap_area(addr);
+		struct vmap_area *area;
+
+		if (unlikely(in_interrupt())) {
+			area = find_vmap_area_try(addr);
+			/* Give up under interrupt to avoid deadlocks. */
+			if (!area)
+				return;
+		} else {
+			area = find_vmap_area(addr);
+		}
 
 		if (!area)
 			usercopy_abort("vmalloc", "no area", to_user, 0, n);
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index dd6cdb201195..f14f1902c2f6 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1840,6 +1840,17 @@ struct vmap_area *find_vmap_area(unsigned long addr)
 	return va;
 }
 
+struct vmap_area *find_vmap_area_try(unsigned long addr)
+{
+	struct vmap_area *va = NULL;
+
+	if (spin_trylock(&vmap_area_lock)) {
+		va = __find_vmap_area(addr, &vmap_area_root);
+		spin_unlock(&vmap_area_lock);
+	}
+	return va;
+}
+
 /*** Per cpu kva allocator ***/
 
 /*
-- 
2.34.1
