Message-ID: <YySML2HfqaE/wXBU@casper.infradead.org>
Date: Fri, 16 Sep 2022 15:46:07 +0100
From: Matthew Wilcox <willy@...radead.org>
To: Kees Cook <keescook@...omium.org>
Cc: Uladzislau Rezki <urezki@...il.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Yu Zhao <yuzhao@...gle.com>, dev@...-flo.net,
linux-mm@...ck.org, linux-hardening@...r.kernel.org,
Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...hat.com>, linux-kernel@...r.kernel.org,
x86@...nel.org, linux-perf-users@...r.kernel.org,
linux-arch@...r.kernel.org
Subject: Re: [PATCH 3/3] usercopy: Add find_vmap_area_try() to avoid deadlocks
On Fri, Sep 16, 2022 at 06:59:57AM -0700, Kees Cook wrote:
> The check_object_size() checks under CONFIG_HARDENED_USERCOPY need to be
> more defensive against running from interrupt context. Use a best-effort
> check for VMAP areas when running in interrupt context
I had something more like this in mind:
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 096d48aa3437..2b7c52e76856 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -215,7 +215,7 @@ extern struct vm_struct *__get_vm_area_caller(unsigned long size,
void free_vm_area(struct vm_struct *area);
extern struct vm_struct *remove_vm_area(const void *addr);
extern struct vm_struct *find_vm_area(const void *addr);
-struct vmap_area *find_vmap_area(unsigned long addr);
+struct vmap_area *find_vmap_area_try(unsigned long addr);
static inline bool is_vm_area_hugepages(const void *addr)
{
diff --git a/mm/usercopy.c b/mm/usercopy.c
index c1ee15a98633..e0fb605c1b38 100644
--- a/mm/usercopy.c
+++ b/mm/usercopy.c
@@ -173,7 +173,11 @@ static inline void check_heap_object(const void *ptr, unsigned long n,
}
if (is_vmalloc_addr(ptr)) {
- struct vmap_area *area = find_vmap_area(addr);
+ struct vmap_area *area = find_vmap_area_try(addr);
+
+ /* We may be in NMI context */
+ if (area == ERR_PTR(-EAGAIN))
+ return;
if (!area)
usercopy_abort("vmalloc", "no area", to_user, 0, n);
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index dd6cdb201195..2ea76cb56d4b 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1829,7 +1829,7 @@ static void free_unmap_vmap_area(struct vmap_area *va)
free_vmap_area_noflush(va);
}
-struct vmap_area *find_vmap_area(unsigned long addr)
+static struct vmap_area *find_vmap_area(unsigned long addr)
{
struct vmap_area *va;
@@ -1840,6 +1840,18 @@ struct vmap_area *find_vmap_area(unsigned long addr)
return va;
}
+struct vmap_area *find_vmap_area_try(unsigned long addr)
+{
+ struct vmap_area *va;
+
+ if (!spin_trylock(&vmap_area_lock))
+ return ERR_PTR(-EAGAIN);
+ va = __find_vmap_area(addr, &vmap_area_root);
+ spin_unlock(&vmap_area_lock);
+
+ return va;
+}
+
/*** Per cpu kva allocator ***/
/*
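
For readers unfamiliar with the trylock-and-bail pattern above, here is a
minimal, self-contained sketch of how a caller could distinguish the three
possible results of the proposed find_vmap_area_try(): a found area, NULL,
or ERR_PTR(-EAGAIN). The helper name vmap_addr_is_valid() is purely
illustrative and not part of the patch.

#include <linux/err.h>
#include <linux/vmalloc.h>

/*
 * Illustrative caller, not part of the patch: treat a contended lock
 * (ERR_PTR(-EAGAIN), e.g. when called from NMI context) as "cannot
 * tell", NULL as "no vmap area covers this address", and any other
 * return value as a valid struct vmap_area.
 */
static bool vmap_addr_is_valid(unsigned long addr)
{
	struct vmap_area *area = find_vmap_area_try(addr);

	if (area == ERR_PTR(-EAGAIN))
		return true;	/* best effort: skip the check */
	if (!area)
		return false;	/* no vmap area covers this address */
	return true;
}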