Message-Id: <20220612213227.3881769-4-willy@infradead.org>
Date: Sun, 12 Jun 2022 22:32:27 +0100
From: "Matthew Wilcox (Oracle)" <willy@...radead.org>
To: Kees Cook <keescook@...omium.org>
Cc: "Matthew Wilcox (Oracle)" <willy@...radead.org>,
linux-mm@...ck.org, Uladzislau Rezki <urezki@...il.com>,
Zorro Lang <zlang@...hat.com>, linux-xfs@...r.kernel.org,
linux-hardening@...r.kernel.org
Subject: [PATCH 3/3] usercopy: Make usercopy resilient against ridiculously large copies

If 'n' is so large that it's negative, the bounds arithmetic can wrap
around and we mistakenly think the copy is OK when it's not.  Such a
copy would probably crash, but just doing the arithmetic in a simpler
way lets us detect and refuse this case.
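
To illustrate, here is a standalone userspace sketch (not part of the
patch; the address and size values are made up for the demo): with a
huge 'n', the old-style "start + len > end" check wraps around and
passes, while the rewritten "len > end - start" form refuses the copy.

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned long addr = 0x1000;			/* page-aligned address */
	unsigned long offset = addr & (PAGE_SIZE - 1);	/* offset_in_page(): 0 here */
	unsigned long n = (unsigned long)-1;		/* a "negative" copy size */
	unsigned long page_end = addr | (PAGE_SIZE - 1);

	/* Old form: addr + n - 1 wraps around and compares low. */
	if (addr + n - 1 > page_end)
		puts("old check: rejected");
	else
		puts("old check: wrongly accepted");	/* this branch runs */

	/* New form: PAGE_SIZE - offset cannot wrap, so a huge n is refused. */
	if (n > PAGE_SIZE - offset)
		puts("new check: rejected");		/* this branch runs */
	else
		puts("new check: accepted");

	return 0;
}

The same transformation -- compare 'n' against the space remaining in
the object rather than adding 'n' to the start address -- is applied to
the kmap, vmalloc and large-folio cases below.
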
Signed-off-by: Matthew Wilcox (Oracle) <willy@...radead.org>
---
 mm/usercopy.c | 19 +++++++++----------
 1 file changed, 9 insertions(+), 10 deletions(-)

diff --git a/mm/usercopy.c b/mm/usercopy.c
index 31deee7dd2f5..ff16083cf1c8 100644
--- a/mm/usercopy.c
+++ b/mm/usercopy.c
@@ -162,20 +162,18 @@ static inline void check_heap_object(const void *ptr, unsigned long n,
 				     bool to_user)
 {
 	uintptr_t addr = (uintptr_t)ptr;
+	unsigned long offset;
 	struct folio *folio;
 
 	if (is_kmap_addr(ptr)) {
-		unsigned long page_end = addr | (PAGE_SIZE - 1);
-
-		if (addr + n - 1 > page_end)
-			usercopy_abort("kmap", NULL, to_user,
-				       offset_in_page(ptr), n);
+		offset = offset_in_page(ptr);
+		if (n > PAGE_SIZE - offset)
+			usercopy_abort("kmap", NULL, to_user, offset, n);
 		return;
 	}
 
 	if (is_vmalloc_addr(ptr)) {
 		struct vmap_area *area = find_vmap_area(addr);
-		unsigned long offset;
 
 		if (!area) {
 			usercopy_abort("vmalloc", "no area", to_user, 0, n);
@@ -184,9 +182,10 @@ static inline void check_heap_object(const void *ptr, unsigned long n,
 		}
 
 		/* XXX: We should also abort for free vmap_areas */
-		offset = addr - area->va_start;
-		if (addr + n > area->va_end)
+		if (n > area->va_end - addr) {
+			offset = addr - area->va_start;
 			usercopy_abort("vmalloc", NULL, to_user, offset, n);
+		}
 		return;
 	}
 
@@ -199,8 +198,8 @@ static inline void check_heap_object(const void *ptr, unsigned long n,
 		/* Check slab allocator for flags and size. */
 		__check_heap_object(ptr, n, folio_slab(folio), to_user);
 	} else if (folio_test_large(folio)) {
-		unsigned long offset = ptr - folio_address(folio);
-		if (offset + n > folio_size(folio))
+		offset = ptr - folio_address(folio);
+		if (n > folio_size(folio) - offset)
 			usercopy_abort("page alloc", NULL, to_user, offset, n);
 	}
 }
--
2.35.1