Message-Id: <20220512183613.1069697-1-keescook@chromium.org>
Date: Thu, 12 May 2022 11:36:13 -0700
From: Kees Cook <keescook@...omium.org>
To: Matthew Wilcox <willy@...radead.org>
Cc: Kees Cook <keescook@...omium.org>,
Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
Arnd Bergmann <arnd@...db.de>, linux-kernel@...r.kernel.org,
linux-hardening@...r.kernel.org
Subject: [RFC][PATCH] lkdtm/usercopy: Add tests for other memory types
Add coverage for the recently added usercopy checks for vmap, kmap, and
folios.
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
Cc: Matthew Wilcox (Oracle) <willy@...radead.org>
Cc: Arnd Bergmann <arnd@...db.de>
Signed-off-by: Kees Cook <keescook@...omium.org>
---
Hi,
So, the kmap test fails at the "good" copy_to_user() step... Is copy_to_user()
restricted somewhere else from touching kmap'd pages? I can't find where...
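(My best guess so far: kmap_atomic() runs with page faults disabled, so the
not-yet-faulted-in user page makes even the "good" copy_to_user() return
non-zero. If that's it, a sleepable mapping should behave differently -- an
untested sketch against this same file, placeholder name and all:

static void lkdtm_USERCOPY_KMAP_SLEEPABLE(void)
{
	struct page *page;
	void *addr;

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		pr_err("alloc_page() failed!?\n");
		return;
	}
	/* kmap() may sleep, so copy_to_user() can fault the user page in. */
	addr = kmap(page);
	do_usercopy_page_span("kmap", addr);
	kunmap(page);
	__free_pages(page, 0);
}

If the pagefault_disable() theory is wrong, please ignore the above.)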
Also, is my use of folios correct here?
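In case it helps review, this is roughly how I've been poking the new
entries from userspace (assuming CONFIG_LKDTM=y and debugfs mounted at
/sys/kernel/debug; swap in USERCOPY_KMAP or USERCOPY_FOLIO as needed):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *cmd = "USERCOPY_VMALLOC";
	int fd = open("/sys/kernel/debug/provoke-crash/DIRECT", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Writing a crash type name to DIRECT triggers it immediately. */
	if (write(fd, cmd, strlen(cmd)) < 0)
		perror("write");
	close(fd);
	return 0;
}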
-Kees
---
drivers/misc/lkdtm/usercopy.c | 104 ++++++++++++++++++++++++++++++++++
1 file changed, 104 insertions(+)
diff --git a/drivers/misc/lkdtm/usercopy.c b/drivers/misc/lkdtm/usercopy.c
index 945806db2a13..d1c585bb7a8d 100644
--- a/drivers/misc/lkdtm/usercopy.c
+++ b/drivers/misc/lkdtm/usercopy.c
@@ -5,6 +5,7 @@
*/
#include "lkdtm.h"
#include <linux/slab.h>
+#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/sched/task_stack.h>
#include <linux/mman.h>
@@ -341,6 +342,106 @@ static void lkdtm_USERCOPY_KERNEL(void)
vm_munmap(user_addr, PAGE_SIZE);
}
+/*
+ * This expects "kaddr" to point to a PAGE_SIZE allocation, which means
+ * a more complete test that also included copy_from_user() would risk
+ * memory corruption. Just test copy_to_user() here, as that exercises
+ * almost exactly the same code paths.
+ */
+static void do_usercopy_page_span(const char *name, void *kaddr)
+{
+ unsigned long uaddr;
+
+ uaddr = vm_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_WRITE,
+ MAP_ANONYMOUS | MAP_PRIVATE, 0);
+ if (uaddr >= TASK_SIZE) {
+ pr_warn("Failed to allocate user memory\n");
+ return;
+ }
+
+ /* Initialize contents. */
+ memset(kaddr, 0xAA, PAGE_SIZE);
+
+ /* Bump the kaddr forward to detect a page-spanning overflow. */
+ kaddr += PAGE_SIZE / 2;
+
+ pr_info("attempting good copy_to_user() from kernel %s: %px\n",
+ name, kaddr);
+ if (copy_to_user((void __user *)uaddr, kaddr,
+ unconst + (PAGE_SIZE / 2))) {
+ pr_err("copy_to_user() failed unexpectedly?!\n");
+ goto free_user;
+ }
+
+ pr_info("attempting bad copy_to_user() from kernel %s: %px\n",
+ name, kaddr);
+ if (copy_to_user((void __user *)uaddr, kaddr,
+ unconst + PAGE_SIZE)) {
+ pr_warn("Good, copy_to_user() failed, but lacked Oops(?!)\n");
+ goto free_user;
+ }
+
+ pr_err("FAIL: bad copy_to_user() not detected!\n");
+ pr_expected_config_param(CONFIG_HARDENED_USERCOPY, "hardened_usercopy");
+
+free_user:
+ vm_munmap(uaddr, PAGE_SIZE);
+}
+
+static void lkdtm_USERCOPY_VMALLOC(void)
+{
+ void *addr;
+
+ addr = vmalloc(PAGE_SIZE);
+ if (!addr) {
+ pr_err("vmalloc() failed!?\n");
+ return;
+ }
+ do_usercopy_page_span("vmalloc", addr);
+ vfree(addr);
+}
+
+static void lkdtm_USERCOPY_KMAP(void)
+{
+ struct page *page;
+ void *addr;
+
+ page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
+ if (!page) {
+ pr_err("alloc_pages() failed!?\n");
+ return;
+ }
+ addr = kmap_atomic(page);
+ if (addr) {
+ do_usercopy_page_span("kmap", addr);
+ kunmap_atomic(addr);
+ } else {
+ pr_err("kmap_atomic() failed!?\n");
+ }
+ __free_pages(page, 0);
+}
+
+static void lkdtm_USERCOPY_FOLIO(void)
+{
+ struct folio *folio;
+ void *addr;
+
+ /*
+ * FIXME: Folio checking currently misses 0-order allocations, so
+ * allocate and bump forward to the last page.
+ */
+ folio = folio_alloc(GFP_KERNEL | __GFP_ZERO, 1);
+ if (!folio) {
+ pr_err("folio_alloc() failed!?\n");
+ return;
+ }
+ addr = page_address(&folio->page);
+ if (addr) {
+ do_usercopy_page_span("folio", addr + PAGE_SIZE);
+ }
+ folio_put(folio);
+}
+
void __init lkdtm_usercopy_init(void)
{
/* Prepare cache that lacks SLAB_USERCOPY flag. */
@@ -365,6 +466,9 @@ static struct crashtype crashtypes[] = {
CRASHTYPE(USERCOPY_STACK_FRAME_TO),
CRASHTYPE(USERCOPY_STACK_FRAME_FROM),
CRASHTYPE(USERCOPY_STACK_BEYOND),
+ CRASHTYPE(USERCOPY_VMALLOC),
+ CRASHTYPE(USERCOPY_KMAP),
+ CRASHTYPE(USERCOPY_FOLIO),
CRASHTYPE(USERCOPY_KERNEL),
};
--
2.32.0