Message-Id: <eeb851f8f3898640d6e880d9fcabea8b580ee3d8.1597425745.git.andreyknvl@google.com>
Date: Fri, 14 Aug 2020 19:26:46 +0200
From: Andrey Konovalov <andreyknvl@...gle.com>
To: Dmitry Vyukov <dvyukov@...gle.com>,
Vincenzo Frascino <vincenzo.frascino@....com>,
Catalin Marinas <catalin.marinas@....com>,
kasan-dev@...glegroups.com
Cc: Andrey Ryabinin <aryabinin@...tuozzo.com>,
Alexander Potapenko <glider@...gle.com>,
Marco Elver <elver@...gle.com>,
Evgenii Stepanov <eugenis@...gle.com>,
Elena Petrova <lenaptr@...gle.com>,
Branislav Rankov <Branislav.Rankov@....com>,
Kevin Brodsky <kevin.brodsky@....com>,
Will Deacon <will.deacon@....com>,
Andrew Morton <akpm@...ux-foundation.org>,
linux-arm-kernel@...ts.infradead.org, linux-mm@...ck.org,
linux-kernel@...r.kernel.org,
Andrey Konovalov <andreyknvl@...gle.com>
Subject: [PATCH 04/35] kasan: rename (un)poison_shadow to (un)poison_memory
This is a preparatory commit for the upcoming addition of a new hardware
tag-based (MTE-based) KASAN mode.

The new mode won't use shadow memory, but will reuse the same functions.
Rename kasan_unpoison_shadow to kasan_unpoison_memory, and
kasan_poison_shadow to kasan_poison_memory.

No functional changes.
Signed-off-by: Andrey Konovalov <andreyknvl@...gle.com>
---
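Not part of the patch itself: a rough, illustrative sketch of why the
mode-neutral name fits both backends. The shadow-based variant condenses
the existing code; the hardware tag-based variant is only an assumption
about the upcoming mode, and mte_set_mem_tag_range()/MTE_GRANULE_SIZE are
placeholder names here, not APIs introduced by this series:

#ifdef CONFIG_KASAN_HW_TAGS
/*
 * Hardware tag-based mode (sketch): no shadow memory exists; the same
 * entry point would set the allocation tag directly on the memory range.
 */
void kasan_poison_memory(const void *address, size_t size, u8 value)
{
	mte_set_mem_tag_range(reset_tag(address),
			      round_up(size, MTE_GRANULE_SIZE), value);
}
#else
/*
 * Software (generic and SW_TAGS) modes: "poisoning" means writing a value
 * into the shadow region that mirrors the given memory range.
 */
void kasan_poison_memory(const void *address, size_t size, u8 value)
{
	void *shadow_start = kasan_mem_to_shadow(reset_tag(address));
	void *shadow_end = kasan_mem_to_shadow(reset_tag(address) + size);

	__memset(shadow_start, value, shadow_end - shadow_start);
}
#endif

Either way, callers only ever use the generic name, which is what this
rename prepares for.
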
include/linux/kasan.h | 6 +++---
kernel/fork.c | 4 ++--
mm/kasan/common.c | 38 +++++++++++++++++++-------------------
mm/kasan/generic.c | 12 ++++++------
mm/kasan/kasan.h | 2 +-
mm/kasan/tags.c | 2 +-
mm/slab_common.c | 2 +-
7 files changed, 33 insertions(+), 33 deletions(-)
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 44a9aae44138..18617d5c4cd7 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -60,7 +60,7 @@ extern void kasan_enable_current(void);
/* Disable reporting bugs for current task */
extern void kasan_disable_current(void);
-void kasan_unpoison_shadow(const void *address, size_t size);
+void kasan_unpoison_memory(const void *address, size_t size);
void kasan_unpoison_task_stack(struct task_struct *task);
@@ -97,7 +97,7 @@ struct kasan_cache {
size_t __ksize(const void *);
static inline void kasan_unpoison_slab(const void *ptr)
{
- kasan_unpoison_shadow(ptr, __ksize(ptr));
+ kasan_unpoison_memory(ptr, __ksize(ptr));
}
size_t kasan_metadata_size(struct kmem_cache *cache);
@@ -106,7 +106,7 @@ void kasan_restore_multi_shot(bool enabled);
#else /* CONFIG_KASAN */
-static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
+static inline void kasan_unpoison_memory(const void *address, size_t size) {}
static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
diff --git a/kernel/fork.c b/kernel/fork.c
index c9c76a4d1180..c93e93cfbab8 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -224,8 +224,8 @@ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
if (!s)
continue;
- /* Clear the KASAN shadow of the stack. */
- kasan_unpoison_shadow(s->addr, THREAD_SIZE);
+ /* Mark stack accessible for KASAN. */
+ kasan_unpoison_memory(s->addr, THREAD_SIZE);
/* Clear stale pointers from reused stack. */
memset(s->addr, 0, THREAD_SIZE);
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index d1c987f324cd..65933b27df81 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -113,7 +113,7 @@ void *memcpy(void *dest, const void *src, size_t len)
* Poisons the shadow memory for 'size' bytes starting from 'addr'.
* Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE.
*/
-void kasan_poison_shadow(const void *address, size_t size, u8 value)
+void kasan_poison_memory(const void *address, size_t size, u8 value)
{
void *shadow_start, *shadow_end;
@@ -130,7 +130,7 @@ void kasan_poison_shadow(const void *address, size_t size, u8 value)
__memset(shadow_start, value, shadow_end - shadow_start);
}
-void kasan_unpoison_shadow(const void *address, size_t size)
+void kasan_unpoison_memory(const void *address, size_t size)
{
u8 tag = get_tag(address);
@@ -141,7 +141,7 @@ void kasan_unpoison_shadow(const void *address, size_t size)
*/
address = reset_tag(address);
- kasan_poison_shadow(address, size, tag);
+ kasan_poison_memory(address, size, tag);
if (size & KASAN_SHADOW_MASK) {
u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);
@@ -158,7 +158,7 @@ static void __kasan_unpoison_stack(struct task_struct *task, const void *sp)
void *base = task_stack_page(task);
size_t size = sp - base;
- kasan_unpoison_shadow(base, size);
+ kasan_unpoison_memory(base, size);
}
/* Unpoison the entire stack for a task. */
@@ -177,7 +177,7 @@ asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
*/
void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));
- kasan_unpoison_shadow(base, watermark - base);
+ kasan_unpoison_memory(base, watermark - base);
}
void kasan_alloc_pages(struct page *page, unsigned int order)
@@ -191,13 +191,13 @@ void kasan_alloc_pages(struct page *page, unsigned int order)
tag = random_tag();
for (i = 0; i < (1 << order); i++)
page_kasan_tag_set(page + i, tag);
- kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
+ kasan_unpoison_memory(page_address(page), PAGE_SIZE << order);
}
void kasan_free_pages(struct page *page, unsigned int order)
{
if (likely(!PageHighMem(page)))
- kasan_poison_shadow(page_address(page),
+ kasan_poison_memory(page_address(page),
PAGE_SIZE << order,
KASAN_FREE_PAGE);
}
@@ -289,18 +289,18 @@ void kasan_poison_slab(struct page *page)
for (i = 0; i < compound_nr(page); i++)
page_kasan_tag_reset(page + i);
- kasan_poison_shadow(page_address(page), page_size(page),
+ kasan_poison_memory(page_address(page), page_size(page),
KASAN_KMALLOC_REDZONE);
}
void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
- kasan_unpoison_shadow(object, cache->object_size);
+ kasan_unpoison_memory(object, cache->object_size);
}
void kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
- kasan_poison_shadow(object,
+ kasan_poison_memory(object,
round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
KASAN_KMALLOC_REDZONE);
}
@@ -413,7 +413,7 @@ static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
}
rounded_up_size = round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE);
- kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
+ kasan_poison_memory(object, rounded_up_size, KASAN_KMALLOC_FREE);
if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine) ||
unlikely(!(cache->flags & SLAB_KASAN)))
@@ -453,8 +453,8 @@ static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object,
tag = assign_tag(cache, object, false, keep_tag);
/* Tag is ignored in set_tag without CONFIG_KASAN_SW_TAGS */
- kasan_unpoison_shadow(set_tag(object, tag), size);
- kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
+ kasan_unpoison_memory(set_tag(object, tag), size);
+ kasan_poison_memory((void *)redzone_start, redzone_end - redzone_start,
KASAN_KMALLOC_REDZONE);
if (cache->flags & SLAB_KASAN)
@@ -494,8 +494,8 @@ void * __must_check kasan_kmalloc_large(const void *ptr, size_t size,
KASAN_SHADOW_SCALE_SIZE);
redzone_end = (unsigned long)ptr + page_size(page);
- kasan_unpoison_shadow(ptr, size);
- kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
+ kasan_unpoison_memory(ptr, size);
+ kasan_poison_memory((void *)redzone_start, redzone_end - redzone_start,
KASAN_PAGE_REDZONE);
return (void *)ptr;
@@ -528,7 +528,7 @@ void kasan_poison_kfree(void *ptr, unsigned long ip)
kasan_report_invalid_free(ptr, ip);
return;
}
- kasan_poison_shadow(ptr, page_size(page), KASAN_FREE_PAGE);
+ kasan_poison_memory(ptr, page_size(page), KASAN_FREE_PAGE);
} else {
__kasan_slab_free(page->slab_cache, ptr, ip, false);
}
@@ -714,7 +714,7 @@ int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
* // vmalloc() allocates memory
* // let a = area->addr
* // we reach kasan_populate_vmalloc
- * // and call kasan_unpoison_shadow:
+ * // and call kasan_unpoison_memory:
* STORE shadow(a), unpoison_val
* ...
* STORE shadow(a+99), unpoison_val x = LOAD p
@@ -749,7 +749,7 @@ void kasan_poison_vmalloc(const void *start, unsigned long size)
return;
size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
- kasan_poison_shadow(start, size, KASAN_VMALLOC_INVALID);
+ kasan_poison_memory(start, size, KASAN_VMALLOC_INVALID);
}
void kasan_unpoison_vmalloc(const void *start, unsigned long size)
@@ -757,7 +757,7 @@ void kasan_unpoison_vmalloc(const void *start, unsigned long size)
if (!is_vmalloc_or_module_addr(start))
return;
- kasan_unpoison_shadow(start, size);
+ kasan_unpoison_memory(start, size);
}
static int kasan_depopulate_vmalloc_pte(pte_t *ptep, unsigned long addr,
diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
index 248264b9cb76..4b5f905198d8 100644
--- a/mm/kasan/generic.c
+++ b/mm/kasan/generic.c
@@ -207,9 +207,9 @@ static void register_global(struct kasan_global *global)
{
size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE);
- kasan_unpoison_shadow(global->beg, global->size);
+ kasan_unpoison_memory(global->beg, global->size);
- kasan_poison_shadow(global->beg + aligned_size,
+ kasan_poison_memory(global->beg + aligned_size,
global->size_with_redzone - aligned_size,
KASAN_GLOBAL_REDZONE);
}
@@ -290,11 +290,11 @@ void __asan_alloca_poison(unsigned long addr, size_t size)
WARN_ON(!IS_ALIGNED(addr, KASAN_ALLOCA_REDZONE_SIZE));
- kasan_unpoison_shadow((const void *)(addr + rounded_down_size),
+ kasan_unpoison_memory((const void *)(addr + rounded_down_size),
size - rounded_down_size);
- kasan_poison_shadow(left_redzone, KASAN_ALLOCA_REDZONE_SIZE,
+ kasan_poison_memory(left_redzone, KASAN_ALLOCA_REDZONE_SIZE,
KASAN_ALLOCA_LEFT);
- kasan_poison_shadow(right_redzone,
+ kasan_poison_memory(right_redzone,
padding_size + KASAN_ALLOCA_REDZONE_SIZE,
KASAN_ALLOCA_RIGHT);
}
@@ -306,7 +306,7 @@ void __asan_allocas_unpoison(const void *stack_top, const void *stack_bottom)
if (unlikely(!stack_top || stack_top > stack_bottom))
return;
- kasan_unpoison_shadow(stack_top, stack_bottom - stack_top);
+ kasan_unpoison_memory(stack_top, stack_bottom - stack_top);
}
EXPORT_SYMBOL(__asan_allocas_unpoison);
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index ac499456740f..03450d3b31f7 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -150,7 +150,7 @@ static inline bool addr_has_shadow(const void *addr)
return (addr >= kasan_shadow_to_mem((void *)KASAN_SHADOW_START));
}
-void kasan_poison_shadow(const void *address, size_t size, u8 value);
+void kasan_poison_memory(const void *address, size_t size, u8 value);
/**
* check_memory_region - Check memory region, and report if invalid access.
diff --git a/mm/kasan/tags.c b/mm/kasan/tags.c
index e02a36a51f42..4d5a1fe8251f 100644
--- a/mm/kasan/tags.c
+++ b/mm/kasan/tags.c
@@ -158,7 +158,7 @@ EXPORT_SYMBOL(__hwasan_storeN_noabort);
void __hwasan_tag_memory(unsigned long addr, u8 tag, unsigned long size)
{
- kasan_poison_shadow((void *)addr, size, tag);
+ kasan_poison_memory((void *)addr, size, tag);
}
EXPORT_SYMBOL(__hwasan_tag_memory);
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 37d48a56431d..24bbf3704a51 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -1772,7 +1772,7 @@ size_t ksize(const void *objp)
* We assume that ksize callers could use whole allocated area,
* so we need to unpoison this area.
*/
- kasan_unpoison_shadow(objp, size);
+ kasan_unpoison_memory(objp, size);
return size;
}
EXPORT_SYMBOL(ksize);
--
2.28.0.220.ged08abb693-goog