[<prev] [next>] [thread-next>] [day] [month] [year] [list]
Message-Id: <1512362600-40838-1-git-send-email-maninder1.s@samsung.com>
Date: Mon, 4 Dec 2017 10:13:20 +0530
From: Maninder Singh <maninder1.s@...sung.com>
To: aryabinin@...tuozzo.com, glider@...gle.com, vyukov@...gle.com,
mbenes@...e.cz, tglx@...utronix.de, pombredanne@...b.com,
mingo@...nel.org, gregkh@...uxfoundation.org, jpoimboe@...hat.com,
akpm@...ux-foundation.org, vbabka@...e.cz, sfr@...b.auug.org.au,
mhocko@...e.com
Cc: linux-kernel@...r.kernel.org, kasan-dev@...glegroups.com,
linux-mm@...ck.org, a.sahrawat@...sung.com, pankaj.m@...sung.com,
Maninder Singh <maninder1.s@...sung.com>,
Vaneet Narang <v.narang@...sung.com>
Subject: [PATCH 1/1] mm/page_owner: ignore everything below the IRQ entry
point
Check whether the allocation happens in an IRQ handler.
This lets us strip everything below the IRQ entry point to reduce the
number of unique stack traces that need to be stored.
The KASAN code is therefore moved into a generic header so that
page_owner can perform the same filtering.
The filtering was originally introduced in KASAN by commit
be7635e7287e0e8013af3c89a6354a9e0182594c.
Signed-off-by: Vaneet Narang <v.narang@...sung.com>
Signed-off-by: Maninder Singh <maninder1.s@...sung.com>
---
include/linux/stacktrace.h | 25 +++++++++++++++++++++++++
mm/kasan/kasan.c | 22 ----------------------
mm/page_owner.c | 1 +
3 files changed, 26 insertions(+), 22 deletions(-)
diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h
index ba29a06..2c1a562 100644
--- a/include/linux/stacktrace.h
+++ b/include/linux/stacktrace.h
@@ -3,6 +3,7 @@
#define __LINUX_STACKTRACE_H
#include <linux/types.h>
+#include <asm-generic/sections.h>
struct task_struct;
struct pt_regs;
@@ -26,6 +27,28 @@ extern int save_stack_trace_tsk_reliable(struct task_struct *tsk,
extern int snprint_stack_trace(char *buf, size_t size,
struct stack_trace *trace, int spaces);
+static inline int in_irqentry_text(unsigned long ptr)
+{
+ return (ptr >= (unsigned long)&__irqentry_text_start &&
+ ptr < (unsigned long)&__irqentry_text_end) ||
+ (ptr >= (unsigned long)&__softirqentry_text_start &&
+ ptr < (unsigned long)&__softirqentry_text_end);
+}
+
+static inline void filter_irq_stacks(struct stack_trace *trace)
+{
+ int i;
+
+ if (!trace->nr_entries)
+ return;
+ for (i = 0; i < trace->nr_entries; i++)
+ if (in_irqentry_text(trace->entries[i])) {
+ /* Include the irqentry function into the stack. */
+ trace->nr_entries = i + 1;
+ break;
+ }
+}
+
#ifdef CONFIG_USER_STACKTRACE_SUPPORT
extern void save_stack_trace_user(struct stack_trace *trace);
#else
@@ -38,6 +61,8 @@ extern int snprint_stack_trace(char *buf, size_t size,
# define save_stack_trace_user(trace) do { } while (0)
# define print_stack_trace(trace, spaces) do { } while (0)
# define snprint_stack_trace(buf, size, trace, spaces) do { } while (0)
+# define filter_irq_stacks(trace) do { } while (0)
+# define in_irqentry_text(ptr) do { } while (0)
# define save_stack_trace_tsk_reliable(tsk, trace) ({ -ENOSYS; })
#endif /* CONFIG_STACKTRACE */
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index 405bba4..129e7b8 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -412,28 +412,6 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object)
KASAN_KMALLOC_REDZONE);
}
-static inline int in_irqentry_text(unsigned long ptr)
-{
- return (ptr >= (unsigned long)&__irqentry_text_start &&
- ptr < (unsigned long)&__irqentry_text_end) ||
- (ptr >= (unsigned long)&__softirqentry_text_start &&
- ptr < (unsigned long)&__softirqentry_text_end);
-}
-
-static inline void filter_irq_stacks(struct stack_trace *trace)
-{
- int i;
-
- if (!trace->nr_entries)
- return;
- for (i = 0; i < trace->nr_entries; i++)
- if (in_irqentry_text(trace->entries[i])) {
- /* Include the irqentry function into the stack. */
- trace->nr_entries = i + 1;
- break;
- }
-}
-
static inline depot_stack_handle_t save_stack(gfp_t flags)
{
unsigned long entries[KASAN_STACK_DEPTH];
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 8602fb4..30e9cb2 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -148,6 +148,7 @@ static noinline depot_stack_handle_t save_stack(gfp_t flags)
depot_stack_handle_t handle;
save_stack_trace(&trace);
+ filter_irq_stacks(&trace);
if (trace.nr_entries != 0 &&
trace.entries[trace.nr_entries-1] == ULONG_MAX)
trace.nr_entries--;
--
1.9.1
Powered by blists - more mailing lists