Message-Id: <1522150907-33547-1-git-send-email-maninder1.s@samsung.com>
Date: Tue, 27 Mar 2018 17:11:47 +0530
From: Maninder Singh <maninder1.s@...sung.com>
To: aryabinin@...tuozzo.com, glider@...gle.com, dvyukov@...gle.com,
kstewart@...uxfoundation.org, tglx@...utronix.de,
pombredanne@...b.com, gregkh@...uxfoundation.org,
akpm@...ux-foundation.org, vbabka@...e.cz, sfr@...b.auug.org.au,
mhocko@...e.com, vinmenon@...eaurora.org, gomonovych@...il.com,
ayush.m@...sung.com
Cc: linux-kernel@...r.kernel.org, kasan-dev@...glegroups.com,
linux-mm@...ck.org, a.sahrawat@...sung.com, pankaj.m@...sung.com,
v.narang@...sung.com, Maninder Singh <maninder1.s@...sung.com>
Subject: [PATCH v3] mm/page_owner: ignore everything below the IRQ entry
point
Check whether the allocation happens in an IRQ handler. This lets us
strip everything below the IRQ entry point, which reduces the number
of unique stack traces that need to be stored.

KASAN already does this filtering, so move its code to a generic file
and let page_owner do the same filtering. The filtering was originally
introduced for KASAN in commit
be7635e7287e0e8013af3c89a6354a9e0182594c.
Before the patch:
__alloc_pages_nodemask+0xfc/0x220
page_frag_alloc+0x84/0x140
__napi_alloc_skb+0x83/0xe0
rtl8169_poll+0x1e5/0x670
net_rx_action+0x132/0x3a0
__do_softirq+0xce/0x298
irq_exit+0xa3/0xb0
do_IRQ+0x72/0xc0
ret_from_intr+0x0/0x18
cpuidle_enter_state+0x96/0x290
do_idle+0x163/0x1a0

After the patch:
__alloc_pages_nodemask+0xfc/0x220
page_frag_alloc+0x84/0x140
__napi_alloc_skb+0x83/0xe0
rtl8169_poll+0x1e5/0x670
net_rx_action+0x132/0x3a0
__do_softirq+0xce/0x298
Signed-off-by: Vaneet Narang <v.narang@...sung.com>
Signed-off-by: Maninder Singh <maninder1.s@...sung.com>
---
v1->v2: fixed a build break on tile and blackfin
        (https://lkml.org/lkml/2017/12/3/287, verified for blackfin).
v2->v3: removed inline, since the functions are no longer small;
        dropped the redundant declarations of __irqentry_text_start
        and friends.
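
For reviewers, a minimal sketch of how a stack-trace consumer is
expected to use the new helper (example_capture() is made up for
illustration; the other symbols come from <linux/stacktrace.h> and
this patch):

	#include <linux/kernel.h>
	#include <linux/stacktrace.h>

	static void example_capture(void)
	{
		unsigned long entries[16];
		struct stack_trace trace = {
			.entries	= entries,
			.max_entries	= ARRAY_SIZE(entries),
		};

		save_stack_trace(&trace);
		/* Cut everything below the hard/soft IRQ entry point. */
		filter_irq_stacks(&trace);
	}

page_owner's save_stack() does exactly this before handing the trace
to depot_save_stack(), which is what reduces the number of unique
traces that get stored.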
 include/linux/stacktrace.h |  3 +++
 kernel/stacktrace.c        | 22 ++++++++++++++++++++++
 mm/kasan/kasan.c           | 22 ----------------------
 mm/page_owner.c            |  1 +
 4 files changed, 26 insertions(+), 22 deletions(-)

diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h
index ba29a06..1a37fcae 100644
--- a/include/linux/stacktrace.h
+++ b/include/linux/stacktrace.h
@@ -26,6 +26,8 @@ extern int save_stack_trace_tsk_reliable(struct task_struct *tsk,
 extern int snprint_stack_trace(char *buf, size_t size,
 			struct stack_trace *trace, int spaces);
 
+void filter_irq_stacks(struct stack_trace *trace);
+
 #ifdef CONFIG_USER_STACKTRACE_SUPPORT
 extern void save_stack_trace_user(struct stack_trace *trace);
 #else
@@ -38,6 +40,7 @@ extern int snprint_stack_trace(char *buf, size_t size,
 # define save_stack_trace_user(trace)			do { } while (0)
 # define print_stack_trace(trace, spaces)		do { } while (0)
 # define snprint_stack_trace(buf, size, trace, spaces)	do { } while (0)
+# define filter_irq_stacks(trace)			do { } while (0)
 # define save_stack_trace_tsk_reliable(tsk, trace)	({ -ENOSYS; })
 #endif /* CONFIG_STACKTRACE */
 
diff --git a/kernel/stacktrace.c b/kernel/stacktrace.c
index f8edee9..b28c4e1 100644
--- a/kernel/stacktrace.c
+++ b/kernel/stacktrace.c
@@ -77,3 +77,25 @@ int snprint_stack_trace(char *buf, size_t size,
 	WARN_ONCE(1, KERN_INFO "save_stack_tsk_reliable() not implemented yet.\n");
 	return -ENOSYS;
 }
+
+static bool in_irqentry_text(unsigned long ptr)
+{
+	return (ptr >= (unsigned long)&__irqentry_text_start &&
+		ptr < (unsigned long)&__irqentry_text_end) ||
+		(ptr >= (unsigned long)&__softirqentry_text_start &&
+		 ptr < (unsigned long)&__softirqentry_text_end);
+}
+
+void filter_irq_stacks(struct stack_trace *trace)
+{
+	int i;
+
+	if (!trace->nr_entries)
+		return;
+	for (i = 0; i < trace->nr_entries; i++)
+		if (in_irqentry_text(trace->entries[i])) {
+			/* Include the irqentry function into the stack. */
+			trace->nr_entries = i + 1;
+			break;
+		}
+}
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index 405bba4..129e7b8 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -412,28 +412,6 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object)
 			KASAN_KMALLOC_REDZONE);
 }
 
-static inline int in_irqentry_text(unsigned long ptr)
-{
-	return (ptr >= (unsigned long)&__irqentry_text_start &&
-		ptr < (unsigned long)&__irqentry_text_end) ||
-		(ptr >= (unsigned long)&__softirqentry_text_start &&
-		 ptr < (unsigned long)&__softirqentry_text_end);
-}
-
-static inline void filter_irq_stacks(struct stack_trace *trace)
-{
-	int i;
-
-	if (!trace->nr_entries)
-		return;
-	for (i = 0; i < trace->nr_entries; i++)
-		if (in_irqentry_text(trace->entries[i])) {
-			/* Include the irqentry function into the stack. */
-			trace->nr_entries = i + 1;
-			break;
-		}
-}
-
 static inline depot_stack_handle_t save_stack(gfp_t flags)
 {
 	unsigned long entries[KASAN_STACK_DEPTH];
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 8602fb4..30e9cb2 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -148,6 +148,7 @@ static noinline depot_stack_handle_t save_stack(gfp_t flags)
 	depot_stack_handle_t handle;
 
 	save_stack_trace(&trace);
+	filter_irq_stacks(&trace);
 	if (trace.nr_entries != 0 &&
 	    trace.entries[trace.nr_entries-1] == ULONG_MAX)
 		trace.nr_entries--;
--
1.9.1