Message-ID: <20241104114506.GC24862@noisy.programming.kicks-ass.net>
Date: Mon, 4 Nov 2024 12:45:06 +0100
From: Peter Zijlstra <peterz@...radead.org>
To: Vlastimil Babka <vbabka@...e.cz>
Cc: syzbot <syzbot+39f85d612b7c20d8db48@...kaller.appspotmail.com>,
Liam.Howlett@...cle.com, akpm@...ux-foundation.org,
jannh@...gle.com, linux-kernel@...r.kernel.org, linux-mm@...ck.org,
lorenzo.stoakes@...cle.com, syzkaller-bugs@...glegroups.com,
Sebastian Andrzej Siewior <bigeasy@...utronix.de>,
Marco Elver <elver@...gle.com>,
Andrey Konovalov <andreyknvl@...il.com>,
kasan-dev <kasan-dev@...glegroups.com>,
Andrey Ryabinin <ryabinin.a.a@...il.com>,
Alexander Potapenko <glider@...gle.com>,
Waiman Long <longman@...hat.com>, dvyukov@...gle.com,
vincenzo.frascino@....com, paulmck@...nel.org, frederic@...nel.org,
neeraj.upadhyay@...nel.org, joel@...lfernandes.org,
josh@...htriplett.org, boqun.feng@...il.com, urezki@...il.com,
rostedt@...dmis.org, mathieu.desnoyers@...icios.com,
jiangshanlai@...il.com, qiang.zhang1211@...il.com, mingo@...hat.com,
peterz@...radead.org, juri.lelli@...hat.com,
vincent.guittot@...aro.org, dietmar.eggemann@....com,
bsegall@...gle.com, mgorman@...e.de, vschneid@...hat.com,
tj@...nel.org, cl@...ux.com, penberg@...nel.org,
rientjes@...gle.com, iamjoonsoo.kim@....com, vbabka@...e.cz,
roman.gushchin@...ux.dev, 42.hyeyoo@...il.com, rcu@...r.kernel.org
Subject: Re: [syzbot] [mm?] WARNING: locking bug in __rmqueue_pcplist
On Mon, Nov 04, 2024 at 12:25:03PM +0100, Vlastimil Babka wrote:
> On 11/4/24 12:11, Vlastimil Babka wrote:
> >> __alloc_pages_noprof+0x292/0x710 mm/page_alloc.c:4771
> >> alloc_pages_mpol_noprof+0x3e8/0x680 mm/mempolicy.c:2265
> >> stack_depot_save_flags+0x666/0x830 lib/stackdepot.c:627
> >> kasan_save_stack+0x4f/0x60 mm/kasan/common.c:48
> >> __kasan_record_aux_stack+0xac/0xc0 mm/kasan/generic.c:544
> >> task_work_add+0xd9/0x490 kernel/task_work.c:77
> >
> > It seems the decision whether stack depot is allowed to allocate here
> > depends on TWAF_NO_ALLOC, which was only added recently. So does that
> > mean it doesn't work as intended?
>
> I guess __run_posix_cpu_timers() needs to pass TWAF_NO_ALLOC too?
Yeah, or we just accept that kasan_record_aux_stack() is a horrible
thing that shouldn't live at all in functions that try their best to
set up async work locklessly.
That thing has only ever caused trouble :/
Also see 156172a13ff0.
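The narrower fix you suggest would presumably be a one-liner in the
posix timer path, something like the untested sketch below; it assumes
the task_work_add() call site in kernel/time/posix-cpu-timers.c still
looks roughly like current mainline:

static void __run_posix_cpu_timers(struct task_struct *tsk)
{
        struct posix_cputimers_work *pct = &tsk->posix_cputimers_work;

        if (WARN_ON_ONCE(pct->scheduled))
                return;

        pct->scheduled = true;
        /* Queued from hardirq context; keep the stack depot from allocating. */
        task_work_add(tsk, &pct->work, TWA_RESUME | TWAF_NO_ALLOC);
}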
How about we do the below at the very least?
---
include/linux/kasan.h | 2 --
include/linux/task_work.h | 1 -
kernel/irq_work.c | 2 +-
kernel/rcu/tiny.c | 2 +-
kernel/rcu/tree.c | 4 ++--
kernel/sched/core.c | 2 +-
kernel/task_work.c | 12 +-----------
kernel/workqueue.c | 2 +-
mm/kasan/generic.c | 16 +++-------------
mm/slub.c | 2 +-
10 files changed, 11 insertions(+), 34 deletions(-)
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 00a3bf7c0d8f..1a623818e8b3 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -488,7 +488,6 @@ void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
void kasan_record_aux_stack(void *ptr);
-void kasan_record_aux_stack_noalloc(void *ptr);
#else /* CONFIG_KASAN_GENERIC */
@@ -506,7 +505,6 @@ static inline void kasan_cache_create(struct kmem_cache *cache,
static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline void kasan_record_aux_stack(void *ptr) {}
-static inline void kasan_record_aux_stack_noalloc(void *ptr) {}
#endif /* CONFIG_KASAN_GENERIC */
diff --git a/include/linux/task_work.h b/include/linux/task_work.h
index 2964171856e0..db1690e01346 100644
--- a/include/linux/task_work.h
+++ b/include/linux/task_work.h
@@ -21,7 +21,6 @@ enum task_work_notify_mode {
TWA_NMI_CURRENT,
TWA_FLAGS = 0xff00,
- TWAF_NO_ALLOC = 0x0100,
};
static inline bool task_work_pending(struct task_struct *task)
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index 2f4fb336dda1..73f7e1fd4ab4 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -147,7 +147,7 @@ bool irq_work_queue_on(struct irq_work *work, int cpu)
if (!irq_work_claim(work))
return false;
- kasan_record_aux_stack_noalloc(work);
+ kasan_record_aux_stack(work);
preempt_disable();
if (cpu != smp_processor_id()) {
diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
index b3b3ce34df63..4b3f31911465 100644
--- a/kernel/rcu/tiny.c
+++ b/kernel/rcu/tiny.c
@@ -250,7 +250,7 @@ EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu);
void kvfree_call_rcu(struct rcu_head *head, void *ptr)
{
if (head)
- kasan_record_aux_stack_noalloc(ptr);
+ kasan_record_aux_stack(ptr);
__kvfree_call_rcu(head, ptr);
}
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index b1f883fcd918..7eae9bd818a9 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3083,7 +3083,7 @@ __call_rcu_common(struct rcu_head *head, rcu_callback_t func, bool lazy_in)
}
head->func = func;
head->next = NULL;
- kasan_record_aux_stack_noalloc(head);
+ kasan_record_aux_stack(head);
local_irq_save(flags);
rdp = this_cpu_ptr(&rcu_data);
lazy = lazy_in && !rcu_async_should_hurry();
@@ -3807,7 +3807,7 @@ void kvfree_call_rcu(struct rcu_head *head, void *ptr)
return;
}
- kasan_record_aux_stack_noalloc(ptr);
+ kasan_record_aux_stack(ptr);
success = add_ptr_to_bulk_krc_lock(&krcp, &flags, ptr, !head);
if (!success) {
run_page_cache_worker(krcp);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5de31c312189..dafc668a156e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -10519,7 +10519,7 @@ void task_tick_mm_cid(struct rq *rq, struct task_struct *curr)
return;
/* No page allocation under rq lock */
- task_work_add(curr, work, TWA_RESUME | TWAF_NO_ALLOC);
+ task_work_add(curr, work, TWA_RESUME);
}
void sched_mm_cid_exit_signals(struct task_struct *t)
diff --git a/kernel/task_work.c b/kernel/task_work.c
index c969f1f26be5..2ffd5a6db91b 100644
--- a/kernel/task_work.c
+++ b/kernel/task_work.c
@@ -64,17 +64,7 @@ int task_work_add(struct task_struct *task, struct callback_head *work,
if (!IS_ENABLED(CONFIG_IRQ_WORK))
return -EINVAL;
} else {
- /*
- * Record the work call stack in order to print it in KASAN
- * reports.
- *
- * Note that stack allocation can fail if TWAF_NO_ALLOC flag
- * is set and new page is needed to expand the stack buffer.
- */
- if (flags & TWAF_NO_ALLOC)
- kasan_record_aux_stack_noalloc(work);
- else
- kasan_record_aux_stack(work);
+ kasan_record_aux_stack(work);
}
head = READ_ONCE(task->task_works);
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 9949ffad8df0..65b8314b2d53 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2180,7 +2180,7 @@ static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
debug_work_activate(work);
/* record the work call stack in order to print it in KASAN reports */
- kasan_record_aux_stack_noalloc(work);
+ kasan_record_aux_stack(work);
/* we own @work, set data and link */
set_work_pwq(work, pwq, extra_flags);
diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
index 6310a180278b..ac9f6682bb2f 100644
--- a/mm/kasan/generic.c
+++ b/mm/kasan/generic.c
@@ -521,12 +521,12 @@ size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object)
sizeof(struct kasan_free_meta) : 0);
}
-static void __kasan_record_aux_stack(void *addr, depot_flags_t depot_flags)
+void kasan_record_aux_stack(void *addr)
{
struct slab *slab = kasan_addr_to_slab(addr);
struct kmem_cache *cache;
struct kasan_alloc_meta *alloc_meta;
void *object;
if (is_kfence_address(addr) || !slab)
return;
@@ -538,17 +538,7 @@ static void __kasan_record_aux_stack(void *addr, depot_flags_t depot_flags)
return;
alloc_meta->aux_stack[1] = alloc_meta->aux_stack[0];
- alloc_meta->aux_stack[0] = kasan_save_stack(0, depot_flags);
-}
-
-void kasan_record_aux_stack(void *addr)
-{
- return __kasan_record_aux_stack(addr, STACK_DEPOT_FLAG_CAN_ALLOC);
-}
-
-void kasan_record_aux_stack_noalloc(void *addr)
-{
- return __kasan_record_aux_stack(addr, 0);
+ alloc_meta->aux_stack[0] = kasan_save_stack(0, 0);
}
void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
diff --git a/mm/slub.c b/mm/slub.c
index 5b832512044e..b8c4bf3fe0d0 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2300,7 +2300,7 @@ bool slab_free_hook(struct kmem_cache *s, void *x, bool init,
* We have to do this manually because the rcu_head is
* not located inside the object.
*/
- kasan_record_aux_stack_noalloc(x);
+ kasan_record_aux_stack(x);
delayed_free->object = x;
call_rcu(&delayed_free->head, slab_free_after_rcu_debug);
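
For reference, if I'm reading the hunks right, mm/kasan/generic.c ends
up with just the single function below; the unchanged middle of the
function is filled in from current mainline here, so treat it as a
sketch rather than as part of the patch:

void kasan_record_aux_stack(void *addr)
{
        struct slab *slab = kasan_addr_to_slab(addr);
        struct kmem_cache *cache;
        struct kasan_alloc_meta *alloc_meta;
        void *object;

        if (is_kfence_address(addr) || !slab)
                return;

        cache = slab->slab_cache;
        object = nearest_obj(cache, slab, addr);
        alloc_meta = kasan_get_alloc_meta(cache, object);
        if (!alloc_meta)
                return;

        alloc_meta->aux_stack[1] = alloc_meta->aux_stack[0];
        /* depot_flags == 0: no STACK_DEPOT_FLAG_CAN_ALLOC, so no page allocation. */
        alloc_meta->aux_stack[0] = kasan_save_stack(0, 0);
}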