Date: Tue, 12 Dec 2023 19:16:21 +0100
From: Thomas Gleixner <tglx@...utronix.de>
To: Martin Zaharinov <micron10@...il.com>
Cc: peterz@...radead.org, netdev <netdev@...r.kernel.org>, Paolo Abeni
<pabeni@...hat.com>, patchwork-bot+netdevbpf@...nel.org, Jakub Kicinski
<kuba@...nel.org>, Stephen Hemminger <stephen@...workplumber.org>,
kuba+netdrv@...nel.org, dsahern@...il.com, Eric Dumazet
<edumazet@...gle.com>
Subject: Re: Urgent Bug Report Kernel crash 6.5.2
Martin!
On Sat, Dec 09 2023 at 01:01, Martin Zaharinov wrote:
>> On 9 Dec 2023, at 0:20, Thomas Gleixner <tglx@...utronix.de> wrote:
>> That's definitely not a RCU problem. It's a simple refcount fail.
>>
> Is this a real problem or only a simple failure, and is it possible to
> catch what the problem is and fix it?
Underaccounting a reference count is potentially a use-after-free:

	if (rcuref_put(ref))
		call_rcu(ref....);

So after the grace period is over, @ref will be freed. Depending on the
timing, the context which does the extra put() might already operate on a
freed object.
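
To make that concrete, here is a minimal sketch of the usual release
pattern, with a made up 'struct foo' (not the actual object from your
report):

	#include <linux/rcuref.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct foo {
		rcuref_t	ref;
		struct rcu_head	rcu;
	};

	static void foo_free_rcu(struct rcu_head *rcu)
	{
		kfree(container_of(rcu, struct foo, rcu));
	}

	static void foo_put(struct foo *f)
	{
		/* rcuref_put() returns true only for the final drop */
		if (rcuref_put(&f->ref))
			call_rcu(&f->rcu, foo_free_rcu);
	}

If some code path issues one foo_put() more than it acquired references,
that "final" drop happens while another context still holds a pointer, and
once the grace period has elapsed that context operates on freed memory.
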
How to catch that is a good question. There is no instrumentation for
this so far. Below is a straightforward trace_printk() based tracking of
rcurefs, which should help to narrow down the context.
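
The trace_printk() output ends up in the ftrace ring buffer. Assuming
tracefs is mounted at the usual place, collecting it after the problem
triggers boils down to:

	# cat /sys/kernel/tracing/trace > rcuref-trace.txt

As the machine crashes, it is probably also worth booting with
ftrace_dump_on_oops on the kernel command line so the buffer gets dumped
to the console when it dies.
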
Btw, how easy is this to reproduce?
Thanks,
tglx
---
--- a/include/linux/rcuref.h
+++ b/include/linux/rcuref.h
@@ -64,8 +64,10 @@ static inline __must_check bool rcuref_g
 	 * Unconditionally increase the reference count. The saturation and
 	 * dead zones provide enough tolerance for this.
 	 */
-	if (likely(!atomic_add_negative_relaxed(1, &ref->refcnt)))
+	if (likely(!atomic_add_negative_relaxed(1, &ref->refcnt))) {
+		trace_printk("get(FASTPATH): %px\n", ref);
 		return true;
+	}
 
 	/* Handle the cases inside the saturation and dead zones */
 	return rcuref_get_slowpath(ref);
@@ -84,8 +86,10 @@ static __always_inline __must_check bool
 	 * Unconditionally decrease the reference count. The saturation and
 	 * dead zones provide enough tolerance for this.
 	 */
-	if (likely(!atomic_add_negative_release(-1, &ref->refcnt)))
+	if (likely(!atomic_add_negative_release(-1, &ref->refcnt))) {
+		trace_printk("put(FASTPATH): %px\n", ref);
 		return false;
+	}
 
 	/*
 	 * Handle the last reference drop and cases inside the saturation
--- a/lib/rcuref.c
+++ b/lib/rcuref.c
@@ -200,6 +200,7 @@ bool rcuref_get_slowpath(rcuref_t *ref)
 	 */
 	if (cnt >= RCUREF_RELEASED) {
 		atomic_set(&ref->refcnt, RCUREF_DEAD);
+		trace_printk("get(DEAD): %px %pS\n", ref, __builtin_return_address(0));
 		return false;
 	}
@@ -211,8 +212,15 @@ bool rcuref_get_slowpath(rcuref_t *ref)
 	 * object memory, but prevents the obvious reference count overflow
 	 * damage.
 	 */
-	if (WARN_ONCE(cnt > RCUREF_MAXREF, "rcuref saturated - leaking memory"))
+	if (cnt > RCUREF_MAXREF) {
+		trace_printk("get(SATURATED): %px %pS\n", ref, __builtin_return_address(0));
+		WARN_ONCE(1, "rcuref saturated - leaking memory");
 		atomic_set(&ref->refcnt, RCUREF_SATURATED);
+	} else {
+		trace_printk("get(UNDEFINED): %px %pS\n", ref, __builtin_return_address(0));
+		WARN_ON_ONCE(1);
+	}
+
 	return true;
 }
 EXPORT_SYMBOL_GPL(rcuref_get_slowpath);
@@ -248,9 +256,12 @@ bool rcuref_put_slowpath(rcuref_t *ref)
 		 * require a retry. If this fails the caller is not
 		 * allowed to deconstruct the object.
 		 */
-		if (!atomic_try_cmpxchg_release(&ref->refcnt, &cnt, RCUREF_DEAD))
+		if (!atomic_try_cmpxchg_release(&ref->refcnt, &cnt, RCUREF_DEAD)) {
+			trace_printk("put(NOTDEAD): %px %pS\n", ref, __builtin_return_address(0));
 			return false;
+		}
 
+		trace_printk("put(NOWDEAD): %px %pS\n", ref, __builtin_return_address(0));
 		/*
 		 * The caller can safely schedule the object for
 		 * deconstruction. Provide acquire ordering.
@@ -264,7 +275,9 @@ bool rcuref_put_slowpath(rcuref_t *ref)
 	 * put() operation is imbalanced. Warn, put the reference count back to
 	 * DEAD and tell the caller to not deconstruct the object.
 	 */
-	if (WARN_ONCE(cnt >= RCUREF_RELEASED, "rcuref - imbalanced put()")) {
+	if (cnt >= RCUREF_RELEASED) {
+		trace_printk("put(WASDEAD): %px %pS\n", ref, __builtin_return_address(0));
+		WARN_ONCE(1, "rcuref - imbalanced put()");
 		atomic_set(&ref->refcnt, RCUREF_DEAD);
 		return false;
 	}
@@ -274,8 +287,13 @@ bool rcuref_put_slowpath(rcuref_t *ref)
 	 * mean saturation value and tell the caller to not deconstruct the
 	 * object.
 	 */
-	if (cnt > RCUREF_MAXREF)
+	if (cnt > RCUREF_MAXREF) {
+		trace_printk("put(SATURATED): %px %pS\n", ref, __builtin_return_address(0));
 		atomic_set(&ref->refcnt, RCUREF_SATURATED);
+	} else {
+		trace_printk("put(UNDEFINED): %px %pS\n", ref, __builtin_return_address(0));
+		WARN_ON_ONCE(1);
+	}
 	return false;
 }
 EXPORT_SYMBOL_GPL(rcuref_put_slowpath);