Message-ID: <20211208183906.468934357@infradead.org>
Date: Wed, 08 Dec 2021 19:36:57 +0100
From: Peter Zijlstra <peterz@...radead.org>
To: will@...nel.org, boqun.feng@...il.com
Cc: linux-kernel@...r.kernel.org, x86@...nel.org, peterz@...radead.org,
mark.rutland@....com, elver@...gle.com, keescook@...omium.org,
hch@...radead.org, torvalds@...ux-foundation.org
Subject: [RFC][PATCH 2/5] refcount: Use atomic_*_ofl()
Use the shiny new atomic_*_ofl() functions in order to have better
code-gen.
Notably, the refcount_inc() case no longer distinguishes between
inc-from-zero and inc-negative in the fast path, which improves the
code-gen from:
4b88: b8 01 00 00 00 mov $0x1,%eax
4b8d: f0 0f c1 43 28 lock xadd %eax,0x28(%rbx)
4b92: 85 c0 test %eax,%eax
4b94: 74 1b je 4bb1 <alloc_perf_context+0xf1>
4b96: 8d 50 01 lea 0x1(%rax),%edx
4b99: 09 c2 or %eax,%edx
4b9b: 78 20 js 4bbd <alloc_perf_context+0xfd>
to:
4768: b8 01 00 00 00 mov $0x1,%eax
476d: f0 0f c1 43 28 lock xadd %eax,0x28(%rbx)
4772: 85 c0 test %eax,%eax
4774: 7e 14 jle 478a <alloc_perf_context+0xea>
without sacrificing functionality; the only thing that suffers is the
reported error condition, which might now 'spuriously' report
'saturated' instead of 'inc-from-zero'.
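
For illustration only (not part of this patch, and not necessarily how
the primitive introduced earlier in the series is implemented), the
fast-path contract the new code relies on can be sketched with existing
atomics; the *_sketch names below are made up:

/*
 * Hedged sketch: atomic_inc_ofl(v, label) increments and branches to
 * @label when the old value was zero or negative, i.e. the single
 * "test; jle" pair in the code-gen above.
 */
#define atomic_inc_ofl_sketch(v, label)				\
do {								\
	if (unlikely(atomic_fetch_inc(v) <= 0))			\
		goto label;					\
} while (0)

static inline void refcount_inc_sketch(refcount_t *r)
{
	atomic_inc_ofl_sketch(&r->refs, Eoverflow);
	return;
Eoverflow:
	refcount_warn_saturate(r, REFCOUNT_ADD_OVF);
}
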
refcount_dec_and_test() is similarly improved, from:
aa40: b8 ff ff ff ff mov $0xffffffff,%eax
aa45: f0 0f c1 07 lock xadd %eax,(%rdi)
aa49: 83 f8 01 cmp $0x1,%eax
aa4c: 74 05 je aa53 <ring_buffer_put+0x13>
aa4e: 85 c0 test %eax,%eax
aa50: 7e 1e jle aa70 <ring_buffer_put+0x30>
aa52: c3 ret
to:
a980: b8 ff ff ff ff mov $0xffffffff,%eax
a985: f0 0f c1 07 lock xadd %eax,(%rdi)
a989: 83 e8 01 sub $0x1,%eax
a98c: 78 20 js a9ae <ring_buffer_put+0x2e>
a98e: 74 01 je a991 <ring_buffer_put+0x11>
a990: c3 ret
XXX: atomic_dec_and_test_ofl() is strictly more strongly ordered than
refcount_dec_and_test() is defined to be -- what about Power and
Arrghh64?
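
For reference, what refcount_dec_and_test() boils down to can be
sketched in plain C with existing atomics (illustration only; the
*_sketch name is made up). Note the sketch's atomic_dec_return() is
fully ordered, which is exactly the stronger-than-documented ordering
the XXX above refers to; refcount_dec_and_test() is only documented to
provide release ordering plus an acquire on the final put.

/*
 * Hedged sketch, mirroring the "sub $0x1; js; je" sequence above:
 * underflow (new value went negative) takes the error path, hitting
 * zero returns true so the caller can go and free the object.
 */
static inline __must_check bool refcount_dec_and_test_sketch(refcount_t *r)
{
	int new = atomic_dec_return(&r->refs);	/* fully ordered */

	if (unlikely(new < 0)) {
		refcount_warn_saturate(r, REFCOUNT_SUB_UAF);
		return false;
	}

	return new == 0;
}
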
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
---
include/linux/refcount.h | 15 ++++++++++++---
lib/refcount.c | 5 +++++
2 files changed, 17 insertions(+), 3 deletions(-)
--- a/include/linux/refcount.h
+++ b/include/linux/refcount.h
@@ -264,7 +264,10 @@ static inline void __refcount_inc(refcou
  */
 static inline void refcount_inc(refcount_t *r)
 {
-	__refcount_inc(r, NULL);
+	atomic_inc_ofl(&r->refs, Eoverflow);
+	return;
+Eoverflow:
+	refcount_warn_saturate(r, REFCOUNT_ADD_OVF);
 }
 
 static inline __must_check bool __refcount_sub_and_test(int i, refcount_t *r, int *oldp)
@@ -330,7 +333,10 @@ static inline __must_check bool __refcou
  */
 static inline __must_check bool refcount_dec_and_test(refcount_t *r)
 {
-	return __refcount_dec_and_test(r, NULL);
+	return atomic_dec_and_test_ofl(&r->refs, Eoverflow);
+Eoverflow:
+	refcount_warn_saturate(r, REFCOUNT_SUB_UAF);
+	return false;
 }
 
 static inline void __refcount_dec(refcount_t *r, int *oldp)
@@ -356,7 +362,10 @@ static inline void __refcount_dec(refcou
  */
 static inline void refcount_dec(refcount_t *r)
 {
-	__refcount_dec(r, NULL);
+	atomic_dec_ofl(&r->refs, Eoverflow);
+	return;
+Eoverflow:
+	refcount_warn_saturate(r, REFCOUNT_DEC_LEAK);
 }
 
 extern __must_check bool refcount_dec_if_one(refcount_t *r);
--- a/lib/refcount.c
+++ b/lib/refcount.c
@@ -12,8 +12,13 @@
 
 void refcount_warn_saturate(refcount_t *r, enum refcount_saturation_type t)
 {
+	int old = refcount_read(r);
 	refcount_set(r, REFCOUNT_SATURATED);
 
+	/* racy; who cares */
+	if (old == 1 && t == REFCOUNT_ADD_OVF)
+		t = REFCOUNT_ADD_UAF;
+
 	switch (t) {
 	case REFCOUNT_ADD_NOT_ZERO_OVF:
 		REFCOUNT_WARN("saturated; leaking memory");
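
To spell out the read-back heuristic above with a concrete, race-free
trace (illustrative values): an inc-from-zero does lock xadd on a
counter holding 0, leaving it at 1 and branching to the overflow label
with t == REFCOUNT_ADD_OVF; refcount_warn_saturate() then reads back 1
and flips t to REFCOUNT_ADD_UAF, so the inc-from-zero (use-after-free)
diagnosis is still reported in the common case. Only when another CPU
touches the counter between the xadd and the read-back does the report
degrade to a plain 'saturated', hence the 'racy; who cares'.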