Message-ID: <1320192874.4728.19.camel@edumazet-laptop>
Date: Wed, 02 Nov 2011 01:14:34 +0100
From: Eric Dumazet <eric.dumazet@...il.com>
To: Linus Torvalds <torvalds@...ux-foundation.org>
Cc: Andi Kleen <andi@...stfloor.org>,
Ben Hutchings <bhutchings@...arflare.com>,
linux-kernel <linux-kernel@...r.kernel.org>,
netdev <netdev@...r.kernel.org>,
Andrew Morton <akpm@...ux-foundation.org>
Subject: Re: [RFC] should VM_BUG_ON(cond) really evaluate cond
On Sunday, 30 October 2011 at 11:09 -0700, Linus Torvalds wrote:
> Argh. Ok. Testing a refcount in a const struct doesn't make much
> sense, but there does seem to be perfectly valid uses of it
> (sk_wmem_alloc etc).
>
> Annoying. I guess we have to have those casts. Grr.
>
OK, please check the following patch then.
Thanks!
[PATCH v3] atomic: introduce ACCESS_AT_MOST_ONCE() helper
In commit 4e60c86bd9e ("gcc-4.6: mm: fix unused but set warnings"),
Andi forced VM_BUG_ON(cond) to evaluate cond, even if CONFIG_DEBUG_VM is
not set:
#ifdef CONFIG_DEBUG_VM
#define VM_BUG_ON(cond) BUG_ON(cond)
#else
#define VM_BUG_ON(cond) do { (void)(cond); } while (0)
#endif
As a side effect, get_page()/put_page_testzero() perform extra bus
transactions on a contended cache line in some workloads (tcp_sendmsg()
for example, where a page acts as a shared buffer):
0,05 : ffffffff815e4775: je ffffffff815e4970 <tcp_sendmsg+0xc80>
0,05 : ffffffff815e477b: mov 0x1c(%r9),%eax // useless
3,32 : ffffffff815e477f: mov (%r9),%rax // useless
0,51 : ffffffff815e4782: lock incl 0x1c(%r9)
3,87 : ffffffff815e4787: mov (%r9),%rax
0,00 : ffffffff815e478a: test $0x80,%ah
0,00 : ffffffff815e478d: jne ffffffff815e49f2 <tcp_sendmsg+0xd02>
That's because both atomic_read() and constant_test_bit() use a
volatile-qualified access, so the compiler is forced to perform the
read even if the result is never used.
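For illustration, here is a standalone sketch (userspace code, not part
of this patch) of the difference:

static inline int read_volatile(const int *p)
{
	return *(volatile const int *)p; /* load always emitted */
}

static inline int read_plain(const int *p)
{
	return *p; /* a dead load may be deleted by the optimizer */
}

"(void)read_volatile(&x);" still emits a mov, while
"(void)read_plain(&x);" typically compiles to nothing once the
optimizer runs.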
Linus suggested using an asm("") trick and placing it in a variant of
ACCESS_ONCE(), allowing the compiler to omit the memory read when the
result is unused: since the asm is not volatile-qualified, gcc may
delete it once its output is dead, and the plain load feeding it then
disappears as well.
This patch introduces the ACCESS_AT_MOST_ONCE() helper and uses it in
the x86 implementations of atomic_read() and constant_test_bit().
It is also used in the x86_64 atomic64_read() implementation.
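To illustrate the effect at a call site, here is a sketch modeled on
the get_page() of this era (assuming CONFIG_DEBUG_VM=n, so VM_BUG_ON()
expands to "(void)(cond)"):

static inline void get_page(struct page *page)
{
	/*
	 * With ACCESS_AT_MOST_ONCE() underneath atomic_read(), this
	 * discarded read no longer forces a load of the contended
	 * cache line; only the lock incl below touches it.
	 */
	VM_BUG_ON(atomic_read(&page->_count) <= 0);
	atomic_inc(&page->_count);
}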
On x86_64, this reduces vmlinux text size a bit (with CONFIG_DEBUG_VM=n):
# size vmlinux.old vmlinux.new
    text     data      bss       dec     hex  filename
10706848  2894216  1540096  15141160  e70928  vmlinux.old
10704040  2894216  1540096  15138352  e6fe30  vmlinux.new
This is basically an extension of a prior patch from Linus.
Signed-off-by: Eric Dumazet <eric.dumazet@...il.com>
---
arch/x86/include/asm/atomic.h | 5 +----
arch/x86/include/asm/atomic64_64.h | 6 ++----
arch/x86/include/asm/bitops.h | 6 ++++--
include/asm-generic/atomic.h | 2 +-
include/linux/compiler.h | 10 ++++++++++
5 files changed, 18 insertions(+), 11 deletions(-)
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index 58cb6d4..2581008 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -20,10 +20,7 @@
*
* Atomically reads the value of @v.
*/
-static inline int atomic_read(const atomic_t *v)
-{
- return (*(volatile int *)&(v)->counter);
-}
+#define atomic_read(v) ACCESS_AT_MOST_ONCE(*(int *)&(v)->counter)
/**
* atomic_set - set atomic variable
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
index 0e1cbfc..15bbad4 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -1,6 +1,7 @@
#ifndef _ASM_X86_ATOMIC64_64_H
#define _ASM_X86_ATOMIC64_64_H
+#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/alternative.h>
#include <asm/cmpxchg.h>
@@ -16,10 +17,7 @@
* Atomically reads the value of @v.
* Doesn't imply a read memory barrier.
*/
-static inline long atomic64_read(const atomic64_t *v)
-{
- return (*(volatile long *)&(v)->counter);
-}
+#define atomic64_read(v) ACCESS_AT_MOST_ONCE(*(long *)&(v)->counter)
/**
* atomic64_set - set atomic64 variable
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index 1775d6e..6dcf4b1 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -308,8 +308,10 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
static __always_inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
{
- return ((1UL << (nr % BITS_PER_LONG)) &
- (addr[nr / BITS_PER_LONG])) != 0;
+ unsigned long *word = (unsigned long *)addr + (nr / BITS_PER_LONG);
+ unsigned long bit = 1UL << (nr % BITS_PER_LONG);
+
+ return (bit & ACCESS_AT_MOST_ONCE(*word)) != 0;
}
static inline int variable_test_bit(int nr, volatile const unsigned long *addr)
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index e37963c..76ab683 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -39,7 +39,7 @@
* Atomically reads the value of @v.
*/
#ifndef atomic_read
-#define atomic_read(v) (*(volatile int *)&(v)->counter)
+#define atomic_read(v) ACCESS_AT_MOST_ONCE(*(int *)&(v)->counter)
#endif
/**
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 320d6c9..307f342 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -308,4 +308,14 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
*/
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
+/*
+ * Like ACCESS_ONCE, but can be optimized away if nothing uses the value,
+ * and/or merged with previous non-ONCE accesses.
+ */
+#define ACCESS_AT_MOST_ONCE(x) \
+ ({ typeof(x) __y; \
+ asm("":"=r" (__y):"0" (x)); \
+ __y; \
+ })
+
#endif /* __LINUX_COMPILER_H */
--