Message-Id: <1436812263-15243-1-git-send-email-dvlasenk@redhat.com>
Date: Mon, 13 Jul 2015 20:31:03 +0200
From: Denys Vlasenko <dvlasenk@...hat.com>
To: Ingo Molnar <mingo@...nel.org>
Cc: Denys Vlasenko <dvlasenk@...hat.com>, Thomas Graf <tgraf@...g.ch>,
Bart Van Assche <bvanassche@....org>,
Peter Zijlstra <peterz@...radead.org>,
David Rientjes <rientjes@...gle.com>,
Andrew Morton <akpm@...ux-foundation.org>,
"Paul E. McKenney" <paulmck@...ux.vnet.ibm.com>,
linux-kernel@...r.kernel.org
Subject: [PATCH] force inlining of spinlock ops

With both gcc 4.7.2 and 4.9.2, sometimes gcc mysteriously doesn't inline
very small functions we expect to be inlined. See
https://gcc.gnu.org/bugzilla/show_bug.cgi?id=66122

In particular, with this config:
http://busybox.net/~vda/kernel_config
there are more than a thousand copies of tiny spinlock-related functions:

$ nm --size-sort vmlinux | grep -iF ' t ' | uniq -c | grep -v '^ *1 ' | sort -rn | grep ' spin'
    473 000000000000000b t spin_unlock_irqrestore
    292 000000000000000b t spin_unlock
    215 000000000000000b t spin_lock
    134 000000000000000b t spin_unlock_irq
    130 000000000000000b t spin_unlock_bh
    120 000000000000000b t spin_lock_irq
    106 000000000000000b t spin_lock_bh

Disassembly:

ffffffff81004720 <spin_lock>:
ffffffff81004720:	55			push   %rbp
ffffffff81004721:	48 89 e5		mov    %rsp,%rbp
ffffffff81004724:	e8 f8 4e e2 02		callq  <_raw_spin_lock>
ffffffff81004729:	5d			pop    %rbp
ffffffff8100472a:	c3			retq
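
Aside, not part of the patch: a minimal sketch of the difference the fix
relies on, using made-up function names. Plain "static inline" is only a
hint which gcc's inlining heuristics are free to ignore (hence the
out-of-line copies above), while the always_inline attribute makes gcc
expand the body at every call site:

	/* Sketch only; real_lock/wrapper_* are illustrative names. */
	extern void real_lock(int *lock);

	/* Hint only: gcc may still emit this as a separate function,
	 * depending on its heuristics (see the bugzilla entry above). */
	static inline void wrapper_hint(int *lock)
	{
		real_lock(lock);
	}

	/* Forced: gcc must expand this into every caller. */
	static inline __attribute__((always_inline))
	void wrapper_forced(int *lock)
	{
		real_lock(lock);
	}

	void caller(int *lock)
	{
		wrapper_hint(lock);
		wrapper_forced(lock);
	}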

This patch fixes this via s/inline/__always_inline/ in spinlock.h.
This decreases vmlinux by about 40k:

      text     data      bss       dec     hex filename
  82375570 22255544 20627456 125258570 7774b4a vmlinux.before
  82335059 22255416 20627456 125217931 776ac8b vmlinux
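
(For reference, not part of this patch: with gcc, __always_inline boils
down to the always_inline function attribute; roughly, from
include/linux/compiler-gcc.h:

	/* Rough sketch of the existing kernel definition: an inline hint
	 * plus the attribute that overrides gcc's "don't inline" decisions. */
	#define __always_inline	inline __attribute__((always_inline))

so the change below only strengthens the existing hint; it does not alter
what the wrappers do.)
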
Signed-off-by: Denys Vlasenko <dvlasenk@...hat.com>
Cc: Ingo Molnar <mingo@...nel.org>
Cc: Thomas Graf <tgraf@...g.ch>
Cc: Bart Van Assche <bvanassche@....org>
Cc: Peter Zijlstra <peterz@...radead.org>
Cc: David Rientjes <rientjes@...gle.com>
Cc: Andrew Morton <akpm@...ux-foundation.org>
Cc: Paul E. McKenney <paulmck@...ux.vnet.ibm.com>
Cc: linux-kernel@...r.kernel.org
---
 include/linux/spinlock.h | 30 +++++++++++++++---------------
 1 file changed, 15 insertions(+), 15 deletions(-)

diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 3e18379..073925d 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -296,7 +296,7 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
  * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
  */
 
-static inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
+static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
 {
 	return &lock->rlock;
 }
@@ -307,17 +307,17 @@ do { \
 	raw_spin_lock_init(&(_lock)->rlock);	\
 } while (0)
 
-static inline void spin_lock(spinlock_t *lock)
+static __always_inline void spin_lock(spinlock_t *lock)
 {
 	raw_spin_lock(&lock->rlock);
 }
 
-static inline void spin_lock_bh(spinlock_t *lock)
+static __always_inline void spin_lock_bh(spinlock_t *lock)
 {
 	raw_spin_lock_bh(&lock->rlock);
 }
 
-static inline int spin_trylock(spinlock_t *lock)
+static __always_inline int spin_trylock(spinlock_t *lock)
 {
 	return raw_spin_trylock(&lock->rlock);
 }
@@ -337,7 +337,7 @@ do { \
 	raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);	\
 } while (0)
 
-static inline void spin_lock_irq(spinlock_t *lock)
+static __always_inline void spin_lock_irq(spinlock_t *lock)
 {
 	raw_spin_lock_irq(&lock->rlock);
 }
@@ -352,32 +352,32 @@ do { \
 	raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass);	\
 } while (0)
 
-static inline void spin_unlock(spinlock_t *lock)
+static __always_inline void spin_unlock(spinlock_t *lock)
 {
 	raw_spin_unlock(&lock->rlock);
 }
 
-static inline void spin_unlock_bh(spinlock_t *lock)
+static __always_inline void spin_unlock_bh(spinlock_t *lock)
 {
 	raw_spin_unlock_bh(&lock->rlock);
 }
 
-static inline void spin_unlock_irq(spinlock_t *lock)
+static __always_inline void spin_unlock_irq(spinlock_t *lock)
 {
 	raw_spin_unlock_irq(&lock->rlock);
 }
 
-static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
+static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
 {
 	raw_spin_unlock_irqrestore(&lock->rlock, flags);
 }
 
-static inline int spin_trylock_bh(spinlock_t *lock)
+static __always_inline int spin_trylock_bh(spinlock_t *lock)
 {
 	return raw_spin_trylock_bh(&lock->rlock);
 }
 
-static inline int spin_trylock_irq(spinlock_t *lock)
+static __always_inline int spin_trylock_irq(spinlock_t *lock)
 {
 	return raw_spin_trylock_irq(&lock->rlock);
 }
@@ -387,22 +387,22 @@ static inline int spin_trylock_irq(spinlock_t *lock)
 	raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
 })
 
-static inline void spin_unlock_wait(spinlock_t *lock)
+static __always_inline void spin_unlock_wait(spinlock_t *lock)
 {
 	raw_spin_unlock_wait(&lock->rlock);
 }
 
-static inline int spin_is_locked(spinlock_t *lock)
+static __always_inline int spin_is_locked(spinlock_t *lock)
 {
 	return raw_spin_is_locked(&lock->rlock);
 }
 
-static inline int spin_is_contended(spinlock_t *lock)
+static __always_inline int spin_is_contended(spinlock_t *lock)
 {
 	return raw_spin_is_contended(&lock->rlock);
 }
 
-static inline int spin_can_lock(spinlock_t *lock)
+static __always_inline int spin_can_lock(spinlock_t *lock)
 {
 	return raw_spin_can_lock(&lock->rlock);
 }
--
1.8.1.4