[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20180515141124.84254-6-namit@vmware.com>
Date: Tue, 15 May 2018 07:11:12 -0700
From: Nadav Amit <namit@...are.com>
To: <linux-kernel@...r.kernel.org>
CC: <nadav.amit@...il.com>, Nadav Amit <namit@...are.com>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>,
"H. Peter Anvin" <hpa@...or.com>, <x86@...nel.org>,
Kees Cook <keescook@...omium.org>,
Jan Beulich <JBeulich@...e.com>,
Josh Poimboeuf <jpoimboe@...hat.com>
Subject: [RFC 5/8] x86: refcount: prevent gcc distortions
GCC considers the number of statements in inlined assembly blocks,
according to new-lines and semicolons, as an indication of the cost of
the block in time and space. This data is distorted by the kernel code,
which puts information in alternative sections. As a result, the
compiler may perform incorrect inlining and branch optimizations.
The solution is to define an assembly macro and call it from the inlined
assembly block. As a result, GCC considers the inline assembly block as
a single instruction.
This patch allows functions such as __get_seccomp_filter() to be inlined.
The effect of the patch is as follows on the kernel size:
text data bss dec hex filename
18146418 10064100 2936832 31147350 1db4556 ./vmlinux before
18148228 10063968 2936832 31149028 1db4be4 ./vmlinux after (+1678)
Static text symbols:
Before: 39673
After: 39649 (-24)
Cc: Thomas Gleixner <tglx@...utronix.de>
Cc: Ingo Molnar <mingo@...hat.com>
Cc: "H. Peter Anvin" <hpa@...or.com>
Cc: x86@...nel.org
Cc: Kees Cook <keescook@...omium.org>
Cc: Jan Beulich <JBeulich@...e.com>
Cc: Josh Poimboeuf <jpoimboe@...hat.com>
Signed-off-by: Nadav Amit <namit@...are.com>
---
arch/x86/include/asm/refcount.h | 55 ++++++++++++++++++++-------------
1 file changed, 33 insertions(+), 22 deletions(-)
diff --git a/arch/x86/include/asm/refcount.h b/arch/x86/include/asm/refcount.h
index 4cf11d88d3b3..a668c534206d 100644
--- a/arch/x86/include/asm/refcount.h
+++ b/arch/x86/include/asm/refcount.h
@@ -14,34 +14,43 @@
* central refcount exception. The fixup address for the exception points
* back to the regular execution flow in .text.
*/
-#define _REFCOUNT_EXCEPTION \
- ".pushsection .text..refcount\n" \
- "111:\tlea %[counter], %%" _ASM_CX "\n" \
- "112:\t" ASM_UD2 "\n" \
- ASM_UNREACHABLE \
- ".popsection\n" \
- "113:\n" \
+
+asm ("\n"
+ ".macro __REFCOUNT_EXCEPTION counter:vararg\n\t"
+ ".pushsection .text..refcount\n"
+ "111:\tlea \\counter, %" _ASM_CX "\n"
+ "112:\t" ASM_UD2 "\n\t"
+ ASM_UNREACHABLE
+ ".popsection\n\t"
+ "113:\n"
_ASM_EXTABLE_REFCOUNT(112b, 113b)
+ ".endm");
/* Trigger refcount exception if refcount result is negative. */
-#define REFCOUNT_CHECK_LT_ZERO \
- "js 111f\n\t" \
- _REFCOUNT_EXCEPTION
+asm ("\n"
+ ".macro __REFCOUNT_CHECK_LT_ZERO counter:vararg\n"
+ "js 111f\n\t"
+ "__REFCOUNT_EXCEPTION \\counter\n"
+ ".endm");
/* Trigger refcount exception if refcount result is zero or negative. */
-#define REFCOUNT_CHECK_LE_ZERO \
- "jz 111f\n\t" \
- REFCOUNT_CHECK_LT_ZERO
+asm ("\n"
+ ".macro __REFCOUNT_CHECK_LE_ZERO counter:vararg\n"
+ "jz 111f\n\t"
+ "__REFCOUNT_CHECK_LT_ZERO counter=\\counter\n"
+ ".endm");
/* Trigger refcount exception unconditionally. */
-#define REFCOUNT_ERROR \
- "jmp 111f\n\t" \
- _REFCOUNT_EXCEPTION
+asm ("\n"
+ ".macro __REFCOUNT_ERROR counter:vararg\n\t"
+ "jmp 111f\n\t"
+ "__REFCOUNT_EXCEPTION counter=\\counter\n"
+ ".endm");
static __always_inline void refcount_add(unsigned int i, refcount_t *r)
{
asm volatile(LOCK_PREFIX "addl %1,%0\n\t"
- REFCOUNT_CHECK_LT_ZERO
+ "__REFCOUNT_CHECK_LT_ZERO %[counter]"
: [counter] "+m" (r->refs.counter)
: "ir" (i)
: "cc", "cx");
@@ -50,7 +59,7 @@ static __always_inline void refcount_add(unsigned int i, refcount_t *r)
static __always_inline void refcount_inc(refcount_t *r)
{
asm volatile(LOCK_PREFIX "incl %0\n\t"
- REFCOUNT_CHECK_LT_ZERO
+ "__REFCOUNT_CHECK_LT_ZERO %[counter]"
: [counter] "+m" (r->refs.counter)
: : "cc", "cx");
}
@@ -58,7 +67,7 @@ static __always_inline void refcount_inc(refcount_t *r)
static __always_inline void refcount_dec(refcount_t *r)
{
asm volatile(LOCK_PREFIX "decl %0\n\t"
- REFCOUNT_CHECK_LE_ZERO
+ "__REFCOUNT_CHECK_LE_ZERO %[counter]"
: [counter] "+m" (r->refs.counter)
: : "cc", "cx");
}
@@ -66,13 +75,15 @@ static __always_inline void refcount_dec(refcount_t *r)
static __always_inline __must_check
bool refcount_sub_and_test(unsigned int i, refcount_t *r)
{
- GEN_BINARY_SUFFIXED_RMWcc(LOCK_PREFIX "subl", REFCOUNT_CHECK_LT_ZERO,
+ GEN_BINARY_SUFFIXED_RMWcc(LOCK_PREFIX "subl",
+ "__REFCOUNT_CHECK_LT_ZERO %[counter]",
r->refs.counter, "er", i, "%0", e, "cx");
}
static __always_inline __must_check bool refcount_dec_and_test(refcount_t *r)
{
- GEN_UNARY_SUFFIXED_RMWcc(LOCK_PREFIX "decl", REFCOUNT_CHECK_LT_ZERO,
+ GEN_UNARY_SUFFIXED_RMWcc(LOCK_PREFIX "decl",
+ "__REFCOUNT_CHECK_LT_ZERO %[counter]",
r->refs.counter, "%0", e, "cx");
}
@@ -90,7 +101,7 @@ bool refcount_add_not_zero(unsigned int i, refcount_t *r)
/* Did we try to increment from/to an undesirable state? */
if (unlikely(c < 0 || c == INT_MAX || result < c)) {
- asm volatile(REFCOUNT_ERROR
+ asm volatile("__REFCOUNT_ERROR %[counter]"
: : [counter] "m" (r->refs.counter)
: "cc", "cx");
break;
--
2.17.0
Powered by blists - more mailing lists