Message-Id: <20190718174110.4635-7-namit@vmware.com>
Date: Thu, 18 Jul 2019 10:41:09 -0700
From: Nadav Amit <namit@...are.com>
To: Peter Zijlstra <peterz@...radead.org>
Cc: Andy Lutomirski <luto@...nel.org>, x86@...nel.org,
linux-kernel@...r.kernel.org,
Dave Hansen <dave.hansen@...ux.intel.com>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>, Nadav Amit <namit@...are.com>
Subject: [RFC 6/7] x86/percpu: Optimized arch_raw_cpu_ptr()
Implementing arch_raw_cpu_ptr() in C allows the compiler to perform
better optimizations, such as using the per-CPU offset as a base for the
address computation instead of emitting a separate add instruction.
The benefit of this computation is only relevant when compiler segment
qualifiers are used. This method cannot be used when the address size is
greater than the maximum operand size, as is the case when building
vdso32.
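
For illustration only (this sketch is not part of the patch): with the
compiler's named address-space support, the per-CPU offset can be read
through a %gs-qualified variable and added with plain C arithmetic,
which the compiler is then free to fold into the addressing mode of the
final memory access. The names below are made up for the sketch:

	/* %gs-relative load of the per-CPU offset (GCC __seg_gs extension) */
	extern unsigned long __seg_gs this_cpu_off_sketch;

	static inline void *cpu_ptr_sketch(void *ptr)
	{
		/*
		 * Plain C addition: rather than forcing a separate
		 * "add" instruction, the compiler may absorb the
		 * offset into the memory operand of the eventual
		 * user, as in the xadd example below.
		 */
		return (void *)((unsigned long)ptr + this_cpu_off_sketch);
	}
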
Distinguish between the case in which preemption is disabled (as when
this_cpu_ptr() is used) and the case in which it may be enabled (as when
raw_cpu_ptr() is used).
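
As a rough usage sketch (hypothetical caller, not from this patch): once
preemption is disabled, the CPU cannot change underneath us, so the
preemption-disabled variant may use a non-volatile asm (or plain C) form
that the compiler can cache and reorder, while the preemption-enabled
variant keeps the volatile qualifier so the offset load is not hoisted
across a possible migration point:

	DEFINE_PER_CPU(unsigned long, events_sketch);	/* hypothetical */

	static void count_event(void)
	{
		preempt_disable();
		/*
		 * Preemption is off: this_cpu_ptr() may use the cheaper
		 * arch_raw_cpu_ptr_preemption_disabled() form, and the
		 * compiler may CSE repeated this_cpu_off reads.
		 */
		(*this_cpu_ptr(&events_sketch))++;
		preempt_enable();
	}
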
This enables further optimizations. For instance, in
rcu_dynticks_eqs_exit(), the following code:
mov $0x2bbc0,%rax
add %gs:0x7ef07570(%rip),%rax # 0x10358 <this_cpu_off>
lock xadd %edx,0xd8(%rax)
Turns with this patch into:
mov %gs:0x7ef08aa5(%rip),%rax # 0x10358 <this_cpu_off>
lock xadd %edx,0x2bc58(%rax)
Signed-off-by: Nadav Amit <namit@...are.com>
---
arch/x86/include/asm/percpu.h | 25 +++++++++++++++++++++++--
1 file changed, 23 insertions(+), 2 deletions(-)
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 13987f9bc82f..8bac7db397cc 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -73,20 +73,41 @@
#endif /* USE_X86_SEG_SUPPORT */
#define __my_cpu_offset this_cpu_read(this_cpu_off)
+#define __raw_my_cpu_offset __this_cpu_read(this_cpu_off)
+#define __my_cpu_ptr(ptr) (__my_cpu_type(*(ptr)) *)(uintptr_t)(ptr)
+#if USE_X86_SEG_SUPPORT && (!defined(BUILD_VDSO32) || defined(CONFIG_X86_64))
+/*
+ * Efficient implementation for cases in which the compiler supports C segments.
+ * Allows the compiler to perform additional optimizations that can save more
+ * instructions.
+ *
+ * This optimized version can only be used if the pointer size equals the
+ * native operand size, which is not the case when building vdso32.
+ */
+#define __arch_raw_cpu_ptr_qual(qual, ptr) \
+({ \
+ (qual typeof(*(ptr)) __kernel __force *)((uintptr_t)(ptr) + \
+ __my_cpu_offset); \
+})
+#else /* USE_X86_SEG_SUPPORT && (!defined(BUILD_VDSO32) || defined(CONFIG_X86_64)) */
/*
* Compared to the generic __my_cpu_offset version, the following
* saves one instruction and avoids clobbering a temp register.
*/
-#define arch_raw_cpu_ptr(ptr) \
+#define __arch_raw_cpu_ptr_qual(qual, ptr) \
({ \
unsigned long tcp_ptr__; \
- asm volatile("add " __percpu_arg(1) ", %0" \
+ asm qual ("add " __percpu_arg(1) ", %0" \
: "=r" (tcp_ptr__) \
: "m" (__my_cpu_var(this_cpu_off)),\
"0" (ptr)); \
(typeof(*(ptr)) __kernel __force *)tcp_ptr__; \
})
+#endif /* USE_X86_SEG_SUPPORT && (!defined(BUILD_VDSO32) || defined(CONFIG_X86_64)) */
+
+#define arch_raw_cpu_ptr(ptr) __arch_raw_cpu_ptr_qual(volatile, ptr)
+#define arch_raw_cpu_ptr_preemption_disabled(ptr) __arch_raw_cpu_ptr_qual( , ptr)
#else /* CONFIG_SMP */
#define __percpu_seg_override
#define __percpu_prefix ""
--
2.17.1