Message-ID: <20230804181443.GJ214207@hirez.programming.kicks-ass.net>
Date: Fri, 4 Aug 2023 20:14:43 +0200
From: Peter Zijlstra <peterz@...radead.org>
To: Marco Elver <elver@...gle.com>
Cc: Andrew Morton <akpm@...ux-foundation.org>,
Kees Cook <keescook@...omium.org>,
Guenter Roeck <linux@...ck-us.net>,
Mark Rutland <mark.rutland@....com>,
Steven Rostedt <rostedt@...dmis.org>,
Marc Zyngier <maz@...nel.org>,
Oliver Upton <oliver.upton@...ux.dev>,
James Morse <james.morse@....com>,
Suzuki K Poulose <suzuki.poulose@....com>,
Zenghui Yu <yuzenghui@...wei.com>,
Catalin Marinas <catalin.marinas@....com>,
Will Deacon <will@...nel.org>,
Nathan Chancellor <nathan@...nel.org>,
Nick Desaulniers <ndesaulniers@...gle.com>,
Tom Rix <trix@...hat.com>, Miguel Ojeda <ojeda@...nel.org>,
linux-arm-kernel@...ts.infradead.org, kvmarm@...ts.linux.dev,
linux-kernel@...r.kernel.org, llvm@...ts.linux.dev,
Dmitry Vyukov <dvyukov@...gle.com>,
Alexander Potapenko <glider@...gle.com>,
kasan-dev@...glegroups.com, linux-toolchains@...r.kernel.org
Subject: Re: [PATCH v2 1/3] compiler_types: Introduce the Clang
__preserve_most function attribute
On Fri, Aug 04, 2023 at 11:02:56AM +0200, Marco Elver wrote:
> diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
> index 547ea1ff806e..12c4540335b7 100644
> --- a/include/linux/compiler_types.h
> +++ b/include/linux/compiler_types.h
> @@ -106,6 +106,33 @@ static inline void __chk_io_ptr(const volatile void __iomem *ptr) { }
> #define __cold
> #endif
>
> +/*
> + * On x86-64 and arm64 targets, __preserve_most changes the calling convention
> + * of a function to make the code in the caller as unintrusive as possible. This
> + * convention behaves identically to the C calling convention on how arguments
> + * and return values are passed, but uses a different set of caller- and callee-
> + * saved registers.
> + *
> + * The purpose is to alleviate the burden of saving and recovering a large
> + * register set before and after the call in the caller. This is beneficial for
> + * rarely taken slow paths, such as error-reporting functions that may be called
> + * from hot paths.
> + *
> + * Note: This may conflict with instrumentation inserted on function entry which
> + * does not use __preserve_most or equivalent convention (if in assembly). Since
> + * function tracing assumes the normal C calling convention, __preserve_most
> + * therefore implies notrace wherever the attribute is supported.
> + *
> + * Optional: not supported by gcc.
> + *
> + * clang: https://clang.llvm.org/docs/AttributeReference.html#preserve-most
> + */
> +#if __has_attribute(__preserve_most__)
> +# define __preserve_most notrace __attribute__((__preserve_most__))
> +#else
> +# define __preserve_most
> +#endif
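
The attribute only pays off when the annotated function stays out of
line. As a minimal sketch (the function names here are hypothetical,
not kernel APIs), a rarely taken error path could look like:

	/* slow path, out of line; hot callers keep their registers */
	void __preserve_most report_bad_value(int cpu, long val);

	static inline long check_value(int cpu, long val)
	{
		if (unlikely(val < 0))		/* rarely taken */
			report_bad_value(cpu, val);
		return val;
	}

With the plain C convention, the hot caller would have to treat every
caller-saved register as clobbered across that call even though the
branch is almost never taken.
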
This seems to shrink the ARM64 vmlinux just a little and mirrors what we
do on x86 in asm. I'll leave it to the arm64 people to judge if this is
worth the hassle.
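
(If someone wants numbers, comparing the two builds with something
like

	./scripts/bloat-o-meter vmlinux.before vmlinux.after

should show where the text shrinks; the file names above are
placeholders for whatever builds you compare.)
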
Index: linux-2.6/arch/arm64/include/asm/preempt.h
===================================================================
--- linux-2.6.orig/arch/arm64/include/asm/preempt.h
+++ linux-2.6/arch/arm64/include/asm/preempt.h
@@ -88,15 +88,18 @@ void preempt_schedule_notrace(void);
 #ifdef CONFIG_PREEMPT_DYNAMIC
 
 DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
-void dynamic_preempt_schedule(void);
+void __preserve_most dynamic_preempt_schedule(void);
 #define __preempt_schedule()		dynamic_preempt_schedule()
-void dynamic_preempt_schedule_notrace(void);
+void __preserve_most dynamic_preempt_schedule_notrace(void);
 #define __preempt_schedule_notrace()	dynamic_preempt_schedule_notrace()
 
 #else /* CONFIG_PREEMPT_DYNAMIC */
 
-#define __preempt_schedule()		preempt_schedule()
-#define __preempt_schedule_notrace()	preempt_schedule_notrace()
+void __preserve_most preserve_preempt_schedule(void);
+void __preserve_most preserve_preempt_schedule_notrace(void);
+
+#define __preempt_schedule()		preserve_preempt_schedule()
+#define __preempt_schedule_notrace()	preserve_preempt_schedule_notrace()
 
 #endif /* CONFIG_PREEMPT_DYNAMIC */
 #endif /* CONFIG_PREEMPTION */
Index: linux-2.6/kernel/sched/core.c
===================================================================
--- linux-2.6.orig/kernel/sched/core.c
+++ linux-2.6/kernel/sched/core.c
@@ -6915,7 +6915,7 @@ DEFINE_STATIC_CALL(preempt_schedule, pre
 EXPORT_STATIC_CALL_TRAMP(preempt_schedule);
 #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
 static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule);
-void __sched notrace dynamic_preempt_schedule(void)
+void __sched __preserve_most dynamic_preempt_schedule(void)
 {
 	if (!static_branch_unlikely(&sk_dynamic_preempt_schedule))
 		return;
@@ -6924,6 +6924,11 @@ void __sched notrace dynamic_preempt_sch
 NOKPROBE_SYMBOL(dynamic_preempt_schedule);
 EXPORT_SYMBOL(dynamic_preempt_schedule);
 #endif
+#else
+void __sched __preserve_most preserve_preempt_schedule(void)
+{
+	preempt_schedule();
+}
 #endif
 
 /**
@@ -6988,7 +6993,7 @@ DEFINE_STATIC_CALL(preempt_schedule_notr
 EXPORT_STATIC_CALL_TRAMP(preempt_schedule_notrace);
 #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
 static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule_notrace);
-void __sched notrace dynamic_preempt_schedule_notrace(void)
+void __sched __preserve_most dynamic_preempt_schedule_notrace(void)
 {
 	if (!static_branch_unlikely(&sk_dynamic_preempt_schedule_notrace))
 		return;
@@ -6997,6 +7002,11 @@ void __sched notrace dynamic_preempt_sch
 NOKPROBE_SYMBOL(dynamic_preempt_schedule_notrace);
 EXPORT_SYMBOL(dynamic_preempt_schedule_notrace);
 #endif
+#else
+void __sched __preserve_most preserve_preempt_schedule_notrace(void)
+{
+	preempt_schedule_notrace();
+}
 #endif
 
 #endif /* CONFIG_PREEMPTION */
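
To spell out why the !PREEMPT_DYNAMIC case grows out-of-line wrappers
instead of keeping the plain macros, here is a before/after sketch
taken from the hunks above:

	/* before: every call site uses the normal C convention */
	#define __preempt_schedule()	preempt_schedule()

	/*
	 * after: call sites target a __preserve_most wrapper, so the
	 * register spills move out of the hot caller and into the
	 * rarely taken wrapper, which then calls preempt_schedule()
	 * with the normal C convention.
	 */
	void __preserve_most preserve_preempt_schedule(void);
	#define __preempt_schedule()	preserve_preempt_schedule()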