Message-ID: <20230609174422.04824a9e@gandalf.local.home>
Date:   Fri, 9 Jun 2023 17:44:22 -0400
From:   Steven Rostedt <rostedt@...dmis.org>
To:     LKML <linux-kernel@...r.kernel.org>,
        Linux Trace Kernel <linux-trace-kernel@...r.kernel.org>
Cc:     Masami Hiramatsu <mhiramat@...nel.org>,
        Andrew Morton <akpm@...ux-foundation.org>,
        Thomas Gleixner <tglx@...utronix.de>,
        Peter Zijlstra <peterz@...radead.org>,
        Mark Rutland <mark.rutland@....com>,
        Kees Cook <keescook@...omium.org>,
        Miguel Ojeda <ojeda@...nel.org>,
        Nick Desaulniers <ndesaulniers@...gle.com>,
        Ingo Molnar <mingo@...nel.org>, Song Liu <song@...nel.org>,
        Josh Poimboeuf <jpoimboe@...nel.org>
Subject: [PATCH v2] ftrace: Allow inline functions not inlined to be traced

From: "Steven Rostedt (Google)" <rostedt@...dmis.org>

Over 10 years ago there were many bugs that caused function tracing to
crash because some function marked inline was not inlined by the compiler
and ended up being traced when it should not have been. These bugs were
hard to debug: when a developer tried to reproduce one, it would not
trigger if their compiler still inlined the function. The solution back
then was simply to add "notrace" to the kernel's "inline" macro, which
guarantees that functions marked inline are never traced, even when the
compiler decides not to inline them.

A lot has changed over the last 10 years.

1) ftrace_test_recursion_trylock() is now used by all ftrace hooks, which
   prevents the recursive crashes that were caused by inlined functions
   being traced (see the callback sketch after this list).

2) noinstr is now used to mark pretty much all functions that would also
   cause problems if they were traced.
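
To illustrate point 1: below is a minimal sketch of an ftrace callback
using the recursion guard. The callback and ops names are hypothetical
and not part of this patch; ftrace_test_recursion_trylock(),
ftrace_test_recursion_unlock(), struct ftrace_ops and
register_ftrace_function() are the existing kernel interfaces.

  #include <linux/ftrace.h>

  static void my_trace_callback(unsigned long ip, unsigned long parent_ip,
                                struct ftrace_ops *op,
                                struct ftrace_regs *fregs)
  {
          int bit;

          /* A negative return means we re-entered this callback path. */
          bit = ftrace_test_recursion_trylock(ip, parent_ip);
          if (bit < 0)
                  return;

          /* ... do the actual work of the hook here ... */

          ftrace_test_recursion_unlock(bit);
  }

  static struct ftrace_ops my_ops = {
          .func = my_trace_callback,
  };

  /* register_ftrace_function(&my_ops) would attach the callback. */

If tracing a not-inlined inline function re-enters the callback, the
trylock fails and the hook simply returns instead of recursing until the
stack overflows, which is why a blanket notrace on inline is no longer
needed for safety.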

Today, at least on x86, it is no longer a problem if a function marked
inline is not inlined and ends up being traced. Removing notrace from the
inline macro has been requested several times over the years, and I
believe it is now safe to do so.

Currently only x86 opts in, by selecting the new ARCH_CAN_TRACE_INLINE
config option.
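
As a purely illustrative before/after example (the helper below is
hypothetical and not part of this patch):

  /* Hypothetical header helper; the name is for illustration only. */
  static inline int my_square(int x)
  {
          return x * x;
  }

Before this change, the kernel's "inline" macro carried notrace, so even
when the compiler emitted an out-of-line copy of my_square() (for example
because its address was taken), that copy was never instrumented and could
not be traced. With ARCH_CAN_TRACE_INLINE selected, such an out-of-line
copy is instrumented like any other function and appears in tracefs'
available_filter_functions. Functions marked __always_inline are not
affected, as they have no out-of-line copy to trace.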

Acked-by: Song Liu <song@...nel.org>
Acked-by: Josh Poimboeuf <jpoimboe@...nel.org>
Signed-off-by: Steven Rostedt (Google) <rostedt@...dmis.org>
---
Changes since v1: https://lore.kernel.org/linux-trace-kernel/20230502164102.1a51cdb4@gandalf.local.home

 - have it opted in by architecture. Currently only x86 adds it. (Mark Rutland)

 arch/x86/Kconfig               |  1 +
 include/linux/compiler_types.h | 16 +++++++++++++---
 kernel/trace/Kconfig           |  7 +++++++
 3 files changed, 21 insertions(+), 3 deletions(-)

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index da5c081d64a5..1ddebf832534 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -61,6 +61,7 @@ config X86
 	select ACPI_LEGACY_TABLES_LOOKUP	if ACPI
 	select ACPI_SYSTEM_POWER_STATES_SUPPORT	if ACPI
 	select ARCH_32BIT_OFF_T			if X86_32
+	select ARCH_CAN_TRACE_INLINE
 	select ARCH_CLOCKSOURCE_INIT
 	select ARCH_CORRECT_STACKTRACE_ON_KRETPROBE
 	select ARCH_ENABLE_HUGEPAGE_MIGRATION if X86_64 && HUGETLB_PAGE && MIGRATION
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index 547ea1ff806e..f827e2a98500 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -169,6 +169,16 @@ struct ftrace_likely_data {
 #define notrace			__attribute__((__no_instrument_function__))
 #endif
 
+/*
+ * If all inline code not marked as __always_inline is safe to trace,
+ * then allow the architecture to do so.
+ */
+#ifdef CONFIG_ARCH_CAN_TRACE_INLINE
+#define __notrace_inline
+#else
+#define __notrace_inline	notrace
+#endif
+
 /*
  * it doesn't make sense on ARM (currently the only user of __naked)
  * to trace naked functions because then mcount is called without
@@ -184,7 +194,7 @@ struct ftrace_likely_data {
  * of extern inline functions at link time.
  * A lot of inline functions can cause havoc with function tracing.
  */
-#define inline inline __gnu_inline __inline_maybe_unused notrace
+#define inline inline __gnu_inline __inline_maybe_unused __notrace_inline
 
 /*
  * gcc provides both __inline__ and __inline as alternate spellings of
@@ -230,7 +240,7 @@ struct ftrace_likely_data {
  *     https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
  * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
  */
-# define __no_kasan_or_inline __no_sanitize_address notrace __maybe_unused
+# define __no_kasan_or_inline __no_sanitize_address __notrace_inline __maybe_unused
 # define __no_sanitize_or_inline __no_kasan_or_inline
 #else
 # define __no_kasan_or_inline __always_inline
@@ -247,7 +257,7 @@ struct ftrace_likely_data {
  * disable all instrumentation. See Kconfig.kcsan where this is mandatory.
  */
 # define __no_kcsan __no_sanitize_thread __disable_sanitizer_instrumentation
-# define __no_sanitize_or_inline __no_kcsan notrace __maybe_unused
+# define __no_sanitize_or_inline __no_kcsan __notrace_inline __maybe_unused
 #else
 # define __no_kcsan
 #endif
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index abe5c583bd59..b66ab0e6ce19 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -106,6 +106,13 @@ config HAVE_BUILDTIME_MCOUNT_SORT
          An architecture selects this if it sorts the mcount_loc section
 	 at build time.
 
+config ARCH_CAN_TRACE_INLINE
+       bool
+       help
+         It is safe for an architecture to trace any function marked
+	 as inline (not __always_inline) that the compiler decides to
+	 not inline.
+
 config BUILDTIME_MCOUNT_SORT
        bool
        default y
-- 
2.39.2
