Message-Id: <20200604095057.259452-1-elver@google.com>
Date: Thu, 4 Jun 2020 11:50:57 +0200
From: Marco Elver <elver@...gle.com>
To: elver@...gle.com
Cc: peterz@...radead.org, bp@...en8.de, tglx@...utronix.de,
mingo@...nel.org, clang-built-linux@...glegroups.com,
paulmck@...nel.org, dvyukov@...gle.com, glider@...gle.com,
andreyknvl@...gle.com, kasan-dev@...glegroups.com,
linux-kernel@...r.kernel.org
Subject: [PATCH -tip] kcov: Make runtime functions noinstr-compatible

The KCOV runtime is very minimal: it only updates a field in 'current',
and none of the __sanitizer_cov functions generates reports or calls
any other external functions.
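
For reference, the fast path is conceptually just the following (a
simplified sketch of the code in kernel/kcov.c; the real
check_kcov_mode() additionally checks that we are in a task or serving
a softirq):

  void noinstr __sanitizer_cov_trace_pc(void)
  {
          struct task_struct *t = current;
          unsigned long *area, pos;
          unsigned long ip = canonicalize_ip(_RET_IP_); /* strip KASLR offset */

          if (!check_kcov_mode(KCOV_MODE_TRACE_PC, t))
                  return;                 /* coverage not enabled for 't' */

          area = t->kcov_area;            /* per-task coverage buffer */
          pos = READ_ONCE(area[0]) + 1;   /* area[0] holds the PC count */
          if (likely(pos < t->kcov_size)) {
                  area[pos] = ip;         /* record this basic block/edge */
                  WRITE_ONCE(area[0], pos);
          }
  }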

Therefore we can make the KCOV runtime noinstr-compatible by:

1. Always-inlining the internal functions and marking the
   __sanitizer_cov functions noinstr. The function write_comp_data() is
   now guaranteed to be inlined into the __sanitizer_cov_trace_*cmp()
   functions, which saves a call in the fast path and reduces stack
   pressure, because the first argument is a constant.

2. For Clang, correctly passing -fno-stack-protector via a separate
   cc-option, as -fno-conserve-stack does not exist on Clang and a
   combined cc-option test would fail, dropping both flags (see the
   illustrative Makefile snippet below).
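
For example, with a single combined probe, Clang's lack of
-fno-conserve-stack makes the whole cc-option test compile fail, so
-fno-stack-protector is silently dropped as well (illustrative
before/after of the kernel/Makefile change in the patch below):

  # Broken: one probe for both flags; if the compiler rejects either
  # flag, both are dropped.
  CFLAGS_kcov.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)

  # Fixed: probe each flag separately, so Clang still gets
  # -fno-stack-protector.
  CFLAGS_kcov.o := $(call cc-option, -fno-conserve-stack) $(call cc-option, -fno-stack-protector)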

The major benefit of this approach, compared to adding another
attribute to 'noinstr' to suppress coverage collection, is that we
retain coverage visibility in noinstr functions. Such an attribute also
does not currently exist in either GCC or Clang.

Signed-off-by: Marco Elver <elver@...gle.com>
---
Note: There is a set of KCOV patches from Andrey in -next:
https://lkml.kernel.org/r/cover.1585233617.git.andreyknvl@google.com
Git merges this patch cleanly with those patches; no merge conflict is
expected.
---
 kernel/Makefile |  2 +-
 kernel/kcov.c   | 26 +++++++++++++-------------
 2 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/kernel/Makefile b/kernel/Makefile
index 5d935b63f812..8e282c611a72 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -35,7 +35,7 @@ KCOV_INSTRUMENT_stacktrace.o := n
 KCOV_INSTRUMENT_kcov.o := n
 KASAN_SANITIZE_kcov.o := n
 KCSAN_SANITIZE_kcov.o := n
-CFLAGS_kcov.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
+CFLAGS_kcov.o := $(call cc-option, -fno-conserve-stack) $(call cc-option, -fno-stack-protector)
 
 # cond_syscall is currently not LTO compatible
 CFLAGS_sys_ni.o = $(DISABLE_LTO)
diff --git a/kernel/kcov.c b/kernel/kcov.c
index 8accc9722a81..d6e3be2d0570 100644
--- a/kernel/kcov.c
+++ b/kernel/kcov.c
@@ -142,7 +142,7 @@ static void kcov_remote_area_put(struct kcov_remote_area *area,
 	list_add(&area->list, &kcov_remote_areas);
 }
 
-static notrace bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
+static __always_inline bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
 {
 	unsigned int mode;
 
@@ -164,7 +164,7 @@ static notrace bool check_kcov_mode(enum kcov_mode needed_mode, struct task_stru
 	return mode == needed_mode;
 }
 
-static notrace unsigned long canonicalize_ip(unsigned long ip)
+static __always_inline unsigned long canonicalize_ip(unsigned long ip)
 {
 #ifdef CONFIG_RANDOMIZE_BASE
 	ip -= kaslr_offset();
@@ -176,7 +176,7 @@ static notrace unsigned long canonicalize_ip(unsigned long ip)
  * Entry point from instrumented code.
  * This is called once per basic-block/edge.
  */
-void notrace __sanitizer_cov_trace_pc(void)
+void noinstr __sanitizer_cov_trace_pc(void)
 {
 	struct task_struct *t;
 	unsigned long *area;
@@ -198,7 +198,7 @@ void notrace __sanitizer_cov_trace_pc(void)
 EXPORT_SYMBOL(__sanitizer_cov_trace_pc);
 
 #ifdef CONFIG_KCOV_ENABLE_COMPARISONS
-static void notrace write_comp_data(u64 type, u64 arg1, u64 arg2, u64 ip)
+static __always_inline void write_comp_data(u64 type, u64 arg1, u64 arg2, u64 ip)
 {
 	struct task_struct *t;
 	u64 *area;
@@ -231,59 +231,59 @@ static void notrace write_comp_data(u64 type, u64 arg1, u64 arg2, u64 ip)
 	}
 }
 
-void notrace __sanitizer_cov_trace_cmp1(u8 arg1, u8 arg2)
+void noinstr __sanitizer_cov_trace_cmp1(u8 arg1, u8 arg2)
 {
 	write_comp_data(KCOV_CMP_SIZE(0), arg1, arg2, _RET_IP_);
 }
 EXPORT_SYMBOL(__sanitizer_cov_trace_cmp1);
 
-void notrace __sanitizer_cov_trace_cmp2(u16 arg1, u16 arg2)
+void noinstr __sanitizer_cov_trace_cmp2(u16 arg1, u16 arg2)
 {
 	write_comp_data(KCOV_CMP_SIZE(1), arg1, arg2, _RET_IP_);
 }
 EXPORT_SYMBOL(__sanitizer_cov_trace_cmp2);
 
-void notrace __sanitizer_cov_trace_cmp4(u32 arg1, u32 arg2)
+void noinstr __sanitizer_cov_trace_cmp4(u32 arg1, u32 arg2)
 {
 	write_comp_data(KCOV_CMP_SIZE(2), arg1, arg2, _RET_IP_);
 }
 EXPORT_SYMBOL(__sanitizer_cov_trace_cmp4);
 
-void notrace __sanitizer_cov_trace_cmp8(u64 arg1, u64 arg2)
+void noinstr __sanitizer_cov_trace_cmp8(u64 arg1, u64 arg2)
 {
 	write_comp_data(KCOV_CMP_SIZE(3), arg1, arg2, _RET_IP_);
 }
 EXPORT_SYMBOL(__sanitizer_cov_trace_cmp8);
 
-void notrace __sanitizer_cov_trace_const_cmp1(u8 arg1, u8 arg2)
+void noinstr __sanitizer_cov_trace_const_cmp1(u8 arg1, u8 arg2)
 {
 	write_comp_data(KCOV_CMP_SIZE(0) | KCOV_CMP_CONST, arg1, arg2,
 			_RET_IP_);
 }
 EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp1);
 
-void notrace __sanitizer_cov_trace_const_cmp2(u16 arg1, u16 arg2)
+void noinstr __sanitizer_cov_trace_const_cmp2(u16 arg1, u16 arg2)
 {
 	write_comp_data(KCOV_CMP_SIZE(1) | KCOV_CMP_CONST, arg1, arg2,
 			_RET_IP_);
 }
 EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp2);
 
-void notrace __sanitizer_cov_trace_const_cmp4(u32 arg1, u32 arg2)
+void noinstr __sanitizer_cov_trace_const_cmp4(u32 arg1, u32 arg2)
 {
 	write_comp_data(KCOV_CMP_SIZE(2) | KCOV_CMP_CONST, arg1, arg2,
 			_RET_IP_);
 }
 EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp4);
 
-void notrace __sanitizer_cov_trace_const_cmp8(u64 arg1, u64 arg2)
+void noinstr __sanitizer_cov_trace_const_cmp8(u64 arg1, u64 arg2)
 {
 	write_comp_data(KCOV_CMP_SIZE(3) | KCOV_CMP_CONST, arg1, arg2,
 			_RET_IP_);
 }
 EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp8);
 
-void notrace __sanitizer_cov_trace_switch(u64 val, u64 *cases)
+void noinstr __sanitizer_cov_trace_switch(u64 val, u64 *cases)
 {
 	u64 i;
 	u64 count = cases[0];
--
2.27.0.rc2.251.g90737beb825-goog