Message-ID: <CANpmjNO4nXfqFYcS3xBREZ3TCTe_feOsBFqQ46YJUjSvLWUqGQ@mail.gmail.com>
Date: Thu, 21 Apr 2022 10:46:37 +0200
From: Marco Elver <elver@...gle.com>
To: Max Filippov <jcmvbkbc@...il.com>
Cc: linux-xtensa@...ux-xtensa.org, Chris Zankel <chris@...kel.net>,
linux-kernel@...r.kernel.org, Dmitry Vyukov <dvyukov@...gle.com>,
kasan-dev@...glegroups.com
Subject: Re: [PATCH v2] xtensa: enable KCSAN
On Wed, 20 Apr 2022 at 21:08, Max Filippov <jcmvbkbc@...il.com> wrote:
>
> Prefix arch-specific barrier macros with '__' to make use of instrumented
> generic macros.
> Prefix arch-specific bitops with 'arch_' to make use of instrumented
> generic functions.
> Provide stubs for 64-bit atomics when building with KCSAN.
> Disable KCSAN instrumentation in arch/xtensa/boot.
>
> Signed-off-by: Max Filippov <jcmvbkbc@...il.com>
Acked-by: Marco Elver <elver@...gle.com>
> ---
> Changes v1->v2:
>
> - fix __wmb definition to use __mb instead of mb
> - provide __smp_{,r,w}mb definitions, because the asm-generic
>   definitions use mb/rmb/wmb instead of __mb/__rmb/__wmb and would
>   thus double the KCSAN instrumentation.
>
> Both changes fix a few failures in the KCSAN testsuite.
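
(For anyone reading along: the doubling happens because, without
arch-provided __smp_*mb(), the asm-generic fallbacks are defined in
terms of the already-instrumented mb()/rmb()/wmb(). Roughly, modulo
details, include/asm-generic/barrier.h has:

  #define mb()          do { kcsan_mb(); __mb(); } while (0)

  #ifndef __smp_mb
  #define __smp_mb()    mb()
  #endif

  #define smp_mb()      do { kcsan_mb(); __smp_mb(); } while (0)

so smp_mb() would end up calling kcsan_mb() twice. Defining __smp_mb()
directly in terms of __mb(), as done below, avoids that.)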
>
> arch/xtensa/Kconfig | 1 +
> arch/xtensa/boot/lib/Makefile | 1 +
> arch/xtensa/include/asm/barrier.h | 12 +++++--
> arch/xtensa/include/asm/bitops.h | 10 +++---
> arch/xtensa/lib/Makefile | 2 ++
> arch/xtensa/lib/kcsan-stubs.c | 54 +++++++++++++++++++++++++++++++
> 6 files changed, 73 insertions(+), 7 deletions(-)
> create mode 100644 arch/xtensa/lib/kcsan-stubs.c
>
> diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
> index 797355c142b3..c87f5ab493d9 100644
> --- a/arch/xtensa/Kconfig
> +++ b/arch/xtensa/Kconfig
> @@ -29,6 +29,7 @@ config XTENSA
> select HAVE_ARCH_AUDITSYSCALL
> select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
> select HAVE_ARCH_KASAN if MMU && !XIP_KERNEL
> + select HAVE_ARCH_KCSAN
> select HAVE_ARCH_SECCOMP_FILTER
> select HAVE_ARCH_TRACEHOOK
> select HAVE_CONTEXT_TRACKING
> diff --git a/arch/xtensa/boot/lib/Makefile b/arch/xtensa/boot/lib/Makefile
> index e3d717c7bfa1..162d10af36f3 100644
> --- a/arch/xtensa/boot/lib/Makefile
> +++ b/arch/xtensa/boot/lib/Makefile
> @@ -16,6 +16,7 @@ CFLAGS_REMOVE_inffast.o = -pg
> endif
>
> KASAN_SANITIZE := n
> +KCSAN_SANITIZE := n
>
> CFLAGS_REMOVE_inflate.o += -fstack-protector -fstack-protector-strong
> CFLAGS_REMOVE_zmem.o += -fstack-protector -fstack-protector-strong
> diff --git a/arch/xtensa/include/asm/barrier.h b/arch/xtensa/include/asm/barrier.h
> index d6f8d4ddc2bc..898ea397e9bc 100644
> --- a/arch/xtensa/include/asm/barrier.h
> +++ b/arch/xtensa/include/asm/barrier.h
> @@ -11,9 +11,15 @@
>
> #include <asm/core.h>
>
> -#define mb() ({ __asm__ __volatile__("memw" : : : "memory"); })
> -#define rmb() barrier()
> -#define wmb() mb()
> +#define __mb() ({ __asm__ __volatile__("memw" : : : "memory"); })
> +#define __rmb() barrier()
> +#define __wmb() __mb()
> +
> +#ifdef CONFIG_SMP
> +#define __smp_mb() __mb()
> +#define __smp_rmb() __rmb()
> +#define __smp_wmb() __wmb()
> +#endif
>
> #if XCHAL_HAVE_S32C1I
> #define __smp_mb__before_atomic() barrier()
> diff --git a/arch/xtensa/include/asm/bitops.h b/arch/xtensa/include/asm/bitops.h
> index cd225896c40f..e02ec5833389 100644
> --- a/arch/xtensa/include/asm/bitops.h
> +++ b/arch/xtensa/include/asm/bitops.h
> @@ -99,7 +99,7 @@ static inline unsigned long __fls(unsigned long word)
> #if XCHAL_HAVE_EXCLUSIVE
>
> #define BIT_OP(op, insn, inv) \
> -static inline void op##_bit(unsigned int bit, volatile unsigned long *p)\
> +static inline void arch_##op##_bit(unsigned int bit, volatile unsigned long *p)\
> { \
> unsigned long tmp; \
> unsigned long mask = 1UL << (bit & 31); \
> @@ -119,7 +119,7 @@ static inline void op##_bit(unsigned int bit, volatile unsigned long *p)\
>
> #define TEST_AND_BIT_OP(op, insn, inv) \
> static inline int \
> -test_and_##op##_bit(unsigned int bit, volatile unsigned long *p) \
> +arch_test_and_##op##_bit(unsigned int bit, volatile unsigned long *p) \
> { \
> unsigned long tmp, value; \
> unsigned long mask = 1UL << (bit & 31); \
> @@ -142,7 +142,7 @@ test_and_##op##_bit(unsigned int bit, volatile unsigned long *p) \
> #elif XCHAL_HAVE_S32C1I
>
> #define BIT_OP(op, insn, inv) \
> -static inline void op##_bit(unsigned int bit, volatile unsigned long *p)\
> +static inline void arch_##op##_bit(unsigned int bit, volatile unsigned long *p)\
> { \
> unsigned long tmp, value; \
> unsigned long mask = 1UL << (bit & 31); \
> @@ -163,7 +163,7 @@ static inline void op##_bit(unsigned int bit, volatile unsigned long *p)\
>
> #define TEST_AND_BIT_OP(op, insn, inv) \
> static inline int \
> -test_and_##op##_bit(unsigned int bit, volatile unsigned long *p) \
> +arch_test_and_##op##_bit(unsigned int bit, volatile unsigned long *p) \
> { \
> unsigned long tmp, value; \
> unsigned long mask = 1UL << (bit & 31); \
> @@ -205,6 +205,8 @@ BIT_OPS(change, "xor", )
> #undef BIT_OP
> #undef TEST_AND_BIT_OP
>
> +#include <asm-generic/bitops/instrumented-atomic.h>
> +
> #include <asm-generic/bitops/le.h>
>
> #include <asm-generic/bitops/ext2-atomic-setbit.h>
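
The arch_ prefix is what lets the un-prefixed names resolve to the
generic instrumented wrappers pulled in just above. Roughly, simplified
from include/asm-generic/bitops/instrumented-atomic.h:

  static __always_inline void set_bit(long nr, volatile unsigned long *addr)
  {
          instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
          arch_set_bit(nr, addr);
  }

i.e. KCSAN observes the access via instrument_atomic_write(), and the
real work is delegated to the arch_set_bit() defined above.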
> diff --git a/arch/xtensa/lib/Makefile b/arch/xtensa/lib/Makefile
> index 5848c133f7ea..d4e9c397e3fd 100644
> --- a/arch/xtensa/lib/Makefile
> +++ b/arch/xtensa/lib/Makefile
> @@ -8,3 +8,5 @@ lib-y += memcopy.o memset.o checksum.o \
> divsi3.o udivsi3.o modsi3.o umodsi3.o mulsi3.o \
> usercopy.o strncpy_user.o strnlen_user.o
> lib-$(CONFIG_PCI) += pci-auto.o
> +lib-$(CONFIG_KCSAN) += kcsan-stubs.o
> +KCSAN_SANITIZE_kcsan-stubs.o := n
> diff --git a/arch/xtensa/lib/kcsan-stubs.c b/arch/xtensa/lib/kcsan-stubs.c
> new file mode 100644
> index 000000000000..2b08faa62b86
> --- /dev/null
> +++ b/arch/xtensa/lib/kcsan-stubs.c
> @@ -0,0 +1,54 @@
> +// SPDX-License-Identifier: GPL-2.0
> +
> +#include <linux/bug.h>
> +#include <linux/types.h>
> +
> +void __atomic_store_8(volatile void *p, u64 v, int i)
> +{
> + BUG();
> +}
> +
> +u64 __atomic_load_8(const volatile void *p, int i)
> +{
> + BUG();
> +}
> +
> +u64 __atomic_exchange_8(volatile void *p, u64 v, int i)
> +{
> + BUG();
> +}
> +
> +bool __atomic_compare_exchange_8(volatile void *p1, void *p2, u64 v, bool b, int i1, int i2)
> +{
> + BUG();
> +}
> +
> +u64 __atomic_fetch_add_8(volatile void *p, u64 v, int i)
> +{
> + BUG();
> +}
> +
> +u64 __atomic_fetch_sub_8(volatile void *p, u64 v, int i)
> +{
> + BUG();
> +}
> +
> +u64 __atomic_fetch_and_8(volatile void *p, u64 v, int i)
> +{
> + BUG();
> +}
> +
> +u64 __atomic_fetch_or_8(volatile void *p, u64 v, int i)
> +{
> + BUG();
> +}
> +
> +u64 __atomic_fetch_xor_8(volatile void *p, u64 v, int i)
> +{
> + BUG();
> +}
> +
> +u64 __atomic_fetch_nand_8(volatile void *p, u64 v, int i)
> +{
> + BUG();
> +}
> --
> 2.30.2
>
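One note on the stubs, mostly for the archives: my understanding is
that the compiler's KCSAN instrumentation can lower 64-bit accesses on
this 32-bit target to out-of-line __atomic_*_8() libcalls that the
toolchain does not otherwise provide, so the stubs exist to satisfy the
linker, and BUG() loudly if one is ever actually reached at run time. A
hypothetical illustration (the function is made up, not from the
patch):

  u64 read_stat64(const volatile u64 *p)
  {
          /* In a KCSAN build the compiler may emit a call to
           * __atomic_load_8() here instead of an inline load. */
          return *p;
  }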