Message-Id: <20210916003146.3910358-5-paulmck@kernel.org>
Date: Wed, 15 Sep 2021 17:31:42 -0700
From: "Paul E. McKenney" <paulmck@...nel.org>
To: linux-kernel@...r.kernel.org, kasan-dev@...glegroups.com,
kernel-team@...com, mingo@...nel.org
Cc: elver@...gle.com, andreyknvl@...gle.com, glider@...gle.com,
dvyukov@...gle.com, cai@....pw, boqun.feng@...il.com,
"Paul E . McKenney" <paulmck@...nel.org>
Subject: [PATCH kcsan 5/9] kcsan: Save instruction pointer for scoped accesses

From: Marco Elver <elver@...gle.com>

Save the instruction pointer for scoped accesses, so that it becomes
possible for the reporting code to construct more accurate stack traces
that will show the start of the scope.
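
For example (an illustrative sketch only, not part of this patch;
"struct foo" and do_foo_update() are made-up names), a scoped assertion
set up via kcsan_begin_scoped_access() might look as follows:

	struct foo { int state; };

	static void do_foo_update(struct foo *f)
	{
		struct kcsan_scoped_access sa;

		/*
		 * With this patch, the return address of the call below
		 * is saved in sa.ip.
		 */
		kcsan_begin_scoped_access(&f->state, sizeof(f->state),
					  KCSAN_ACCESS_ASSERT, &sa);
		f->state = 1; /* Accesses in the scope are checked. */
		kcsan_end_scoped_access(&sa);
	}

A report about a race with any access in this scope can then point back
at the kcsan_begin_scoped_access() call site via the saved ip, rather
than only at wherever the scoped access happened to be re-checked.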

Signed-off-by: Marco Elver <elver@...gle.com>
Signed-off-by: Paul E. McKenney <paulmck@...nel.org>
---
 include/linux/kcsan-checks.h |  3 +++
 kernel/kcsan/core.c          | 12 +++++++++---
 2 files changed, 12 insertions(+), 3 deletions(-)

diff --git a/include/linux/kcsan-checks.h b/include/linux/kcsan-checks.h
index 9fd0ad80fef6..5f5965246877 100644
--- a/include/linux/kcsan-checks.h
+++ b/include/linux/kcsan-checks.h
@@ -100,9 +100,12 @@ void kcsan_set_access_mask(unsigned long mask);
 /* Scoped access information. */
 struct kcsan_scoped_access {
 	struct list_head list;
+	/* Access information. */
 	const volatile void *ptr;
 	size_t size;
 	int type;
+	/* Location where scoped access was set up. */
+	unsigned long ip;
 };
 /*
  * Automatically call kcsan_end_scoped_access() when kcsan_scoped_access goes
diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
index bffd1d95addb..8b20af541776 100644
--- a/kernel/kcsan/core.c
+++ b/kernel/kcsan/core.c
@@ -202,6 +202,9 @@ static __always_inline struct kcsan_ctx *get_ctx(void)
 	return in_task() ? &current->kcsan_ctx : raw_cpu_ptr(&kcsan_cpu_ctx);
 }
 
+static __always_inline void
+check_access(const volatile void *ptr, size_t size, int type, unsigned long ip);
+
 /* Check scoped accesses; never inline because this is a slow-path! */
 static noinline void kcsan_check_scoped_accesses(void)
 {
@@ -210,8 +213,10 @@ static noinline void kcsan_check_scoped_accesses(void)
 	struct kcsan_scoped_access *scoped_access;
 
 	ctx->scoped_accesses.prev = NULL;  /* Avoid recursion. */
-	list_for_each_entry(scoped_access, &ctx->scoped_accesses, list)
-		__kcsan_check_access(scoped_access->ptr, scoped_access->size, scoped_access->type);
+	list_for_each_entry(scoped_access, &ctx->scoped_accesses, list) {
+		check_access(scoped_access->ptr, scoped_access->size,
+			     scoped_access->type, scoped_access->ip);
+	}
 	ctx->scoped_accesses.prev = prev_save;
 }
 
@@ -767,6 +772,7 @@ kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type,
 	sa->ptr = ptr;
 	sa->size = size;
 	sa->type = type;
+	sa->ip = _RET_IP_;
 
 	if (!ctx->scoped_accesses.prev) /* Lazy initialize list head. */
 		INIT_LIST_HEAD(&ctx->scoped_accesses);
@@ -798,7 +804,7 @@ void kcsan_end_scoped_access(struct kcsan_scoped_access *sa)
 	ctx->disable_count--;
 
-	__kcsan_check_access(sa->ptr, sa->size, sa->type);
+	check_access(sa->ptr, sa->size, sa->type, sa->ip);
 }
 EXPORT_SYMBOL(kcsan_end_scoped_access);
--
2.31.1.189.g2e36527f23