Message-ID: <CACT4Y+Za7vRTQ6M6kKs-+4N4+D6q05OKf422LZCMBBy-k4Cqqw@mail.gmail.com>
Date: Wed, 9 Jul 2025 17:05:44 +0200
From: Dmitry Vyukov <dvyukov@...gle.com>
To: Alexander Potapenko <glider@...gle.com>
Cc: quic_jiangenj@...cinc.com, linux-kernel@...r.kernel.org,
kasan-dev@...glegroups.com, Aleksandr Nogikh <nogikh@...gle.com>,
Andrey Konovalov <andreyknvl@...il.com>, Borislav Petkov <bp@...en8.de>,
Dave Hansen <dave.hansen@...ux.intel.com>, Ingo Molnar <mingo@...hat.com>,
Josh Poimboeuf <jpoimboe@...nel.org>, Marco Elver <elver@...gle.com>,
Peter Zijlstra <peterz@...radead.org>, Thomas Gleixner <tglx@...utronix.de>
Subject: Re: [PATCH v2 07/11] kcov: add trace and trace_size to struct kcov_state
On Thu, 26 Jun 2025 at 15:42, Alexander Potapenko <glider@...gle.com> wrote:
>
> Keep kcov_state.area as the pointer to the memory buffer used by
> kcov and shared with userspace. Store the pointer to the trace
> (the part of the buffer holding sequential events) separately, as we
> will be splitting that buffer into multiple parts.
> No functional changes so far.
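To spell out the resulting invariant for readers of the series: after this
patch trace still points at the start of area and trace_size == size; only
later patches are expected to carve the area into sub-buffers. A minimal
sketch of the intended relationship, reusing the fields from the struct shown
further down (the reserved_longs offset is purely hypothetical here, this
patch effectively keeps it at zero):

#include <linux/kcov_types.h>

/*
 * Sketch only: how area/size and trace/trace_size are meant to relate.
 * With this patch the trace is the whole area; a later split would
 * reserve the first reserved_longs words of the area for something else.
 */
static void kcov_state_layout_sketch(struct kcov_state *state, void *area,
                                     unsigned int size_longs,
                                     unsigned int reserved_longs)
{
        state->area = area;
        state->size = size_longs;
        state->trace = (unsigned long *)area + reserved_longs;
        state->trace_size = size_longs - reserved_longs;
}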
>
> Signed-off-by: Alexander Potapenko <glider@...gle.com>
>
> ---
> Change-Id: I50b5589ef0e0b6726aa0579334093c648f76790a
>
> v2:
> - Address comments by Dmitry Vyukov:
> - tweak commit description
> - Address comments by Marco Elver:
> - rename sanitizer_cov_write_subsequent() to kcov_append_to_buffer()
> - Update code to match the new description of struct kcov_state
> ---
> include/linux/kcov_types.h | 9 ++++++-
> kernel/kcov.c | 54 ++++++++++++++++++++++----------------
> 2 files changed, 39 insertions(+), 24 deletions(-)
>
> diff --git a/include/linux/kcov_types.h b/include/linux/kcov_types.h
> index 53b25b6f0addd..233e7a682654b 100644
> --- a/include/linux/kcov_types.h
> +++ b/include/linux/kcov_types.h
> @@ -7,9 +7,16 @@
> struct kcov_state {
> /* Size of the area (in long's). */
> unsigned int size;
> + /*
> + * Pointer to user-provided memory used by kcov. This memory may
s/kcov/KCOV/ for consistency
> + * contain multiple buffers.
> + */
> + void *area;
>
> + /* Size of the trace (in long's). */
> + unsigned int trace_size;
> /* Buffer for coverage collection, shared with the userspace. */
> - void *area;
> + unsigned long *trace;
>
> /*
> * KCOV sequence number: incremented each time kcov is reenabled, used
> diff --git a/kernel/kcov.c b/kernel/kcov.c
> index 8e98ca8d52743..038261145cf93 100644
> --- a/kernel/kcov.c
> +++ b/kernel/kcov.c
> @@ -195,11 +195,11 @@ static notrace unsigned long canonicalize_ip(unsigned long ip)
> return ip;
> }
>
> -static notrace void kcov_append_to_buffer(unsigned long *area, int size,
> +static notrace void kcov_append_to_buffer(unsigned long *trace, int size,
> unsigned long ip)
> {
> /* The first 64-bit word is the number of subsequent PCs. */
> - unsigned long pos = READ_ONCE(area[0]) + 1;
> + unsigned long pos = READ_ONCE(trace[0]) + 1;
>
> if (likely(pos < size)) {
> /*
> @@ -209,9 +209,9 @@ static notrace void kcov_append_to_buffer(unsigned long *area, int size,
> * overitten by the recursive __sanitizer_cov_trace_pc().
> * Update pos before writing pc to avoid such interleaving.
> */
> - WRITE_ONCE(area[0], pos);
> + WRITE_ONCE(trace[0], pos);
> barrier();
> - area[pos] = ip;
> + trace[pos] = ip;
> }
> }
>
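Side note, since this is the layout userspace depends on: the
first-word-is-count convention this helper maintains is what a consumer reads
back after the run. A rough sketch along the lines of the usage example in
Documentation/dev-tools/kcov.rst (error handling trimmed, so only a sketch):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

#define KCOV_INIT_TRACE _IOR('c', 1, unsigned long)
#define KCOV_ENABLE     _IO('c', 100)
#define KCOV_DISABLE    _IO('c', 101)
#define KCOV_TRACE_PC   0

#define COVER_SIZE      (64 << 10)      /* in unsigned longs */

int main(void)
{
        unsigned long *cover, n, i;
        int fd = open("/sys/kernel/debug/kcov", O_RDWR);

        ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE);
        cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
                     PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        ioctl(fd, KCOV_ENABLE, KCOV_TRACE_PC);
        __atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);

        read(-1, NULL, 0);      /* the syscall we want coverage for */

        /* cover[0] is the PC count, cover[1..n] are the PCs. */
        n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
        for (i = 0; i < n; i++)
                printf("0x%lx\n", cover[i + 1]);

        ioctl(fd, KCOV_DISABLE, 0);
        munmap(cover, COVER_SIZE * sizeof(unsigned long));
        close(fd);
        return 0;
}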
> @@ -225,8 +225,8 @@ void notrace __sanitizer_cov_trace_pc_guard(u32 *guard)
> if (!check_kcov_mode(KCOV_MODE_TRACE_PC, current))
> return;
>
> - kcov_append_to_buffer(current->kcov_state.area,
> - current->kcov_state.size,
> + kcov_append_to_buffer(current->kcov_state.trace,
> + current->kcov_state.trace_size,
> canonicalize_ip(_RET_IP_));
> }
> EXPORT_SYMBOL(__sanitizer_cov_trace_pc_guard);
> @@ -242,8 +242,8 @@ void notrace __sanitizer_cov_trace_pc(void)
> if (!check_kcov_mode(KCOV_MODE_TRACE_PC, current))
> return;
>
> - kcov_append_to_buffer(current->kcov_state.area,
> - current->kcov_state.size,
> + kcov_append_to_buffer(current->kcov_state.trace,
> + current->kcov_state.trace_size,
> canonicalize_ip(_RET_IP_));
> }
> EXPORT_SYMBOL(__sanitizer_cov_trace_pc);
> @@ -252,9 +252,9 @@ EXPORT_SYMBOL(__sanitizer_cov_trace_pc);
> #ifdef CONFIG_KCOV_ENABLE_COMPARISONS
> static void notrace write_comp_data(u64 type, u64 arg1, u64 arg2, u64 ip)
> {
> - struct task_struct *t;
> - u64 *area;
> u64 count, start_index, end_pos, max_pos;
> + struct task_struct *t;
> + u64 *trace;
>
> t = current;
> if (!check_kcov_mode(KCOV_MODE_TRACE_CMP, t))
> @@ -266,22 +266,22 @@ static void notrace write_comp_data(u64 type, u64 arg1, u64 arg2, u64 ip)
> * We write all comparison arguments and types as u64.
> * The buffer was allocated for t->kcov_state.size unsigned longs.
> */
> - area = (u64 *)t->kcov_state.area;
> + trace = (u64 *)t->kcov_state.trace;
> max_pos = t->kcov_state.size * sizeof(unsigned long);
>
> - count = READ_ONCE(area[0]);
> + count = READ_ONCE(trace[0]);
>
> /* Every record is KCOV_WORDS_PER_CMP 64-bit words. */
> start_index = 1 + count * KCOV_WORDS_PER_CMP;
> end_pos = (start_index + KCOV_WORDS_PER_CMP) * sizeof(u64);
> if (likely(end_pos <= max_pos)) {
> /* See comment in kcov_append_to_buffer(). */
> - WRITE_ONCE(area[0], count + 1);
> + WRITE_ONCE(trace[0], count + 1);
> barrier();
> - area[start_index] = type;
> - area[start_index + 1] = arg1;
> - area[start_index + 2] = arg2;
> - area[start_index + 3] = ip;
> + trace[start_index] = type;
> + trace[start_index + 1] = arg1;
> + trace[start_index + 2] = arg2;
> + trace[start_index + 3] = ip;
> }
> }
>
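Same first-word-is-count convention for the comparison mode, just with
KCOV_WORDS_PER_CMP (four) u64 words per record laid out as type, arg1, arg2,
PC, exactly as the stores above write them. A sketch of the consumer side,
assuming cover is the mmap()ed buffer and KCOV_TRACE_CMP was enabled instead
of KCOV_TRACE_PC:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Sketch only: walk the comparison records in the shared buffer.
 * cover[0] holds the number of records; each record is four u64
 * words: type, arg1, arg2, PC.
 */
static void dump_cmp_records(const uint64_t *cover)
{
        uint64_t i, n = cover[0];

        for (i = 0; i < n; i++) {
                const uint64_t *rec = &cover[1 + i * 4];

                printf("type=0x%" PRIx64 " arg1=0x%" PRIx64
                       " arg2=0x%" PRIx64 " pc=0x%" PRIx64 "\n",
                       rec[0], rec[1], rec[2], rec[3]);
        }
}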
> @@ -382,11 +382,13 @@ static void kcov_start(struct task_struct *t, struct kcov *kcov,
>
> static void kcov_stop(struct task_struct *t)
> {
> + int saved_sequence = t->kcov_state.sequence;
> +
> WRITE_ONCE(t->kcov_mode, KCOV_MODE_DISABLED);
> barrier();
> t->kcov = NULL;
> - t->kcov_state.size = 0;
> - t->kcov_state.area = NULL;
> + t->kcov_state = (typeof(t->kcov_state)){ 0 };
In a previous patch you used the following syntax; let's stick to one
of these forms:
data->saved_state = (struct kcov_state){};
> + t->kcov_state.sequence = saved_sequence;
> }
>
> static void kcov_task_reset(struct task_struct *t)
> @@ -736,6 +738,8 @@ static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
> }
> kcov->state.area = area;
> kcov->state.size = size;
> + kcov->state.trace = area;
> + kcov->state.trace_size = size;
> kcov->mode = KCOV_MODE_INIT;
> spin_unlock_irqrestore(&kcov->lock, flags);
> return 0;
> @@ -928,10 +932,12 @@ void kcov_remote_start(u64 handle)
> local_lock_irqsave(&kcov_percpu_data.lock, flags);
> }
>
> - /* Reset coverage size. */
> - *(u64 *)area = 0;
> state.area = area;
> state.size = size;
> + state.trace = area;
> + state.trace_size = size;
> + /* Reset coverage size. */
> + state.trace[0] = 0;
>
> if (in_serving_softirq()) {
> kcov_remote_softirq_start(t);
> @@ -1004,8 +1010,8 @@ void kcov_remote_stop(void)
> struct task_struct *t = current;
> struct kcov *kcov;
> unsigned int mode;
> - void *area;
> - unsigned int size;
> + void *area, *trace;
> + unsigned int size, trace_size;
> int sequence;
> unsigned long flags;
>
> @@ -1037,6 +1043,8 @@ void kcov_remote_stop(void)
> kcov = t->kcov;
> area = t->kcov_state.area;
> size = t->kcov_state.size;
> + trace = t->kcov_state.trace;
> + trace_size = t->kcov_state.trace_size;
> sequence = t->kcov_state.sequence;
>
> kcov_stop(t);
> --
> 2.50.0.727.gbf7dc18ff4-goog
>