Message-ID: <CACT4Y+Ymd=7zQ-AYhEx93DpBZ89jVbdUM0pbN+2vPaiwKg-sdA@mail.gmail.com>
Date: Tue, 29 Jul 2025 13:11:58 +0200
From: Dmitry Vyukov <dvyukov@...gle.com>
To: Alexander Potapenko <glider@...gle.com>
Cc: quic_jiangenj@...cinc.com, linux-kernel@...r.kernel.org,
kasan-dev@...glegroups.com, Aleksandr Nogikh <nogikh@...gle.com>,
Andrey Konovalov <andreyknvl@...il.com>, Borislav Petkov <bp@...en8.de>,
Dave Hansen <dave.hansen@...ux.intel.com>, Ingo Molnar <mingo@...hat.com>,
Josh Poimboeuf <jpoimboe@...nel.org>, Marco Elver <elver@...gle.com>,
Peter Zijlstra <peterz@...radead.org>, Thomas Gleixner <tglx@...utronix.de>
Subject: Re: [PATCH v3 06/10] kcov: add trace and trace_size to struct kcov_state
On Mon, 28 Jul 2025 at 17:26, Alexander Potapenko <glider@...gle.com> wrote:
>
> Keep kcov_state.area as the pointer to the memory buffer used by
> KCOV and shared with userspace. Store the pointer to the trace
> (the part of the buffer holding sequential events) separately, as we
> will be splitting that buffer into multiple parts.
> No functional changes so far.
>
> Signed-off-by: Alexander Potapenko <glider@...gle.com>
Reviewed-by: Dmitry Vyukov <dvyukov@...gle.com>
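
For anyone following along: below is a minimal sketch of how userspace
consumes the trace part of the shared buffer today (based on the
documented KCOV_TRACE_PC debugfs interface; COVER_SIZE and the error
handling are purely illustrative). Word 0 holds the number of collected
PCs and the PCs follow, which is the invariant kcov_append_to_buffer()
maintains in the hunks below.

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

#define KCOV_INIT_TRACE _IOR('c', 1, unsigned long)
#define KCOV_ENABLE     _IO('c', 100)
#define KCOV_DISABLE    _IO('c', 101)
#define KCOV_TRACE_PC   0

#define COVER_SIZE (64 << 10) /* illustrative buffer size, in longs */

int main(void)
{
	unsigned long *cover, n, i;
	int fd;

	fd = open("/sys/kernel/debug/kcov", O_RDWR);
	if (fd == -1)
		perror("open"), exit(1);
	/* Tell KCOV how large the shared buffer should be (in longs). */
	if (ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE))
		perror("ioctl init"), exit(1);
	/* Map the buffer shared between kernel- and user-space. */
	cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
		     PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if ((void *)cover == MAP_FAILED)
		perror("mmap"), exit(1);
	/* Enable PC tracing for the current thread. */
	if (ioctl(fd, KCOV_ENABLE, KCOV_TRACE_PC))
		perror("ioctl enable"), exit(1);
	/* Word 0 of the trace is the number of collected PCs; reset it. */
	__atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);

	read(-1, NULL, 0); /* the traced syscall */

	n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
	for (i = 0; i < n; i++)
		printf("0x%lx\n", cover[i + 1]);

	if (ioctl(fd, KCOV_DISABLE, 0))
		perror("ioctl disable"), exit(1);
	munmap(cover, COVER_SIZE * sizeof(unsigned long));
	close(fd);
	return 0;
}
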
> ---
> v3:
> - Fix a warning detected by the kernel test robot <lkp@...el.com>
> - Address comments by Dmitry Vyukov:
> - s/kcov/KCOV/
> - fix struct initialization style
>
> v2:
> - Address comments by Dmitry Vyukov:
> - tweak commit description
> - Address comments by Marco Elver:
> - rename sanitizer_cov_write_subsequent() to kcov_append_to_buffer()
> - Update code to match the new description of struct kcov_state
>
> Change-Id: I50b5589ef0e0b6726aa0579334093c648f76790a
> ---
> include/linux/kcov_types.h | 9 ++++++-
> kernel/kcov.c | 48 +++++++++++++++++++++-----------------
> 2 files changed, 35 insertions(+), 22 deletions(-)
>
> diff --git a/include/linux/kcov_types.h b/include/linux/kcov_types.h
> index 53b25b6f0addd..9d38a2020b099 100644
> --- a/include/linux/kcov_types.h
> +++ b/include/linux/kcov_types.h
> @@ -7,9 +7,16 @@
> struct kcov_state {
> /* Size of the area (in long's). */
> unsigned int size;
> + /*
> + * Pointer to user-provided memory used by KCOV. This memory may
> + * contain multiple buffers.
> + */
> + void *area;
>
> + /* Size of the trace (in long's). */
> + unsigned int trace_size;
> /* Buffer for coverage collection, shared with the userspace. */
> - void *area;
> + unsigned long *trace;
>
> /*
> * KCOV sequence number: incremented each time kcov is reenabled, used
> diff --git a/kernel/kcov.c b/kernel/kcov.c
> index 8154ac1c1622e..2005fc7f578ee 100644
> --- a/kernel/kcov.c
> +++ b/kernel/kcov.c
> @@ -194,11 +194,11 @@ static notrace unsigned long canonicalize_ip(unsigned long ip)
> return ip;
> }
>
> -static notrace void kcov_append_to_buffer(unsigned long *area, int size,
> +static notrace void kcov_append_to_buffer(unsigned long *trace, int size,
> unsigned long ip)
> {
> /* The first 64-bit word is the number of subsequent PCs. */
> - unsigned long pos = READ_ONCE(area[0]) + 1;
> + unsigned long pos = READ_ONCE(trace[0]) + 1;
>
> if (likely(pos < size)) {
> /*
> @@ -208,9 +208,9 @@ static notrace void kcov_append_to_buffer(unsigned long *area, int size,
> * overwritten by the recursive __sanitizer_cov_trace_pc().
> * Update pos before writing pc to avoid such interleaving.
> */
> - WRITE_ONCE(area[0], pos);
> + WRITE_ONCE(trace[0], pos);
> barrier();
> - area[pos] = ip;
> + trace[pos] = ip;
> }
> }
>
> @@ -224,8 +224,8 @@ void notrace __sanitizer_cov_trace_pc_guard(u32 *guard)
> if (!check_kcov_mode(KCOV_MODE_TRACE_PC, current))
> return;
>
> - kcov_append_to_buffer(current->kcov_state.area,
> - current->kcov_state.size,
> + kcov_append_to_buffer(current->kcov_state.trace,
> + current->kcov_state.trace_size,
> canonicalize_ip(_RET_IP_));
> }
> EXPORT_SYMBOL(__sanitizer_cov_trace_pc_guard);
> @@ -241,8 +241,8 @@ void notrace __sanitizer_cov_trace_pc(void)
> if (!check_kcov_mode(KCOV_MODE_TRACE_PC, current))
> return;
>
> - kcov_append_to_buffer(current->kcov_state.area,
> - current->kcov_state.size,
> + kcov_append_to_buffer(current->kcov_state.trace,
> + current->kcov_state.trace_size,
> canonicalize_ip(_RET_IP_));
> }
> EXPORT_SYMBOL(__sanitizer_cov_trace_pc);
> @@ -251,9 +251,9 @@ EXPORT_SYMBOL(__sanitizer_cov_trace_pc);
> #ifdef CONFIG_KCOV_ENABLE_COMPARISONS
> static void notrace write_comp_data(u64 type, u64 arg1, u64 arg2, u64 ip)
> {
> - struct task_struct *t;
> - u64 *area;
> u64 count, start_index, end_pos, max_pos;
> + struct task_struct *t;
> + u64 *trace;
>
> t = current;
> if (!check_kcov_mode(KCOV_MODE_TRACE_CMP, t))
> @@ -265,22 +265,22 @@ static void notrace write_comp_data(u64 type, u64 arg1, u64 arg2, u64 ip)
> * We write all comparison arguments and types as u64.
> * The buffer was allocated for t->kcov_state.size unsigned longs.
> */
> - area = (u64 *)t->kcov_state.area;
> + trace = (u64 *)t->kcov_state.trace;
> max_pos = t->kcov_state.size * sizeof(unsigned long);
>
> - count = READ_ONCE(area[0]);
> + count = READ_ONCE(trace[0]);
>
> /* Every record is KCOV_WORDS_PER_CMP 64-bit words. */
> start_index = 1 + count * KCOV_WORDS_PER_CMP;
> end_pos = (start_index + KCOV_WORDS_PER_CMP) * sizeof(u64);
> if (likely(end_pos <= max_pos)) {
> /* See comment in kcov_append_to_buffer(). */
> - WRITE_ONCE(area[0], count + 1);
> + WRITE_ONCE(trace[0], count + 1);
> barrier();
> - area[start_index] = type;
> - area[start_index + 1] = arg1;
> - area[start_index + 2] = arg2;
> - area[start_index + 3] = ip;
> + trace[start_index] = type;
> + trace[start_index + 1] = arg1;
> + trace[start_index + 2] = arg2;
> + trace[start_index + 3] = ip;
> }
> }
>
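
And for KCOV_TRACE_CMP the same trace area holds fixed-size comparison
records instead; a rough sketch of walking them from userspace (assuming
the buffer was mapped as in the KCOV_TRACE_PC sketch above but enabled
with KCOV_TRACE_CMP; the field order follows write_comp_data() above):

#include <stdint.h>
#include <stdio.h>

/* Each comparison record is 4 64-bit words: type, arg1, arg2, PC. */
#define KCOV_WORDS_PER_CMP 4

/*
 * "cover" is the mmap()ed buffer, viewed as 64-bit words; word 0 is
 * the record count maintained by write_comp_data() above.
 */
static void dump_cmp_records(uint64_t *cover)
{
	uint64_t n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
	uint64_t i, type, arg1, arg2, pc;

	for (i = 0; i < n; i++) {
		type = cover[i * KCOV_WORDS_PER_CMP + 1];
		arg1 = cover[i * KCOV_WORDS_PER_CMP + 2];
		arg2 = cover[i * KCOV_WORDS_PER_CMP + 3];
		pc   = cover[i * KCOV_WORDS_PER_CMP + 4];
		printf("cmp type 0x%llx: 0x%llx vs 0x%llx at 0x%llx\n",
		       (unsigned long long)type, (unsigned long long)arg1,
		       (unsigned long long)arg2, (unsigned long long)pc);
	}
}
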
> @@ -381,11 +381,13 @@ static void kcov_start(struct task_struct *t, struct kcov *kcov,
>
> static void kcov_stop(struct task_struct *t)
> {
> + int saved_sequence = t->kcov_state.sequence;
> +
> WRITE_ONCE(t->kcov_mode, KCOV_MODE_DISABLED);
> barrier();
> t->kcov = NULL;
> - t->kcov_state.size = 0;
> - t->kcov_state.area = NULL;
> + t->kcov_state = (typeof(t->kcov_state)){};
> + t->kcov_state.sequence = saved_sequence;
> }
>
> static void kcov_task_reset(struct task_struct *t)
> @@ -734,6 +736,8 @@ static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
> }
> kcov->state.area = area;
> kcov->state.size = size;
> + kcov->state.trace = area;
> + kcov->state.trace_size = size;
> kcov->mode = KCOV_MODE_INIT;
> spin_unlock_irqrestore(&kcov->lock, flags);
> return 0;
> @@ -925,10 +929,12 @@ void kcov_remote_start(u64 handle)
> local_lock_irqsave(&kcov_percpu_data.lock, flags);
> }
>
> - /* Reset coverage size. */
> - *(u64 *)area = 0;
> state.area = area;
> state.size = size;
> + state.trace = area;
> + state.trace_size = size;
> + /* Reset coverage size. */
> + state.trace[0] = 0;
>
> if (in_serving_softirq()) {
> kcov_remote_softirq_start(t);
> --
> 2.50.1.470.g6ba607880d-goog
>