Date:   Wed, 11 Aug 2021 17:04:40 +0200
From:   Jiri Olsa <jolsa@...hat.com>
To:     Namhyung Kim <namhyung@...nel.org>
Cc:     Peter Zijlstra <peterz@...radead.org>,
        Ingo Molnar <mingo@...nel.org>,
        Arnaldo Carvalho de Melo <acme@...nel.org>,
        Mark Rutland <mark.rutland@....com>,
        Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
        LKML <linux-kernel@...r.kernel.org>,
        Stephane Eranian <eranian@...gle.com>,
        Andi Kleen <ak@...ux.intel.com>,
        Ian Rogers <irogers@...gle.com>, gmx@...gle.com
Subject: Re: [RFC] perf/core: Add an ioctl to get a number of lost samples

On Tue, Aug 10, 2021 at 11:21:35PM -0700, Namhyung Kim wrote:
> Sometimes we want to know an accurate number of samples even if some
> are lost.  Currently PERF_RECORD_LOST is generated per ring buffer,
> which might be shared with other events, so it's hard to know the
> per-event lost count.
> 
> Add an event->lost_samples field and a PERF_EVENT_IOC_LOST_SAMPLES
> ioctl to retrieve it from userspace.
> 
> Signed-off-by: Namhyung Kim <namhyung@...nel.org>
> ---
>  include/linux/perf_event.h      | 2 ++
>  include/uapi/linux/perf_event.h | 1 +
>  kernel/events/core.c            | 9 +++++++++
>  kernel/events/ring_buffer.c     | 5 ++++-
>  4 files changed, 16 insertions(+), 1 deletion(-)
> 
> diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
> index f5a6a2f069ed..44d72079c77a 100644
> --- a/include/linux/perf_event.h
> +++ b/include/linux/perf_event.h
> @@ -756,6 +756,8 @@ struct perf_event {
>  	struct pid_namespace		*ns;
>  	u64				id;
>  
> +	atomic_t			lost_samples;
> +
>  	u64				(*clock)(void);
>  	perf_overflow_handler_t		overflow_handler;
>  	void				*overflow_handler_context;
> diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
> index bf8143505c49..24397799127d 100644
> --- a/include/uapi/linux/perf_event.h
> +++ b/include/uapi/linux/perf_event.h
> @@ -505,6 +505,7 @@ struct perf_event_query_bpf {
>  #define PERF_EVENT_IOC_PAUSE_OUTPUT		_IOW('$', 9, __u32)
>  #define PERF_EVENT_IOC_QUERY_BPF		_IOWR('$', 10, struct perf_event_query_bpf *)
>  #define PERF_EVENT_IOC_MODIFY_ATTRIBUTES	_IOW('$', 11, struct perf_event_attr *)
> +#define PERF_EVENT_IOC_LOST_SAMPLES		_IOR('$', 12, __u64 *)
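For illustration, a minimal userspace sketch of how the proposed ioctl
could be queried; only the PERF_EVENT_IOC_LOST_SAMPLES define is taken
from the patch above, the event setup is arbitrary and the ring-buffer
handling is elided:

/*
 * Illustrative only: query the proposed ioctl from userspace.  The
 * define is redefined here in case the installed uapi headers do not
 * have it yet.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

#ifndef PERF_EVENT_IOC_LOST_SAMPLES
#define PERF_EVENT_IOC_LOST_SAMPLES	_IOR('$', 12, __u64 *)
#endif

int main(void)
{
	struct perf_event_attr attr;
	__u64 lost = 0;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_CPU_CLOCK;
	attr.sample_period = 1000;
	attr.sample_type = PERF_SAMPLE_IP;

	/* profile the current task on any CPU */
	fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	/* ... mmap the ring buffer, run the workload, consume samples ... */

	if (ioctl(fd, PERF_EVENT_IOC_LOST_SAMPLES, &lost))
		perror("PERF_EVENT_IOC_LOST_SAMPLES");
	else
		printf("lost samples: %llu\n", (unsigned long long)lost);

	close(fd);
	return 0;
}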

would it be better to use the read syscall for that?
  https://lore.kernel.org/lkml/20210622153918.688500-5-jolsa@kernel.org/
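
For comparison, a rough sketch of what the read-syscall approach could
look like; the read_format bit and the result layout below are
assumptions for illustration and may not match the interface in that
patchset:

/*
 * Hypothetical read_format-based retrieval of the lost count.  The
 * flag name/value and the layout of the read buffer are assumed here,
 * not taken from the linked patchset.
 */
#include <stdint.h>
#include <unistd.h>

#define HYPOTHETICAL_PERF_FORMAT_LOST	(1ULL << 4)	/* assumed bit */

struct lost_read_result {
	uint64_t value;		/* counter value */
	uint64_t lost;		/* lost sample count */
};

/* set attr.read_format |= HYPOTHETICAL_PERF_FORMAT_LOST before perf_event_open() */
static int read_lost(int fd, uint64_t *lost)
{
	struct lost_read_result rr;

	if (read(fd, &rr, sizeof(rr)) != (ssize_t)sizeof(rr))
		return -1;

	*lost = rr.lost;
	return 0;
}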

that patchset ended up stalled on me not having a way to reproduce
the issue you guys wanted the fix for ;-) the lost count is available
there as well

jirka

>  
>  enum perf_event_ioc_flags {
>  	PERF_IOC_FLAG_GROUP		= 1U << 0,
> diff --git a/kernel/events/core.c b/kernel/events/core.c
> index 0e125ae2fa92..a4d6736b6594 100644
> --- a/kernel/events/core.c
> +++ b/kernel/events/core.c
> @@ -5664,6 +5664,15 @@ static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned lon
>  
>  		return perf_event_modify_attr(event,  &new_attr);
>  	}
> +
> +	case PERF_EVENT_IOC_LOST_SAMPLES: {
> +		u64 lost = atomic_read(&event->lost_samples);
> +
> +		if (copy_to_user((void __user *)arg, &lost, sizeof(lost)))
> +			return -EFAULT;
> +		return 0;
> +	}
> +
>  	default:
>  		return -ENOTTY;
>  	}
> diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
> index 52868716ec35..06d7dacb05da 100644
> --- a/kernel/events/ring_buffer.c
> +++ b/kernel/events/ring_buffer.c
> @@ -172,8 +172,10 @@ __perf_output_begin(struct perf_output_handle *handle,
>  		goto out;
>  
>  	if (unlikely(rb->paused)) {
> -		if (rb->nr_pages)
> +		if (rb->nr_pages) {
>  			local_inc(&rb->lost);
> +			atomic_inc(&event->lost_samples);
> +		}
>  		goto out;
>  	}
>  
> @@ -254,6 +256,7 @@ __perf_output_begin(struct perf_output_handle *handle,
>  
>  fail:
>  	local_inc(&rb->lost);
> +	atomic_inc(&event->lost_samples);
>  	perf_output_put_handle(handle);
>  out:
>  	rcu_read_unlock();
> -- 
> 2.32.0.605.g8dce9f2422-goog
> 
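For context on the motivation in the quoted commit message, the
existing PERF_RECORD_LOST records are emitted into the mmap ring
buffer itself and (per the perf_event_open(2) documentation) look
roughly like this:

#include <linux/perf_event.h>

/*
 * Existing PERF_RECORD_LOST record layout in the mmap ring buffer,
 * as documented for perf_event_open(2).  Optional sample_id fields
 * (selected via sample_type and sample_id_all) are omitted here.
 */
struct lost_record {
	struct perf_event_header header;	/* header.type == PERF_RECORD_LOST */
	__u64 id;				/* unique event ID for the lost samples */
	__u64 lost;				/* number of lost samples */
};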
