Date:   Fri, 23 Aug 2019 16:22:21 +0800
From:   Leo Yan <leo.yan@...aro.org>
To:     Mathieu Poirier <mathieu.poirier@...aro.org>
Cc:     yabinc@...gle.com, suzuki.poulose@....com, mike.leach@....com,
        alexander.shishkin@...ux.intel.com,
        linux-arm-kernel@...ts.infradead.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH 2/2] coresight: tmc-etr: Add barrier packet when moving
 offset forward

Hi Mathieu,

On Thu, Aug 22, 2019 at 04:09:15PM -0600, Mathieu Poirier wrote:
> This patch adds barrier packets in the trace stream when the offset in the
> data buffer needs to be moved forward.  Otherwise the decoder isn't aware
> of the break in the stream and can't synchronise itself with the trace
> data.
> 
> Signed-off-by: Mathieu Poirier <mathieu.poirier@...aro.org>
> ---
>  .../hwtracing/coresight/coresight-tmc-etr.c   | 43 ++++++++++++++-----
>  1 file changed, 33 insertions(+), 10 deletions(-)
> 
> diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
> index 4f000a03152e..0e4cd6ec5f28 100644
> --- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
> +++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
> @@ -946,10 +946,6 @@ static void tmc_sync_etr_buf(struct tmc_drvdata *drvdata)
>  	WARN_ON(!etr_buf->ops || !etr_buf->ops->sync);
>  
>  	etr_buf->ops->sync(etr_buf, rrp, rwp);
> -
> -	/* Insert barrier packets at the beginning, if there was an overflow */
> -	if (etr_buf->full)
> -		tmc_etr_buf_insert_barrier_packet(etr_buf, etr_buf->offset);
>  }
>  
>  static void __tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
> @@ -1415,10 +1411,11 @@ static void tmc_free_etr_buffer(void *config)
>   * buffer to the perf ring buffer.
>   */
>  static void tmc_etr_sync_perf_buffer(struct etr_perf_buffer *etr_perf,
> +				     unsigned long src_offset,
>  				     unsigned long to_copy)
>  {
>  	long bytes;
> -	long pg_idx, pg_offset, src_offset;
> +	long pg_idx, pg_offset;
>  	unsigned long head = etr_perf->head;
>  	char **dst_pages, *src_buf;
>  	struct etr_buf *etr_buf = etr_perf->etr_buf;
> @@ -1427,7 +1424,6 @@ static void tmc_etr_sync_perf_buffer(struct etr_perf_buffer *etr_perf,
>  	pg_idx = head >> PAGE_SHIFT;
>  	pg_offset = head & (PAGE_SIZE - 1);
>  	dst_pages = (char **)etr_perf->pages;
> -	src_offset = etr_buf->offset + etr_buf->len - to_copy;
>  
>  	while (to_copy > 0) {
>  		/*
> @@ -1475,7 +1471,7 @@ tmc_update_etr_buffer(struct coresight_device *csdev,
>  		      void *config)
>  {
>  	bool lost = false;
> -	unsigned long flags, size = 0;
> +	unsigned long flags, offset, size = 0;
>  	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
>  	struct etr_perf_buffer *etr_perf = config;
>  	struct etr_buf *etr_buf = etr_perf->etr_buf;
> @@ -1503,11 +1499,39 @@ tmc_update_etr_buffer(struct coresight_device *csdev,
>  	spin_unlock_irqrestore(&drvdata->spinlock, flags);
>  
>  	size = etr_buf->len;
> +	offset = etr_buf->offset;
> +	lost |= etr_buf->full;
> +
> +	/*
> +	 * The ETR buffer may be bigger than the space available in the
> +	 * perf ring buffer (handle->size).  If so advance the offset so that we
> +	 * get the latest trace data.  In snapshot mode none of that matters
> +	 * since we are expected to clobber stale data in favour of the latest
> +	 * traces.
> +	 */
>  	if (!etr_perf->snapshot && size > handle->size) {
> -		size = handle->size;
> +		u32 mask = tmc_get_memwidth_mask(drvdata);
> +
> +		/*
> +		 * Make sure the new size is aligned in accordance with the
> +		 * requirement explained in function tmc_get_memwidth_mask().
> +		 */
> +		size = handle->size & mask;
> +		offset = etr_buf->offset + etr_buf->len - size;
> +
> +		if (offset >= etr_buf->size)
> +			offset -= etr_buf->size;
>  		lost = true;
>  	}
> -	tmc_etr_sync_perf_buffer(etr_perf, size);
> +
> +	/*
> +	 * Insert barrier packets at the beginning, if there was an overflow
> +	 * or if the offset had to be brought forward.
> +	 */
> +	if (lost)
> +		tmc_etr_buf_insert_barrier_packet(etr_buf, offset);
> +
> +	tmc_etr_sync_perf_buffer(etr_perf, offset, size);

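To make the offset arithmetic in the hunk above concrete, here is a small
standalone sketch (plain C, not the driver's code; the struct and function
names are made up for illustration).  When the valid ETR data is larger than
the space left in the perf ring buffer, it keeps only the newest
"space & mask" bytes and wraps the start offset around the circular ETR
buffer, mirroring the computation added to tmc_update_etr_buffer():

#include <stdio.h>

struct sketch_etr {
	unsigned long size;	/* total size of the circular ETR buffer */
	unsigned long offset;	/* where the valid data starts */
	unsigned long len;	/* amount of valid data */
};

/* Return the (possibly advanced) offset and store the clamped size. */
static unsigned long advance_offset(const struct sketch_etr *buf,
				    unsigned long space, unsigned long mask,
				    unsigned long *out_size)
{
	unsigned long size = buf->len;
	unsigned long offset = buf->offset;

	if (size > space) {
		/* Keep the size aligned to the memory width, as the driver
		 * does with the mask from tmc_get_memwidth_mask(). */
		size = space & mask;
		offset = buf->offset + buf->len - size;
		if (offset >= buf->size)	/* wrap around the circular buffer */
			offset -= buf->size;
	}
	*out_size = size;
	return offset;
}

int main(void)
{
	struct sketch_etr buf = { .size = 4096, .offset = 3500, .len = 2048 };
	unsigned long size, offset;

	/* 3500 + 2048 - 1024 = 4524 >= 4096, so the offset wraps to 428. */
	offset = advance_offset(&buf, 1024, ~0x3UL, &size);
	printf("copy %lu bytes starting at offset %lu\n", size, offset);
	return 0;
}
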
With this new code, the barrier packet insertion has been moved out of
tmc_sync_etr_buf().  However, this patch doesn't handle the path where the
user reads trace data through the sysfs node; the trace buffer can also be
full in that case, so wouldn't sysfs mode now miss inserting the barrier
packets?
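
A minimal toy model of that concern, with made-up names rather than the
driver's actual functions, assuming the sysfs read path only goes through
the sync helper and does nothing else on overflow:

#include <stdbool.h>
#include <stdio.h>

struct toy_etr_buf {
	bool full;		/* set when the hardware wrapped (overflow) */
	unsigned long offset;	/* start of valid data */
};

static void toy_insert_barrier(struct toy_etr_buf *buf, unsigned long offset)
{
	(void)buf;
	printf("barrier packet inserted at offset %lu\n", offset);
}

/* Stands in for tmc_sync_etr_buf(): after this patch it no longer
 * inserts a barrier packet when the buffer is full. */
static void toy_sync_etr_buf(struct toy_etr_buf *buf)
{
	(void)buf;	/* sync rrp/rwp with the hardware ... */
}

/* Perf path (tmc_update_etr_buffer): still inserts the barrier. */
static void toy_update_perf_buffer(struct toy_etr_buf *buf)
{
	toy_sync_etr_buf(buf);
	if (buf->full)
		toy_insert_barrier(buf, buf->offset);
}

/* Sysfs-style read path: only calls the sync helper, so an overflow
 * would leave the stream without a barrier. */
static void toy_read_via_sysfs(struct toy_etr_buf *buf)
{
	toy_sync_etr_buf(buf);
}

int main(void)
{
	struct toy_etr_buf buf = { .full = true, .offset = 128 };

	toy_update_perf_buffer(&buf);	/* prints a barrier message */
	toy_read_via_sysfs(&buf);	/* prints nothing: the gap in question */
	return 0;
}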

Thanks,
Leo Yan

>  	/*
>  	 * In snapshot mode we simply increment the head by the number of byte
> @@ -1518,7 +1542,6 @@ tmc_update_etr_buffer(struct coresight_device *csdev,
>  	if (etr_perf->snapshot)
>  		handle->head += size;
>  
> -	lost |= etr_buf->full;
>  out:
>  	/*
>  	 * Don't set the TRUNCATED flag in snapshot mode because 1) the
> -- 
> 2.17.1
> 
