lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date:   Wed, 1 Mar 2023 07:55:19 +0900
From:   Masami Hiramatsu (Google) <mhiramat@...nel.org>
To:     Uros Bizjak <ubizjak@...il.com>
Cc:     linux-trace-kernel@...r.kernel.org, linux-kernel@...r.kernel.org,
        Steven Rostedt <rostedt@...dmis.org>,
        Masami Hiramatsu <mhiramat@...nel.org>
Subject: Re: [PATCH 1/3] ring_buffer: Change some static functions to void

On Tue, 28 Feb 2023 18:59:27 +0100
Uros Bizjak <ubizjak@...il.com> wrote:

> The results of some static functions are not used. Change the
> type of these functions to void and remove unnecessary returns.
> 
> No functional change intended.

NAK, instead of dropping the errors, please handle them on the caller side.

Thank you,

> 
> Cc: Steven Rostedt <rostedt@...dmis.org>
> Cc: Masami Hiramatsu <mhiramat@...nel.org>
> Signed-off-by: Uros Bizjak <ubizjak@...il.com>
> ---
>  kernel/trace/ring_buffer.c | 22 +++++++---------------
>  1 file changed, 7 insertions(+), 15 deletions(-)
> 
> diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
> index af50d931b020..05fdc92554df 100644
> --- a/kernel/trace/ring_buffer.c
> +++ b/kernel/trace/ring_buffer.c
> @@ -1569,15 +1569,12 @@ static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
>  	}
>  }
>  
> -static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
> +static void rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
>  			  struct buffer_page *bpage)
>  {
>  	unsigned long val = (unsigned long)bpage;
>  
> -	if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK))
> -		return 1;
> -
> -	return 0;
> +	RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK);
>  }
>  
>  /**
> @@ -1587,30 +1584,28 @@ static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
>   * As a safety measure we check to make sure the data pages have not
>   * been corrupted.
>   */
> -static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
> +static void rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
>  {
>  	struct list_head *head = rb_list_head(cpu_buffer->pages);
>  	struct list_head *tmp;
>  
>  	if (RB_WARN_ON(cpu_buffer,
>  			rb_list_head(rb_list_head(head->next)->prev) != head))
> -		return -1;
> +		return;
>  
>  	if (RB_WARN_ON(cpu_buffer,
>  			rb_list_head(rb_list_head(head->prev)->next) != head))
> -		return -1;
> +		return;
>  
>  	for (tmp = rb_list_head(head->next); tmp != head; tmp = rb_list_head(tmp->next)) {
>  		if (RB_WARN_ON(cpu_buffer,
>  				rb_list_head(rb_list_head(tmp->next)->prev) != tmp))
> -			return -1;
> +			return;
>  
>  		if (RB_WARN_ON(cpu_buffer,
>  				rb_list_head(rb_list_head(tmp->prev)->next) != tmp))
> -			return -1;
> +			return;
>  	}
> -
> -	return 0;
>  }
>  
>  static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
> @@ -4500,7 +4495,6 @@ rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
>  	default:
>  		RB_WARN_ON(cpu_buffer, 1);
>  	}
> -	return;
>  }
>  
>  static void
> @@ -4531,7 +4525,6 @@ rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
>  	default:
>  		RB_WARN_ON(iter->cpu_buffer, 1);
>  	}
> -	return;
>  }
>  
>  static struct buffer_page *
> @@ -4946,7 +4939,6 @@ rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked)
>  {
>  	if (likely(locked))
>  		raw_spin_unlock(&cpu_buffer->reader_lock);
> -	return;
>  }
>  
>  /**
> -- 
> 2.39.2
> 


-- 
Masami Hiramatsu (Google) <mhiramat@...nel.org>

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ