lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [day] [month] [year] [list]
Date:   Thu, 1 Sep 2016 12:49:46 -0400
From:   Steven Rostedt <rostedt@...dmis.org>
To:     Chunyan Zhang <zhang.chunyan@...aro.org>
Cc:     mingo@...hat.com, linux-kernel@...r.kernel.org,
        zhang.lyra@...il.com
Subject: Re: [PATCH] trace: fix the errors caused by incompatible type of
 RCU variables

On Mon, 22 Aug 2016 19:27:14 +0800
Chunyan Zhang <zhang.chunyan@...aro.org> wrote:

> The variables which are processed by RCU functions should be annotated
> as RCU; otherwise sparse will report errors like the one below:
> 
> "error: incompatible types in comparison expression (different
> address spaces)"

Thanks, but I'll need to fix this up a little.

> 
> Signed-off-by: Chunyan Zhang <zhang.chunyan@...aro.org>
> ---
>  include/linux/ftrace.h       |  6 +++---
>  include/linux/trace_events.h |  2 +-
>  kernel/trace/ftrace.c        | 36 ++++++++++++++++++++++--------------
>  kernel/trace/trace.h         |  6 +++---
>  4 files changed, 29 insertions(+), 21 deletions(-)
> 
> diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
> index 7d565af..24f75ca 100644
> --- a/include/linux/ftrace.h
> +++ b/include/linux/ftrace.h
> @@ -140,8 +140,8 @@ enum {
>  #ifdef CONFIG_DYNAMIC_FTRACE
>  /* The hash used to know what functions callbacks trace */
>  struct ftrace_ops_hash {
> -	struct ftrace_hash		*notrace_hash;
> -	struct ftrace_hash		*filter_hash;
> +	struct ftrace_hash __rcu	*notrace_hash;
> +	struct ftrace_hash __rcu	*filter_hash;
>  	struct mutex			regex_lock;
>  };
>  #endif
> @@ -159,7 +159,7 @@ struct ftrace_ops_hash {
>   */
>  struct ftrace_ops {
>  	ftrace_func_t			func;
> -	struct ftrace_ops		*next;
> +	struct ftrace_ops __rcu		*next;
>  	unsigned long			flags;
>  	void				*private;
>  	ftrace_func_t			saved_func;
> diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
> index be00761..90e148c 100644
> --- a/include/linux/trace_events.h
> +++ b/include/linux/trace_events.h
> @@ -331,7 +331,7 @@ enum {
>  struct trace_event_file {
>  	struct list_head		list;
>  	struct trace_event_call		*event_call;
> -	struct event_filter		*filter;
> +	struct event_filter __rcu	*filter;
>  	struct dentry			*dir;
>  	struct trace_array		*tr;
>  	struct trace_subsystem_dir	*system;
> diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
> index 84752c8..088c4e7 100644
> --- a/kernel/trace/ftrace.c
> +++ b/kernel/trace/ftrace.c
> @@ -111,7 +111,8 @@ static int ftrace_disabled __read_mostly;
>  
>  static DEFINE_MUTEX(ftrace_lock);
>  
> -static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
> +static struct ftrace_ops __rcu *
> +		ftrace_ops_list __read_mostly = &ftrace_list_end;
>  ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
>  static struct ftrace_ops global_ops;
>  
> @@ -167,8 +168,9 @@ int ftrace_nr_registered_ops(void)
>  
>  	mutex_lock(&ftrace_lock);
>  
> -	for (ops = ftrace_ops_list;
> -	     ops != &ftrace_list_end; ops = ops->next)
> +	for (ops = rcu_dereference_raw_notrace(ftrace_ops_list);
> +	     ops != &ftrace_list_end;
> +	     ops = rcu_dereference_raw_notrace(ops->next))

These need to be rcu_dereference_protected(); they are not modified in
RCU critical sections, but are modified by the write side (see the
ftrace_lock above it). Otherwise, lockdep will complain that these are
modified without RCU protection. The rcu_dereference_protected() tells
RCU that the modification is done on the write side, and we get to
specify the lock that protects it. lockdep will make sure that lock is
held when this happens.


>  		cnt++;
>  
>  	mutex_unlock(&ftrace_lock);
> @@ -273,10 +275,10 @@ static void update_ftrace_function(void)
>  	 * If there's only one ftrace_ops registered, the ftrace_ops_list
>  	 * will point to the ops we want.
>  	 */
> -	set_function_trace_op = ftrace_ops_list;
> +	set_function_trace_op = rcu_dereference_raw_notrace(ftrace_ops_list);

Same here.

>  
>  	/* If there's no ftrace_ops registered, just call the stub function */
> -	if (ftrace_ops_list == &ftrace_list_end) {
> +	if (set_function_trace_op == &ftrace_list_end) {
>  		func = ftrace_stub;
>  
>  	/*
> @@ -284,7 +286,8 @@ static void update_ftrace_function(void)
>  	 * recursion safe and not dynamic and the arch supports passing ops,
>  	 * then have the mcount trampoline call the function directly.
>  	 */
> -	} else if (ftrace_ops_list->next == &ftrace_list_end) {
> +	} else if (rcu_dereference_raw_notrace(ftrace_ops_list->next) ==
> +		   &ftrace_list_end) {

And here.

>  		func = ftrace_ops_get_list_func(ftrace_ops_list);
>  
>  	} else {
> @@ -346,9 +349,11 @@ int using_ftrace_ops_list_func(void)
>  	return ftrace_trace_function == ftrace_ops_list_func;
>  }
>  
> -static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
> +static void add_ftrace_ops(struct ftrace_ops __rcu **list,
> +			   struct ftrace_ops *ops)
>  {
> -	ops->next = *list;
> +	rcu_assign_pointer(ops->next, *list);
> +
>  	/*
>  	 * We are entering ops into the list but another
>  	 * CPU might be walking that list. We need to make sure
> @@ -358,7 +363,8 @@ static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
>  	rcu_assign_pointer(*list, ops);
>  }
>  
> -static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
> +static int remove_ftrace_ops(struct ftrace_ops __rcu **list,
> +			     struct ftrace_ops *ops)
>  {
>  	struct ftrace_ops **p;
>  
> @@ -366,7 +372,8 @@ static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
>  	 * If we are removing the last function, then simply point
>  	 * to the ftrace_stub.
>  	 */
> -	if (*list == ops && ops->next == &ftrace_list_end) {
> +	if (rcu_dereference_raw_notrace(*list) == ops &&
> +	    rcu_dereference_raw_notrace(ops->next) == &ftrace_list_end) {

And here.

>  		*list = &ftrace_list_end;
>  		return 0;
>  	}
> @@ -1493,8 +1500,8 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
>  		return 0;
>  #endif
>  
> -	hash.filter_hash = rcu_dereference_raw_notrace(ops->func_hash->filter_hash);
> -	hash.notrace_hash = rcu_dereference_raw_notrace(ops->func_hash->notrace_hash);
> +	rcu_assign_pointer(hash.filter_hash, ops->func_hash->filter_hash);
> +	rcu_assign_pointer(hash.notrace_hash, ops->func_hash->notrace_hash);
>  
>  	if (hash_contains_ip(ip, &hash))
>  		ret = 1;
> @@ -2752,7 +2759,7 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
>  	 * If there's no more ops registered with ftrace, run a
>  	 * sanity check to make sure all rec flags are cleared.
>  	 */
> -	if (ftrace_ops_list == &ftrace_list_end) {
> +	if (rcu_dereference_raw_notrace(ftrace_ops_list) == &ftrace_list_end) {
>  		struct ftrace_page *pg;
>  		struct dyn_ftrace *rec;
>  
> @@ -5642,7 +5649,8 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
>  	if (ftrace_enabled) {
>  
>  		/* we are starting ftrace again */
> -		if (ftrace_ops_list != &ftrace_list_end)
> +		if (rcu_dereference_raw_notrace(ftrace_ops_list) !=
> +		    &ftrace_list_end)

And here.

No need to resend. I'll update the patch and you still get credit for
it ;-)

-- Steve

>  			update_ftrace_function();
>  
>  		ftrace_startup_sysctl();
> diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
> index f783df4..4538110 100644
> --- a/kernel/trace/trace.h
> +++ b/kernel/trace/trace.h
> @@ -1075,9 +1075,9 @@ struct ftrace_event_field {
>  struct event_filter {
>  	int			n_preds;	/* Number assigned */
>  	int			a_preds;	/* allocated */
> -	struct filter_pred	*preds;
> -	struct filter_pred	*root;
> -	char			*filter_string;
> +	struct filter_pred __rcu	*preds;
> +	struct filter_pred __rcu	*root;
> +	char				*filter_string;
>  };
>  
>  struct event_subsystem {

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ