lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <521AEA44.70608@hitachi.com>
Date:	Mon, 26 Aug 2013 14:40:20 +0900
From:	Masami Hiramatsu <masami.hiramatsu.pt@...achi.com>
To:	Tom Zanussi <tom.zanussi@...ux.intel.com>
Cc:	rostedt@...dmis.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH v6 01/10] tracing: Add support for SOFT_DISABLE to syscall
 events

(2013/08/23 8:27), Tom Zanussi wrote:
> The original SOFT_DISABLE patches didn't add support for soft disable
> of syscall events; this adds it and paves the way for future patches
> allowing triggers to be added to syscall events, since triggers are
> built on top of SOFT_DISABLE.
> 
> Add an array of ftrace_event_file pointers indexed by syscall number
> to the trace array and remove the existing enabled bitmaps, which as a
> result are now redundant.  The ftrace_event_file structs in turn
> contain the soft disable flags we need for per-syscall soft disable
> accounting; later patches add additional 'trigger' flags and
> per-syscall triggers and filters.
> 

This looks good to me.

Reviewed-by: Masami Hiramatsu <masami.hiramatsu.pt@...achi.com>

> Signed-off-by: Tom Zanussi <tom.zanussi@...ux.intel.com>
> ---
>  kernel/trace/trace.h          |  4 ++--
>  kernel/trace/trace_syscalls.c | 36 ++++++++++++++++++++++++++++++------
>  2 files changed, 32 insertions(+), 8 deletions(-)
> 
> diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
> index fe39acd..b1227b9 100644
> --- a/kernel/trace/trace.h
> +++ b/kernel/trace/trace.h
> @@ -192,8 +192,8 @@ struct trace_array {
>  #ifdef CONFIG_FTRACE_SYSCALLS
>  	int			sys_refcount_enter;
>  	int			sys_refcount_exit;
> -	DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls);
> -	DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls);
> +	struct ftrace_event_file *enter_syscall_files[NR_syscalls];
> +	struct ftrace_event_file *exit_syscall_files[NR_syscalls];
>  #endif
>  	int			stop_count;
>  	int			clock_id;
> diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
> index 559329d..230cdb6 100644
> --- a/kernel/trace/trace_syscalls.c
> +++ b/kernel/trace/trace_syscalls.c
> @@ -302,6 +302,7 @@ static int __init syscall_exit_define_fields(struct ftrace_event_call *call)
>  static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
>  {
>  	struct trace_array *tr = data;
> +	struct ftrace_event_file *ftrace_file;
>  	struct syscall_trace_enter *entry;
>  	struct syscall_metadata *sys_data;
>  	struct ring_buffer_event *event;
> @@ -314,7 +315,13 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
>  	syscall_nr = trace_get_syscall_nr(current, regs);
>  	if (syscall_nr < 0)
>  		return;
> -	if (!test_bit(syscall_nr, tr->enabled_enter_syscalls))
> +
> +	/* Here we're inside the tp handler's rcu_read_lock (__DO_TRACE()) */
> +	ftrace_file = rcu_dereference_raw(tr->enter_syscall_files[syscall_nr]);
> +	if (!ftrace_file)
> +		return;
> +
> +	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
>  		return;
>  
>  	sys_data = syscall_nr_to_meta(syscall_nr);
> @@ -345,6 +352,7 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
>  static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
>  {
>  	struct trace_array *tr = data;
> +	struct ftrace_event_file *ftrace_file;
>  	struct syscall_trace_exit *entry;
>  	struct syscall_metadata *sys_data;
>  	struct ring_buffer_event *event;
> @@ -356,7 +364,13 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
>  	syscall_nr = trace_get_syscall_nr(current, regs);
>  	if (syscall_nr < 0)
>  		return;
> -	if (!test_bit(syscall_nr, tr->enabled_exit_syscalls))
> +
> +	/* Here we're inside the tp handler's rcu_read_lock (__DO_TRACE()) */
> +	ftrace_file = rcu_dereference_raw(tr->exit_syscall_files[syscall_nr]);
> +	if (!ftrace_file)
> +		return;
> +
> +	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
>  		return;
>  
>  	sys_data = syscall_nr_to_meta(syscall_nr);
> @@ -397,7 +411,7 @@ static int reg_event_syscall_enter(struct ftrace_event_file *file,
>  	if (!tr->sys_refcount_enter)
>  		ret = register_trace_sys_enter(ftrace_syscall_enter, tr);
>  	if (!ret) {
> -		set_bit(num, tr->enabled_enter_syscalls);
> +		rcu_assign_pointer(tr->enter_syscall_files[num], file);
>  		tr->sys_refcount_enter++;
>  	}
>  	mutex_unlock(&syscall_trace_lock);
> @@ -415,9 +429,14 @@ static void unreg_event_syscall_enter(struct ftrace_event_file *file,
>  		return;
>  	mutex_lock(&syscall_trace_lock);
>  	tr->sys_refcount_enter--;
> -	clear_bit(num, tr->enabled_enter_syscalls);
> +	rcu_assign_pointer(tr->enter_syscall_files[num], NULL);
>  	if (!tr->sys_refcount_enter)
>  		unregister_trace_sys_enter(ftrace_syscall_enter, tr);
> +	/*
> +	 * Callers expect the event to be completely disabled on
> +	 * return, so wait for current handlers to finish.
> +	 */
> +	synchronize_sched();
>  	mutex_unlock(&syscall_trace_lock);
>  }
>  
> @@ -435,7 +454,7 @@ static int reg_event_syscall_exit(struct ftrace_event_file *file,
>  	if (!tr->sys_refcount_exit)
>  		ret = register_trace_sys_exit(ftrace_syscall_exit, tr);
>  	if (!ret) {
> -		set_bit(num, tr->enabled_exit_syscalls);
> +		rcu_assign_pointer(tr->exit_syscall_files[num], file);
>  		tr->sys_refcount_exit++;
>  	}
>  	mutex_unlock(&syscall_trace_lock);
> @@ -453,9 +472,14 @@ static void unreg_event_syscall_exit(struct ftrace_event_file *file,
>  		return;
>  	mutex_lock(&syscall_trace_lock);
>  	tr->sys_refcount_exit--;
> -	clear_bit(num, tr->enabled_exit_syscalls);
> +	rcu_assign_pointer(tr->exit_syscall_files[num], NULL);
>  	if (!tr->sys_refcount_exit)
>  		unregister_trace_sys_exit(ftrace_syscall_exit, tr);
> +	/*
> +	 * Callers expect the event to be completely disabled on
> +	 * return, so wait for current handlers to finish.
> +	 */
> +	synchronize_sched();
>  	mutex_unlock(&syscall_trace_lock);
>  }
>  
> 


-- 
Masami HIRAMATSU
IT Management Research Dept. Linux Technology Center
Hitachi, Ltd., Yokohama Research Laboratory
E-mail: masami.hiramatsu.pt@...achi.com


--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ