Message-ID: <CAG2=9p_SKF4TAbbqF6L4u=y9-_m73vZJ3tOAbRKHCT0MOsLwGQ@mail.gmail.com>
Date:   Fri, 21 Oct 2016 20:13:13 +0800
From:   Chunyan Zhang <zhang.chunyan@...aro.org>
To:     Steven Rostedt <rostedt@...dmis.org>
Cc:     Mathieu Poirier <mathieu.poirier@...aro.org>,
        Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
        mingo@...hat.com, Mike Leach <mike.leach@....com>,
        Tor Jeremiassen <tor@...com>, philippe.langlais@...com,
        Nicolas GUION <nicolas.guion@...com>,
        Felipe Balbi <felipe.balbi@...ux.intel.com>,
        Lyra Zhang <zhang.lyra@...il.com>,
        "linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
        "linux-arm-kernel@...ts.infradead.org" 
        <linux-arm-kernel@...ts.infradead.org>
Subject: Re: [PATCH V7 1/3] tracing: add a possibility of exporting function
 trace to other places instead of ring buffer only

On 18 October 2016 at 23:44, Steven Rostedt <rostedt@...dmis.org> wrote:
> On Tue, 18 Oct 2016 16:08:58 +0800
> Chunyan Zhang <zhang.chunyan@...aro.org> wrote:
>
>> Currently, Function traces can only be exported to the ring buffer. This
>> patch adds a trace_export concept which can process traces and export
>> them to a registered destination, in addition to the only current
>> output of Ftrace, i.e. the ring buffer.
>>
>> In this way, if we want Function traces to be sent to a destination
>> other than the ring buffer, we just need to register a new trace_export
>> and implement its own .write() function for writing traces to that storage.
>>
>> With this patch, only the Function trace (trace type TRACE_FN)
>> is supported.
>
> This is getting better, but I still have some nits.
>

Thanks.
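
For anyone reading along, a consumer of this interface would look
roughly like the sketch below (the names are only illustrative; the
real user is the stm_ftrace export added in 2/3 of this series):

        /* Illustrative sketch only -- not part of this patch. */
        #include <linux/module.h>
        #include <linux/trace.h>

        /* Copy each committed ftrace entry to some out-of-band sink. */
        static void my_export_write(const char *buf, unsigned int len)
        {
                /* push 'buf'/'len' to the destination of your choice */
        }

        static struct trace_export my_export = {
                .write  = my_export_write,
        };

        static int __init my_export_init(void)
        {
                return register_ftrace_export(&my_export);
        }

        static void __exit my_export_exit(void)
        {
                unregister_ftrace_export(&my_export);
        }

        module_init(my_export_init);
        module_exit(my_export_exit);
        MODULE_LICENSE("GPL");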

>>
>> Signed-off-by: Chunyan Zhang <zhang.chunyan@...aro.org>
>> ---
>>  include/linux/trace.h |  28 +++++++++++
>>  kernel/trace/trace.c  | 132 +++++++++++++++++++++++++++++++++++++++++++++++++-
>>  2 files changed, 159 insertions(+), 1 deletion(-)
>>  create mode 100644 include/linux/trace.h
>>
>> diff --git a/include/linux/trace.h b/include/linux/trace.h
>> new file mode 100644
>> index 0000000..eb1c5b8
>> --- /dev/null
>> +++ b/include/linux/trace.h
>> @@ -0,0 +1,28 @@
>> +#ifndef _LINUX_TRACE_H
>> +#define _LINUX_TRACE_H
>> +
>> +#ifdef CONFIG_TRACING
>> +/*
>> + * The trace export - an export of Ftrace output. A trace_export
>> + * can process traces and export them to a registered destination,
>> + * in addition to the only current output of Ftrace - i.e. the ring buffer.
>> + *
>> + * If you want traces to be sent somewhere other than the ring
>> + * buffer only, just register a new trace_export and implement
>> + * its own .write() function for writing traces to that storage.
>> + *
>> + * next      - pointer to the next trace_export
>> + * write     - copy traces which have been dealt with by ->commit() to
>> + *             the destination
>> + */
>> +struct trace_export {
>> +     struct trace_export __rcu       *next;
>> +     void (*write)(const char *, unsigned int);
>
> Why const char*? Why not const void *? This will never be a string.
>

Will revise this.
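
I.e., in the next version the callback would presumably become
something like:

        void (*write)(const void *buf, unsigned int len);

and the (char *) cast at the call site in trace_process_export() can
then be dropped, since a struct trace_entry * converts to const void *
implicitly.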

>
>> +};
>> +
>> +int register_ftrace_export(struct trace_export *export);
>> +int unregister_ftrace_export(struct trace_export *export);
>> +
>> +#endif       /* CONFIG_TRACING */
>> +
>> +#endif       /* _LINUX_TRACE_H */
>> diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
>> index 8696ce6..db94ec1 100644
>> --- a/kernel/trace/trace.c
>> +++ b/kernel/trace/trace.c
>> @@ -40,6 +40,7 @@
>>  #include <linux/poll.h>
>>  #include <linux/nmi.h>
>>  #include <linux/fs.h>
>> +#include <linux/trace.h>
>>  #include <linux/sched/rt.h>
>>
>>  #include "trace.h"
>> @@ -2128,6 +2129,132 @@ void trace_buffer_unlock_commit_regs(struct trace_array *tr,
>>       ftrace_trace_userstack(buffer, flags, pc);
>>  }
>>
>> +static void
>> +trace_process_export(struct trace_export *export,
>> +            struct ring_buffer_event *event)
>> +{
>> +     struct trace_entry *entry;
>> +     unsigned int size = 0;
>> +
>> +     entry = ring_buffer_event_data(event);
>> +
>> +     size = ring_buffer_event_length(event);
>> +
>> +     if (export->write)
>> +             export->write((char *)entry, size);
>
> Is there ever going to be a time where export->write wont be set?

There isn't such a case so far, since only one trace_export (i.e.
stm_ftrace) is added in this patch-set. I just wanted to make sure that
write() has been set before the trace_export is registered, like what I
did in 2/3 of this series.

>
> And if there is, this can be racy. As in
>
>
>         CPU 0:                  CPU 1:
>         ------                  ------
>         if (export->write)
>
>                                 export->write = NULL;

Is there going to be this kind of use case? Why would someone need to
change export->write() rather than register a new trace_export?

I probably haven't understood your point thoroughly; please correct me
if my guess is wrong.
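
Just to check my understanding of the window you describe: if clearing
->write concurrently were ever allowed, I suppose the check-then-call
would have to read the pointer only once, e.g. something like:

        void (*write)(const char *, unsigned int);

        write = READ_ONCE(export->write);
        if (write)
                write((char *)entry, size);

(Not proposing this for the patch, since register_ftrace_export()
refuses an export whose ->write is not set; just making sure I follow
the scenario.)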


Thanks for the review,
Chunyan

>
>         export->write(entry, size);
>
>         BOOM!
>
>
> -- Steve
>
>> +}
>> +
>> +static DEFINE_MUTEX(ftrace_export_lock);
>> +
>> +static struct trace_export __rcu *ftrace_exports_list __read_mostly;
>> +
>> +static DEFINE_STATIC_KEY_FALSE(ftrace_exports_enabled);
>> +
>> +static inline void ftrace_exports_enable(void)
>> +{
>> +     static_branch_enable(&ftrace_exports_enabled);
>> +}
>> +
>> +static inline void ftrace_exports_disable(void)
>> +{
>> +     static_branch_disable(&ftrace_exports_enabled);
>> +}
>> +
>> +void ftrace_exports(struct ring_buffer_event *event)
>> +{
>> +     struct trace_export *export;
>> +
>> +     preempt_disable_notrace();
>> +
>> +     export = rcu_dereference_raw_notrace(ftrace_exports_list);
>> +     while (export) {
>> +             trace_process_export(export, event);
>> +             export = rcu_dereference_raw_notrace(export->next);
>> +     }
>> +
>> +     preempt_enable_notrace();
>> +}
>> +
>> +static inline void
>> +add_trace_export(struct trace_export **list, struct trace_export *export)
>> +{
>> +     rcu_assign_pointer(export->next, *list);
>> +     /*
>> +      * We are entering export into the list but another
>> +      * CPU might be walking that list. We need to make sure
>> +      * the export->next pointer is valid before another CPU sees
>> +      * the export pointer included into the list.
>> +      */
>> +     rcu_assign_pointer(*list, export);
>> +}
>> +
>> +static inline int
>> +rm_trace_export(struct trace_export **list, struct trace_export *export)
>> +{
>> +     struct trace_export **p;
>> +
>> +     for (p = list; *p != NULL; p = &(*p)->next)
>> +             if (*p == export)
>> +                     break;
>> +
>> +     if (*p != export)
>> +             return -1;
>> +
>> +     rcu_assign_pointer(*p, (*p)->next);
>> +
>> +     return 0;
>> +}
>> +
>> +static inline void
>> +add_ftrace_export(struct trace_export **list, struct trace_export *export)
>> +{
>> +     if (*list == NULL)
>> +             ftrace_exports_enable();
>> +
>> +     add_trace_export(list, export);
>> +}
>> +
>> +static inline int
>> +rm_ftrace_export(struct trace_export **list, struct trace_export *export)
>> +{
>> +     int ret;
>> +
>> +     ret = rm_trace_export(list, export);
>> +     if (*list == NULL)
>> +             ftrace_exports_disable();
>> +
>> +     return ret;
>> +}
>> +
>> +int register_ftrace_export(struct trace_export *export)
>> +{
>> +     if (WARN_ON_ONCE(!export->write))
>> +             return -1;
>> +
>> +     mutex_lock(&ftrace_export_lock);
>> +
>> +     add_ftrace_export(&ftrace_exports_list, export);
>> +
>> +     mutex_unlock(&ftrace_export_lock);
>> +
>> +     return 0;
>> +}
>> +EXPORT_SYMBOL_GPL(register_ftrace_export);
>> +
>> +int unregister_ftrace_export(struct trace_export *export)
>> +{
>> +     int ret;
>> +
>> +     mutex_lock(&ftrace_export_lock);
>> +
>> +     ret = rm_ftrace_export(&ftrace_exports_list, export);
>> +
>> +     mutex_unlock(&ftrace_export_lock);
>> +
>> +     return ret;
>> +}
>> +EXPORT_SYMBOL_GPL(unregister_ftrace_export);
>> +
>>  void
>>  trace_function(struct trace_array *tr,
>>              unsigned long ip, unsigned long parent_ip, unsigned long flags,
>> @@ -2146,8 +2273,11 @@ trace_function(struct trace_array *tr,
>>       entry->ip                       = ip;
>>       entry->parent_ip                = parent_ip;
>>
>> -     if (!call_filter_check_discard(call, entry, buffer, event))
>> +     if (!call_filter_check_discard(call, entry, buffer, event)) {
>> +             if (static_branch_unlikely(&ftrace_exports_enabled))
>> +                     ftrace_exports(event);
>>               __buffer_unlock_commit(buffer, event);
>> +     }
>>  }
>>
>>  #ifdef CONFIG_STACKTRACE
>
