[<prev] [next>] [day] [month] [year] [list]
Message-ID: <6754382c-30dd-4ec2-8657-1710d57c750c@codeaurora.org>
Date: Mon, 16 Dec 2019 11:04:04 +0530
From: Prateek Sood <prsood@...eaurora.org>
To: rostedt@...dmis.org, mingo@...hat.com
Cc: linux-kernel@...r.kernel.org, kaushalk@...eaurora.org
Subject: Re: [PATCH v3] tracing: Fix lock inversion in
trace_event_enable_tgid_record()
Hi Steve,
Please help in reviewing the patch below.
Thanks,
Prateek
On 12/10/2019 2:45 PM, Prateek Sood wrote:
> Hi Steve,
>
> Are you suggesting something like below?
>
>> 8>8>8>8>8>8>8>8>8>8>8>8>8>8>8>8>8>8>8>8>8>8>8>8>8>8>8>8>8>8>8>8>8>8>8>8
>
> Task T2                                Task T3
> trace_options_core_write()             subsystem_open()
>
>   mutex_lock(trace_types_lock)           mutex_lock(event_mutex)
>
>   set_tracer_flag()
>
>     trace_event_enable_tgid_record()     mutex_lock(trace_types_lock)
>
>       mutex_lock(event_mutex)
>
> This gives a circular dependency deadlock between trace_types_lock and
> event_mutex. To fix this invert the usage of trace_types_lock and
> event_mutex in trace_options_core_write(). This keeps the sequence of
> lock usage consistent.
>
> Signed-off-by: Prateek Sood <prsood@...eaurora.org>
> ---
> kernel/trace/trace.c | 8 ++++++++
> kernel/trace/trace_events.c | 8 ++++----
> 2 files changed, 12 insertions(+), 4 deletions(-)
>
> diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
> index 6a0ee91..4dc93e3 100644
> --- a/kernel/trace/trace.c
> +++ b/kernel/trace/trace.c
> @@ -4590,6 +4590,10 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
>
> int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
> {
> + if ((mask == TRACE_ITER_RECORD_TGID) ||
> + (mask == TRACE_ITER_RECORD_CMD))
> + lockdep_assert_held(&event_mutex);
> +
> /* do nothing if flag is already set */
> if (!!(tr->trace_flags & mask) == !!enabled)
> return 0;
> @@ -4657,6 +4661,7 @@ static int trace_set_options(struct trace_array *tr, char *option)
>
> cmp += len;
>
> + mutex_lock(&event_mutex);
> mutex_lock(&trace_types_lock);
>
> ret = match_string(trace_options, -1, cmp);
> @@ -4667,6 +4672,7 @@ static int trace_set_options(struct trace_array *tr, char *option)
> ret = set_tracer_flag(tr, 1 << ret, !neg);
>
> mutex_unlock(&trace_types_lock);
> + mutex_unlock(&event_mutex);
>
> /*
> * If the first trailing whitespace is replaced with '\0' by strstrip,
> @@ -7972,9 +7978,11 @@ static void get_tr_index(void *data, struct trace_array **ptr,
> if (val != 0 && val != 1)
> return -EINVAL;
>
> + mutex_lock(&event_mutex);
> mutex_lock(&trace_types_lock);
> ret = set_tracer_flag(tr, 1 << index, val);
> mutex_unlock(&trace_types_lock);
> + mutex_unlock(&event_mutex);
>
> if (ret < 0)
> return ret;
> diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
> index fba87d1..995061b 100644
> --- a/kernel/trace/trace_events.c
> +++ b/kernel/trace/trace_events.c
> @@ -320,7 +320,8 @@ void trace_event_enable_cmd_record(bool enable)
> struct trace_event_file *file;
> struct trace_array *tr;
>
> - mutex_lock(&event_mutex);
> + lockdep_assert_held(&event_mutex);
> +
> do_for_each_event_file(tr, file) {
>
> if (!(file->flags & EVENT_FILE_FL_ENABLED))
> @@ -334,7 +335,6 @@ void trace_event_enable_cmd_record(bool enable)
> clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
> }
> } while_for_each_event_file();
> - mutex_unlock(&event_mutex);
> }
>
> void trace_event_enable_tgid_record(bool enable)
> @@ -342,7 +342,8 @@ void trace_event_enable_tgid_record(bool enable)
> struct trace_event_file *file;
> struct trace_array *tr;
>
> - mutex_lock(&event_mutex);
> + lockdep_assert_held(&event_mutex);
> +
> do_for_each_event_file(tr, file) {
> if (!(file->flags & EVENT_FILE_FL_ENABLED))
> continue;
> @@ -356,7 +357,6 @@ void trace_event_enable_tgid_record(bool enable)
> &file->flags);
> }
> } while_for_each_event_file();
> - mutex_unlock(&event_mutex);
> }
>
> static int __ftrace_event_enable_disable(struct trace_event_file *file,
--
Qualcomm India Private Limited, on behalf of Qualcomm Innovation
Center, Inc., is a member of Code Aurora Forum, a Linux Foundation
Collaborative Project
Powered by blists - more mailing lists