lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [thread-next>] [day] [month] [year] [list]
Message-Id: <20170410181005.420764935@goodmis.org>
Date:   Mon, 10 Apr 2017 14:10:05 -0400
From:   Steven Rostedt <rostedt@...dmis.org>
To:     linux-kernel@...r.kernel.org
Cc:     Ingo Molnar <mingo@...nel.org>,
        Andrew Morton <akpm@...ux-foundation.org>,
        "Paul E. McKenney" <paulmck@...ux.vnet.ibm.com>
Subject: [PATCH 0/7 v3] tracing: Add use case of synchronize_rcu_tasks() and stack_tracer_disable()

Hopefully this is the final version before I push this to linux-next.

Paul, can I get an ack on the last patch from you?

-- Steve

Paul E. McKenney (1):
      rcu: Fix dyntick-idle tracing

Steven Rostedt (VMware) (6):
      ftrace: Add use of synchronize_rcu_tasks() with dynamic trampolines
      tracing: Replace the per_cpu() with __this_cpu*() in trace_stack.c
      tracing: Add stack_tracer_disable/enable() functions
      tracing: Rename trace_active to disable_stack_tracer and inline its modification
      rcu/tracing: Add rcu_disabled to denote when rcu_irq_enter() will not work
      tracing: Make sure rcu_irq_enter() can work for trace_*_rcuidle() trace events

----
 include/linux/ftrace.h     | 38 ++++++++++++++++++++++++++++
 include/linux/rcupdate.h   |  5 ++++
 include/linux/tracepoint.h | 19 ++++++++------
 kernel/rcu/tree.c          | 62 +++++++++++++++++++++++++++-------------------
 kernel/trace/Kconfig       |  3 ++-
 kernel/trace/ftrace.c      | 42 ++++++++++++++-----------------
 kernel/trace/trace_stack.c | 35 +++++++++++++-------------
 7 files changed, 128 insertions(+), 76 deletions(-)

Diff against v2 (not counting the last two patches at the end of this series)

diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 4bde7ff..06b2990 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -323,7 +323,7 @@ static inline void stack_tracer_enable(void)
 }
 #else
 static inline void stack_tracer_disable(void) { }
-static inline void stack_tracer_enabe(void) { }
+static inline void stack_tracer_enable(void) { }
 #endif
 
 struct ftrace_func_command {
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 84fafb6..f2f02ff 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -211,8 +211,8 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip,
 	preempt_disable_notrace();
 
 	/* no atomic needed, we only modify this variable by this cpu */
-	this_cpu_inc(disable_stack_tracer);
-	if (this_cpu_read(disable_stack_tracer) != 1)
+	__this_cpu_inc(disable_stack_tracer);
+	if (__this_cpu_read(disable_stack_tracer) != 1)
 		goto out;
 
 	ip += MCOUNT_INSN_SIZE;
@@ -220,7 +220,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip,
 	check_stack(ip, &stack);
 
  out:
-	this_cpu_dec(disable_stack_tracer);
+	__this_cpu_dec(disable_stack_tracer);
 	/* prevent recursion in schedule */
 	preempt_enable_notrace();
 }
@@ -264,13 +264,13 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
 	 * we will cause circular lock, so we also need to increase
 	 * the percpu disable_stack_tracer here.
 	 */
-	this_cpu_inc(disable_stack_tracer);
+	__this_cpu_inc(disable_stack_tracer);
 
 	arch_spin_lock(&stack_trace_max_lock);
 	*ptr = val;
 	arch_spin_unlock(&stack_trace_max_lock);
 
-	this_cpu_dec(disable_stack_tracer);
+	__this_cpu_dec(disable_stack_tracer);
 	local_irq_restore(flags);
 
 	return count;
@@ -306,7 +306,7 @@ static void *t_start(struct seq_file *m, loff_t *pos)
 {
 	local_irq_disable();
 
-	this_cpu_inc(disable_stack_tracer);
+	__this_cpu_inc(disable_stack_tracer);
 
 	arch_spin_lock(&stack_trace_max_lock);
 
@@ -320,7 +320,7 @@ static void t_stop(struct seq_file *m, void *p)
 {
 	arch_spin_unlock(&stack_trace_max_lock);
 
-	this_cpu_dec(disable_stack_tracer);
+	__this_cpu_dec(disable_stack_tracer);
 
 	local_irq_enable();
 }

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ