lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [thread-next>] [day] [month] [year] [list]
Message-Id: <20220414063829.2472251-1-qiang1.zhang@intel.com>
Date:   Thu, 14 Apr 2022 14:38:29 +0800
From:   Zqiang <qiang1.zhang@...el.com>
To:     akpm@...ux-foundation.org, peterz@...radead.org
Cc:     linux-kernel@...r.kernel.org
Subject: [PATCH] irq_work: Add event-tracing points for irq_work

Add an irq_work_queue_on tracepoint to allow tracing when and where an
irq_work is queued, and irq_work_execute_start/end tracepoints to allow
observing when an irq_work is executed and measuring its execution time.

Signed-off-by: Zqiang <qiang1.zhang@...el.com>
---
 include/trace/events/irq_work.h | 79 +++++++++++++++++++++++++++++++++
 kernel/irq_work.c               |  9 +++-
 2 files changed, 87 insertions(+), 1 deletion(-)
 create mode 100644 include/trace/events/irq_work.h

diff --git a/include/trace/events/irq_work.h b/include/trace/events/irq_work.h
new file mode 100644
index 000000000000..823cb48666c4
--- /dev/null
+++ b/include/trace/events/irq_work.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM irq_work
+
+#if !defined(_TRACE_IRQ_WORK_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_IRQ_WORK_H
+
+#include <linux/tracepoint.h>
+#include <linux/irq_work.h>
+
+/*
+ * irq_work_queue_on - emitted when an irq_work is queued.
+ *
+ * Records the work item pointer, its callback, the CPU the work was
+ * requested to run on, and the work flags at queueing time.
+ *
+ * NOTE(review): despite the _on suffix, this patch also emits the event
+ * from the local queueing path (__irq_work_queue_local), where req_cpu
+ * is smp_processor_id() -- confirm that is intended.
+ */
+TRACE_EVENT(irq_work_queue_on,
+
+	TP_PROTO(unsigned int req_cpu, struct irq_work *work),
+
+	TP_ARGS(req_cpu, work),
+
+	TP_STRUCT__entry(
+		__field( void *,	work	)
+		__field( void *,	function)
+		__field( unsigned int,	req_cpu	)
+		__field( int,		flags	)
+	),
+
+	TP_fast_assign(
+		__entry->work		= work;
+		__entry->function	= work->func;
+		__entry->req_cpu	= req_cpu;
+		__entry->flags		= atomic_read(&work->node.a_flags);
+	),
+
+	TP_printk("irq_work=%p func=%ps req_cpu=%u flags=%x",
+			__entry->work, __entry->function, __entry->req_cpu, __entry->flags)
+);
+
+/*
+ * irq_work_execute_start - emitted immediately before an irq_work's
+ * callback is invoked.  Pairs with irq_work_execute_end so the
+ * callback's execution time can be measured.
+ *
+ * Records the work item pointer, its callback, and the work flags at
+ * execution time.
+ */
+TRACE_EVENT(irq_work_execute_start,
+
+	TP_PROTO(struct irq_work *work),
+
+	TP_ARGS(work),
+
+	TP_STRUCT__entry(
+		__field( void *,	work	)
+		__field( void *,	function)
+		__field( int,		flags	)
+	),
+
+	TP_fast_assign(
+		__entry->work		= work;
+		__entry->function	= work->func;
+		__entry->flags		= atomic_read(&work->node.a_flags);
+	),
+
+	/* Keep the output format consistent with irq_work_queue_on
+	 * (no ':' after the pointer, single spaces between fields). */
+	TP_printk("irq_work=%p func=%ps flags=%x",
+			__entry->work, __entry->function, __entry->flags)
+);
+
+/*
+ * irq_work_execute_end - emitted immediately after an irq_work's
+ * callback returns.  Pairs with irq_work_execute_start so the
+ * callback's execution time can be measured.
+ *
+ * Records the work item pointer, its callback, and the work flags
+ * after the callback has run.
+ */
+TRACE_EVENT(irq_work_execute_end,
+
+	TP_PROTO(struct irq_work *work),
+
+	TP_ARGS(work),
+
+	TP_STRUCT__entry(
+		__field( void *,	work	)
+		__field( void *,	function)
+		__field( int,		flags	)
+	),
+
+	TP_fast_assign(
+		__entry->work		= work;
+		__entry->function	= work->func;
+		__entry->flags		= atomic_read(&work->node.a_flags);
+	),
+
+	/* Keep the output format consistent with irq_work_queue_on
+	 * (no ':' after the pointer). */
+	TP_printk("irq_work=%p func=%ps flags=%x",
+			__entry->work, __entry->function, __entry->flags)
+);
+#endif /* _TRACE_IRQ_WORK_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index 7afa40fe5cc4..edad992556d0 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -22,6 +22,9 @@
 #include <asm/processor.h>
 #include <linux/kasan.h>
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/irq_work.h>
+
 static DEFINE_PER_CPU(struct llist_head, raised_list);
 static DEFINE_PER_CPU(struct llist_head, lazy_list);
 static DEFINE_PER_CPU(struct task_struct *, irq_workd);
@@ -81,7 +84,9 @@ static void __irq_work_queue_local(struct irq_work *work)
 	bool rt_lazy_work = false;
 	bool lazy_work = false;
 	int work_flags;
+	int cpu = smp_processor_id();
 
+	trace_irq_work_queue_on(cpu, work);
 	work_flags = atomic_read(&work->node.a_flags);
 	if (work_flags & IRQ_WORK_LAZY)
 		lazy_work = true;
@@ -143,7 +148,7 @@ bool irq_work_queue_on(struct irq_work *work, int cpu)
 	if (cpu != smp_processor_id()) {
 		/* Arch remote IPI send/receive backend aren't NMI safe */
 		WARN_ON_ONCE(in_nmi());
-
+		trace_irq_work_queue_on(cpu, work);
 		/*
 		 * On PREEMPT_RT the items which are not marked as
 		 * IRQ_WORK_HARD_IRQ are added to the lazy list and a HARD work
@@ -208,7 +213,9 @@ void irq_work_single(void *arg)
 	smp_mb();
 
 	lockdep_irq_work_enter(flags);
+	trace_irq_work_execute_start(work);
 	work->func(work);
+	trace_irq_work_execute_end(work);
 	lockdep_irq_work_exit(flags);
 
 	/*
-- 
2.25.1

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ