lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite for Android: free password hash cracker in your pocket
[<prev] [next>] [thread-next>] [day] [month] [year] [list]
Message-Id: <1435187973-23931-1-git-send-email-xiyou.wangcong@gmail.com>
Date:	Wed, 24 Jun 2015 16:19:33 -0700
From:	Cong Wang <xiyou.wangcong@...il.com>
To:	linux-kernel@...r.kernel.org
Cc:	Cong Wang <xiyou.wangcong@...il.com>,
	Steven Rostedt <rostedt@...dmis.org>,
	Ingo Molnar <mingo@...hat.com>,
	Peter Zijlstra <peterz@...radead.org>,
	Cong Wang <cwang@...pensource.com>
Subject: [PATCH] sched: split sched_switch trace event into two

Currently we only have one sched_switch trace event
for task switching, which is generated very early during
task switch. When we try to monitor per-container events,
this is not what we expect.

For example, we have a process A which is in the cgroup
we monitor, and process B which isn't, when the kernel switches
from B to A, the sched_switch event is not recorded for this
cgroup since it belongs to B (current process is still B
until we finish the switch), but we require this event to
signal that process A in this cgroup gets scheduled. This is
crucial for calculating schedule latency.

So split the sched_switch event into two: sched_out event
before we perform the switch, and sched_in event after we
perform the switch.

For compatibility, the sched_switch event is not touched.

Cc: Steven Rostedt <rostedt@...dmis.org>
Cc: Ingo Molnar <mingo@...hat.com>
Cc: Peter Zijlstra <peterz@...radead.org>
Signed-off-by: Cong Wang <xiyou.wangcong@...il.com>
Signed-off-by: Cong Wang <cwang@...pensource.com>
---
 include/trace/events/sched.h | 51 +++++++++++++++++++++++++++++++++++++++++++-
 kernel/sched/core.c          |  2 ++
 2 files changed, 52 insertions(+), 1 deletion(-)

diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index d57a575..c31f1e0 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -112,8 +112,57 @@ static inline long __trace_sched_switch_state(struct task_struct *p)
 #endif /* CREATE_TRACE_POINTS */
 
 /*
- * Tracepoint for task switches, performed by the scheduler:
+ * Tracepoints for task switches, performed by the scheduler:
  */
+TRACE_EVENT(sched_out,
+
+	TP_PROTO(struct task_struct *curr),
+
+	TP_ARGS(curr),
+
+	TP_STRUCT__entry(
+		__array(	char,	comm,	TASK_COMM_LEN	)
+		__field(	int,	prio			)
+		__field(	long,	state			)
+	),
+
+	TP_fast_assign(
+		__entry->prio	= curr->prio;
+		__entry->state	= __trace_sched_switch_state(curr);
+		memcpy(__entry->comm, curr->comm, TASK_COMM_LEN);
+	),
+
+	TP_printk("comm=%s prio=%d state=%s%s",
+		__entry->comm, __entry->prio,
+		__entry->state & (TASK_STATE_MAX-1) ?
+		  __print_flags(__entry->state & (TASK_STATE_MAX-1), "|",
+				{ 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" },
+				{ 16, "Z" }, { 32, "X" }, { 64, "x" },
+				{ 128, "K" }, { 256, "W" }, { 512, "P" },
+				{ 1024, "N" }) : "R",
+		__entry->state & TASK_STATE_MAX ? "+" : "")
+);
+
+TRACE_EVENT(sched_in,
+
+	TP_PROTO(struct task_struct *next),
+
+	TP_ARGS(next),
+
+	TP_STRUCT__entry(
+		__array(	char,	comm,	TASK_COMM_LEN	)
+		__field(	int,	prio			)
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->comm, next->comm, TASK_COMM_LEN);
+		__entry->prio	= next->prio;
+	),
+
+	TP_printk("comm=%s prio=%d",
+		__entry->comm, __entry->prio)
+);
+
 TRACE_EVENT(sched_switch,
 
 	TP_PROTO(struct task_struct *prev,
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index c86935a..681fc50 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2219,6 +2219,7 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev,
 		    struct task_struct *next)
 {
 	trace_sched_switch(prev, next);
+	trace_sched_out(prev);
 	sched_info_switch(rq, prev, next);
 	perf_event_task_sched_out(prev, next);
 	fire_sched_out_preempt_notifiers(prev, next);
@@ -2288,6 +2289,7 @@ static struct rq *finish_task_switch(struct task_struct *prev)
 	}
 
 	tick_nohz_task_switch(current);
+	trace_sched_in(current);
 	return rq;
 }
 
-- 
1.8.3.1

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ