Date:   Wed,  6 Mar 2019 16:43:46 +0800
From:   Yafang Shao <laoar.shao@...il.com>
To:     mingo@...hat.com, peterz@...radead.org
Cc:     linux-kernel@...r.kernel.org, shaoyafang@...iglobal.com,
        Yafang Shao <laoar.shao@...il.com>
Subject: [PATCH] sched: fair: fix missing CONFIG_SCHEDSTATS guards

When using trace_sched_stat_{iowait, blocked, wait, sleep} to
measure how long processes are stalled, there is no output from
trace_pipe even though some tasks really are in uninterruptible
sleep state. That was confusing, so I investigated why, and found
that the reason is that CONFIG_SCHEDSTATS is not set.

To avoid this kind of confusion, these tracepoints should not be
exposed when CONFIG_SCHEDSTATS is not set.
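
For reference, this is roughly how the tracepoints were being
exercised when the missing output was noticed. A minimal sketch,
assuming tracefs is mounted at /sys/kernel/debug/tracing (newer
kernels may use /sys/kernel/tracing) and a distro that keeps the
kernel config under /boot:

  # Check whether the running kernel has schedstats built in.
  grep CONFIG_SCHEDSTATS /boot/config-$(uname -r)

  # Enable the per-task stall tracepoints.
  cd /sys/kernel/debug/tracing
  echo 1 > events/sched/sched_stat_iowait/enable
  echo 1 > events/sched/sched_stat_blocked/enable
  echo 1 > events/sched/sched_stat_wait/enable
  echo 1 > events/sched/sched_stat_sleep/enable

  # These tracepoints sit behind schedstat_enabled(), so runtime
  # schedstats may also need to be switched on (it is off by
  # default on many kernels).
  echo 1 > /proc/sys/kernel/sched_schedstats

  # Read the stall events as they arrive.
  cat trace_pipe

With the change below, a kernel built without CONFIG_SCHEDSTATS does
not create the events/sched/sched_stat_* files at all, so the enable
step above fails loudly instead of silently producing no output.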

Signed-off-by: Yafang Shao <laoar.shao@...il.com>
---
 include/trace/events/sched.h |  3 ++-
 kernel/sched/fair.c          | 13 ++++++++++++-
 2 files changed, 14 insertions(+), 2 deletions(-)

diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 9a4bdfa..a261da8 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -336,6 +336,7 @@ static inline long __trace_sched_switch_state(bool preempt, struct task_struct *
 		  __entry->pid, __entry->old_pid)
 );
 
+#ifdef CONFIG_SCHEDSTATS
 /*
  * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE
  *     adding sched_stat support to SCHED_FIFO/RR would be welcome.
@@ -363,7 +364,6 @@ static inline long __trace_sched_switch_state(bool preempt, struct task_struct *
 			(unsigned long long)__entry->delay)
 );
 
-
 /*
  * Tracepoint for accounting wait time (time the task is runnable
  * but not actually running due to scheduler contention).
@@ -394,6 +394,7 @@ static inline long __trace_sched_switch_state(bool preempt, struct task_struct *
 DEFINE_EVENT(sched_stat_template, sched_stat_blocked,
 	     TP_PROTO(struct task_struct *tsk, u64 delay),
 	     TP_ARGS(tsk, delay));
+#endif
 
 /*
  * Tracepoint for accounting runtime (time the task is executing
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 8213ff6..a8006c9 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -855,6 +855,7 @@ static void update_curr_fair(struct rq *rq)
 static inline void
 update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
+#ifdef CONFIG_SCHEDSTATS
 	u64 wait_start, prev_wait_start;
 
 	if (!schedstat_enabled())
@@ -868,11 +869,13 @@ static void update_curr_fair(struct rq *rq)
 		wait_start -= prev_wait_start;
 
 	__schedstat_set(se->statistics.wait_start, wait_start);
+#endif
 }
 
 static inline void
 update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
+#ifdef CONFIG_SCHEDSTATS
 	struct task_struct *p;
 	u64 delta;
 
@@ -900,11 +903,13 @@ static void update_curr_fair(struct rq *rq)
 	__schedstat_inc(se->statistics.wait_count);
 	__schedstat_add(se->statistics.wait_sum, delta);
 	__schedstat_set(se->statistics.wait_start, 0);
+#endif
 }
 
 static inline void
 update_stats_enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
+#ifdef CONFIG_SCHEDSTATS
 	struct task_struct *tsk = NULL;
 	u64 sleep_start, block_start;
 
@@ -968,6 +973,7 @@ static void update_curr_fair(struct rq *rq)
 			account_scheduler_latency(tsk, delta >> 10, 0);
 		}
 	}
+#endif
 }
 
 /*
@@ -976,6 +982,7 @@ static void update_curr_fair(struct rq *rq)
 static inline void
 update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 {
+#ifdef CONFIG_SCHEDSTATS
 	if (!schedstat_enabled())
 		return;
 
@@ -988,12 +995,13 @@ static void update_curr_fair(struct rq *rq)
 
 	if (flags & ENQUEUE_WAKEUP)
 		update_stats_enqueue_sleeper(cfs_rq, se);
+#endif
 }
 
 static inline void
 update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 {
-
+#ifdef CONFIG_SCHEDSTATS
 	if (!schedstat_enabled())
 		return;
 
@@ -1014,6 +1022,7 @@ static void update_curr_fair(struct rq *rq)
 			__schedstat_set(se->statistics.block_start,
 				      rq_clock(rq_of(cfs_rq)));
 	}
+#endif
 }
 
 /*
@@ -4090,6 +4099,7 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	update_stats_curr_start(cfs_rq, se);
 	cfs_rq->curr = se;
 
+#ifdef CONFIG_SCHEDSTATS
 	/*
 	 * Track our maximum slice length, if the CPU's load is at
 	 * least twice that of our own weight (i.e. dont track it
@@ -4100,6 +4110,7 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
 			max((u64)schedstat_val(se->statistics.slice_max),
 			    se->sum_exec_runtime - se->prev_sum_exec_runtime));
 	}
+#endif
 
 	se->prev_sum_exec_runtime = se->sum_exec_runtime;
 }
-- 
1.8.3.1
