Message-Id: <0b9c2f8bfb19406740ffb76c3269a13a7db43314.1464986080.git.jpoimboe@redhat.com>
Date:	Fri,  3 Jun 2016 15:44:41 -0500
From:	Josh Poimboeuf <jpoimboe@...hat.com>
To:	Ingo Molnar <mingo@...hat.com>,
	Peter Zijlstra <peterz@...radead.org>
Cc:	linux-kernel@...r.kernel.org,
	Mel Gorman <mgorman@...hsingularity.net>,
	Matt Fleming <matt@...eblueprint.co.uk>,
	Srikar Dronamraju <srikar@...ux.vnet.ibm.com>
Subject: [PATCH 3/3] sched/debug: remove unnecessary CONFIG_SCHEDSTATS checks

The schedstat_enabled() macro already has an implicit check for
CONFIG_SCHEDSTATS, so these explicit checks can be removed.
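
For reference, schedstat_enabled() is defined in kernel/sched/stats.h
roughly as follows (a simplified sketch; exact details vary by kernel
version):

	#ifdef CONFIG_SCHEDSTATS
	extern struct static_key_false sched_schedstats;
	/* Runtime tunable: compiles down to a static branch. */
	#define schedstat_enabled()	static_branch_unlikely(&sched_schedstats)
	#else
	/*
	 * Constant 0: any "if (schedstat_enabled()) { ... }" body is
	 * dead code the compiler eliminates, so call sites don't need
	 * their own #ifdef.
	 */
	#define schedstat_enabled()	0
	#endif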

Signed-off-by: Josh Poimboeuf <jpoimboe@...hat.com>
---
 kernel/sched/debug.c | 7 +------
 kernel/sched/fair.c  | 4 ++--
 2 files changed, 3 insertions(+), 8 deletions(-)

diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index cbdf208..8f595f0 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -378,7 +378,6 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group
 	PN(se->exec_start);
 	PN(se->vruntime);
 	PN(se->sum_exec_runtime);
-#ifdef CONFIG_SCHEDSTATS
 	if (schedstat_enabled()) {
 		PN(se->statistics.wait_start);
 		PN(se->statistics.sleep_start);
@@ -391,7 +390,6 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group
 		PN(se->statistics.wait_sum);
 		P(se->statistics.wait_count);
 	}
-#endif
 	P(se->load.weight);
 #ifdef CONFIG_SMP
 	P(se->avg.load_avg);
@@ -632,7 +630,6 @@ do {									\
 #undef P64
 #endif
 
-#ifdef CONFIG_SCHEDSTATS
 #define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, rq->n);
 
 	if (schedstat_enabled()) {
@@ -644,7 +641,6 @@ do {									\
 	}
 
 #undef P
-#endif
 	spin_lock_irqsave(&sched_debug_lock, flags);
 	print_cfs_stats(m, cpu);
 	print_rt_stats(m, cpu);
@@ -887,7 +883,6 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 
 	P(se.nr_migrations);
 
-#ifdef CONFIG_SCHEDSTATS
 	if (schedstat_enabled()) {
 		u64 avg_atom, avg_per_cpu;
 
@@ -936,7 +931,7 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 		__PN(avg_atom);
 		__PN(avg_per_cpu);
 	}
-#endif
+
 	__P(nr_switches);
 	SEQ_printf(m, "%-45s:%21Ld\n",
 		   "nr_voluntary_switches", (long long)p->nvcsw);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index c6dd8ba..dce0bf4 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3447,7 +3447,7 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 
 	update_stats_curr_start(cfs_rq, se);
 	cfs_rq->curr = se;
-#ifdef CONFIG_SCHEDSTATS
+
 	/*
 	 * Track our maximum slice length, if the CPU's load is at
 	 * least twice that of our own weight (i.e. dont track it
@@ -3457,7 +3457,7 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 		se->statistics.slice_max = max(se->statistics.slice_max,
 			se->sum_exec_runtime - se->prev_sum_exec_runtime);
 	}
-#endif
+
 	se->prev_sum_exec_runtime = se->sum_exec_runtime;
 }
 
-- 
2.4.11
