Message-ID: <tip-11d4afd4ff667f9b6178ee8c142c36cb78bd84db@git.kernel.org>
Date: Tue, 2 Oct 2018 03:06:20 -0700
From: tip-bot for Vincent Guittot <tipbot@...or.com>
To: linux-tip-commits@...r.kernel.org
Cc: mingo@...nel.org, miguel.ojeda.sandonis@...il.com,
peterz@...radead.org, hpa@...or.com, torvalds@...ux-foundation.org,
tglx@...utronix.de, mingo@...hat.com, vincent.guittot@...aro.org,
douly.fnst@...fujitsu.com, linux-kernel@...r.kernel.org
Subject: [tip:sched/core] sched/pelt: Fix warning and clean up IRQ PELT
config
Commit-ID: 11d4afd4ff667f9b6178ee8c142c36cb78bd84db
Gitweb: https://git.kernel.org/tip/11d4afd4ff667f9b6178ee8c142c36cb78bd84db
Author: Vincent Guittot <vincent.guittot@...aro.org>
AuthorDate: Tue, 25 Sep 2018 11:17:42 +0200
Committer: Ingo Molnar <mingo@...nel.org>
CommitDate: Tue, 2 Oct 2018 09:45:00 +0200
sched/pelt: Fix warning and clean up IRQ PELT config
Create a config for enabling IRQ load tracking in the scheduler.
IRQ load tracking is useful only when IRQ or paravirtual time is
accounted, but for now that is only possible with SMP.
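As a rough stand-alone illustration (not part of the patch; the SKETCH_*
names are invented for this sketch), the new Kconfig symbol simply encodes
the compound condition that call sites previously spelled out by hand, plus
the SMP dependency they never checked:

  /*
   * Illustration only: CONFIG_HAVE_SCHED_AVG_IRQ is effectively
   *   (IRQ_TIME_ACCOUNTING || PARAVIRT_TIME_ACCOUNTING) && SMP,
   * so a call site can test a single symbol instead of repeating
   * the compound #if at every use.
   */
  #if (defined(SKETCH_IRQ_TIME_ACCOUNTING) || \
       defined(SKETCH_PARAVIRT_TIME_ACCOUNTING)) && defined(SKETCH_SMP)
  # define SKETCH_HAVE_SCHED_AVG_IRQ 1
  #endif

  #ifdef SKETCH_HAVE_SCHED_AVG_IRQ
  /* the irq PELT signal would be maintained and consumed here */
  #endif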
Also use __maybe_unused to remove the compilation warning in
update_rq_clock_task() that was introduced by:

  2e62c4743adc ("sched/fair: Remove #ifdefs from scale_rt_capacity()")
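For reference, a minimal stand-alone sketch of the warning and the fix (not
the kernel function; the SKETCH_* names and values are invented): when both
accounting options are compiled out, the variables are assigned but never
read, which triggers GCC's -Wunused-but-set-variable; marking them
__maybe_unused keeps a single unconditional declaration without the warning.

  /* mirrors the kernel's definition of __maybe_unused */
  #define __maybe_unused __attribute__((__unused__))

  static long long sketch_clock_task(long long delta)
  {
          long long __maybe_unused steal = 0, irq_delta = 0;

  #ifdef SKETCH_IRQ_TIME_ACCOUNTING
          irq_delta = 5;          /* stand-in for the irq time delta */
          delta -= irq_delta;
  #endif
  #ifdef SKETCH_PARAVIRT_TIME_ACCOUNTING
          steal = 3;              /* stand-in for the paravirt steal time */
          delta -= steal;
  #endif
          return delta;           /* remaining "task" time */
  }

  int main(void)
  {
          /* with both options compiled out, nothing is subtracted */
          return sketch_clock_task(100) == 100 ? 0 : 1;
  }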
Suggested-by: Ingo Molnar <mingo@...hat.com>
Reported-by: Dou Liyang <douly.fnst@...fujitsu.com>
Reported-by: Miguel Ojeda <miguel.ojeda.sandonis@...il.com>
Signed-off-by: Vincent Guittot <vincent.guittot@...aro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
Cc: Linus Torvalds <torvalds@...ux-foundation.org>
Cc: Peter Zijlstra <peterz@...radead.org>
Cc: Thomas Gleixner <tglx@...utronix.de>
Cc: bp@...en8.de
Cc: dou_liyang@....com
Fixes: 2e62c4743adc ("sched/fair: Remove #ifdefs from scale_rt_capacity()")
Link: http://lkml.kernel.org/r/1537867062-27285-1-git-send-email-vincent.guittot@linaro.org
Signed-off-by: Ingo Molnar <mingo@...nel.org>
---
init/Kconfig | 5 +++++
kernel/sched/core.c | 7 +++----
kernel/sched/fair.c | 2 +-
kernel/sched/pelt.c | 2 +-
kernel/sched/pelt.h | 2 +-
kernel/sched/sched.h | 5 ++---
6 files changed, 13 insertions(+), 10 deletions(-)
diff --git a/init/Kconfig b/init/Kconfig
index 1e234e2f1cba..317d5ccb5191 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -415,6 +415,11 @@ config IRQ_TIME_ACCOUNTING
If in doubt, say N here.
+config HAVE_SCHED_AVG_IRQ
+ def_bool y
+ depends on IRQ_TIME_ACCOUNTING || PARAVIRT_TIME_ACCOUNTING
+ depends on SMP
+
config BSD_PROCESS_ACCT
bool "BSD Process Accounting"
depends on MULTIUSER
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index ad97f3ba5ec5..f2caf1bae4a3 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -135,9 +135,8 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
* In theory, the compile should just see 0 here, and optimize out the call
* to sched_rt_avg_update. But I don't trust it...
*/
-#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
- s64 steal = 0, irq_delta = 0;
-#endif
+ s64 __maybe_unused steal = 0, irq_delta = 0;
+
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
@@ -177,7 +176,7 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
rq->clock_task += delta;
-#ifdef HAVE_SCHED_AVG_IRQ
+#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
update_irq_load_avg(rq, irq_delta + steal);
#endif
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 1d92ed2eca8b..d59307ecd67d 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7317,7 +7317,7 @@ static inline bool others_have_blocked(struct rq *rq)
if (READ_ONCE(rq->avg_dl.util_avg))
return true;
-#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
+#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
if (READ_ONCE(rq->avg_irq.util_avg))
return true;
#endif
diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
index 35475c0c5419..48a126486435 100644
--- a/kernel/sched/pelt.c
+++ b/kernel/sched/pelt.c
@@ -358,7 +358,7 @@ int update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
return 0;
}
-#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
+#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
/*
* irq:
*
diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h
index d2894db28955..7e56b489ff32 100644
--- a/kernel/sched/pelt.h
+++ b/kernel/sched/pelt.h
@@ -6,7 +6,7 @@ int __update_load_avg_cfs_rq(u64 now, int cpu, struct cfs_rq *cfs_rq);
int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
-#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
+#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
int update_irq_load_avg(struct rq *rq, u64 running);
#else
static inline int
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 632804fa0b12..798b1afd5092 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -862,8 +862,7 @@ struct rq {
struct sched_avg avg_rt;
struct sched_avg avg_dl;
-#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
-#define HAVE_SCHED_AVG_IRQ
+#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
struct sched_avg avg_irq;
#endif
u64 idle_stamp;
@@ -2223,7 +2222,7 @@ static inline unsigned long cpu_util_rt(struct rq *rq)
}
#endif
-#ifdef HAVE_SCHED_AVG_IRQ
+#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
static inline unsigned long cpu_util_irq(struct rq *rq)
{
return rq->avg_irq.util_avg;