Message-ID: <YYuJ3yE0gottvExw@hirez.programming.kicks-ass.net>
Date: Wed, 10 Nov 2021 09:59:11 +0100
From: Peter Zijlstra <peterz@...radead.org>
To: Josh Don <joshdon@...gle.com>
Cc: Ingo Molnar <mingo@...hat.com>, Juri Lelli <juri.lelli@...hat.com>,
Vincent Guittot <vincent.guittot@...aro.org>,
Dietmar Eggemann <dietmar.eggemann@....com>,
Steven Rostedt <rostedt@...dmis.org>,
Ben Segall <bsegall@...gle.com>, Mel Gorman <mgorman@...e.de>,
Daniel Bristot de Oliveira <bristot@...hat.com>,
Joel Fernandes <joel@...lfernandes.org>,
Vineeth Pillai <vineethrp@...il.com>,
Hao Luo <haoluo@...gle.com>, Tao Zhou <tao.zhou@...ux.dev>,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH v2] sched/core: forced idle accounting

On Wed, Nov 10, 2021 at 09:38:53AM +0100, Peter Zijlstra wrote:
> On Tue, Nov 09, 2021 at 02:59:00PM -0800, Josh Don wrote:
> > On Tue, Nov 9, 2021 at 3:15 AM Peter Zijlstra <peterz@...radead.org> wrote:
> > >
> > > On Mon, Oct 18, 2021 at 01:34:28PM -0700, Josh Don wrote:
> > > > @@ -5804,6 +5830,12 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
> > > >  		}
> > > >  	}
> > > >
> > > > +	if (rq->core->core_forceidle_count) {
> > >
> > > Does this want to be something like:
> > >
> > > if (schedstat_enabled() && .. ) ?
> > >
> > > afaict without schedstat on this is dead code.
> > >
> >
> > Makes sense to me, and similarly we can bail out of sched_core_tick()
> > early in the disabled case.
>
> Ok, I already changed the patch to include the above, lemme also edit
> sched_core_tick().

Something like so then?

--- a/kernel/sched/core_sched.c
+++ b/kernel/sched/core_sched.c
@@ -235,7 +235,7 @@ int sched_core_share_pid(unsigned int cm
 #ifdef CONFIG_SCHEDSTATS
 
 /* REQUIRES: rq->core's clock recently updated. */
-void sched_core_account_forceidle(struct rq *rq)
+void __sched_core_account_forceidle(struct rq *rq)
 {
 	const struct cpumask *smt_mask = cpu_smt_mask(cpu_of(rq));
 	u64 delta, now = rq_clock(rq->core);
@@ -256,9 +256,6 @@ void sched_core_account_forceidle(struct
 
 	rq->core->core_forceidle_start = now;
 
-	if (!schedstat_enabled())
-		return;
-
 	if (WARN_ON_ONCE(!rq->core->core_forceidle_occupation)) {
 		/* can't be forced idle without a running task */
 	} else if (rq->core->core_forceidle_count > 1 ||
@@ -283,17 +280,15 @@ void sched_core_account_forceidle(struct
 	}
 }
 
-void sched_core_tick(struct rq *rq)
+void __sched_core_tick(struct rq *rq)
 {
-	if (!sched_core_enabled(rq))
-		return;
-
 	if (!rq->core->core_forceidle_count)
 		return;
 
 	if (rq != rq->core)
 		update_rq_clock(rq->core);
-	sched_core_account_forceidle(rq);
+
+	__sched_core_account_forceidle(rq);
 }
 
 #endif /* CONFIG_SCHEDSTATS */
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1304,20 +1304,6 @@ static inline bool sched_group_cookie_ma
 }
 #endif /* CONFIG_SCHED_CORE */
 
-#if defined(CONFIG_SCHED_CORE) && defined(CONFIG_SCHEDSTATS)
-
-extern void sched_core_account_forceidle(struct rq *rq);
-
-extern void sched_core_tick(struct rq *rq);
-
-#else
-
-static inline void sched_core_account_forceidle(struct rq *rq) {}
-
-static inline void sched_core_tick(struct rq *rq) {}
-
-#endif /* CONFIG_SCHED_CORE && CONFIG_SCHEDSTATS */
-
 static inline void lockdep_assert_rq_held(struct rq *rq)
 {
 	lockdep_assert_held(__rq_lockp(rq));
@@ -1870,6 +1856,32 @@ static inline void flush_smp_call_functi
 #include "stats.h"
 #include "autogroup.h"
 
+#if defined(CONFIG_SCHED_CORE) && defined(CONFIG_SCHEDSTATS)
+
+extern void __sched_core_account_forceidle(struct rq *rq);
+
+static inline void sched_core_account_forceidle(struct rq *rq)
+{
+	if (schedstat_enabled())
+		__sched_core_account_forceidle(rq);
+}
+
+extern void __sched_core_tick(struct rq *rq);
+
+static inline void sched_core_tick(struct rq *rq)
+{
+	if (sched_core_enabled(rq) && schedstat_enabled())
+		__sched_core_tick(rq);
+}
+
+#else
+
+static inline void sched_core_account_forceidle(struct rq *rq) {}
+
+static inline void sched_core_tick(struct rq *rq) {}
+
+#endif /* CONFIG_SCHED_CORE && CONFIG_SCHEDSTATS */
+
 #ifdef CONFIG_CGROUP_SCHED
 /*
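
The shape of the change above is the usual pattern for gating a slow
path behind a static key: keep the body out of line, and wrap the call
in an inline predicate check so a disabled feature never even pays for
the function call. As a rough illustration only, a minimal user-space
sketch of the same structure, with plain booleans standing in for the
sched_core_enabled()/schedstat_enabled() static branches and
hypothetical demo_* names:

#include <stdbool.h>
#include <stdio.h>

static bool core_sched_on;	/* stand-in for sched_core_enabled(rq) */
static bool schedstat_on;	/* stand-in for schedstat_enabled() */

/* Out-of-line body: only reached when the feature is live. */
static void __demo_core_tick(void)
{
	printf("account forced idle\n");
}

/*
 * Inline guard: in the kernel the predicates are static branches, so
 * a disabled feature costs a patched-out jump at the call site rather
 * than a call into a function that immediately returns.
 */
static inline void demo_core_tick(void)
{
	if (core_sched_on && schedstat_on)
		__demo_core_tick();
}

int main(void)
{
	demo_core_tick();	/* no-op: both predicates false */

	core_sched_on = schedstat_on = true;
	demo_core_tick();	/* now calls the out-of-line body */

	return 0;
}

Splitting sched_core_account_forceidle() this way also hoists the
schedstat_enabled() test out of the out-of-line body, which is what
addresses the "dead code without schedstat" observation above.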