Message-ID: <tip-f2bedc4705659216bd60948029ad8dfedf923ad9@git.kernel.org>
Date: Mon, 3 Jun 2019 06:01:16 -0700
From: tip-bot for Dietmar Eggemann <tipbot@...or.com>
To: linux-tip-commits@...r.kernel.org
Cc: mingo@...nel.org, torvalds@...ux-foundation.org,
dietmar.eggemann@....com, peterz@...radead.org, tglx@...utronix.de,
hpa@...or.com, linux-kernel@...r.kernel.org
Subject: [tip:sched/core] sched/fair: Remove rq->load

Commit-ID: f2bedc4705659216bd60948029ad8dfedf923ad9
Gitweb: https://git.kernel.org/tip/f2bedc4705659216bd60948029ad8dfedf923ad9
Author: Dietmar Eggemann <dietmar.eggemann@....com>
AuthorDate: Wed, 24 Apr 2019 09:45:56 +0100
Committer: Ingo Molnar <mingo@...nel.org>
CommitDate: Mon, 3 Jun 2019 11:49:37 +0200

sched/fair: Remove rq->load

The CFS class is the only scheduling class that maintains and uses the
CPU-wide load (rq->load(.weight)). The last remaining use of the
CPU-wide load, in CFS's set_next_entity(), can be replaced by the load
of the CFS class (rq->cfs.load(.weight)) instead.

Signed-off-by: Dietmar Eggemann <dietmar.eggemann@....com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
Cc: Linus Torvalds <torvalds@...ux-foundation.org>
Cc: Peter Zijlstra <peterz@...radead.org>
Cc: Thomas Gleixner <tglx@...utronix.de>
Link: https://lkml.kernel.org/r/20190424084556.604-1-dietmar.eggemann@arm.com
Signed-off-by: Ingo Molnar <mingo@...nel.org>
---
 kernel/sched/debug.c | 2 --
 kernel/sched/fair.c  | 7 ++-----
 kernel/sched/sched.h | 2 --
 3 files changed, 2 insertions(+), 9 deletions(-)
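
Why this is safe: rq->load was only ever updated for entities without a
parent, i.e. entities queued directly on the root cfs_rq (rq->cfs), and
rq->cfs.load sums the weight of exactly those entities. Below is a
minimal user-space sketch of that accounting, not kernel code: the
struct layouts are simplified stand-ins and the top_level flag stands
in for the kernel's !parent_entity(se) test.

	#include <assert.h>
	#include <stdio.h>

	struct load_weight {
		unsigned long weight;
	};

	struct cfs_rq {
		struct load_weight load;	/* sum of queued entities' weight */
	};

	struct rq {
		struct load_weight load;	/* CPU-wide load, removed by this patch */
		struct cfs_rq cfs;		/* root CFS runqueue */
	};

	static void update_load_add(struct load_weight *lw, unsigned long inc)
	{
		lw->weight += inc;
	}

	/*
	 * Old behaviour: a top-level entity's weight went into both the
	 * root cfs_rq's load and the CPU-wide rq->load.
	 */
	static void account_enqueue_old(struct rq *rq, struct cfs_rq *cfs_rq,
					unsigned long weight, int top_level)
	{
		update_load_add(&cfs_rq->load, weight);
		if (top_level)
			update_load_add(&rq->load, weight);
	}

	int main(void)
	{
		struct rq rq = { { 0 }, { { 0 } } };

		/* Enqueue two nice-0 tasks (weight 1024) on the root cfs_rq. */
		account_enqueue_old(&rq, &rq.cfs, 1024, 1);
		account_enqueue_old(&rq, &rq.cfs, 1024, 1);

		/* Both sums track the same set of top-level entities ... */
		assert(rq.load.weight == rq.cfs.load.weight);

		/* ... so set_next_entity() can read rq->cfs.load.weight. */
		printf("weight = %lu\n", rq.cfs.load.weight);
		return 0;
	}

With group scheduling enabled, a task group contributes through its
top-level group entity, which is likewise queued on rq->cfs, so the two
sums stay equal in that configuration as well.
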
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 678bfb9bd87f..150043e1d716 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -656,8 +656,6 @@ do { \
 	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))
 
 	P(nr_running);
-	SEQ_printf(m, "  .%-30s: %lu\n", "load",
-		   rq->load.weight);
 	P(nr_switches);
 	P(nr_load_updates);
 	P(nr_uninterruptible);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 8691a8fffe40..08b1cb06f968 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2686,8 +2686,6 @@ static void
 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 	update_load_add(&cfs_rq->load, se->load.weight);
-	if (!parent_entity(se))
-		update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
 #ifdef CONFIG_SMP
 	if (entity_is_task(se)) {
 		struct rq *rq = rq_of(cfs_rq);
@@ -2703,8 +2701,6 @@ static void
 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 	update_load_sub(&cfs_rq->load, se->load.weight);
-	if (!parent_entity(se))
-		update_load_sub(&rq_of(cfs_rq)->load, se->load.weight);
 #ifdef CONFIG_SMP
 	if (entity_is_task(se)) {
 		account_numa_dequeue(rq_of(cfs_rq), task_of(se));
@@ -4100,7 +4096,8 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	 * least twice that of our own weight (i.e. dont track it
 	 * when there are only lesser-weight tasks around):
 	 */
-	if (schedstat_enabled() && rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
+	if (schedstat_enabled() &&
+	    rq_of(cfs_rq)->cfs.load.weight >= 2*se->load.weight) {
 		schedstat_set(se->statistics.slice_max,
 			      max((u64)schedstat_val(se->statistics.slice_max),
 				  se->sum_exec_runtime - se->prev_sum_exec_runtime));
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index b52ed1ada0be..c308410675ed 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -830,8 +830,6 @@ struct rq {
 	atomic_t		nohz_flags;
 #endif /* CONFIG_NO_HZ_COMMON */
 
-	/* capture load from *all* tasks on this CPU: */
-	struct load_weight	load;
 	unsigned long		nr_load_updates;
 	u64			nr_switches;
 