Message-Id: <20180907214047.26914-15-jschoenh@amazon.de>
Date:   Fri,  7 Sep 2018 23:40:01 +0200
From:   Jan H. Schönherr <jschoenh@...zon.de>
To:     Ingo Molnar <mingo@...hat.com>,
        Peter Zijlstra <peterz@...radead.org>
Cc:     Jan H. Schönherr <jschoenh@...zon.de>,
        linux-kernel@...r.kernel.org
Subject: [RFC 14/60] sched: Refactor sync_throttle() to accept a CFS runqueue as argument

Prepare for future changes by refactoring sync_throttle() to take the
affected CFS runqueue directly, instead of deriving it from a task group
and a CPU number.

Signed-off-by: Jan H. Schönherr <jschoenh@...zon.de>
---
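For orientation, here is a minimal sketch (not part of this patch) of the
existing accessors the new sync_throttle() body relies on, assuming
CONFIG_FAIR_GROUP_SCHED and with all unrelated fields omitted: the cfs_rq
alone is enough to reach its CPU runqueue, its CPU number, and the parent
group's cfs_rq on the same CPU.

/* Simplified sketch of existing kernel accessors; not part of this patch. */
struct cfs_rq;

struct rq {
        int cpu;                        /* CPU this runqueue belongs to */
        /* ... */
};

struct task_group {
        struct cfs_rq **cfs_rq;         /* per-CPU CFS runqueues of this group */
        struct task_group *parent;
        /* ... */
};

struct cfs_rq {
        struct rq *rq;                  /* CPU runqueue this cfs_rq is attached to */
        struct task_group *tg;          /* task group that owns this cfs_rq */
        /* ... */
};

static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
        return cfs_rq->rq;              /* available with CONFIG_FAIR_GROUP_SCHED */
}

static inline int cpu_of(struct rq *rq)
{
        return rq->cpu;                 /* the real helper returns 0 on !SMP */
}

/*
 * Thus the parent's cfs_rq on the same CPU is reachable from the child
 * alone, which is what the refactored sync_throttle() does:
 *
 *      pcfs_rq = cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))];
 */

This is why the (tg, cpu) pair can be dropped from the signature without
losing any information.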
 kernel/sched/fair.c | 13 ++++++-------
 1 file changed, 6 insertions(+), 7 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 5cad364e3a88..9f0ce4555c26 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4709,18 +4709,17 @@ static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
 		throttle_cfs_rq(cfs_rq);
 }
 
-static void sync_throttle(struct task_group *tg, int cpu)
+static void sync_throttle(struct cfs_rq *cfs_rq)
 {
-	struct cfs_rq *pcfs_rq, *cfs_rq;
+	struct cfs_rq *pcfs_rq;
 
 	if (!cfs_bandwidth_used())
 		return;
 
-	cfs_rq = tg->cfs_rq[cpu];
-	pcfs_rq = tg->parent->cfs_rq[cpu];
+	pcfs_rq = cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))];
 
 	cfs_rq->throttle_count = pcfs_rq->throttle_count;
-	cfs_rq->throttled_clock_task = rq_clock_task(cpu_rq(cpu));
+	cfs_rq->throttled_clock_task = rq_clock_task(rq_of(cfs_rq));
 }
 
 /* conditionally throttle active cfs_rq's from put_prev_entity() */
@@ -4887,7 +4886,7 @@ static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
 static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {}
 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; }
 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
-static inline void sync_throttle(struct task_group *tg, int cpu) {}
+static inline void sync_throttle(struct cfs_rq *cfs_rq) {}
 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
 
 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
@@ -9866,7 +9865,7 @@ void online_fair_sched_group(struct task_group *tg)
 		raw_spin_lock_irq(&rq->lock);
 		update_rq_clock(rq);
 		attach_entity_cfs_rq(se);
-		sync_throttle(tg, i);
+		sync_throttle(tg->cfs_rq[i]);
 		raw_spin_unlock_irq(&rq->lock);
 	}
 }
-- 
2.9.3.1.gcba166c.dirty
