Message-ID: <lsq.1578512578.981696343@decadent.org.uk>
Date:   Wed, 08 Jan 2020 19:43:56 +0000
From:   Ben Hutchings <ben@...adent.org.uk>
To:     linux-kernel@...r.kernel.org, stable@...r.kernel.org
CC:     akpm@...ux-foundation.org, Denis Kirjanov <kda@...ux-powerpc.org>
Subject: [PATCH 3.16 58/63] Revert "sched/fair: Fix bandwidth timer clock
 drift condition"

3.16.81-rc1 review patch.  If anyone has any objections, please let me know.

------------------

From: Ben Hutchings <ben@...adent.org.uk>

This reverts commit eb29ee5a3873134917a760bf9c416da0a089a0be, which
was commit 512ac999d2755d2b7109e996a76b6fb8b888631d upstream.  This
introduced a regression and doesn't seem to have been suitable for
older stable branches.  (It has been fixed differently upstream.)

Signed-off-by: Ben Hutchings <ben@...adent.org.uk>
---
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3163,7 +3163,6 @@ void __refill_cfs_bandwidth_runtime(stru
 	now = sched_clock_cpu(smp_processor_id());
 	cfs_b->runtime = cfs_b->quota;
 	cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period);
-	cfs_b->expires_seq++;
 }
 
 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
@@ -3186,7 +3185,6 @@ static int assign_cfs_rq_runtime(struct
 	struct task_group *tg = cfs_rq->tg;
 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
 	u64 amount = 0, min_amount, expires;
-	int expires_seq;
 
 	/* note: this is a positive sum as runtime_remaining <= 0 */
 	min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining;
@@ -3212,7 +3210,6 @@ static int assign_cfs_rq_runtime(struct
 			cfs_b->idle = 0;
 		}
 	}
-	expires_seq = cfs_b->expires_seq;
 	expires = cfs_b->runtime_expires;
 	raw_spin_unlock(&cfs_b->lock);
 
@@ -3222,10 +3219,8 @@ static int assign_cfs_rq_runtime(struct
 	 * spread between our sched_clock and the one on which runtime was
 	 * issued.
 	 */
-	if (cfs_rq->expires_seq != expires_seq) {
-		cfs_rq->expires_seq = expires_seq;
+	if ((s64)(expires - cfs_rq->runtime_expires) > 0)
 		cfs_rq->runtime_expires = expires;
-	}
 
 	return cfs_rq->runtime_remaining > 0;
 }
@@ -3251,9 +3246,12 @@ static void expire_cfs_rq_runtime(struct
 	 * has not truly expired.
 	 *
 	 * Fortunately we can determine whether this is the case by checking
-	 * whether the global deadline(cfs_b->expires_seq) has advanced.
+	 * whether the global deadline has advanced. It is valid to compare
+	 * cfs_b->runtime_expires without any locks since we only care about
+	 * exact equality, so a partial write will still work.
 	 */
-	if (cfs_rq->expires_seq == cfs_b->expires_seq) {
+
+	if (cfs_rq->runtime_expires != cfs_b->runtime_expires) {
 		/* extend local deadline, drift is bounded above by 2 ticks */
 		cfs_rq->runtime_expires += TICK_NSEC;
 	} else {
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -187,7 +187,6 @@ struct cfs_bandwidth {
 	u64 quota, runtime;
 	s64 hierarchal_quota;
 	u64 runtime_expires;
-	int expires_seq;
 
 	int idle, timer_active;
 	struct hrtimer period_timer, slack_timer;
@@ -377,7 +376,6 @@ struct cfs_rq {
 
 #ifdef CONFIG_CFS_BANDWIDTH
 	int runtime_enabled;
-	int expires_seq;
 	u64 runtime_expires;
 	s64 runtime_remaining;
 
