Date:	26 Aug 2015 21:28:28 -0400
From:	"George Spelvin" <linux@...izon.com>
To:	jason.low2@...com
Cc:	linux-kernel@...r.kernel.org, linux@...izon.com
Subject: Re: [PATCH 3/3] timer: Reduce unnecessary sighand lock contention

> I can include your patch in the series and then use a boolean for the new
> checking_timer field. However, it looks like this applies to an old
> kernel. For example, the spin_lock field has already been removed from
> the structure.

Apologies; that was 4.1.6.  A 4.2-rc8 patch is appended (it's a pretty
trivial merge once you look at it).

> The spinlock call has already been removed by a previous patch. The
> issue now is contention on the sighand lock.

I'll look some more and try to wrap my head around it.

>> Or is it basically okay if this is massively racy, since process-wide
>> CPU timers are inherently sloppy?  A race will just cause an expiration
>> check to be missed, but it will be retried soon anyway.

> Yes, the worst case scenario is that we wait until the next thread to
> come along and handle the next expired timer. However, this "delay"
> already occurs now (for example: a timer can expire right after a thread
> exits check_process_timers()).

And is this polled, or is there some non-polled mechanism that will trigger
another call to check_process_timers()?

E.g. suppose a process fails to notice that it blew past a CPU time
timeout before blocking.  Does anything guarantee that it will get
the timeout signal in finite real time?
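
To make the scenario concrete, here is a user-space probe of it (a
hypothetical test sketch using the ordinary POSIX timer API, not part of
the patch series); it demonstrates the question, not the answer:

/*
 * Hypothetical probe: arm a process-wide CPU-time timer, burn CPU well
 * past its expiry without blocking, then block and see whether the
 * signal still arrives.  Build with: cc probe.c -o probe -lrt
 */
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <unistd.h>

static volatile sig_atomic_t fired;

static void on_alarm(int sig)
{
	(void)sig;
	fired = 1;
}

int main(void)
{
	struct sigaction sa;
	timer_t tid;
	/* One-shot expiry after 100 ms of process CPU time. */
	struct itimerspec its = { .it_value = { .tv_nsec = 100 * 1000 * 1000 } };
	struct timespec now;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = on_alarm;
	sigaction(SIGALRM, &sa, NULL);

	/* A NULL sigevent means "deliver SIGALRM" by default. */
	timer_create(CLOCK_PROCESS_CPUTIME_ID, NULL, &tid);
	timer_settime(tid, 0, &its, NULL);

	/* Burn ~300 ms of CPU time without ever blocking. */
	do {
		clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &now);
	} while (!fired && now.tv_sec == 0 && now.tv_nsec < 300 * 1000 * 1000);

	/* Now block.  The question is whether the expiry was noticed
	 * before this point, or is at least delivered within bounded
	 * real time while we sleep. */
	if (!fired)
		sleep(5);
	printf("expiry signal %s delivered\n", fired ? "was" : "was not");
	return 0;
}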



From 95349f9b16c30aea518ce79d72e8e0f0c5d12069 Mon Sep 17 00:00:00 2001
From: George Spelvin <linux@...izon.com>
Date: Wed, 26 Aug 2015 19:15:54 +0000
Subject: [PATCH] timer: Use bool more in kernel/time/posix-cpu-timers.c

Most of the changes are to function return values, for documentation
purposes.

One structure field is changed (from int), but alignment
padding precludes any actual space saving.

Signed-off-by: George Spelvin <linux@...izon.com>
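
(An aside, not part of the patch: a stand-alone illustration of the
alignment point above, using a made-up struct rather than
thread_group_cputimer.  Changing the flag from int to bool does not shrink
the structure, because trailing padding rounds it up to the alignment of
its widest member.)

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in: three 8-byte members followed by a flag,
 * roughly the shape of the structure touched by this patch. */
struct with_int  { long long counts[3]; int  running; };
struct with_bool { long long counts[3]; bool running; };

int main(void)
{
	/* On a typical LP64 target both print 32: 24 bytes of counters,
	 * the flag, then padding up to 8-byte alignment. */
	printf("%zu %zu\n", sizeof(struct with_int), sizeof(struct with_bool));
	return 0;
}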

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 04b5ada4..0cd80f76 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -607,7 +607,7 @@ struct task_cputime_atomic {
 /**
  * struct thread_group_cputimer - thread group interval timer counts
  * @cputime_atomic:	atomic thread group interval timers.
- * @running:		non-zero when there are timers running and
+ * @running:		%true when there are timers running and
  * 			@cputime receives updates.
  *
  * This structure contains the version of task_cputime, above, that is
@@ -615,7 +615,7 @@ struct task_cputime_atomic {
  */
 struct thread_group_cputimer {
 	struct task_cputime_atomic cputime_atomic;
-	int running;
+	bool running;
 };
 
 #include <linux/rwsem.h>
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index 892e3dae..106368c5 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -113,14 +113,12 @@ static void bump_cpu_timer(struct k_itimer *timer,
  *
  * @cputime:	The struct to compare.
  *
- * Checks @cputime to see if all fields are zero.  Returns true if all fields
- * are zero, false if any field is nonzero.
+ * Checks @cputime to see if all fields are zero.  Returns %true if all fields
+ * are zero, %false if any field is nonzero.
  */
-static inline int task_cputime_zero(const struct task_cputime *cputime)
+static inline bool task_cputime_zero(const struct task_cputime *cputime)
 {
-	if (!cputime->utime && !cputime->stime && !cputime->sum_exec_runtime)
-		return 1;
-	return 0;
+	return !cputime->utime && !cputime->stime && !cputime->sum_exec_runtime;
 }
 
 static inline unsigned long long prof_ticks(struct task_struct *p)
@@ -249,7 +247,7 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
 		 * but barriers are not required because update_gt_cputime()
 		 * can handle concurrent updates.
 		 */
-		WRITE_ONCE(cputimer->running, 1);
+		WRITE_ONCE(cputimer->running, true);
 	}
 	sample_cputime_atomic(times, &cputimer->cputime_atomic);
 }
@@ -458,8 +456,9 @@ void posix_cpu_timers_exit_group(struct task_struct *tsk)
 	cleanup_timers(tsk->signal->cpu_timers);
 }
 
-static inline int expires_gt(cputime_t expires, cputime_t new_exp)
+static inline bool expires_gt(cputime_t expires, cputime_t new_exp)
 {
+	/* Could also be written "expires - 1 >= new_exp" */
 	return expires == 0 || expires > new_exp;
 }
 
@@ -911,7 +910,7 @@ static inline void stop_process_timers(struct signal_struct *sig)
 	struct thread_group_cputimer *cputimer = &sig->cputimer;
 
 	/* Turn off cputimer->running. This is done without locking. */
-	WRITE_ONCE(cputimer->running, 0);
+	WRITE_ONCE(cputimer->running, false);
 }
 
 static u32 onecputick;
@@ -1088,20 +1087,20 @@ out:
  * @expires:	Expiration times, against which @sample will be checked.
  *
  * Checks @sample against @expires to see if any field of @sample has expired.
- * Returns true if any field of the former is greater than the corresponding
- * field of the latter if the latter field is set.  Otherwise returns false.
+ * Returns %true if any field of the former is greater than the corresponding
+ * field of the latter if the latter field is set.  Otherwise returns %false.
  */
-static inline int task_cputime_expired(const struct task_cputime *sample,
+static inline bool task_cputime_expired(const struct task_cputime *sample,
 					const struct task_cputime *expires)
 {
 	if (expires->utime && sample->utime >= expires->utime)
-		return 1;
+		return true;
 	if (expires->stime && sample->utime + sample->stime >= expires->stime)
-		return 1;
+		return true;
 	if (expires->sum_exec_runtime != 0 &&
 	    sample->sum_exec_runtime >= expires->sum_exec_runtime)
-		return 1;
-	return 0;
+		return true;
+	return false;
 }
 
 /**
@@ -1110,11 +1109,11 @@ static inline int task_cputime_expired(const struct task_cputime *sample,
  * @tsk:	The task (thread) being checked.
  *
  * Check the task and thread group timers.  If both are zero (there are no
- * timers set) return false.  Otherwise snapshot the task and thread group
+ * timers set) return %false.  Otherwise snapshot the task and thread group
  * timers and compare them with the corresponding expiration times.  Return
- * true if a timer has expired, else return false.
+ * %true if a timer has expired, else return %false.
  */
-static inline int fastpath_timer_check(struct task_struct *tsk)
+static inline bool fastpath_timer_check(struct task_struct *tsk)
 {
 	struct signal_struct *sig;
 	cputime_t utime, stime;
@@ -1129,7 +1128,7 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
 		};
 
 		if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
-			return 1;
+			return true;
 	}
 
 	sig = tsk->signal;
@@ -1140,10 +1139,10 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
 		sample_cputime_atomic(&group_sample, &sig->cputimer.cputime_atomic);
 
 		if (task_cputime_expired(&group_sample, &sig->cputime_expires))
-			return 1;
+			return true;
 	}
 
-	return 0;
+	return false;
 }
 
 /*
--