Message-ID: <20150603212949.GD3644@twins.programming.kicks-ass.net>
Date:	Wed, 3 Jun 2015 23:29:49 +0200
From:	Peter Zijlstra <peterz@...radead.org>
To:	Thomas Gleixner <tglx@...utronix.de>
Cc:	umgwanakikbuti@...il.com, mingo@...e.hu, ktkhai@...allels.com,
	rostedt@...dmis.org, juri.lelli@...il.com, pang.xunlei@...aro.org,
	oleg@...hat.com, wanpeng.li@...ux.intel.com,
	linux-kernel@...r.kernel.org
Subject: Re: [PATCH 8/9] hrtimer: Allow hrtimer::function() to free the timer

On Wed, Jun 03, 2015 at 07:41:43PM +0200, Thomas Gleixner wrote:
> On Wed, 3 Jun 2015, Peter Zijlstra wrote:
> >  /**
> >   * struct hrtimer - the basic hrtimer structure
> > @@ -153,6 +144,7 @@ struct hrtimer_clock_base {
> >  	struct timerqueue_head	active;
> >  	ktime_t			(*get_time)(void);
> >  	ktime_t			offset;
> > +	struct hrtimer		*running;
> 
> Aside from lacking a kerneldoc comment, it expands the struct size on
> 32bit from 32 bytes to 36 bytes, which undoes some of the recent cache
> line optimizations I did. Mooo!
> 
> So we might think about storing the running timer pointer in cpu_base
> instead for 32bit. That increases the footprint of the migration base
> and adds the cost of an extra indirection, but it would keep the cache
> lines tight for the hot paths.

A wee something like this then?

---
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -123,8 +123,10 @@ struct hrtimer_sleeper {
 
 #ifdef CONFIG_64BIT
 # define HRTIMER_CLOCK_BASE_ALIGN	64
+# define __timer_base_running(timer)	(timer)->base->running
 #else
 # define HRTIMER_CLOCK_BASE_ALIGN	32
+# define __timer_base_running(timer)	(timer)->base->cpu_base->running
 #endif
 
 /**
@@ -136,6 +138,7 @@ struct hrtimer_sleeper {
  * @active:		red black tree root node for the active timers
  * @get_time:		function to retrieve the current time of the clock
  * @offset:		offset of this clock to the monotonic base
+ * @running:		pointer to the currently running hrtimer
  */
 struct hrtimer_clock_base {
 	struct hrtimer_cpu_base	*cpu_base;
@@ -144,7 +147,9 @@ struct hrtimer_clock_base {
 	struct timerqueue_head	active;
 	ktime_t			(*get_time)(void);
 	ktime_t			offset;
+#ifdef CONFIG_64BIT
 	struct hrtimer		*running;
+#endif
 } __attribute__((__aligned__(HRTIMER_CLOCK_BASE_ALIGN)));
 
 enum  hrtimer_base_type {
@@ -162,6 +167,7 @@ enum  hrtimer_base_type {
  * @cpu:		cpu number
  * @active_bases:	Bitfield to mark bases with active timers
  * @clock_was_set_seq:	Sequence counter of clock was set events
+ * @running:		pointer to the currently running hrtimer
  * @expires_next:	absolute time of the next event which was scheduled
  *			via clock_set_next_event()
  * @next_timer:		Pointer to the first expiring timer
@@ -183,6 +189,9 @@ struct hrtimer_cpu_base {
 	unsigned int			cpu;
 	unsigned int			active_bases;
 	unsigned int			clock_was_set_seq;
+#ifndef CONFIG_64BIT
+	struct hrtimer			*running;
+#endif
 #ifdef CONFIG_HIGH_RES_TIMERS
 	unsigned int			in_hrtirq	: 1,
 					hres_active	: 1,
@@ -401,7 +410,7 @@ static inline bool hrtimer_active(const
 
 	smp_rmb(); /* C matches A */
 
-	if (timer->base->running == timer)
+	if (__timer_base_running(timer) == timer)
 		return true;
 
 	smp_rmb(); /* D matches B */
@@ -426,7 +435,7 @@ static inline int hrtimer_is_queued(stru
  */
 static inline int hrtimer_callback_running(struct hrtimer *timer)
 {
-	return timer->base->running == timer;
+	return __timer_base_running(timer) == timer;
 }
 
 /* Forward a hrtimer so it expires after now: */
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -110,12 +110,20 @@ static inline int hrtimer_clockid_to_bas
  */
 #ifdef CONFIG_SMP
 
+#ifndef CONFIG_64BIT
+static struct hrtimer_cpu_base migration_cpu_base;
+
+#define MIGRATION_BASE_INIT { .cpu_base = &migration_cpu_base, }
+#else
+#define MIGRATION_BASE_INIT {}
+#endif
+
 /*
  * We require the migration_base for lock_hrtimer_base()/switch_hrtimer_base()
  * such that hrtimer_callback_running() can unconditionally dereference
  * timer->base.
  */
-static struct hrtimer_clock_base migration_base;
+static struct hrtimer_clock_base migration_base = MIGRATION_BASE_INIT;
 
 /*
  * We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock
@@ -1121,7 +1129,7 @@ static void __run_hrtimer(struct hrtimer
 	WARN_ON(!irqs_disabled());
 
 	debug_deactivate(timer);
-	base->running = timer;
+	__timer_base_running(timer) = timer;
 
 	/*
 	 * Pairs with hrtimer_active().
@@ -1178,8 +1186,8 @@ static void __run_hrtimer(struct hrtimer
 	 */
 	smp_wmb(); /* B matches D */
 
-	WARN_ON_ONCE(base->running != timer);
-	base->running = NULL;
+	WARN_ON_ONCE(__timer_base_running(timer) != timer);
+	__timer_base_running(timer) = NULL;
 }
 
 static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
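
As an aside, a sketch only and not part of the patch above: the size
assumption Thomas is protecting could also be pinned down at build time,
e.g. from hrtimers_init(), so a later field addition cannot silently push
hrtimer_clock_base past its cache-line alignment again (64 bytes on 64bit,
32 bytes on 32bit, per HRTIMER_CLOCK_BASE_ALIGN above). Something along
the lines of:

	/*
	 * Sketch only: assert at compile time that hrtimer_clock_base
	 * still fits within its declared alignment, so growing the struct
	 * shows up as a build error instead of a silent cache-line layout
	 * regression.
	 */
	BUILD_BUG_ON(sizeof(struct hrtimer_clock_base) >
		     HRTIMER_CLOCK_BASE_ALIGN);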