Date:	Sat, 3 May 2008 19:01:30 +0200
From:	Ingo Molnar <mingo@...e.hu>
To:	Peter Zijlstra <a.p.zijlstra@...llo.nl>
Cc:	Thomas Gleixner <tglx@...utronix.de>,
	David Miller <davem@...emloft.net>,
	Guillaume Chazarain <guichaz@...il.com>,
	Andi Kleen <andi@...stfloor.org>,
	linux-kernel <linux-kernel@...r.kernel.org>,
	linux-arch <linux-arch@...r.kernel.org>,
	Mike Galbraith <efault@....de>,
	Dhaval Giani <dhaval@...ux.vnet.ibm.com>
Subject: Re: [RFC][PATCH] sched_clock_cpu()


* Peter Zijlstra <a.p.zijlstra@...llo.nl> wrote:

> it _DOESN'T_ boot ;-/ and I seem to have caught a flu that makes my
> whole body hurt like hell, so I'm not getting anything done.

I think I see where your boot problem comes from:

> +struct sched_clock_data {
> +	spinlock_t lock;

That won't work when sched_clock() itself gets called from within the 
CONFIG_LOCK_STAT instrumentation: taking a lockdep-tracked spinlock 
there recurses straight back into the clock code. Does the patch below 
solve the boot problems for you?
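
To make the failure mode concrete, here is a minimal userspace analogue 
(a hedged sketch, not kernel code: all names are hypothetical, pthread 
mutexes stand in for spinlocks, and instrumented_lock() stands in for 
lockdep's lock_acquire() hook under CONFIG_LOCK_STAT):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t clock_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t other_lock = PTHREAD_MUTEX_INITIALIZER;

static unsigned long long fake_sched_clock(void);

/*
 * Stand-in for spin_lock() with lock statistics enabled: the hook
 * wants a timestamp for contention accounting, so it calls back
 * into the clock code before taking the lock.
 */
static void instrumented_lock(pthread_mutex_t *m)
{
	unsigned long long stamp = fake_sched_clock();

	pthread_mutex_lock(m);
	(void)stamp;
}

/* Stand-in for __raw_spin_lock(): no instrumentation hook at all. */
static void raw_lock(pthread_mutex_t *m)
{
	pthread_mutex_lock(m);
}

/*
 * Like sched_clock_cpu(): serializes access to the clock state. If
 * this took the *instrumented* lock, every call would re-enter
 * itself through the hook and never return - the boot hang. The raw
 * lock breaks the cycle.
 */
static unsigned long long fake_sched_clock(void)
{
	static unsigned long long clock;
	unsigned long long now;

	raw_lock(&clock_lock);
	now = ++clock;
	pthread_mutex_unlock(&clock_lock);

	return now;
}

int main(void)
{
	/*
	 * Instrumenting an unrelated lock is harmless: the hook's clock
	 * read goes through raw_lock() and terminates. The hang only
	 * appears when the clock's own lock is the instrumented one.
	 */
	instrumented_lock(&other_lock);
	pthread_mutex_unlock(&other_lock);

	printf("clock: %llu\n", fake_sched_clock());
	return 0;
}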

	Ingo

-------------------->
Subject: sched: sched_clock() fix
From: Ingo Molnar <mingo@...e.hu>
Date: Sat May 03 18:41:11 CEST 2008

Signed-off-by: Ingo Molnar <mingo@...e.hu>
---
 kernel/sched.c       |    2 --
 kernel/sched_clock.c |   42 ++++++++++++++++++++++++------------------
 2 files changed, 24 insertions(+), 20 deletions(-)

Index: linux/kernel/sched.c
===================================================================
--- linux.orig/kernel/sched.c
+++ linux/kernel/sched.c
@@ -1074,8 +1074,6 @@ static struct rq *this_rq_lock(void)
 	return rq;
 }
 
-	WARN_ON(!irqs_disabled());
-	WARN_ON(!irqs_disabled());
 static void __resched_task(struct task_struct *p, int tif_bit);
 
 static inline void resched_task(struct task_struct *p)
Index: linux/kernel/sched_clock.c
===================================================================
--- linux.orig/kernel/sched_clock.c
+++ linux/kernel/sched_clock.c
@@ -33,12 +33,18 @@
 #ifndef CONFIG_HAVE_STABLE_CLOCK
 
 struct sched_clock_data {
-	spinlock_t lock;
-	unsigned long prev_jiffies;
-	u64 prev_raw;
-	u64 tick_raw;
-	u64 tick_gtod;
-	u64 clock;
+	/*
+	 * Raw spinlock - this is a special case: this might be called
+	 * from within instrumentation code so we don't want to do any
+	 * instrumentation ourselves.
+	 */
+	raw_spinlock_t		lock;
+
+	unsigned long		prev_jiffies;
+	u64			prev_raw;
+	u64			tick_raw;
+	u64			tick_gtod;
+	u64			clock;
 };
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);
@@ -62,7 +68,7 @@ void sched_clock_init(void)
 	for_each_possible_cpu(cpu) {
 		struct sched_clock_data *scd = cpu_sdc(cpu);
 
-		spin_lock_init(&scd->lock);
+		scd->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
 		scd->prev_jiffies = jiffies;
 		scd->prev_raw = now;
 		scd->tick_raw = now;
@@ -116,11 +122,11 @@ static void lock_double_clock(struct sch
 			      struct sched_clock_data *data2)
 {
 	if (data1 < data2) {
-		spin_lock(&data1->lock);
-		spin_lock_nested(&data2->lock, SINGLE_DEPTH_NESTING);
+		__raw_spin_lock(&data1->lock);
+		__raw_spin_lock(&data2->lock);
 	} else {
-		spin_lock(&data2->lock);
-		spin_lock_nested(&data1->lock, SINGLE_DEPTH_NESTING);
+		__raw_spin_lock(&data2->lock);
+		__raw_spin_lock(&data1->lock);
 	}
 }
 
@@ -147,14 +153,14 @@ u64 sched_clock_cpu(int cpu)
 		now -= my_scd->tick_gtod;
 		now += scd->tick_gtod;
 
-		spin_unlock(&my_scd->lock);
+		__raw_spin_unlock(&my_scd->lock);
 	} else
-		spin_lock(&scd->lock);
+		__raw_spin_lock(&scd->lock);
 
 	__update_sched_clock(scd, now);
 	clock = scd->clock;
 
-	spin_unlock(&scd->lock);
+	__raw_spin_unlock(&scd->lock);
 
 	return clock;
 }
@@ -164,7 +170,7 @@ void sched_clock_tick(void)
 	struct sched_clock_data *scd = this_scd();
 	u64 now;
 
-	spin_lock(&scd->lock);
+	__raw_spin_lock(&scd->lock);
 	now = sched_clock();
 	__update_sched_clock(scd, now);
 	/*
@@ -174,7 +180,7 @@ void sched_clock_tick(void)
 	 */
 	scd->tick_raw = now;
 	scd->tick_gtod = ktime_to_ns(ktime_get()); // XXX get from regular tick
-	spin_unlock(&scd->lock);
+	__raw_spin_unlock(&scd->lock);
 }
 
 /*
@@ -199,10 +205,10 @@ void sched_clock_idle_wakeup_event(u64 d
 	 * and use the PM-provided delta_ns to advance the
 	 * rq clock:
 	 */
-	spin_lock(&scd->lock);
+	__raw_spin_lock(&scd->lock);
 	scd->prev_raw = sched_clock();
 	scd->clock += delta_ns;
-	spin_unlock(&scd->lock);
+	__raw_spin_unlock(&scd->lock);
 
 	touch_softlockup_watchdog();
 }
--
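
A side note on lock_double_clock() above: with lockdep out of the 
picture, the spin_lock_nested() annotations go away too (they only 
existed to tell lockdep the double acquisition is intentional), so 
deadlock avoidance now rests entirely on the address ordering. A 
minimal sketch of that idiom, again with hypothetical userspace names 
and pthread mutexes standing in for the raw spinlocks:

#include <pthread.h>
#include <stdio.h>

struct clock_data {
	pthread_mutex_t		lock;
	unsigned long long	clock;
};

/*
 * Always acquire the lower-addressed structure first, as the kernel
 * helper does. Two threads locking the same pair then agree on the
 * order, so the classic AB/BA deadlock cannot happen.
 */
static void lock_double(struct clock_data *d1, struct clock_data *d2)
{
	if (d1 < d2) {
		pthread_mutex_lock(&d1->lock);
		pthread_mutex_lock(&d2->lock);
	} else {
		pthread_mutex_lock(&d2->lock);
		pthread_mutex_lock(&d1->lock);
	}
}

static void unlock_double(struct clock_data *d1, struct clock_data *d2)
{
	pthread_mutex_unlock(&d1->lock);
	pthread_mutex_unlock(&d2->lock);
}

int main(void)
{
	struct clock_data a = { PTHREAD_MUTEX_INITIALIZER, 0 };
	struct clock_data b = { PTHREAD_MUTEX_INITIALIZER, 0 };

	/* Both argument orders are safe - the helper normalizes them. */
	lock_double(&a, &b);
	a.clock = b.clock = 1;
	unlock_double(&a, &b);

	lock_double(&b, &a);
	printf("a=%llu b=%llu\n", a.clock, b.clock);
	unlock_double(&b, &a);

	return 0;
}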