Message-Id: <20160201142107.c280954da0d1b8bdbdf6e0ee@linux-foundation.org>
Date:	Mon, 1 Feb 2016 14:21:07 -0800
From:	Andrew Morton <akpm@...ux-foundation.org>
To:	Andrey Ryabinin <aryabinin@...tuozzo.com>
Cc:	Mike Krinkin <krinkin.m.u@...il.com>,
	<linux-kernel@...r.kernel.org>,
	Peter Zijlstra <peterz@...radead.org>,
	Ingo Molnar <mingo@...e.hu>
Subject: Re: CONFIG_UBSAN_ALIGNMENT breaks x86-64 kernel with lockdep enabled

On Mon, 1 Feb 2016 18:10:38 +0300 Andrey Ryabinin <aryabinin@...tuozzo.com> wrote:

> On 01/30/2016 03:36 AM, Mike Krinkin wrote:
> > Hi,
> > 
> > The option CONFIG_UBSAN_ALIGNMENT breaks the x86-64 kernel with lockdep
> > enabled, i.e. a kernel built with CONFIG_UBSAN_ALIGNMENT fails to load
> > without printing any error message.
> > 
> > The problem is that the UBSAN callbacks use spinlocks and might be called
> > before lockdep is initialized. In particular, this line in the
> > reserve_ebda_region function triggers it:
> > 
> > lowmem = *(unsigned short *)__va(BIOS_LOWMEM_KILOBYTES);
> > 
> > If I put lockdep_init() before the reserve_ebda_region call in
> > x86_64_start_reservations, the kernel loads fine. Since
> > CONFIG_UBSAN_ALIGNMENT isn't useful for x86 anyway, might it be better
> > to disable this option for the x86 arch?
> > 
> 
> 
> Alignment checks could be useful even on x86, because there are unaligned
> accesses in generic code. I think we can disable alignment instrumentation
> for the arch/x86 directory only.
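
For context on why that single quoted line trips the check: BIOS_LOWMEM_KILOBYTES
is physical address 0x413, so the cast performs a two-byte load from an odd
address. That is perfectly fine on x86 hardware, but it is undefined behavior
in C, so -fsanitize=alignment instruments the load with a call into the UBSAN
runtime, whose report path takes spinlocks, which in turn go through
not-yet-initialized lockdep. A minimal userspace sketch of the same class of
check (illustrative only, not code from the thread; build with
gcc -fsanitize=alignment, which clang also accepts):

#include <stdio.h>

int main(void)
{
	/* Force two-byte alignment so buf + 1 is guaranteed odd. */
	_Alignas(unsigned short) char buf[4] = { 0x13, 0x04, 0x00, 0x00 };

	/*
	 * Two-byte load from an odd address, mirroring the EBDA read at
	 * physical 0x413: UBSAN reports "load of misaligned address" here.
	 */
	unsigned short lowmem = *(unsigned short *)(buf + 1);

	printf("lowmem = %u\n", lowmem);
	return 0;
}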

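On the "disable it for arch/x86 only" idea: kbuild already lets a Makefile opt
its objects out of UBSAN entirely, though that is broader than the alignment
check alone. A sketch of what such an opt-out could look like (an assumption
about how a fix might be written, not the committed one):

# In arch/x86/Makefile, or any Makefile under arch/x86/:

# Existing kbuild knob: drop *all* UBSAN instrumentation for the
# objects built from this Makefile.
UBSAN_SANITIZE := n

# Narrower alternative, keeping the other UBSAN checks: compile out
# only the alignment check (assumes compiler support for the flag).
ccflags-y += $(call cc-option, -fno-sanitize=alignment)

The two knobs are alternatives, not meant to be combined.
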
It looks pretty simple to make lockdep self-initialize on demand.  I
don't think it'll affect performance much at all, and it takes away all
these "has lockdep been initialized yet?" concerns.


--- a/kernel/locking/lockdep.c~a
+++ a/kernel/locking/lockdep.c
@@ -290,9 +290,20 @@ LIST_HEAD(all_lock_classes);
 #define CLASSHASH_BITS		(MAX_LOCKDEP_KEYS_BITS - 1)
 #define CLASSHASH_SIZE		(1UL << CLASSHASH_BITS)
 #define __classhashfn(key)	hash_long((unsigned long)key, CLASSHASH_BITS)
-#define classhashentry(key)	(classhash_table + __classhashfn((key)))
 
-static struct list_head classhash_table[CLASSHASH_SIZE];
+static struct list_head __classhash_table[CLASSHASH_SIZE];
+
+static inline struct list_head *get_classhash_table(void)
+{
+	if (unlikely(!lockdep_initialized))
+		lockdep_init();
+	return __classhash_table;
+}
+
+static inline struct list_head *classhashentry(struct lockdep_subclass_key *key)
+{
+	return get_classhash_table() + __classhashfn(key);
+}
 
 /*
  * We put the lock dependency chains into a hash-table as well, to cache
@@ -301,9 +312,15 @@ static struct list_head classhash_table[
 #define CHAINHASH_BITS		(MAX_LOCKDEP_CHAINS_BITS-1)
 #define CHAINHASH_SIZE		(1UL << CHAINHASH_BITS)
 #define __chainhashfn(chain)	hash_long(chain, CHAINHASH_BITS)
-#define chainhashentry(chain)	(chainhash_table + __chainhashfn((chain)))
 
-static struct list_head chainhash_table[CHAINHASH_SIZE];
+static struct list_head __chainhash_table[CHAINHASH_SIZE];
+
+static struct list_head *chainhashentry(unsigned long chain)
+{
+	if (unlikely(!lockdep_initialized))
+		lockdep_init();
+	return __chainhash_table + __chainhashfn(chain);
+}
 
 /*
  * The hash key of the lock dependency chains is a hash itself too:
@@ -3875,7 +3892,7 @@ void lockdep_reset(void)
 	nr_process_chains = 0;
 	debug_locks = 1;
 	for (i = 0; i < CHAINHASH_SIZE; i++)
-		INIT_LIST_HEAD(chainhash_table + i);
+		INIT_LIST_HEAD(__chainhash_table + i);
 	raw_local_irq_restore(flags);
 }
 
@@ -3929,7 +3946,7 @@ void lockdep_free_key_range(void *start,
 	 * Unhash all classes that were created by this module:
 	 */
 	for (i = 0; i < CLASSHASH_SIZE; i++) {
-		head = classhash_table + i;
+		head = get_classhash_table() + i;
 		if (list_empty(head))
 			continue;
 		list_for_each_entry_rcu(class, head, hash_entry) {
@@ -3986,7 +4003,7 @@ void lockdep_reset_lock(struct lockdep_m
 	 */
 	locked = graph_lock();
 	for (i = 0; i < CLASSHASH_SIZE; i++) {
-		head = classhash_table + i;
+		head = get_classhash_table() + i;
 		if (list_empty(head))
 			continue;
 		list_for_each_entry_rcu(class, head, hash_entry) {
@@ -4027,10 +4044,10 @@ void lockdep_init(void)
 		return;
 
 	for (i = 0; i < CLASSHASH_SIZE; i++)
-		INIT_LIST_HEAD(classhash_table + i);
+		INIT_LIST_HEAD(__classhash_table + i);
 
 	for (i = 0; i < CHAINHASH_SIZE; i++)
-		INIT_LIST_HEAD(chainhash_table + i);
+		INIT_LIST_HEAD(__chainhash_table + i);
 
 	lockdep_initialized = 1;
 }
_
