Message-Id: <1501207574-24958-1-git-send-email-leo.yan@linaro.org>
Date:   Fri, 28 Jul 2017 10:06:14 +0800
From:   Leo Yan <leo.yan@...aro.org>
To:     linux-kernel@...r.kernel.org, stable@...r.kernel.org,
        Willy Tarreau <w@....eu>,
        Greg Kroah-Hartman <gregkh@...uxfoundation.org>
Cc:     Joel Fernandes <joelaf@...gle.com>,
        Kees Cook <keescook@...omium.org>, Leo Yan <leo.yan@...aro.org>
Subject: [PATCH 3.10] pstore: Make spinlock per zone instead of global

From: Joel Fernandes <joelaf@...gle.com>

Currently pstore has a global spinlock for all zones. Since the zones
are independent and modify different areas of memory, there's no need
for a global lock; use a per-zone lock instead, as introduced here.
Also, when ramoops's ftrace use-case later gains a FTRACE_PER_CPU
flag, which splits the ftrace memory area into a single zone per CPU,
it will eliminate the need for locking. In preparation for this, make
the locking optional.
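
As an illustration only (a minimal sketch with hypothetical names, not
the actual pstore code; the real change is in the diff below), the
idea is to move the lock from file scope into the per-zone structure:

  #include <linux/spinlock.h>

  /* Before: one file-scope lock serialized every zone. */
  static DEFINE_RAW_SPINLOCK(global_lock);

  /* After: each zone carries its own lock, so writers to
   * independent zones never contend on a shared lock.
   */
  struct example_zone {
          void *buffer;
          raw_spinlock_t lock;    /* __RAW_SPIN_LOCK_UNLOCKED() at init */
  };

  static void example_zone_write(struct example_zone *z)
  {
          unsigned long flags;

          raw_spin_lock_irqsave(&z->lock, flags);  /* was: &global_lock */
          /* ... modify z->buffer ... */
          raw_spin_unlock_irqrestore(&z->lock, flags);
  }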

Supplement to the commit log (Leo):

This patch has a second effect: it fixes a deadlock that occurs when
the ftrace and console logs are enabled together in ramoops. In the
old code, the ftrace buffer and the console buffer in ramoops used the
same raw spinlock, "buffer_lock". In the case below, the kernel first
acquires the lock for the console buffer; when it leaves console
recording it calls _raw_spin_unlock_irqrestore(), which is itself
covered by the function tracer, so before releasing the spinlock it
enters the tracing path and tries to acquire the same spinlock again.
The result is spinlock recursion, and the system hangs.

This patch uses a separate lock for every buffer, so in the same flow
the console buffer and the ftrace buffer each take their own dedicated
lock; this effectively fixes the lock recursion issue.

[   65.103905] hrtimer: interrupt took 2759375 ns
[   65.108721] BUG: spinlock recursion on CPU#0, kschedfreq:0/1246
[   65.108760]  lock: buffer_lock+0x0/0x38, .magic: dead4ead, .owner: kschedfreq:0/1246, .owner_cpu: 0
[   65.108779] CPU: 0 PID: 1246 Comm: kschedfreq:0 Not tainted 4.4.74-07294-g5c996a9-dirty #130
[   65.108786] Hardware name: HiKey960 (DT)
[   65.108794] Call trace:
[   65.108820] [<ffffff800808ad64>] dump_backtrace+0x0/0x1e0
[   65.108835] [<ffffff800808af64>] show_stack+0x20/0x28
[   65.108857] [<ffffff80084ed4ec>] dump_stack+0xa8/0xe0
[   65.108872] [<ffffff800813c934>] spin_dump+0x88/0xac
[   65.108882] [<ffffff800813c988>] spin_bug+0x30/0x3c
[   65.108894] [<ffffff800813cb98>] do_raw_spin_lock+0xd0/0x1b8
[   65.108916] [<ffffff8008cba444>] _raw_spin_lock_irqsave+0x58/0x68
[   65.108935] [<ffffff8008453aec>] buffer_size_add.isra.4+0x30/0x78
[   65.108948] [<ffffff8008453f44>] persistent_ram_write+0x58/0x150
[   65.108961] [<ffffff8008452ca0>] ramoops_pstore_write_buf+0x14c/0x1d8
[   65.108974] [<ffffff8008452648>] pstore_ftrace_call+0x80/0xb4
[   65.108991] [<ffffff80081a9404>] ftrace_ops_no_ops+0xb8/0x154
[   65.109008] [<ffffff8008092e9c>] ftrace_graph_call+0x0/0x14
[   65.109023] [<ffffff8008cba594>] _raw_spin_unlock_irqrestore+0x20/0x90
[   65.109036] [<ffffff8008453b24>] buffer_size_add.isra.4+0x68/0x78
[   65.109048] [<ffffff8008453f44>] persistent_ram_write+0x58/0x150
[   65.109061] [<ffffff8008452ca0>] ramoops_pstore_write_buf+0x14c/0x1d8
[   65.109073] [<ffffff80084517c8>] pstore_write_compat+0x60/0x6c
[   65.109086] [<ffffff80084519d0>] pstore_console_write+0xa8/0xf4
[   65.109104] [<ffffff80081442e0>] call_console_drivers.constprop.21+0x1bc/0x1ec
[   65.109117] [<ffffff8008145488>] console_unlock+0x3a8/0x500
[   65.109129] [<ffffff8008145900>] vprintk_emit+0x320/0x62c
[   65.109142] [<ffffff8008145db0>] vprintk_default+0x48/0x54
[   65.109161] [<ffffff80081e3bec>] printk+0xa8/0xb4
[   65.109178] [<ffffff80081602a8>] hrtimer_interrupt+0x1f0/0x1f4
[   65.109197] [<ffffff80088eefd4>] arch_timer_handler_phys+0x3c/0x48
[   65.109211] [<ffffff800814bd00>] handle_percpu_devid_irq+0xd0/0x3c0
[   65.109225] [<ffffff800814718c>] generic_handle_irq+0x34/0x4c
[   65.109237] [<ffffff8008147234>] __handle_domain_irq+0x90/0xf8
[   65.109250] [<ffffff800808253c>] gic_handle_irq+0x5c/0xa8

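Distilled from the trace above (a simplified sketch of the call flow;
frame names abbreviated from the real backtrace):

  console write
    -> buffer_size_add()                /* takes shared buffer_lock */
      -> _raw_spin_unlock_irqrestore()  /* itself traced by ftrace */
        -> pstore_ftrace_call()         /* records into ftrace zone */
          -> buffer_size_add()          /* takes buffer_lock again:
                                           spinlock recursion */

With per-zone locks, the inner buffer_size_add() takes the ftrace
zone's lock rather than the console zone's, so the recursion is gone.
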
Fixes: 0405a5cec340 ("pstore/ram: avoid atomic accesses for ioremapped regions")
Signed-off-by: Joel Fernandes <joelaf@...gle.com>
[kees: updated commit message]
Signed-off-by: Kees Cook <keescook@...omium.org>
Signed-off-by: Leo Yan <leo.yan@...aro.org>
---
 fs/pstore/ram_core.c       | 11 +++++------
 include/linux/pstore_ram.h |  1 +
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c
index 7df456d..ac55707 100644
--- a/fs/pstore/ram_core.c
+++ b/fs/pstore/ram_core.c
@@ -45,8 +45,6 @@ static inline size_t buffer_start(struct persistent_ram_zone *prz)
 	return atomic_read(&prz->buffer->start);
 }
 
-static DEFINE_RAW_SPINLOCK(buffer_lock);
-
 /* increase and wrap the start pointer, returning the old value */
 static size_t buffer_start_add(struct persistent_ram_zone *prz, size_t a)
 {
@@ -54,7 +52,7 @@ static size_t buffer_start_add(struct persistent_ram_zone *prz, size_t a)
 	int new;
 	unsigned long flags;
 
-	raw_spin_lock_irqsave(&buffer_lock, flags);
+	raw_spin_lock_irqsave(&prz->buffer_lock, flags);
 
 	old = atomic_read(&prz->buffer->start);
 	new = old + a;
@@ -62,7 +60,7 @@ static size_t buffer_start_add(struct persistent_ram_zone *prz, size_t a)
 		new -= prz->buffer_size;
 	atomic_set(&prz->buffer->start, new);
 
-	raw_spin_unlock_irqrestore(&buffer_lock, flags);
+	raw_spin_unlock_irqrestore(&prz->buffer_lock, flags);
 
 	return old;
 }
@@ -74,7 +72,7 @@ static void buffer_size_add(struct persistent_ram_zone *prz, size_t a)
 	size_t new;
 	unsigned long flags;
 
-	raw_spin_lock_irqsave(&buffer_lock, flags);
+	raw_spin_lock_irqsave(&prz->buffer_lock, flags);
 
 	old = atomic_read(&prz->buffer->size);
 	if (old == prz->buffer_size)
@@ -86,7 +84,7 @@ static void buffer_size_add(struct persistent_ram_zone *prz, size_t a)
 	atomic_set(&prz->buffer->size, new);
 
 exit:
-	raw_spin_unlock_irqrestore(&buffer_lock, flags);
+	raw_spin_unlock_irqrestore(&prz->buffer_lock, flags);
 }
 
 static void notrace persistent_ram_encode_rs8(struct persistent_ram_zone *prz,
@@ -452,6 +450,7 @@ static int persistent_ram_post_init(struct persistent_ram_zone *prz, u32 sig,
 
 	prz->buffer->sig = sig;
 	persistent_ram_zap(prz);
+	prz->buffer_lock = __RAW_SPIN_LOCK_UNLOCKED(buffer_lock);
 
 	return 0;
 }
diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h
index 4af3fdc..4bfcd43 100644
--- a/include/linux/pstore_ram.h
+++ b/include/linux/pstore_ram.h
@@ -39,6 +39,7 @@ struct persistent_ram_zone {
 	void *vaddr;
 	struct persistent_ram_buffer *buffer;
 	size_t buffer_size;
+	raw_spinlock_t buffer_lock;
 
 	/* ECC correction */
 	char *par_buffer;
-- 
2.7.4
