Message-Id: <20170804231551.735075501@linuxfoundation.org>
Date: Fri, 4 Aug 2017 16:14:30 -0700
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
stable@...r.kernel.org, Joel Fernandes <joelaf@...gle.com>,
Kees Cook <keescook@...omium.org>, Leo Yan <leo.yan@...aro.org>
Subject: [PATCH 4.9 002/105] pstore: Make spinlock per zone instead of global
4.9-stable review patch. If anyone has any objections, please let me know.
------------------
From: Joel Fernandes <joelaf@...gle.com>
commit 109704492ef637956265ec2eb72ae7b3b39eb6f4 upstream.
Currently pstore has a single global spinlock for all zones. Since the zones
are independent and modify different areas of memory, there is no need for a
global lock, so switch to a per-zone lock as introduced here. In addition, a
later patch introduces a FTRACE_PER_CPU flag for ramoops's ftrace use-case;
it splits the ftrace memory area into one zone per CPU, which removes the
need for locking there entirely. In preparation for this, make the locking
optional.
Signed-off-by: Joel Fernandes <joelaf@...gle.com>
[kees: updated commit message]
Signed-off-by: Kees Cook <keescook@...omium.org>
Cc: Leo Yan <leo.yan@...aro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
---
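[Not part of the patch: a minimal user-space sketch of the per-zone locking
pattern described above. It uses POSIX spinlocks in place of the kernel's
raw_spinlock_t, and the names (struct zone, zone_start_add) are made up for
illustration; it only mirrors the idea of buffer_start_add() in
fs/pstore/ram_core.c, it is not kernel code. Build with: gcc -pthread.]

/* Illustration only: each zone carries its own lock, so writers to
 * different zones never contend with each other. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct zone {
	atomic_size_t start;		/* wrapping write position */
	size_t size;			/* capacity of this zone */
	pthread_spinlock_t lock;	/* per-zone, not global */
};

/* Advance and wrap the start pointer, returning the old value,
 * loosely mirroring buffer_start_add() in fs/pstore/ram_core.c. */
static size_t zone_start_add(struct zone *z, size_t a)
{
	size_t old, new;

	pthread_spin_lock(&z->lock);
	old = atomic_load(&z->start);
	new = old + a;
	while (new >= z->size)
		new -= z->size;
	atomic_store(&z->start, new);
	pthread_spin_unlock(&z->lock);

	return old;
}

int main(void)
{
	struct zone z = { .size = 1024 };

	pthread_spin_init(&z.lock, PTHREAD_PROCESS_PRIVATE);
	printf("old start = %zu\n", zone_start_add(&z, 100));
	pthread_spin_destroy(&z.lock);
	return 0;
}

[The design point is that the lock protects only the zone's own buffer
metadata, so moving it into struct persistent_ram_zone removes cross-zone
contention and sets up the later per-CPU ftrace zones, where locking can be
skipped altogether.]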
fs/pstore/ram_core.c | 11 +++++------
include/linux/pstore_ram.h | 1 +
2 files changed, 6 insertions(+), 6 deletions(-)
--- a/fs/pstore/ram_core.c
+++ b/fs/pstore/ram_core.c
@@ -48,8 +48,6 @@ static inline size_t buffer_start(struct
return atomic_read(&prz->buffer->start);
}
-static DEFINE_RAW_SPINLOCK(buffer_lock);
-
/* increase and wrap the start pointer, returning the old value */
static size_t buffer_start_add(struct persistent_ram_zone *prz, size_t a)
{
@@ -57,7 +55,7 @@ static size_t buffer_start_add(struct pe
int new;
unsigned long flags;
- raw_spin_lock_irqsave(&buffer_lock, flags);
+ raw_spin_lock_irqsave(&prz->buffer_lock, flags);
old = atomic_read(&prz->buffer->start);
new = old + a;
@@ -65,7 +63,7 @@ static size_t buffer_start_add(struct pe
new -= prz->buffer_size;
atomic_set(&prz->buffer->start, new);
- raw_spin_unlock_irqrestore(&buffer_lock, flags);
+ raw_spin_unlock_irqrestore(&prz->buffer_lock, flags);
return old;
}
@@ -77,7 +75,7 @@ static void buffer_size_add(struct persi
size_t new;
unsigned long flags;
- raw_spin_lock_irqsave(&buffer_lock, flags);
+ raw_spin_lock_irqsave(&prz->buffer_lock, flags);
old = atomic_read(&prz->buffer->size);
if (old == prz->buffer_size)
@@ -89,7 +87,7 @@ static void buffer_size_add(struct persi
atomic_set(&prz->buffer->size, new);
exit:
- raw_spin_unlock_irqrestore(&buffer_lock, flags);
+ raw_spin_unlock_irqrestore(&prz->buffer_lock, flags);
}
static void notrace persistent_ram_encode_rs8(struct persistent_ram_zone *prz,
@@ -493,6 +491,7 @@ static int persistent_ram_post_init(stru
prz->buffer->sig = sig;
persistent_ram_zap(prz);
+ prz->buffer_lock = __RAW_SPIN_LOCK_UNLOCKED(buffer_lock);
return 0;
}
--- a/include/linux/pstore_ram.h
+++ b/include/linux/pstore_ram.h
@@ -40,6 +40,7 @@ struct persistent_ram_zone {
void *vaddr;
struct persistent_ram_buffer *buffer;
size_t buffer_size;
+ raw_spinlock_t buffer_lock;
/* ECC correction */
char *par_buffer;