Message-ID: <CAGXu5jJABwK44o_vp0onCM6ct+QMPoFZgSHbPWVntcCafE6Jzg@mail.gmail.com>
Date: Thu, 8 Sep 2016 12:27:30 -0700
From: Kees Cook <keescook@...omium.org>
To: Sebastian Andrzej Siewior <bigeasy@...utronix.de>
Cc: Andrew Morton <akpm@...ux-foundation.org>,
LKML <linux-kernel@...r.kernel.org>,
Anton Vorontsov <anton@...msg.org>,
Colin Cross <ccross@...roid.com>,
Tony Luck <tony.luck@...el.com>,
Rabin Vincent <rabinv@...s.com>
Subject: Re: [REPOST PATCH 2/2] pstore/core: drop cmpxchg based updates
On Thu, Sep 8, 2016 at 4:48 AM, Sebastian Andrzej Siewior
<bigeasy@...utronix.de> wrote:
> I have an FPGA behind PCIe here which exports SRAM that I use for
> pstore. Now it seems that the FPGA no longer supports cmpxchg-based
> updates: it writes back 0xff…ff and returns the same. This leads to a
> crash during a crash, rendering pstore useless. Since I doubt there is
> much benefit from using cmpxchg() here, I am dropping the atomic
> access and using the spinlock-based version.
>
> Cc: Anton Vorontsov <anton@...msg.org>
> Cc: Colin Cross <ccross@...roid.com>
> Cc: Kees Cook <keescook@...omium.org>
> Cc: Tony Luck <tony.luck@...el.com>
> Cc: Rabin Vincent <rabinv@...s.com>
> Tested-by: Rabin Vincent <rabinv@...s.com>
> Signed-off-by: Sebastian Andrzej Siewior <bigeasy@...utronix.de>
> ---
> Also fixes this for ARMv7 as Rabin Vincent pointed out:
> https://lkml.kernel.org/g/CABXOdTfT7xMfiBvRuUS1hsVs=q5q2wY1x1Z8oCyyJNFckM0g0A@mail.gmail.com
Thanks! I've applied this for -next.
-Kees
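
For reference, the spinlock-based helper the write path now always uses
already lives in fs/pstore/ram_core.c; below is a simplified sketch of
the start-pointer variant (not the verbatim in-tree code, details may
differ). It does the same read-modify-write as the removed cmpxchg()
loop, but serialized under buffer_lock so the backing memory only ever
sees plain loads and stores:

static DEFINE_RAW_SPINLOCK(buffer_lock);

/* increase and wrap the start pointer, returning the old value */
static size_t buffer_start_add_locked(struct persistent_ram_zone *prz,
                                      size_t a)
{
	size_t old;
	size_t new;
	unsigned long flags;

	/* serialize with other writers instead of relying on cmpxchg() */
	raw_spin_lock_irqsave(&buffer_lock, flags);

	old = atomic_read(&prz->buffer->start);
	new = old + a;
	while (unlikely(new >= prz->buffer_size))
		new -= prz->buffer_size;
	/* plain store: no atomic RMW cycle ever reaches the backing SRAM */
	atomic_set(&prz->buffer->start, new);

	raw_spin_unlock_irqrestore(&buffer_lock, flags);

	return old;
}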
>
> fs/pstore/ram_core.c | 43 ++-----------------------------------------
> 1 file changed, 2 insertions(+), 41 deletions(-)
>
> diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c
> index 76c3f80efdfa..4bae54bb61cd 100644
> --- a/fs/pstore/ram_core.c
> +++ b/fs/pstore/ram_core.c
> @@ -47,39 +47,6 @@ static inline size_t buffer_start(struct persistent_ram_zone *prz)
> return atomic_read(&prz->buffer->start);
> }
>
> -/* increase and wrap the start pointer, returning the old value */
> -static size_t buffer_start_add_atomic(struct persistent_ram_zone *prz, size_t a)
> -{
> - int old;
> - int new;
> -
> - do {
> - old = atomic_read(&prz->buffer->start);
> - new = old + a;
> - while (unlikely(new >= prz->buffer_size))
> - new -= prz->buffer_size;
> - } while (atomic_cmpxchg(&prz->buffer->start, old, new) != old);
> -
> - return old;
> -}
> -
> -/* increase the size counter until it hits the max size */
> -static void buffer_size_add_atomic(struct persistent_ram_zone *prz, size_t a)
> -{
> - size_t old;
> - size_t new;
> -
> - if (atomic_read(&prz->buffer->size) == prz->buffer_size)
> - return;
> -
> - do {
> - old = atomic_read(&prz->buffer->size);
> - new = old + a;
> - if (new > prz->buffer_size)
> - new = prz->buffer_size;
> - } while (atomic_cmpxchg(&prz->buffer->size, old, new) != old);
> -}
> -
> static DEFINE_RAW_SPINLOCK(buffer_lock);
>
> /* increase and wrap the start pointer, returning the old value */
> @@ -124,9 +91,6 @@ static void buffer_size_add_locked(struct persistent_ram_zone *prz, size_t a)
> raw_spin_unlock_irqrestore(&buffer_lock, flags);
> }
>
> -static size_t (*buffer_start_add)(struct persistent_ram_zone *, size_t) = buffer_start_add_atomic;
> -static void (*buffer_size_add)(struct persistent_ram_zone *, size_t) = buffer_size_add_atomic;
> -
> static void notrace persistent_ram_encode_rs8(struct persistent_ram_zone *prz,
> uint8_t *data, size_t len, uint8_t *ecc)
> {
> @@ -338,9 +302,9 @@ int notrace persistent_ram_write(struct persistent_ram_zone *prz,
> c = prz->buffer_size;
> }
>
> - buffer_size_add(prz, c);
> + buffer_size_add_locked(prz, c);
>
> - start = buffer_start_add(prz, c);
> + start = buffer_start_add_locked(prz, c);
>
> rem = prz->buffer_size - start;
> if (unlikely(rem < c)) {
> @@ -426,9 +390,6 @@ static void *persistent_ram_iomap(phys_addr_t start, size_t size,
> return NULL;
> }
>
> - buffer_start_add = buffer_start_add_locked;
> - buffer_size_add = buffer_size_add_locked;
> -
> if (memtype)
> va = ioremap(start, size);
> else
> --
> 2.9.3
>
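A note on the last hunk above: once buffer_start_add_locked() has
reserved `c` bytes at `start`, a write that crosses the end of the ring
buffer is split into two copies. A minimal stand-alone illustration of
that wrap handling follows; ring_write() and its plain memcpy() calls
are hypothetical stand-ins for the kernel's persistent_ram_update()
path, not the in-tree API:

#include <stddef.h>
#include <string.h>

static void ring_write(char *buf, size_t buf_size,
                       const char *s, size_t c, size_t start)
{
	size_t rem = buf_size - start;	/* room before the end of the ring */

	if (rem < c) {			/* reservation wraps around */
		memcpy(buf + start, s, rem);	/* fill up to the end */
		s += rem;
		c -= rem;
		start = 0;			/* continue from the front */
	}
	memcpy(buf + start, s, c);	/* remaining (or entire) chunk */
}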
--
Kees Cook
Nexus Security