Message-ID: <54E21F10.1040402@cantab.net>
Date: Mon, 16 Feb 2015 16:47:12 +0000
From: David Vrabel <dvrabel@...tab.net>
To: Raghavendra K T <raghavendra.kt@...ux.vnet.ibm.com>,
tglx@...utronix.de, mingo@...hat.com, hpa@...or.com,
peterz@...radead.org, torvalds@...ux-foundation.org,
konrad.wilk@...cle.com, pbonzini@...hat.com
CC: waiman.long@...com, jeremy@...p.org, ak@...ux.intel.com,
a.ryabinin@...sung.com, kvm@...r.kernel.org,
borntraeger@...ibm.com, jasowang@...hat.com, x86@...nel.org,
oleg@...hat.com, linux-kernel@...r.kernel.org,
paul.gortmaker@...driver.com, dave@...olabs.net,
xen-devel@...ts.xenproject.org, davej@...hat.com,
akpm@...ux-foundation.org, paulmck@...ux.vnet.ibm.com,
virtualization@...ts.linux-foundation.org, sasha.levin@...cle.com
Subject: Re: [Xen-devel] [PATCH V5] x86 spinlock: Fix memory corruption on
completing completions
On 15/02/15 17:30, Raghavendra K T wrote:
> --- a/arch/x86/xen/spinlock.c
> +++ b/arch/x86/xen/spinlock.c
> @@ -41,7 +41,7 @@ static u8 zero_stats;
>  static inline void check_zero(void)
>  {
>  	u8 ret;
> -	u8 old = ACCESS_ONCE(zero_stats);
> +	u8 old = READ_ONCE(zero_stats);
>  	if (unlikely(old)) {
>  		ret = cmpxchg(&zero_stats, old, 0);
>  		/* This ensures only one fellow resets the stat */
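
For reference, the two primitives differ roughly as follows (a
simplified sketch; the real definitions in include/linux/compiler.h
also have a barrier()/memcpy fallback for sizes other than 1/2/4/8):

#include <stdint.h>

/* ACCESS_ONCE(): a volatile cast of the whole object. */
#define ACCESS_ONCE(x)	(*(volatile typeof(x) *)&(x))

/* READ_ONCE() dispatches on size and performs one volatile load of
 * the right width, so it stays well-defined for wider types too. */
static inline void __read_once_size(const volatile void *p, void *res,
				    int size)
{
	switch (size) {
	case 1: *(uint8_t  *)res = *(volatile uint8_t  *)p; break;
	case 2: *(uint16_t *)res = *(volatile uint16_t *)p; break;
	case 4: *(uint32_t *)res = *(volatile uint32_t *)p; break;
	case 8: *(uint64_t *)res = *(volatile uint64_t *)p; break;
	}
}

#define READ_ONCE(x)						\
({								\
	union { typeof(x) __val; char __c[1]; } __u;		\
	__read_once_size(&(x), __u.__c, sizeof(x));		\
	__u.__val;						\
})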
> @@ -112,6 +112,7 @@ __visible void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
>  	struct xen_lock_waiting *w = this_cpu_ptr(&lock_waiting);
>  	int cpu = smp_processor_id();
>  	u64 start;
> +	__ticket_t head;
>  	unsigned long flags;
> 
>  	/* If kicker interrupts not initialized yet, just spin */
> @@ -159,11 +160,15 @@ __visible void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
>  	 */
>  	__ticket_enter_slowpath(lock);
> 
> +	/* make sure enter_slowpath, which is atomic does not cross the read */
> +	smp_mb__after_atomic();
> +
>  	/*
>  	 * check again make sure it didn't become free while
>  	 * we weren't looking
>  	 */
> -	if (ACCESS_ONCE(lock->tickets.head) == want) {
> +	head = READ_ONCE(lock->tickets.head);
> +	if (__tickets_equal(head, want)) {
>  		add_stats(TAKEN_SLOW_PICKUP, 1);
>  		goto out;
>  	}
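
The barrier added above follows the usual store-then-recheck pairing:
the waiter must publish its slowpath flag before re-reading head, and
the unlocker bumps head before checking the flag; without both
orderings each side can miss the other's update and the waiter blocks
forever.  A self-contained illustration in C11 atomics (hypothetical
names, not the kernel code; the fences stand in for
smp_mb__after_atomic() and for the full barrier on the unlock side):

#include <stdatomic.h>
#include <stdbool.h>

static _Atomic unsigned char slowpath_flag;	/* ~ TICKET_SLOWPATH_FLAG */
static _Atomic unsigned char head;		/* ~ lock->tickets.head */

/* Waiter: set the flag, then re-check whether the lock became free
 * meanwhile.  Returns true if we can take it and skip blocking. */
static bool waiter_recheck(unsigned char want)
{
	atomic_store_explicit(&slowpath_flag, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb__after_atomic() */
	return atomic_load_explicit(&head, memory_order_relaxed) == want;
}

/* Unlocker: release the lock by bumping head, then check whether a
 * waiter set the flag and therefore needs a kick (an IPI here). */
static bool unlocker_must_kick(void)
{
	atomic_fetch_add_explicit(&head, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);
	return atomic_load_explicit(&slowpath_flag, memory_order_relaxed) != 0;
}

With both fences in place at least one side observes the other: either
the waiter sees the new head and picks the lock up, or the unlocker
sees the flag and sends the kick.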
> @@ -204,8 +209,8 @@ static void xen_unlock_kick(struct arch_spinlock *lock, __ticket_t next)
>  		const struct xen_lock_waiting *w = &per_cpu(lock_waiting, cpu);
> 
>  		/* Make sure we read lock before want */
> -		if (ACCESS_ONCE(w->lock) == lock &&
> -		    ACCESS_ONCE(w->want) == next) {
> +		if (READ_ONCE(w->lock) == lock &&
> +		    READ_ONCE(w->want) == next) {
>  			add_stats(RELEASED_SLOW_KICKED, 1);
>  			xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
>  			break;
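
On this side the "read lock before want" requirement holds because
READ_ONCE() compiles to volatile accesses, which the compiler may not
reorder against each other, and x86 does not reorder loads with loads.
On a weakly ordered architecture the same pattern would want an
explicit read barrier between the two loads, along the lines of (a
sketch, not part of this patch):

	if (READ_ONCE(w->lock) == lock) {
		smp_rmb();	/* order the two reads; reduces to a
				 * compiler barrier on x86 */
		if (READ_ONCE(w->want) == next)
			xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
	}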
Acked-by: David Vrabel <david.vrabel@...rix.com>
Some of the ACCESS_ONCE to READ_ONCE changes are cosmetic, though, and
those are perhaps best left out of a patch destined for stable.
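
To be concrete about the cosmetic ones: for a scalar like zero_stats,

	u8 old = ACCESS_ONCE(zero_stats);
	u8 old = READ_ONCE(zero_stats);

generate the same single-byte load; the two macros only diverge for
non-scalar types, where recent gcc no longer guarantees to honour the
volatile cast that ACCESS_ONCE() relies on.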
David