Message-Id: <20160714162355.GW7094@linux.vnet.ibm.com>
Date: Thu, 14 Jul 2016 09:23:55 -0700
From: "Paul E. McKenney" <paulmck@...ux.vnet.ibm.com>
To: Peter Zijlstra <peterz@...radead.org>
Cc: Tejun Heo <tj@...nel.org>, John Stultz <john.stultz@...aro.org>,
Ingo Molnar <mingo@...hat.com>,
lkml <linux-kernel@...r.kernel.org>,
Dmitry Shmidt <dimitrysh@...gle.com>,
Rom Lemarchand <romlem@...gle.com>,
Colin Cross <ccross@...gle.com>, Todd Kjos <tkjos@...gle.com>,
Oleg Nesterov <oleg@...hat.com>
Subject: Re: Severe performance regression w/ 4.4+ on Android due to cgroup
locking changes
On Thu, Jul 14, 2016 at 03:18:09PM +0200, Peter Zijlstra wrote:
> On Wed, Jul 13, 2016 at 10:51:02PM +0200, Peter Zijlstra wrote:
> > So, IIRC, the trade-off is a full memory barrier in read_lock and
> > read_unlock() vs sync_sched() in write.
> >
> > Full memory barriers are expensive and while the combined cost might
> > well exceed the cost of the sync_sched() it doesn't suffer the latency
> > issues.
> >
> > Not sure if we can frob the two in a single codebase, but I can have a
> > poke if Oleg or Paul doesn't beat me to it.
>
> OK, not too horrible if I say so myself :-)
>
> The below is a compile tested only first draft so far. I'll go give it
> some runtime next.
Hmmm... How does this handle the following sequence of events for
the case where we are not biased towards the reader?
o The per-CPU rwsem is set up with RCU_NONE and readers_slow
(as opposed to readers_block). The rcu_sync ->gp_state is
GP_PENDING, which means that rcu_sync_is_idle() will always
return true.
o Task A on CPU 0 runs percpu_down_read() to completion, and remains
within the critical section. CPU 0's ->refcount is therefore 1.
o Task B on CPU 1 does percpu_down_write(), which write-acquires
->rw_sem, does rcu_sync_enter() (which is a no-op due to
RCU_NONE), sets ->state to readers_block, and is just about to
wait for readers. That wait first invokes readers_active_check(),
which starts summing the ->refcount for CPUs 0, 1, and 2,
obtaining the value 1 so far.
o Task C on CPU 2 enters percpu_down_read(), disables preemption,
increments CPU 2's ->refcount, sees rcu_sync_is_idle() return
true (so skips __percpu_down_read()), enables preemption, and
enters its critical section.
o Task C migrates to CPU 3 and invokes percpu_up_read(), which
disables preemption, sees rcu_sync_is_idle() return true, calls
__this_cpu_dec() on CPU 3's ->refcount, and enables preemption.
The value of CPU 3's ->refcount is thus (unsigned int)-1.
o Task B on CPU 1 continues execution in readers_active_check(), with
the full sum being zero.
So it looks to me like we have Task B as a writer at the same time that
Task A is a reader, which would not be so good.
So what am I missing here?
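In case it helps to see the arithmetic, below is a toy sequential replay
of that interleaving: just a userspace sketch I put together, with a plain
array standing in for the per-CPU ->refcount and the steps executed in the
order listed above. It is not the kernel code, only the counter arithmetic.

/* Toy replay of the interleaving above -- plain userspace C, not kernel code. */
#include <stdio.h>

static unsigned int refcount[4];	/* stand-in for the per-CPU ->refcount */

int main(void)
{
	unsigned int sum = 0;

	refcount[0] = 1;	/* Task A: still inside its read-side critical section */

	sum += refcount[0];	/* Task B: readers_active_check() sums CPU 0 ... */
	sum += refcount[1];	/* ... CPU 1 ... */
	sum += refcount[2];	/* ... CPU 2; partial sum is now 1 */

	refcount[2]++;		/* Task C: percpu_down_read() fast path on CPU 2 */
	refcount[3]--;		/* Task C migrates, percpu_up_read() on CPU 3: (unsigned int)-1 */

	sum += refcount[3];	/* Task B: sums CPU 3; the total wraps back to 0 */

	printf("writer's sum = %u, but CPU 0 still holds %u\n", sum, refcount[0]);
	return 0;
}

The final printf reports a sum of zero even though CPU 0's counter is still 1,
which is the point of the scenario above.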
And a couple of checkpatch nits below. Yes, I had to apply the patch to
figure out what it was doing. ;-)
Thanx, Paul
> ---
> fs/super.c | 3 +-
> include/linux/percpu-rwsem.h | 96 +++++++++++++++--
> include/linux/rcu_sync.h | 2 +-
> kernel/locking/percpu-rwsem.c | 243 +++++++++++++++++++++++++-----------------
> kernel/rcu/sync.c | 15 +++
> 5 files changed, 249 insertions(+), 110 deletions(-)
>
> diff --git a/fs/super.c b/fs/super.c
> index d78b9847e6cb..8ff18af7703f 100644
> --- a/fs/super.c
> +++ b/fs/super.c
> @@ -195,7 +195,8 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags)
> for (i = 0; i < SB_FREEZE_LEVELS; i++) {
> if (__percpu_init_rwsem(&s->s_writers.rw_sem[i],
> sb_writers_name[i],
> - &type->s_writers_key[i]))
> + &type->s_writers_key[i],
> + PERCPU_RWSEM_READER))
> goto fail;
> }
> init_waitqueue_head(&s->s_writers.wait_unfrozen);
> diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h
> index c2fa3ecb0dce..5e1c2b029e3a 100644
> --- a/include/linux/percpu-rwsem.h
> +++ b/include/linux/percpu-rwsem.h
> @@ -10,29 +10,107 @@
>
> struct percpu_rw_semaphore {
> struct rcu_sync rss;
> - unsigned int __percpu *fast_read_ctr;
> + unsigned int __percpu *refcount;
> struct rw_semaphore rw_sem;
> - atomic_t slow_read_ctr;
> - wait_queue_head_t write_waitq;
> + wait_queue_head_t writer;
> + int state;
> };
>
> -extern void percpu_down_read(struct percpu_rw_semaphore *);
> -extern int percpu_down_read_trylock(struct percpu_rw_semaphore *);
> -extern void percpu_up_read(struct percpu_rw_semaphore *);
> +extern void __percpu_down_read(struct percpu_rw_semaphore *);
> +extern int __percpu_down_read_trylock(struct percpu_rw_semaphore *);
> +extern void __percpu_up_read(struct percpu_rw_semaphore *);
> +
> +static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
> +{
> + might_sleep();
> +
> + rwsem_acquire_read(&sem->rw_sem.dep_map, 0, 0, _RET_IP_);
> +
> + preempt_disable();
> + /*
> + * We are in an RCU-sched read-side critical section, so the writer
> + * cannot both change sem->state from readers_fast and start checking
> + * counters while we are here. So if we see !sem->state, we know that
> + * the writer won't be checking until we're past the preempt_enable()
> + * and that once the synchronize_sched() is done, the writer will see
> + * anything we did within this RCU-sched read-side critical section.
> + */
> + __this_cpu_inc(*sem->refcount);
> + if (unlikely(!rcu_sync_is_idle(&sem->rss)))
> + __percpu_down_read(sem); /* Unconditional memory barrier */
> + preempt_enable();
> + /*
> + * The barrier() from preempt_enable() prevents the compiler from
> + * bleeding the critical section out.
> + */
> +}
> +
> +static inline int percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
> +{
> + int ret = 1;
> +
> + preempt_disable();
> + /*
> + * Same as in percpu_down_read().
> + */
> + __this_cpu_inc(*sem->refcount);
> + if (unlikely(!rcu_sync_is_idle(&sem->rss)))
> + ret = __percpu_down_read_trylock(sem);
> + preempt_enable();
> + /*
> + * The barrier() from preempt_enable() prevents the compiler from
> + * bleeding the critical section out.
> + */
> +
> + if (ret)
> + rwsem_acquire_read(&sem->rw_sem.dep_map, 0, 1, _RET_IP_);
> +
> + return ret;
> +}
> +
> +static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
> +{
> + /*
> + * The barrier() in preempt_disable() prevents the compiler from
> + * bleeding the critical section out.
> + */
> + preempt_disable();
> + /*
> + * Same as in percpu_down_read().
> + */
> + if (likely(rcu_sync_is_idle(&sem->rss)))
> + __this_cpu_dec(*sem->refcount);
> + else
> + __percpu_up_read(sem); /* Unconditional memory barrier */
> + preempt_enable();
> +
> + rwsem_release(&sem->rw_sem.dep_map, 1, _RET_IP_);
> +}
>
> extern void percpu_down_write(struct percpu_rw_semaphore *);
> extern void percpu_up_write(struct percpu_rw_semaphore *);
>
> +enum percpu_rwsem_bias { PERCPU_RWSEM_READER, PERCPU_RWSEM_WRITER };
> +
> extern int __percpu_init_rwsem(struct percpu_rw_semaphore *,
> - const char *, struct lock_class_key *);
> + const char *, struct lock_class_key *,
> + enum percpu_rwsem_bias bias);
> +
> extern void percpu_free_rwsem(struct percpu_rw_semaphore *);
>
> -#define percpu_init_rwsem(brw) \
> +#define percpu_init_rwsem(sem) \
> ({ \
> static struct lock_class_key rwsem_key; \
> - __percpu_init_rwsem(brw, #brw, &rwsem_key); \
> + __percpu_init_rwsem(sem, #sem, &rwsem_key, \
> + PERCPU_RWSEM_READER); \
> })
>
> +#define percpu_init_rwsem_writer(sem) \
> +({ \
> + static struct lock_class_key rwsem_key; \
> + __percpu_init_rwsem(sem, #sem, &rwsem_key, \
> + PERCPU_RWSEM_WRITER); \
> +})
>
> #define percpu_rwsem_is_held(sem) lockdep_is_held(&(sem)->rw_sem)
>
> diff --git a/include/linux/rcu_sync.h b/include/linux/rcu_sync.h
> index a63a33e6196e..e556baaf785e 100644
> --- a/include/linux/rcu_sync.h
> +++ b/include/linux/rcu_sync.h
> @@ -26,7 +26,7 @@
> #include <linux/wait.h>
> #include <linux/rcupdate.h>
>
> -enum rcu_sync_type { RCU_SYNC, RCU_SCHED_SYNC, RCU_BH_SYNC };
> +enum rcu_sync_type { RCU_SYNC, RCU_SCHED_SYNC, RCU_BH_SYNC, RCU_NONE };
>
> /* Structure to mediate between updaters and fastpath-using readers. */
> struct rcu_sync {
> diff --git a/kernel/locking/percpu-rwsem.c b/kernel/locking/percpu-rwsem.c
> index bec0b647f9cc..be37c7732b54 100644
> --- a/kernel/locking/percpu-rwsem.c
> +++ b/kernel/locking/percpu-rwsem.c
> @@ -8,152 +8,197 @@
> #include <linux/sched.h>
> #include <linux/errno.h>
>
> -int __percpu_init_rwsem(struct percpu_rw_semaphore *brw,
> - const char *name, struct lock_class_key *rwsem_key)
> +enum { readers_slow, readers_block };
> +
> +int __percpu_init_rwsem(struct percpu_rw_semaphore *sem,
> + const char *name, struct lock_class_key *rwsem_key,
> + enum percpu_rwsem_bias bias)
> {
> - brw->fast_read_ctr = alloc_percpu(int);
> - if (unlikely(!brw->fast_read_ctr))
> + sem->refcount = alloc_percpu(int);
> + if (unlikely(!sem->refcount))
> return -ENOMEM;
>
> /* ->rw_sem represents the whole percpu_rw_semaphore for lockdep */
> - __init_rwsem(&brw->rw_sem, name, rwsem_key);
> - rcu_sync_init(&brw->rss, RCU_SCHED_SYNC);
> - atomic_set(&brw->slow_read_ctr, 0);
> - init_waitqueue_head(&brw->write_waitq);
> + rcu_sync_init(&sem->rss, bias == PERCPU_RWSEM_READER ?
> + RCU_SCHED_SYNC :
Whitespace complaint on prior two lines.
> + RCU_NONE);
> + __init_rwsem(&sem->rw_sem, name, rwsem_key);
> + init_waitqueue_head(&sem->writer);
> + sem->state = readers_slow;
> return 0;
> }
> EXPORT_SYMBOL_GPL(__percpu_init_rwsem);
>
> -void percpu_free_rwsem(struct percpu_rw_semaphore *brw)
> +void percpu_free_rwsem(struct percpu_rw_semaphore *sem)
> {
> /*
> * XXX: temporary kludge. The error path in alloc_super()
> * assumes that percpu_free_rwsem() is safe after kzalloc().
> */
> - if (!brw->fast_read_ctr)
> + if (!sem->refcount)
> return;
>
> - rcu_sync_dtor(&brw->rss);
> - free_percpu(brw->fast_read_ctr);
> - brw->fast_read_ctr = NULL; /* catch use after free bugs */
> + rcu_sync_dtor(&sem->rss);
> + free_percpu(sem->refcount);
> + sem->refcount = NULL; /* catch use after free bugs */
> }
> EXPORT_SYMBOL_GPL(percpu_free_rwsem);
>
> -/*
> - * This is the fast-path for down_read/up_read. If it succeeds we rely
> - * on the barriers provided by rcu_sync_enter/exit; see the comments in
> - * percpu_down_write() and percpu_up_write().
> - *
> - * If this helper fails the callers rely on the normal rw_semaphore and
> - * atomic_dec_and_test(), so in this case we have the necessary barriers.
> - */
> -static bool update_fast_ctr(struct percpu_rw_semaphore *brw, unsigned int val)
> +void __percpu_down_read(struct percpu_rw_semaphore *sem)
> {
> - bool success;
> + /*
> + * Due to having preemption disabled the decrement happens on
> + * the same CPU as the increment, avoiding the
> + * increment-on-one-CPU-and-decrement-on-another problem.
> + *
> + * And yes, if the reader misses the writer's assignment of
> + * readers_block to sem->state, then the writer is
> + * guaranteed to see the reader's increment. Conversely, any
> + * readers that increment their sem->refcount after the
> + * writer looks are guaranteed to see the readers_block value,
> + * which in turn means that they are guaranteed to immediately
> + * decrement their sem->refcount, so that it doesn't matter
> + * that the writer missed them.
> + */
>
> - preempt_disable();
> - success = rcu_sync_is_idle(&brw->rss);
> - if (likely(success))
> - __this_cpu_add(*brw->fast_read_ctr, val);
> - preempt_enable();
> + smp_mb(); /* A matches D */
>
> - return success;
> -}
> + /*
> + * If !readers_block the critical section starts here, matched by the
> + * release in percpu_up_write().
> + */
> + if (likely(smp_load_acquire(&sem->state) != readers_block))
> + return;
>
> -/*
> - * Like the normal down_read() this is not recursive, the writer can
> - * come after the first percpu_down_read() and create the deadlock.
> - *
> - * Note: returns with lock_is_held(brw->rw_sem) == T for lockdep,
> - * percpu_up_read() does rwsem_release(). This pairs with the usage
> - * of ->rw_sem in percpu_down/up_write().
> - */
> -void percpu_down_read(struct percpu_rw_semaphore *brw)
> -{
> - might_sleep();
> - rwsem_acquire_read(&brw->rw_sem.dep_map, 0, 0, _RET_IP_);
> + /*
> + * Per the above comment; we still have preemption disabled and
> + * will thus decrement on the same CPU as we incremented.
> + */
> + __percpu_up_read(sem);
>
> - if (likely(update_fast_ctr(brw, +1)))
> - return;
> + /*
> + * We either call schedule() in the wait, or we'll fall through
> + * and reschedule on the preempt_enable() in percpu_down_read().
> + */
> + preempt_enable_no_resched();
> +
> + /*
> + * Avoid lockdep for the down/up_read(); we already have them.
> + */
> + __down_read(&sem->rw_sem);
> + __this_cpu_inc(*sem->refcount);
> + __up_read(&sem->rw_sem);
>
> - /* Avoid rwsem_acquire_read() and rwsem_release() */
> - __down_read(&brw->rw_sem);
> - atomic_inc(&brw->slow_read_ctr);
> - __up_read(&brw->rw_sem);
> + preempt_disable();
> }
> -EXPORT_SYMBOL_GPL(percpu_down_read);
> +EXPORT_SYMBOL_GPL(__percpu_down_read);
>
> -int percpu_down_read_trylock(struct percpu_rw_semaphore *brw)
> +int __percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
> {
> - if (unlikely(!update_fast_ctr(brw, +1))) {
> - if (!__down_read_trylock(&brw->rw_sem))
> - return 0;
> - atomic_inc(&brw->slow_read_ctr);
> - __up_read(&brw->rw_sem);
> - }
> -
> - rwsem_acquire_read(&brw->rw_sem.dep_map, 0, 1, _RET_IP_);
> - return 1;
> + smp_mb(); /* A matches D */
> +
> + if (likely(smp_load_acquire(&sem->state) != readers_block))
> + return 1;
> +
> + __percpu_up_read(sem);
> + return 0;
> }
> +EXPORT_SYMBOL_GPL(__percpu_down_read_trylock);
>
> -void percpu_up_read(struct percpu_rw_semaphore *brw)
> +void __percpu_up_read(struct percpu_rw_semaphore *sem)
> {
> - rwsem_release(&brw->rw_sem.dep_map, 1, _RET_IP_);
> -
> - if (likely(update_fast_ctr(brw, -1)))
> - return;
> + smp_mb(); /* B matches C */
> + /*
> + * In other words, if they see our decrement (presumably to aggregate
> + * zero, as that is the only time it matters) they will also see our
> + * critical section.
> + */
> + __this_cpu_dec(*sem->refcount);
>
> - /* false-positive is possible but harmless */
> - if (atomic_dec_and_test(&brw->slow_read_ctr))
> - wake_up_all(&brw->write_waitq);
> + /* Prod writer to recheck readers_active */
> + wake_up(&sem->writer);
> }
> -EXPORT_SYMBOL_GPL(percpu_up_read);
> +EXPORT_SYMBOL_GPL(__percpu_up_read);
> +
> +#define per_cpu_sum(var) \
> +({ \
> + typeof(var) __sum = 0; \
> + int cpu; \
> + compiletime_assert_atomic_type(__sum); \
> + for_each_possible_cpu(cpu) \
> + __sum += per_cpu(var, cpu); \
> + __sum; \
> +})
>
> -static int clear_fast_ctr(struct percpu_rw_semaphore *brw)
> +/*
> + * Return true if the modular sum of the sem->refcount per-CPU variable is
> + * zero. If this sum is zero, then it is stable due to the fact that if any
> + * newly arriving readers increment a given counter, they will immediately
> + * decrement that same counter.
> + */
> +static bool readers_active_check(struct percpu_rw_semaphore *sem)
> {
> - unsigned int sum = 0;
> - int cpu;
> + if (per_cpu_sum(*sem->refcount) != 0)
> + return false;
> +
> + /*
> + * If we observed the decrement, ensure we see the entire critical
> + * section.
> + */
>
> - for_each_possible_cpu(cpu) {
> - sum += per_cpu(*brw->fast_read_ctr, cpu);
> - per_cpu(*brw->fast_read_ctr, cpu) = 0;
> - }
> + smp_mb(); /* C matches B */
>
> - return sum;
> + return true;
> }
>
> -void percpu_down_write(struct percpu_rw_semaphore *brw)
> +void percpu_down_write(struct percpu_rw_semaphore *sem)
> {
> + down_write(&sem->rw_sem);
> +
> + /* Notify readers to take the slow path. */
> + rcu_sync_enter(&sem->rss);
> +
> /*
> - * Make rcu_sync_is_idle() == F and thus disable the fast-path in
> - * percpu_down_read() and percpu_up_read(), and wait for gp pass.
> - *
> - * The latter synchronises us with the preceding readers which used
> - * the fast-past, so we can not miss the result of __this_cpu_add()
> - * or anything else inside their criticial sections.
> + * Notify new readers to block; up until now, and thus throughout the
> + * longish rcu_sync_enter() above, new readers could still come in.
> */
> - rcu_sync_enter(&brw->rss);
> + sem->state = readers_block;
>
> - /* exclude other writers, and block the new readers completely */
> - down_write(&brw->rw_sem);
> + smp_mb(); /* D matches A */
>
> - /* nobody can use fast_read_ctr, move its sum into slow_read_ctr */
> - atomic_add(clear_fast_ctr(brw), &brw->slow_read_ctr);
> + /*
> + * If they don't see our write of readers_block to sem->state,
> + * then we are guaranteed to see their sem->refcount increment, and
> + * therefore will wait for them.
> + */
>
> - /* wait for all readers to complete their percpu_up_read() */
> - wait_event(brw->write_waitq, !atomic_read(&brw->slow_read_ctr));
> + /* Wait for all now active readers to complete. */
> + wait_event(sem->writer, readers_active_check(sem));
> }
> -EXPORT_SYMBOL_GPL(percpu_down_write);
>
> -void percpu_up_write(struct percpu_rw_semaphore *brw)
> +void percpu_up_write(struct percpu_rw_semaphore *sem)
> {
> - /* release the lock, but the readers can't use the fast-path */
> - up_write(&brw->rw_sem);
> /*
> - * Enable the fast-path in percpu_down_read() and percpu_up_read()
> - * but only after another gp pass; this adds the necessary barrier
> - * to ensure the reader can't miss the changes done by us.
> + * Signal the writer is done, no fast path yet.
> + *
> + * One reason that we cannot just immediately flip to readers_fast is
> + * that new readers might fail to see the results of this writer's
> + * critical section.
> + *
> + * Therefore we force it through the slow path which guarantees an
> + * acquire and thereby guarantees the critical section's consistency.
> + */
> + smp_store_release(&sem->state, readers_slow);
> +
> + /*
> + * Release the write lock, this will allow readers back in the game.
> + */
> + up_write(&sem->rw_sem);
> +
> + /*
> + * Once this completes (at least one RCU grace period hence) the reader
> + * fast path will be available again. Safe to use outside the exclusive
> + * write lock because it's counting.
> */
> - rcu_sync_exit(&brw->rss);
> + rcu_sync_exit(&sem->rss);
> }
> -EXPORT_SYMBOL_GPL(percpu_up_write);
> diff --git a/kernel/rcu/sync.c b/kernel/rcu/sync.c
> index be922c9f3d37..48055bf629af 100644
> --- a/kernel/rcu/sync.c
> +++ b/kernel/rcu/sync.c
> @@ -55,6 +55,7 @@ static const struct {
> .wait = rcu_barrier_bh,
> __INIT_HELD(rcu_read_lock_bh_held)
> },
> + [RCU_NONE] = { },
> };
>
> enum { GP_IDLE = 0, GP_PENDING, GP_PASSED };
> @@ -65,6 +66,9 @@ enum { CB_IDLE = 0, CB_PENDING, CB_REPLAY };
> #ifdef CONFIG_PROVE_RCU
> void rcu_sync_lockdep_assert(struct rcu_sync *rsp)
> {
> + if (rsp->gp_type == RCU_NONE)
> + return;
> +
> RCU_LOCKDEP_WARN(!gp_ops[rsp->gp_type].held(),
> "suspicious rcu_sync_is_idle() usage");
> }
> @@ -80,6 +84,8 @@ void rcu_sync_init(struct rcu_sync *rsp, enum rcu_sync_type type)
> memset(rsp, 0, sizeof(*rsp));
> init_waitqueue_head(&rsp->gp_wait);
> rsp->gp_type = type;
> + if (rsp->gp_type == RCU_NONE)
> + rsp->gp_state = GP_PENDING; /* anything !0 */
> }
>
> /**
> @@ -101,6 +107,9 @@ void rcu_sync_enter(struct rcu_sync *rsp)
> {
> bool need_wait, need_sync;
>
> + if (rsp->gp_type == RCU_NONE)
> + return;
> +
> spin_lock_irq(&rsp->rss_lock);
> need_wait = rsp->gp_count++;
> need_sync = rsp->gp_state == GP_IDLE;
> @@ -188,6 +197,9 @@ static void rcu_sync_func(struct rcu_head *rcu)
> */
> void rcu_sync_exit(struct rcu_sync *rsp)
> {
> + if (rsp->gp_type == RCU_NONE)
> + return;
> +
> spin_lock_irq(&rsp->rss_lock);
> if (!--rsp->gp_count) {
> if (rsp->cb_state == CB_IDLE) {
> @@ -208,6 +220,9 @@ void rcu_sync_dtor(struct rcu_sync *rsp)
> {
> int cb_state;
>
> + if (rsp->gp_type == RCU_NONE)
> + return;
> +
> BUG_ON(rsp->gp_count);
>
> spin_lock_irq(&rsp->rss_lock);
>
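FWIW, while working through the ordering I also sketched the slow-path
handshake as a stand-alone userspace model. Names and simplifications are
mine: a single C11 atomic counter in place of the per-CPU ->refcount, a
pthread mutex in place of ->rw_sem, and spinning in place of
wait_event()/wake_up(). It only restates the ordering I believe the patch
intends (reader: increment, then check ->state; writer: set ->state, then
sum), so please read it as a sketch rather than as equivalent code.

#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

enum { readers_slow, readers_block };

static atomic_int refcount;		/* stand-in for the summed per-CPU ->refcount */
static atomic_int state;		/* starts as readers_slow (== 0) */
static pthread_mutex_t rw_sem = PTHREAD_MUTEX_INITIALIZER;

static void down_read_model(void)
{
	for (;;) {
		atomic_fetch_add(&refcount, 1);		/* seq_cst: plays the role of barrier A */
		if (atomic_load(&state) != readers_block)
			return;				/* fast path: critical section may begin */
		atomic_fetch_sub(&refcount, 1);		/* back off so the writer's sum can reach 0 */
		pthread_mutex_lock(&rw_sem);		/* wait until the writer is done */
		pthread_mutex_unlock(&rw_sem);
	}
}

static void up_read_model(void)
{
	atomic_fetch_sub(&refcount, 1);			/* plays the role of barrier B */
}

static void down_write_model(void)
{
	pthread_mutex_lock(&rw_sem);			/* exclude other writers and slow-path readers */
	atomic_store(&state, readers_block);		/* plays the role of barrier D */
	while (atomic_load(&refcount) != 0)		/* readers_active_check(), spinning */
		;
}

static void up_write_model(void)
{
	atomic_store(&state, readers_slow);
	pthread_mutex_unlock(&rw_sem);
}

static void *reader(void *arg)
{
	(void)arg;
	down_read_model();
	/* reader critical section */
	up_read_model();
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, reader, NULL);
	down_write_model();
	/* writer critical section */
	up_write_model();
	pthread_join(t, NULL);
	puts("model ran to completion");
	return 0;
}

Running it a few times with the reader and writer winning the race in either
order exercises both the fast path and the back-off path of the model.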