Message-ID: <CAGsJ_4za7KQNdHq2QP5eBNF0D=Npca+O_RJSE_gJN+_654-f6Q@mail.gmail.com>
Date: Wed, 8 Jan 2025 18:25:21 +1300
From: Barry Song <21cnbao@...il.com>
To: Nhat Pham <nphamcs@...il.com>
Cc: Yosry Ahmed <yosryahmed@...gle.com>, Johannes Weiner <hannes@...xchg.org>,
Andrew Morton <akpm@...ux-foundation.org>, Chengming Zhou <chengming.zhou@...ux.dev>,
Vitaly Wool <vitalywool@...il.com>, Sam Sun <samsun1006219@...il.com>,
"linux-mm@...ck.org" <linux-mm@...ck.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
"stable@...r.kernel.org" <stable@...r.kernel.org>,
"Sridhar, Kanchana P" <kanchana.p.sridhar@...el.com>
Subject: Re: [PATCH v2 2/2] mm: zswap: disable migration while using per-CPU acomp_ctx
On Wed, Jan 8, 2025 at 6:06 PM Barry Song <21cnbao@...il.com> wrote:
>
> On Wed, Jan 8, 2025 at 5:46 PM Nhat Pham <nphamcs@...il.com> wrote:
> >
> > On Wed, Jan 8, 2025 at 9:34 AM Yosry Ahmed <yosryahmed@...gle.com> wrote:
> > >
> > >
> > > Actually, using the mutex to protect against CPU hotunplug is not too
> > > complicated. The following diff is one way to do it (lightly tested).
> > > Johannes, Nhat, any preferences between this patch (disabling
> > > migration) and the following diff?
> >
> > I mean if this works, I'd take this over migration disabling any day :)
> >
> > >
> > > diff --git a/mm/zswap.c b/mm/zswap.c
> > > index f6316b66fb236..4d6817c679a54 100644
> > > --- a/mm/zswap.c
> > > +++ b/mm/zswap.c
> > > @@ -869,17 +869,40 @@ static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
> > >         struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
> > >         struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
> > >
> > > +       mutex_lock(&acomp_ctx->mutex);
> > >         if (!IS_ERR_OR_NULL(acomp_ctx)) {
> > >                 if (!IS_ERR_OR_NULL(acomp_ctx->req))
> > >                         acomp_request_free(acomp_ctx->req);
> > > +               acomp_ctx->req = NULL;
> > >                 if (!IS_ERR_OR_NULL(acomp_ctx->acomp))
> > >                         crypto_free_acomp(acomp_ctx->acomp);
> > >                 kfree(acomp_ctx->buffer);
> > >         }
> > > +       mutex_unlock(&acomp_ctx->mutex);
> > >
> > >         return 0;
> > > }
> > >
> > > +static struct crypto_acomp_ctx *acomp_ctx_get_cpu_locked(
> > > +               struct crypto_acomp_ctx __percpu *acomp_ctx)
> > > +{
> > > +       struct crypto_acomp_ctx *ctx;
> > > +
> > > +       for (;;) {
> > > +               ctx = raw_cpu_ptr(acomp_ctx);
> > > +               mutex_lock(&ctx->mutex);
> >
> > I'm a bit confused. IIUC, ctx is per-cpu right? What's protecting this
> > cpu-local data (including the mutex) from being invalidated under us
> > while we're sleeping and waiting for the mutex?
> >
> > If it is somehow protected, then yeah this seems quite elegant :)
>
> I thought about this again. Could it be the following?
>
> bool cpus_is_read_locked(void)
> {
>         return percpu_is_read_locked(&cpu_hotplug_lock);
> }
>
> in zswap:
>
> bool locked = cpus_is_read_locked();
>
> if (!locked)
>         cpus_read_lock();
>
> .... // do our job
>
> if (!locked)
>         cpus_read_unlock();
>
> This seems to resolve all three problems:
> 1. if our context already holds the read lock, we won't take it again;
> 2. if another context holds the write lock, we wait for CPU hotplug to
>    complete by acquiring the read lock;
> 3. if our context doesn't hold a read lock, we take it.
>
Sorry for the noise.
This won't work because percpu_is_read_locked() sums the read counts of all CPUs:
bool percpu_is_read_locked(struct percpu_rw_semaphore *sem)
{
        return per_cpu_sum(*sem->read_count) != 0 && !atomic_read(&sem->block);
}
EXPORT_SYMBOL_GPL(percpu_is_read_locked);
If other contexts hold the read lock, it will also return true. However, once
they release the lock, our per-CPU data can still be freed by CPU hotplug.
This approach would require something like percpu_is_read_locked_by_me() :-(
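
Just to illustrate what that would require (a rough, hypothetical sketch
only -- neither the helper nor the per-task counter below exists today,
since percpu_rw_semaphore only keeps a summed per-CPU read_count and
records no reader ownership):

bool cpus_read_locked_by_me(void)
{
        /*
         * Hypothetical: cpus_read_lock()/cpus_read_unlock() would have to
         * maintain a per-task depth counter, which they currently don't.
         */
        return current->cpus_read_lock_depth > 0;
}

Only with something like that could the "if (!locked) cpus_read_lock();"
pattern above be made safe against the race described.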
> >
> > > +               if (likely(ctx->req))
> > > +                       return ctx;
> > > +               /* Raced with zswap_cpu_comp_dead() on CPU hotunplug */
> > > +               mutex_unlock(&ctx->mutex);
> > > +       }
> > > +}
> > > +
> > > +static void acomp_ctx_put_unlock(struct crypto_acomp_ctx *ctx)
> > > +{
> > > +       mutex_unlock(&ctx->mutex);
> > > +}
> > > +
> > > static bool zswap_compress(struct page *page, struct zswap_entry *entry,
> > >                            struct zswap_pool *pool)
> > > {
> > > @@ -893,10 +916,7 @@ static bool zswap_compress(struct page *page, struct zswap_entry *entry,
> > >         gfp_t gfp;
> > >         u8 *dst;
> > >
> > > -       acomp_ctx = raw_cpu_ptr(pool->acomp_ctx);
> > > -
> > > -       mutex_lock(&acomp_ctx->mutex);
> > > -
> > > +       acomp_ctx = acomp_ctx_get_cpu_locked(pool->acomp_ctx);
> > >         dst = acomp_ctx->buffer;
> > >         sg_init_table(&input, 1);
> > >         sg_set_page(&input, page, PAGE_SIZE, 0);
> > > @@ -949,7 +969,7 @@ static bool zswap_compress(struct page *page, struct zswap_entry *entry,
> > >         else if (alloc_ret)
> > >                 zswap_reject_alloc_fail++;
> > >
> > > -       mutex_unlock(&acomp_ctx->mutex);
> > > +       acomp_ctx_put_unlock(acomp_ctx);
> > >         return comp_ret == 0 && alloc_ret == 0;
> > > }
> > >
> > > @@ -960,9 +980,7 @@ static void zswap_decompress(struct zswap_entry *entry, struct folio *folio)
> > >         struct crypto_acomp_ctx *acomp_ctx;
> > >         u8 *src;
> > >
> > > -       acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
> > > -       mutex_lock(&acomp_ctx->mutex);
> > > -
> > > +       acomp_ctx = acomp_ctx_get_cpu_locked(entry->pool->acomp_ctx);
> > >         src = zpool_map_handle(zpool, entry->handle, ZPOOL_MM_RO);
> > >         /*
>
> Thanks
> Barry