Message-ID: <PH7PR11MB8121875CD899E434C81DAABEC942A@PH7PR11MB8121.namprd11.prod.outlook.com>
Date: Fri, 4 Jul 2025 21:35:03 +0000
From: "Sridhar, Kanchana P" <kanchana.p.sridhar@...el.com>
To: Nhat Pham <nphamcs@...il.com>
CC: "linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
"linux-mm@...ck.org" <linux-mm@...ck.org>, "hannes@...xchg.org"
<hannes@...xchg.org>, "yosry.ahmed@...ux.dev" <yosry.ahmed@...ux.dev>,
"chengming.zhou@...ux.dev" <chengming.zhou@...ux.dev>,
"usamaarif642@...il.com" <usamaarif642@...il.com>, "ryan.roberts@....com"
<ryan.roberts@....com>, "21cnbao@...il.com" <21cnbao@...il.com>,
"ying.huang@...ux.alibaba.com" <ying.huang@...ux.alibaba.com>,
"akpm@...ux-foundation.org" <akpm@...ux-foundation.org>,
"senozhatsky@...omium.org" <senozhatsky@...omium.org>,
"linux-crypto@...r.kernel.org" <linux-crypto@...r.kernel.org>,
"herbert@...dor.apana.org.au" <herbert@...dor.apana.org.au>,
"davem@...emloft.net" <davem@...emloft.net>, "clabbe@...libre.com"
<clabbe@...libre.com>, "ardb@...nel.org" <ardb@...nel.org>,
"ebiggers@...gle.com" <ebiggers@...gle.com>, "surenb@...gle.com"
<surenb@...gle.com>, "Accardi, Kristen C" <kristen.c.accardi@...el.com>,
"Gomes, Vinicius" <vinicius.gomes@...el.com>, "Feghali, Wajdi K"
<wajdi.k.feghali@...el.com>, "Gopal, Vinodh" <vinodh.gopal@...el.com>,
"Sridhar, Kanchana P" <kanchana.p.sridhar@...el.com>
Subject: RE: [PATCH v10 20/25] mm: zswap: Move the CPU hotplug procedures
under "pool functions".
> -----Original Message-----
> From: Nhat Pham <nphamcs@...il.com>
> Sent: Friday, July 4, 2025 11:39 AM
> To: Sridhar, Kanchana P <kanchana.p.sridhar@...el.com>
> Cc: linux-kernel@...r.kernel.org; linux-mm@...ck.org;
> hannes@...xchg.org; yosry.ahmed@...ux.dev; chengming.zhou@...ux.dev;
> usamaarif642@...il.com; ryan.roberts@....com; 21cnbao@...il.com;
> ying.huang@...ux.alibaba.com; akpm@...ux-foundation.org;
> senozhatsky@...omium.org; linux-crypto@...r.kernel.org;
> herbert@...dor.apana.org.au; davem@...emloft.net;
> clabbe@...libre.com; ardb@...nel.org; ebiggers@...gle.com;
> surenb@...gle.com; Accardi, Kristen C <kristen.c.accardi@...el.com>;
> Gomes, Vinicius <vinicius.gomes@...el.com>; Feghali, Wajdi K
> <wajdi.k.feghali@...el.com>; Gopal, Vinodh <vinodh.gopal@...el.com>
> Subject: Re: [PATCH v10 20/25] mm: zswap: Move the CPU hotplug
> procedures under "pool functions".
>
> On Thu, Jul 3, 2025 at 9:23 PM Kanchana P Sridhar
> <kanchana.p.sridhar@...el.com> wrote:
> >
> > This patch merely moves zswap_cpu_comp_prepare() and
> > zswap_cpu_comp_dead() to be in the "pool functions" section because
> > these functions are invoked upon pool creation/deletion.
>
> Hmm idk, "compressed storage" section seems fitting for
> zswap_cpu_comp_prepare() and zswap_cpu_comp_dead().
>
> Is this patch necessary?
Not too sure. I moved them to "pool functions" because the next
patch (#21) ties the lifetime of the acomp_ctx resources to the pool's
lifetime, from pool creation to deletion, and also no longer registers
a teardown callback. I can move zswap_cpu_comp_prepare() and
zswap_cpu_comp_dead() back to "compressed storage" if you think that's
where they belong.
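
For reference, a rough sketch of the direction patch 21 takes (the
details below are illustrative, not the actual patch code): in
zswap_setup(), only the startup callback is registered with the
hotplug core, so the per-CPU acomp_ctx resources survive CPU
offlining and are freed explicitly when the pool is deleted:

	/* Sketch: register the "prepare" callback with no teardown. */
	ret = cpuhp_setup_state_multi(CPUHP_MM_ZSWP_POOL_PREPARE,
				      "mm/zswap_pool:prepare",
				      zswap_cpu_comp_prepare,
				      NULL);

and on pool deletion, something like:

	/* Sketch: free the per-CPU resources when the pool goes away. */
	static void zswap_pool_destroy(struct zswap_pool *pool)
	{
		int cpu;

		cpuhp_state_remove_instance_nocalls(
			CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);

		/*
		 * zswap_cpu_comp_dead() is safe to call for CPUs that
		 * were never prepared: their per-CPU fields are NULL,
		 * so the frees are no-ops.
		 */
		for_each_possible_cpu(cpu)
			zswap_cpu_comp_dead(cpu, &pool->node);
	}

With that change, the prepare/dead pair naturally reads as part of the
pool lifecycle, which is why I grouped them under "pool functions".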
Thanks,
Kanchana
>
> >
> > Signed-off-by: Kanchana P Sridhar <kanchana.p.sridhar@...el.com>
> > ---
> > mm/zswap.c | 188 ++++++++++++++++++++++++++---------------------------
> > 1 file changed, 94 insertions(+), 94 deletions(-)
> >
> > diff --git a/mm/zswap.c b/mm/zswap.c
> > index 3c0fd8a137182..3538ecaed5e16 100644
> > --- a/mm/zswap.c
> > +++ b/mm/zswap.c
> > @@ -248,6 +248,100 @@ static inline struct xarray *swap_zswap_tree(swp_entry_t swp)
> > **********************************/
> > static void __zswap_pool_empty(struct percpu_ref *ref);
> >
> > +static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
> > +{
> > + struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
> > + struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
> > + struct crypto_acomp *acomp = NULL;
> > + struct acomp_req *req = NULL;
> > + u8 *buffer = NULL;
> > + int ret;
> > +
> > + buffer = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
> > + if (!buffer) {
> > + ret = -ENOMEM;
> > + goto fail;
> > + }
> > +
> > + acomp = crypto_alloc_acomp_node(pool->tfm_name, 0, 0, cpu_to_node(cpu));
> > + if (IS_ERR(acomp)) {
> > + pr_err("could not alloc crypto acomp %s : %ld\n",
> > + pool->tfm_name, PTR_ERR(acomp));
> > + ret = PTR_ERR(acomp);
> > + goto fail;
> > + }
> > +
> > + req = acomp_request_alloc(acomp);
> > + if (!req) {
> > + pr_err("could not alloc crypto acomp_request %s\n",
> > + pool->tfm_name);
> > + ret = -ENOMEM;
> > + goto fail;
> > + }
> > +
> > + /*
> > + * Only hold the mutex after completing allocations, otherwise we may
> > + * recurse into zswap through reclaim and attempt to hold the mutex
> > + * again resulting in a deadlock.
> > + */
> > + mutex_lock(&acomp_ctx->mutex);
> > + crypto_init_wait(&acomp_ctx->wait);
> > +
> > + /*
> > + * if the backend of acomp is async zip, crypto_req_done() will wakeup
> > + * crypto_wait_req(); if the backend of acomp is scomp, the callback
> > + * won't be called, crypto_wait_req() will return without blocking.
> > + */
> > + acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
> > + crypto_req_done, &acomp_ctx->wait);
> > +
> > + acomp_ctx->buffer = buffer;
> > + acomp_ctx->acomp = acomp;
> > + acomp_ctx->is_sleepable = acomp_is_async(acomp);
> > + acomp_ctx->req = req;
> > + mutex_unlock(&acomp_ctx->mutex);
> > + return 0;
> > +
> > +fail:
> > + if (acomp)
> > + crypto_free_acomp(acomp);
> > + kfree(buffer);
> > + return ret;
> > +}
> > +
> > +static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
> > +{
> > + struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
> > + struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
> > + struct acomp_req *req;
> > + struct crypto_acomp *acomp;
> > + u8 *buffer;
> > +
> > + if (IS_ERR_OR_NULL(acomp_ctx))
> > + return 0;
> > +
> > + mutex_lock(&acomp_ctx->mutex);
> > + req = acomp_ctx->req;
> > + acomp = acomp_ctx->acomp;
> > + buffer = acomp_ctx->buffer;
> > + acomp_ctx->req = NULL;
> > + acomp_ctx->acomp = NULL;
> > + acomp_ctx->buffer = NULL;
> > + mutex_unlock(&acomp_ctx->mutex);
> > +
> > + /*
> > + * Do the actual freeing after releasing the mutex to avoid subtle
> > + * locking dependencies causing deadlocks.
> > + */
> > + if (!IS_ERR_OR_NULL(req))
> > + acomp_request_free(req);
> > + if (!IS_ERR_OR_NULL(acomp))
> > + crypto_free_acomp(acomp);
> > + kfree(buffer);
> > +
> > + return 0;
> > +}
> > +
> > static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
> > {
> > struct zswap_pool *pool;
> > @@ -818,100 +912,6 @@ static void zswap_entry_free(struct zswap_entry *entry)
> > /*********************************
> > * compressed storage functions
> > **********************************/
> > -static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
> > -{
> > - struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
> > - struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
> > - struct crypto_acomp *acomp = NULL;
> > - struct acomp_req *req = NULL;
> > - u8 *buffer = NULL;
> > - int ret;
> > -
> > - buffer = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
> > - if (!buffer) {
> > - ret = -ENOMEM;
> > - goto fail;
> > - }
> > -
> > - acomp = crypto_alloc_acomp_node(pool->tfm_name, 0, 0, cpu_to_node(cpu));
> > - if (IS_ERR(acomp)) {
> > - pr_err("could not alloc crypto acomp %s : %ld\n",
> > - pool->tfm_name, PTR_ERR(acomp));
> > - ret = PTR_ERR(acomp);
> > - goto fail;
> > - }
> > -
> > - req = acomp_request_alloc(acomp);
> > - if (!req) {
> > - pr_err("could not alloc crypto acomp_request %s\n",
> > - pool->tfm_name);
> > - ret = -ENOMEM;
> > - goto fail;
> > - }
> > -
> > - /*
> > - * Only hold the mutex after completing allocations, otherwise we may
> > - * recurse into zswap through reclaim and attempt to hold the mutex
> > - * again resulting in a deadlock.
> > - */
> > - mutex_lock(&acomp_ctx->mutex);
> > - crypto_init_wait(&acomp_ctx->wait);
> > -
> > - /*
> > - * if the backend of acomp is async zip, crypto_req_done() will wakeup
> > - * crypto_wait_req(); if the backend of acomp is scomp, the callback
> > - * won't be called, crypto_wait_req() will return without blocking.
> > - */
> > - acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
> > - crypto_req_done, &acomp_ctx->wait);
> > -
> > - acomp_ctx->buffer = buffer;
> > - acomp_ctx->acomp = acomp;
> > - acomp_ctx->is_sleepable = acomp_is_async(acomp);
> > - acomp_ctx->req = req;
> > - mutex_unlock(&acomp_ctx->mutex);
> > - return 0;
> > -
> > -fail:
> > - if (acomp)
> > - crypto_free_acomp(acomp);
> > - kfree(buffer);
> > - return ret;
> > -}
> > -
> > -static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
> > -{
> > - struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
> > - struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
> > - struct acomp_req *req;
> > - struct crypto_acomp *acomp;
> > - u8 *buffer;
> > -
> > - if (IS_ERR_OR_NULL(acomp_ctx))
> > - return 0;
> > -
> > - mutex_lock(&acomp_ctx->mutex);
> > - req = acomp_ctx->req;
> > - acomp = acomp_ctx->acomp;
> > - buffer = acomp_ctx->buffer;
> > - acomp_ctx->req = NULL;
> > - acomp_ctx->acomp = NULL;
> > - acomp_ctx->buffer = NULL;
> > - mutex_unlock(&acomp_ctx->mutex);
> > -
> > - /*
> > - * Do the actual freeing after releasing the mutex to avoid subtle
> > - * locking dependencies causing deadlocks.
> > - */
> > - if (!IS_ERR_OR_NULL(req))
> > - acomp_request_free(req);
> > - if (!IS_ERR_OR_NULL(acomp))
> > - crypto_free_acomp(acomp);
> > - kfree(buffer);
> > -
> > - return 0;
> > -}
> > -
> > static struct crypto_acomp_ctx *acomp_ctx_get_cpu_lock(struct zswap_pool *pool)
> > {
> > struct crypto_acomp_ctx *acomp_ctx;
> > --
> > 2.27.0
> >