Message-ID: <CAGsJ_4xRij-Vz_-dmL44YLvaQrYLKKnw7O_Skedrxj_YxuaT5Q@mail.gmail.com>
Date: Tue, 26 Aug 2025 11:48:08 +0800
From: Barry Song <21cnbao@...il.com>
To: Kanchana P Sridhar <kanchana.p.sridhar@...el.com>
Cc: linux-kernel@...r.kernel.org, linux-mm@...ck.org, hannes@...xchg.org,
yosry.ahmed@...ux.dev, nphamcs@...il.com, chengming.zhou@...ux.dev,
usamaarif642@...il.com, ryan.roberts@....com, ying.huang@...ux.alibaba.com,
akpm@...ux-foundation.org, senozhatsky@...omium.org,
linux-crypto@...r.kernel.org, herbert@...dor.apana.org.au,
davem@...emloft.net, clabbe@...libre.com, ardb@...nel.org,
ebiggers@...gle.com, surenb@...gle.com, kristen.c.accardi@...el.com,
vinicius.gomes@...el.com, wajdi.k.feghali@...el.com, vinodh.gopal@...el.com
Subject: Re: [PATCH v11 22/24] mm: zswap: Allocate pool batching resources if
the compressor supports batching.
Hi Kanchana,
[...]
>
> + /*
> + * Set the unit of compress batching for large folios, for quick
> + * retrieval in the zswap_compress() fast path:
> + * If the compressor is sequential (@pool->compr_batch_size is 1),
> + * large folios will be compressed in batches of ZSWAP_MAX_BATCH_SIZE
> + * pages, where each page in the batch is compressed sequentially.
> + * We see better performance by processing the folio in batches of
> + * ZSWAP_MAX_BATCH_SIZE, due to cache locality of working set
> + * structures.
> + */
> + pool->batch_size = (pool->compr_batch_size > 1) ?
> + pool->compr_batch_size : ZSWAP_MAX_BATCH_SIZE;
> +
> zswap_pool_debug("created", pool);
>
> return pool;
>
It's hard to follow: you add batch_size and compr_batch_size in this
patch, but only use them in a later one. Could we merge the related
changes into one patch instead of splitting them into several that
don't work independently?
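For example, this patch would be self-contained if it also carried the
fast-path consumer, which I'd guess looks roughly like the sketch below
(zswap_compress_batch() is a made-up name, not your actual code):

	/*
	 * Hypothetical use in zswap_compress(): walk the large folio in
	 * units of pool->batch_size, which is either the hardware batch
	 * size or ZSWAP_MAX_BATCH_SIZE for sequential compressors.
	 */
	for (index = 0; index < nr_pages; index += pool->batch_size) {
		unsigned int nr = min(pool->batch_size, nr_pages - index);

		if (!zswap_compress_batch(folio, index, nr, pool))
			return false;
	}

With both pieces in one patch, reviewers could actually judge whether
ZSWAP_MAX_BATCH_SIZE is the right unit for sequential compressors.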
> -
> acomp_ctx->acomp = crypto_alloc_acomp_node(pool->tfm_name, 0, 0, cpu_to_node(cpu));
> if (IS_ERR_OR_NULL(acomp_ctx->acomp)) {
> pr_err("could not alloc crypto acomp %s : %ld\n",
> @@ -904,17 +929,36 @@ static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
> acomp_ctx->req = acomp_request_alloc(acomp_ctx->acomp);
> if (IS_ERR_OR_NULL(acomp_ctx->req)) {
> pr_err("could not alloc crypto acomp_request %s\n",
> - pool->tfm_name);
> + pool->tfm_name);
> goto fail;
> }
>
> - crypto_init_wait(&acomp_ctx->wait);
> + /*
> + * Allocate up to ZSWAP_MAX_BATCH_SIZE dst buffers if the
> + * compressor supports batching.
> + */
> + pool->compr_batch_size = min(ZSWAP_MAX_BATCH_SIZE,
> + crypto_acomp_batch_size(acomp_ctx->acomp));
> +
> + acomp_ctx->buffers = kcalloc_node(pool->compr_batch_size, sizeof(u8 *),
> + GFP_KERNEL, cpu_to_node(cpu));
> + if (!acomp_ctx->buffers)
> + goto fail;
> +
> + for (i = 0; i < pool->compr_batch_size; ++i) {
> + acomp_ctx->buffers[i] = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL,
> + cpu_to_node(cpu));
> + if (!acomp_ctx->buffers[i])
> + goto fail;
> + }
Same problem here: this memory is allocated in this patch but only used
in a later one. Could we merge the related changes into a single patch
instead of splitting them into several that don't work independently?
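Keeping the allocation next to its consumer would also make the buffer
lifetime obvious. For reference, here is how I read the cleanup side,
given that the fail path below now passes pool->compr_batch_size (a
sketch only, mirroring the allocations above):

	/* Sketch: free the per-CPU resources allocated above. */
	static void acomp_ctx_dealloc(struct crypto_acomp_ctx *acomp_ctx,
				      unsigned int nr_buffers)
	{
		unsigned int i;

		if (acomp_ctx->buffers) {
			for (i = 0; i < nr_buffers; i++)
				kfree(acomp_ctx->buffers[i]);
			kfree(acomp_ctx->buffers);
		}

		if (!IS_ERR_OR_NULL(acomp_ctx->req))
			acomp_request_free(acomp_ctx->req);
		if (!IS_ERR_OR_NULL(acomp_ctx->acomp))
			crypto_free_acomp(acomp_ctx->acomp);
	}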
>
> /*
> * if the backend of acomp is async zip, crypto_req_done() will wakeup
> * crypto_wait_req(); if the backend of acomp is scomp, the callback
> * won't be called, crypto_wait_req() will return without blocking.
> */
> + crypto_init_wait(&acomp_ctx->wait);
> +
> acomp_request_set_callback(acomp_ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
> crypto_req_done, &acomp_ctx->wait);
>
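(As an aside, for anyone following along: the comment above describes
the usual synchronous-wait pattern for acomp, which boils down to
something like

	/* Works for both async and sync (scomp) backends. */
	ret = crypto_wait_req(crypto_acomp_compress(acomp_ctx->req),
			      &acomp_ctx->wait);

where crypto_wait_req() returns immediately for scomp backends and
blocks until crypto_req_done() fires for async ones.)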
> @@ -922,7 +966,7 @@ static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
> return 0;
>
> fail:
> - acomp_ctx_dealloc(acomp_ctx);
> + acomp_ctx_dealloc(acomp_ctx, pool->compr_batch_size);
> return ret;
> }
>
> @@ -942,7 +986,7 @@ static bool zswap_compress(struct page *page, struct zswap_entry *entry,
>
> mutex_lock(&acomp_ctx->mutex);
>
> - dst = acomp_ctx->buffer;
> + dst = acomp_ctx->buffers[0];
> sg_init_table(&input, 1);
> sg_set_page(&input, page, PAGE_SIZE, 0);
>
> @@ -1003,19 +1047,19 @@ static bool zswap_decompress(struct zswap_entry *entry, struct folio *folio)
>
> acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
> mutex_lock(&acomp_ctx->mutex);
> - obj = zpool_obj_read_begin(zpool, entry->handle, acomp_ctx->buffer);
> + obj = zpool_obj_read_begin(zpool, entry->handle, acomp_ctx->buffers[0]);
>
> /*
> * zpool_obj_read_begin() might return a kmap address of highmem when
> - * acomp_ctx->buffer is not used. However, sg_init_one() does not
> - * handle highmem addresses, so copy the object to acomp_ctx->buffer.
> + * acomp_ctx->buffers[0] is not used. However, sg_init_one() does not
> + * handle highmem addresses, so copy the object to acomp_ctx->buffers[0].
> */
> if (virt_addr_valid(obj)) {
> src = obj;
> } else {
> - WARN_ON_ONCE(obj == acomp_ctx->buffer);
> - memcpy(acomp_ctx->buffer, obj, entry->length);
> - src = acomp_ctx->buffer;
> + WARN_ON_ONCE(obj == acomp_ctx->buffers[0]);
> + memcpy(acomp_ctx->buffers[0], obj, entry->length);
> + src = acomp_ctx->buffers[0];
It's hard to understand what is going on when related changes are not
kept in one self-contained patch.
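To be clear, what this hunk touches is the highmem bounce-buffer
pattern, roughly (my annotated reading of the code above):

	obj = zpool_obj_read_begin(zpool, entry->handle,
				   acomp_ctx->buffers[0]);
	if (virt_addr_valid(obj)) {
		src = obj;	/* lowmem: use the object directly */
	} else {
		/*
		 * Highmem kmap address: bounce into our lowmem buffer,
		 * since sg_init_one() cannot take a highmem address.
		 */
		memcpy(acomp_ctx->buffers[0], obj, entry->length);
		src = acomp_ctx->buffers[0];
	}

which is exactly why the buffers[0] rename should land in the same
patch as whatever starts using buffers[1] and beyond.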
Thanks
Barry