Message-ID: <CAJD7tkYLUXCumH7qZDE63qOUbrj3bxnBbgkkdCVGbvL6R_fS8w@mail.gmail.com>
Date: Mon, 6 Jan 2025 17:19:58 -0800
From: Yosry Ahmed <yosryahmed@...gle.com>
To: Kanchana P Sridhar <kanchana.p.sridhar@...el.com>
Cc: linux-kernel@...r.kernel.org, linux-mm@...ck.org, hannes@...xchg.org,
nphamcs@...il.com, chengming.zhou@...ux.dev, usamaarif642@...il.com,
ryan.roberts@....com, 21cnbao@...il.com, akpm@...ux-foundation.org,
linux-crypto@...r.kernel.org, herbert@...dor.apana.org.au,
davem@...emloft.net, clabbe@...libre.com, ardb@...nel.org,
ebiggers@...gle.com, surenb@...gle.com, kristen.c.accardi@...el.com,
wajdi.k.feghali@...el.com, vinodh.gopal@...el.com
Subject: Re: [PATCH v5 12/12] mm: zswap: Compress batching with Intel IAA in
zswap_store() of large folios.

On Fri, Dec 20, 2024 at 10:31 PM Kanchana P Sridhar
<kanchana.p.sridhar@...el.com> wrote:
>
> zswap_compress_folio() is modified to detect if the pool's acomp_ctx has
> "nr_reqs" greater than one, which will be the case if the CPU onlining
> code has allocated batching resources in the acomp_ctx based on the
> queries to acomp_has_async_batching() and crypto_acomp_batch_size(). If
> so, compress batching can be used, with a batch size of
> "acomp_ctx->nr_reqs".
>
> If compress batching can be used with the given zswap pool,
> zswap_compress_folio() will invoke the newly added zswap_batch_compress()
> to compress and store the folio in batches of "acomp_ctx->nr_reqs" pages.
>
> zswap_batch_compress() calls crypto_acomp_batch_compress() to compress
> each batch of (up to) "acomp_ctx->nr_reqs" pages. The iaa_crypto driver
> will compress the pages in each batch in parallel in Intel IAA hardware,
> using 'async' mode and request chaining.
>
> Hence, zswap_batch_compress() performs the same computations for a batch
> as zswap_compress() does for a single page, returning true if the batch
> was successfully compressed and stored, and false otherwise.
>
> If the pool does not support compress batching, zswap_compress_folio()
> calls zswap_compress() for each individual page in the folio, as before.
>
> Signed-off-by: Kanchana P Sridhar <kanchana.p.sridhar@...el.com>
> ---
> mm/zswap.c | 109 +++++++++++++++++++++++++++++++++++++++++++++++++++--
> 1 file changed, 105 insertions(+), 4 deletions(-)
>
> diff --git a/mm/zswap.c b/mm/zswap.c
> index 1be0f1807bfc..f336fafe24c4 100644
> --- a/mm/zswap.c
> +++ b/mm/zswap.c
> @@ -1467,17 +1467,118 @@ static void shrink_worker(struct work_struct *w)
> * main API
> **********************************/
>
> +static bool zswap_batch_compress(struct folio *folio,
> + long index,
> + unsigned int batch_size,
> + struct zswap_entry *entries[],
> + struct zswap_pool *pool,
> + struct crypto_acomp_ctx *acomp_ctx)
> +{
> + int comp_errors[ZSWAP_MAX_BATCH_SIZE] = { 0 };
> + unsigned int dlens[ZSWAP_MAX_BATCH_SIZE];
> + struct page *pages[ZSWAP_MAX_BATCH_SIZE];
> + unsigned int i, nr_batch_pages;
> + bool ret = true;
> +
> + nr_batch_pages = min((unsigned int)(folio_nr_pages(folio) - index), batch_size);
> +
> + for (i = 0; i < nr_batch_pages; ++i) {
> + pages[i] = folio_page(folio, index + i);
> + dlens[i] = PAGE_SIZE;
> + }
> +
> + mutex_lock(&acomp_ctx->mutex);
> +
> + /*
> + * Batch compress @nr_batch_pages. If IAA is the compressor, the
> + * hardware will compress @nr_batch_pages in parallel.
> + */
> + ret = crypto_acomp_batch_compress(
> + acomp_ctx->reqs,
> + &acomp_ctx->wait,
> + pages,
> + acomp_ctx->buffers,
> + dlens,
> + comp_errors,
> + nr_batch_pages);
I will hold off on reviewing this patch until the acomp interface is
settled, but I am wondering if this can be a vectorization of
zswap_compress() instead, since there's a lot of common code.
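
Roughly what I have in mind, as a completely untested sketch (with a
hypothetical zswap_compress_pages() helper): take an array of pages and
a count, so that nr == 1 covers today's single-page zswap_compress()
path and the zpool store loop exists only once:

static bool zswap_compress_pages(struct page *pages[],
				 struct zswap_entry *entries[],
				 unsigned int nr,
				 struct zswap_pool *pool,
				 struct crypto_acomp_ctx *acomp_ctx)
{
	/* Caller guarantees nr <= acomp_ctx->nr_reqs <= ZSWAP_MAX_BATCH_SIZE. */
	int errs[ZSWAP_MAX_BATCH_SIZE] = { 0 };
	unsigned int dlens[ZSWAP_MAX_BATCH_SIZE];
	struct zpool *zpool = pool->zpool;
	gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
	bool ret = false;
	unsigned int i;

	if (zpool_malloc_support_movable(zpool))
		gfp |= __GFP_HIGHMEM | __GFP_MOVABLE;

	for (i = 0; i < nr; ++i)
		dlens[i] = PAGE_SIZE;

	mutex_lock(&acomp_ctx->mutex);

	/* nr == 1 degenerates to the current single-page compression. */
	if (!crypto_acomp_batch_compress(acomp_ctx->reqs, &acomp_ctx->wait,
					 pages, acomp_ctx->buffers,
					 dlens, errs, nr)) {
		for (i = 0; i < nr; ++i) {
			if (errs[i] == -ENOSPC)
				zswap_reject_compress_poor++;
			else if (errs[i])
				zswap_reject_compress_fail++;
		}
		goto unlock;
	}

	/* One shared store loop instead of two copies of it. */
	for (i = 0; i < nr; ++i) {
		unsigned long handle;
		char *buf;
		int err;

		err = zpool_malloc(zpool, dlens[i], gfp, &handle);
		if (err) {
			if (err == -ENOSPC)
				zswap_reject_compress_poor++;
			else
				zswap_reject_alloc_fail++;
			goto unlock;
		}

		buf = zpool_map_handle(zpool, handle, ZPOOL_MM_WO);
		memcpy(buf, acomp_ctx->buffers[i], dlens[i]);
		zpool_unmap_handle(zpool, handle);

		entries[i]->handle = handle;
		entries[i]->length = dlens[i];
	}
	ret = true;

unlock:
	mutex_unlock(&acomp_ctx->mutex);
	return ret;
}

zswap_compress_folio() would then just walk the folio in strides of
batch_size (1 for non-batching pools) and call the same helper, instead
of carrying two separate loops.
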
> +
> + if (ret) {
> + /*
> + * All batch pages were successfully compressed.
> + * Store the pages in zpool.
> + */
> + struct zpool *zpool = pool->zpool;
> + gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
> +
> + if (zpool_malloc_support_movable(zpool))
> + gfp |= __GFP_HIGHMEM | __GFP_MOVABLE;
> +
> + for (i = 0; i < nr_batch_pages; ++i) {
> + unsigned long handle;
> + char *buf;
> + int err;
> +
> + err = zpool_malloc(zpool, dlens[i], gfp, &handle);
> +
> + if (err) {
> + if (err == -ENOSPC)
> + zswap_reject_compress_poor++;
> + else
> + zswap_reject_alloc_fail++;
> +
> + ret = false;
> + break;
> + }
> +
> + buf = zpool_map_handle(zpool, handle, ZPOOL_MM_WO);
> + memcpy(buf, acomp_ctx->buffers[i], dlens[i]);
> + zpool_unmap_handle(zpool, handle);
> +
> + entries[i]->handle = handle;
> + entries[i]->length = dlens[i];
> + }
> + } else {
> + /* Some batch pages had compression errors. */
> + for (i = 0; i < nr_batch_pages; ++i) {
> + if (comp_errors[i]) {
> + if (comp_errors[i] == -ENOSPC)
> + zswap_reject_compress_poor++;
> + else
> + zswap_reject_compress_fail++;
> + }
> + }
> + }
> +
> + mutex_unlock(&acomp_ctx->mutex);
> +
> + return ret;
> +}
> +
> static bool zswap_compress_folio(struct folio *folio,
> struct zswap_entry *entries[],
> struct zswap_pool *pool)
> {
> long index, nr_pages = folio_nr_pages(folio);
> + struct crypto_acomp_ctx *acomp_ctx;
> + unsigned int batch_size;
>
> - for (index = 0; index < nr_pages; ++index) {
> - struct page *page = folio_page(folio, index);
> + acomp_ctx = raw_cpu_ptr(pool->acomp_ctx);
> + batch_size = acomp_ctx->nr_reqs;
>
> - if (!zswap_compress(page, entries[index], pool))
> - return false;
> + if ((batch_size > 1) && (nr_pages > 1)) {
> + for (index = 0; index < nr_pages; index += batch_size) {
> +
> + if (!zswap_batch_compress(folio, index, batch_size,
> + &entries[index], pool, acomp_ctx))
> + return false;
> + }
> + } else {
> + for (index = 0; index < nr_pages; ++index) {
> + struct page *page = folio_page(folio, index);
> +
> + if (!zswap_compress(page, entries[index], pool))
> + return false;
> + }
> }
>
> return true;
> --
> 2.27.0
>