Message-ID: <CAJD7tkYYuy8n70tQXAgfi2ag5x2rWA77oxRd7yA+6w+Bbf3vqw@mail.gmail.com>
Date:   Mon, 12 Jun 2023 11:18:07 -0700
From:   Yosry Ahmed <yosryahmed@...gle.com>
To:     Domenico Cerasuolo <cerasuolodomenico@...il.com>
Cc:     vitaly.wool@...sulko.com, minchan@...nel.org,
        senozhatsky@...omium.org, linux-mm@...ck.org, ddstreet@...e.org,
        sjenning@...hat.com, nphamcs@...il.com, hannes@...xchg.org,
        akpm@...ux-foundation.org, linux-kernel@...r.kernel.org,
        kernel-team@...a.com
Subject: Re: [PATCH v3 1/7] mm: zswap: add pool shrinking mechanism

On Mon, Jun 12, 2023 at 2:38 AM Domenico Cerasuolo
<cerasuolodomenico@...il.com> wrote:
>
> Each zpool driver (zbud, z3fold and zsmalloc) implements its own shrink
> function, which is called from zpool_shrink. This commit instead adds a
> unified shrink function to zswap itself; the ultimate goal is to
> eliminate zpool_shrink once all zpool implementations have dropped
> their shrink code.
>
> To keep each commit functional on its own, this change adds only the
> mechanism itself; the backends are not modified, so there is no
> immediate functional change. The zswap mechanism takes effect only
> once the backends have removed their shrink code. The subsequent
> commits address the modifications needed in the backends.
>
> Acked-by: Nhat Pham <nphamcs@...il.com>
> Tested-by: Yosry Ahmed <yosryahmed@...gle.com>

Reviewed-by: Yosry Ahmed <yosryahmed@...gle.com>
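
A note for readers finding this in the archive: the heart of the patch
is zswap_reclaim_entry() below. Condensed to its steps (my paraphrase
as comments, not code from the patch):

        static int zswap_reclaim_entry(struct zswap_pool *pool)
        {
                /*
                 * 1) Pop the oldest entry off pool->lru under lru_lock,
                 *    copying its swap offset to the stack.
                 * 2) Under tree->lock, re-find the entry in the rb-tree
                 *    to rule out a racing invalidate(), then take a
                 *    reference.
                 * 3) Write the page back via zswap_writeback_entry().
                 * 4) On failure, put the entry back on the LRU; the
                 *    caller treats any failure as -EAGAIN and may retry.
                 */
        }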

> Signed-off-by: Domenico Cerasuolo <cerasuolodomenico@...il.com>
> ---
>  mm/zswap.c | 97 +++++++++++++++++++++++++++++++++++++++++++++++++++---
>  1 file changed, 92 insertions(+), 5 deletions(-)
>
> diff --git a/mm/zswap.c b/mm/zswap.c
> index 9fa86265f6dd..0024ec5ed574 100644
> --- a/mm/zswap.c
> +++ b/mm/zswap.c
> @@ -154,6 +154,12 @@ struct crypto_acomp_ctx {
>         struct mutex *mutex;
>  };
>
> +/*
> + * The lock ordering is zswap_tree.lock -> zswap_pool.lru_lock.
> + * The only case where lru_lock is not acquired while holding tree.lock is
> + * when a zswap_entry is taken off the lru for writeback; in that case it
> + * must be verified that the entry is still valid in the tree.
> + */
>  struct zswap_pool {
>         struct zpool *zpool;
>         struct crypto_acomp_ctx __percpu *acomp_ctx;
> @@ -163,6 +169,8 @@ struct zswap_pool {
>         struct work_struct shrink_work;
>         struct hlist_node node;
>         char tfm_name[CRYPTO_MAX_ALG_NAME];
> +       struct list_head lru;
> +       spinlock_t lru_lock;
>  };
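
The ordering comment above is worth internalizing. A minimal sketch of
the two allowed patterns (illustrative only, not code from the patch):

        /* common case: lru_lock nests inside tree->lock */
        spin_lock(&tree->lock);
        spin_lock(&pool->lru_lock);
        list_add(&entry->lru, &pool->lru);
        spin_unlock(&pool->lru_lock);
        spin_unlock(&tree->lock);

        /* writeback exception: the entry is popped under lru_lock alone,
         * then revalidated against the tree before it is trusted again */
        spin_lock(&pool->lru_lock);
        entry = list_last_entry(&pool->lru, struct zswap_entry, lru);
        list_del_init(&entry->lru);
        spin_unlock(&pool->lru_lock);
        /* entry may be freed from here on; see zswap_reclaim_entry() */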
>
>  /*
> @@ -180,10 +188,12 @@ struct zswap_pool {
>   *            be held while changing the refcount.  Since the lock must
>   *            be held, there is no reason to also make refcount atomic.
>   * length - the length in bytes of the compressed page data.  Needed during
> - *          decompression. For a same value filled page length is 0.
> + *          decompression. For a same-value filled page, length is 0, and
> + *          both pool and lru are invalid and must be ignored.
>   * pool - the zswap_pool the entry's data is in
>   * handle - zpool allocation handle that stores the compressed page data
>   * value - value of the same-value filled pages which have same content
> + * lru - the entry's node in the pool's lru list, used to evict pages.
>   */
>  struct zswap_entry {
>         struct rb_node rbnode;
> @@ -196,6 +206,7 @@ struct zswap_entry {
>                 unsigned long value;
>         };
>         struct obj_cgroup *objcg;
> +       struct list_head lru;
>  };
>
>  struct zswap_header {
> @@ -368,6 +379,12 @@ static void zswap_free_entry(struct zswap_entry *entry)
>         if (!entry->length)
>                 atomic_dec(&zswap_same_filled_pages);
>         else {
> +               /* zpool_evictable will be removed once all 3 backends have migrated */
> +               if (!zpool_evictable(entry->pool->zpool)) {
> +                       spin_lock(&entry->pool->lru_lock);
> +                       list_del(&entry->lru);
> +                       spin_unlock(&entry->pool->lru_lock);
> +               }
>                 zpool_free(entry->pool->zpool, entry->handle);
>                 zswap_pool_put(entry->pool);
>         }
> @@ -588,14 +605,72 @@ static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
>         return NULL;
>  }
>
> +static int zswap_reclaim_entry(struct zswap_pool *pool)
> +{
> +       struct zswap_header *zhdr;
> +       struct zswap_entry *entry;
> +       struct zswap_tree *tree;
> +       pgoff_t swpoffset;
> +       int ret;
> +
> +       /* Get an entry off the LRU */
> +       spin_lock(&pool->lru_lock);
> +       if (list_empty(&pool->lru)) {
> +               spin_unlock(&pool->lru_lock);
> +               return -EINVAL;
> +       }
> +       entry = list_last_entry(&pool->lru, struct zswap_entry, lru);
> +       list_del_init(&entry->lru);
> +       zhdr = zpool_map_handle(pool->zpool, entry->handle, ZPOOL_MM_RO);
> +       tree = zswap_trees[swp_type(zhdr->swpentry)];
> +       zpool_unmap_handle(pool->zpool, entry->handle);
> +       /*
> +        * Once the lru lock is dropped, the entry might get freed. The
> +        * swpoffset is copied to the stack, and entry isn't deref'd again
> +        * until the entry is verified to still be alive in the tree.
> +        */
> +       swpoffset = swp_offset(zhdr->swpentry);
> +       spin_unlock(&pool->lru_lock);
> +
> +       /* Check for invalidate() race */
> +       spin_lock(&tree->lock);
> +       if (entry != zswap_rb_search(&tree->rbroot, swpoffset)) {
> +               ret = -EAGAIN;
> +               goto unlock;
> +       }
> +       /* Hold a reference to prevent a free during writeback */
> +       zswap_entry_get(entry);
> +       spin_unlock(&tree->lock);
> +
> +       ret = zswap_writeback_entry(pool->zpool, entry->handle);
> +
> +       spin_lock(&tree->lock);
> +       if (ret) {
> +               /* Writeback failed, put entry back on LRU */
> +               spin_lock(&pool->lru_lock);
> +               list_move(&entry->lru, &pool->lru);
> +               spin_unlock(&pool->lru_lock);
> +       }
> +
> +       /* Drop local reference */
> +       zswap_entry_put(tree, entry);
> +unlock:
> +       spin_unlock(&tree->lock);
> +       return ret ? -EAGAIN : 0;
> +}
> +
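
One subtlety worth spelling out: after lru_lock is dropped, entry may
already have been freed, so it is only used as a comparison key until
the rb-tree lookup proves it is still alive. The race being defended
against, as an illustrative timeline (not patch code):

        /*
         *   reclaim (above)                  invalidate()
         *   ---------------                  ------------
         *   lru_lock: pop entry, copy
         *   swpoffset to stack; unlock
         *                                    tree->lock: rb_erase +
         *                                    final put -> entry freed
         *   tree->lock: rb_search(swpoffset)
         *   != entry -> -EAGAIN; the stale
         *   pointer is never dereferenced
         */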
>  static void shrink_worker(struct work_struct *w)
>  {
>         struct zswap_pool *pool = container_of(w, typeof(*pool),
>                                                 shrink_work);
>         int ret, failures = 0;
>
> +       /* zpool_evictable will be removed once all 3 backends have migrated */
>         do {
> -               ret = zpool_shrink(pool->zpool, 1, NULL);
> +               if (zpool_evictable(pool->zpool))
> +                       ret = zpool_shrink(pool->zpool, 1, NULL);
> +               else
> +                       ret = zswap_reclaim_entry(pool);
>                 if (ret) {
>                         zswap_reject_reclaim_fail++;
>                         if (ret != -EAGAIN)
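
For context, this patch does not change how the worker is scheduled;
it is still kicked from the store path when the pool is full, roughly
along these lines (paraphrased from memory of zswap.c around this
series, not part of this patch):

        pool = zswap_pool_last_get();
        if (pool)
                queue_work(shrink_wq, &pool->shrink_work);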
> @@ -659,6 +734,8 @@ static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
>          */
>         kref_init(&pool->kref);
>         INIT_LIST_HEAD(&pool->list);
> +       INIT_LIST_HEAD(&pool->lru);
> +       spin_lock_init(&pool->lru_lock);
>         INIT_WORK(&pool->shrink_work, shrink_worker);
>
>         zswap_pool_debug("created", pool);
> @@ -1274,7 +1351,7 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
>         }
>
>         /* store */
> -       hlen = zpool_evictable(entry->pool->zpool) ? sizeof(zhdr) : 0;
> +       hlen = sizeof(zhdr);
>         gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
>         if (zpool_malloc_support_movable(entry->pool->zpool))
>                 gfp |= __GFP_HIGHMEM | __GFP_MOVABLE;
> @@ -1317,6 +1394,12 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
>                         zswap_entry_put(tree, dupentry);
>                 }
>         } while (ret == -EEXIST);
> +       /* zpool_evictable will be removed once all 3 backends have migrated */
> +       if (entry->length && !zpool_evictable(entry->pool->zpool)) {
> +               spin_lock(&entry->pool->lru_lock);
> +               list_add(&entry->lru, &entry->pool->lru);
> +               spin_unlock(&entry->pool->lru_lock);
> +       }
>         spin_unlock(&tree->lock);
>
>         /* update stats */
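
The hlen change is subtle but important: now that zswap can do the
writeback itself, every stored object needs the header, because the
header is where the swap entry consumed by zswap_reclaim_entry()
lives (as defined earlier in zswap.c):

        struct zswap_header {
                swp_entry_t swpentry;
        };

Previously only evictable backends paid that per-object cost.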
> @@ -1398,8 +1481,7 @@ static int zswap_frontswap_load(unsigned type, pgoff_t offset,
>         /* decompress */
>         dlen = PAGE_SIZE;
>         src = zpool_map_handle(entry->pool->zpool, entry->handle, ZPOOL_MM_RO);
> -       if (zpool_evictable(entry->pool->zpool))
> -               src += sizeof(struct zswap_header);
> +       src += sizeof(struct zswap_header);
>
>         if (!zpool_can_sleep_mapped(entry->pool->zpool)) {
>                 memcpy(tmp, src, entry->length);
> @@ -1432,6 +1514,11 @@ static int zswap_frontswap_load(unsigned type, pgoff_t offset,
>         if (!ret && zswap_exclusive_loads_enabled) {
>                 zswap_invalidate_entry(tree, entry);
>                 *exclusive = true;
> +       } else if (entry->length && !zpool_evictable(entry->pool->zpool)) {
> +               /* zpool_evictable will be removed once all 3 backends have migrated */
> +               spin_lock(&entry->pool->lru_lock);
> +               list_move(&entry->lru, &entry->pool->lru);
> +               spin_unlock(&entry->pool->lru_lock);
>         }
>         spin_unlock(&tree->lock);
>
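
Taken together, the list operations across the patch implement a
textbook LRU, with the head as most-recently-used and the tail as the
eviction victim, all under pool->lru_lock:

        store:   list_add(&entry->lru, &pool->lru)    /* new entry at head */
        load:    list_move(&entry->lru, &pool->lru)   /* touch: back to head */
        free:    list_del(&entry->lru)                /* drop from the list */
        reclaim: list_last_entry(&pool->lru, ...)     /* evict from the tail */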
> --
> 2.34.1
>
