Message-ID: <CAJD7tkY2+PgCzViFEH2W3YefnrLx66HiN_MnkZg=usdt_E1Eqw@mail.gmail.com>
Date:   Wed, 7 Jun 2023 02:30:04 -0700
From:   Yosry Ahmed <yosryahmed@...gle.com>
To:     Domenico Cerasuolo <cerasuolodomenico@...il.com>
Cc:     vitaly.wool@...sulko.com, minchan@...nel.org,
        senozhatsky@...omium.org, linux-mm@...ck.org, ddstreet@...e.org,
        sjenning@...hat.com, nphamcs@...il.com, hannes@...xchg.org,
        akpm@...ux-foundation.org, linux-kernel@...r.kernel.org,
        kernel-team@...a.com
Subject: Re: [RFC PATCH v2 7/7] mm: zswap: remove zswap_header

On Tue, Jun 6, 2023 at 7:56 AM Domenico Cerasuolo
<cerasuolodomenico@...il.com> wrote:
>
> Previously, zswap_header served to store the swpentry within zpool
> pages, allowing zpool implementations to pass the relevant information
> to the writeback function. However, writeback is now handled directly
> within zswap, so zswap_header is no longer needed: the swp_entry_t can
> be stored directly in zswap_entry.
>
> Suggested-by: Yosry Ahmed <yosryahmed@...gle.com>
> Signed-off-by: Domenico Cerasuolo <cerasuolodomenico@...il.com>

Thanks for this cleanup. It gives us back some of the memory used by
the list_head in zswap_entry, and removes an unnecessary zpool map
operation.
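
(Back-of-the-envelope, assuming a 64-bit build: struct zswap_header
held a single swp_entry_t, so this saves 8 bytes per stored object in
the zpool, against the 16 bytes the two-pointer list_head added to
struct zswap_entry.)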

> ---
>  mm/zswap.c | 52 ++++++++++++++++++++++------------------------------
>  1 file changed, 22 insertions(+), 30 deletions(-)
>
> diff --git a/mm/zswap.c b/mm/zswap.c
> index ef8604812352..f689444dd5a7 100644
> --- a/mm/zswap.c
> +++ b/mm/zswap.c
> @@ -193,7 +193,7 @@ struct zswap_pool {
>   */
>  struct zswap_entry {
>         struct rb_node rbnode;
> -       pgoff_t offset;
> +       swp_entry_t swpentry;
>         int refcount;
>         unsigned int length;
>         struct zswap_pool *pool;
> @@ -205,10 +205,6 @@ struct zswap_entry {
>         struct list_head lru;
>  };
>
> -struct zswap_header {
> -       swp_entry_t swpentry;
> -};
> -
>  /*
>   * The tree lock in the zswap_tree struct protects a few things:
>   * - the rbtree
> @@ -250,7 +246,7 @@ static bool zswap_has_pool;
>         pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name,         \
>                  zpool_get_type((p)->zpool))
>
> -static int zswap_writeback_entry(struct zswap_entry *entry, struct zswap_header *zhdr,
> +static int zswap_writeback_entry(struct zswap_entry *entry,
>                                  struct zswap_tree *tree);
>  static int zswap_pool_get(struct zswap_pool *pool);
>  static void zswap_pool_put(struct zswap_pool *pool);
> @@ -311,12 +307,14 @@ static struct zswap_entry *zswap_rb_search(struct rb_root *root, pgoff_t offset)
>  {
>         struct rb_node *node = root->rb_node;
>         struct zswap_entry *entry;
> +       pgoff_t entry_offset;
>
>         while (node) {
>                 entry = rb_entry(node, struct zswap_entry, rbnode);
> -               if (entry->offset > offset)
> +               entry_offset = swp_offset(entry->swpentry);
> +               if (entry_offset > offset)
>                         node = node->rb_left;
> -               else if (entry->offset < offset)
> +               else if (entry_offset < offset)
>                         node = node->rb_right;
>                 else
>                         return entry;
> @@ -333,13 +331,15 @@ static int zswap_rb_insert(struct rb_root *root, struct zswap_entry *entry,
>  {
>         struct rb_node **link = &root->rb_node, *parent = NULL;
>         struct zswap_entry *myentry;
> +       pgoff_t myentry_offset, entry_offset = swp_offset(entry->swpentry);
>
>         while (*link) {
>                 parent = *link;
>                 myentry = rb_entry(parent, struct zswap_entry, rbnode);
> -               if (myentry->offset > entry->offset)
> +               myentry_offset = swp_offset(myentry->swpentry);
> +               if (myentry_offset > entry_offset)
>                         link = &(*link)->rb_left;
> -               else if (myentry->offset < entry->offset)
> +               else if (myentry_offset < entry_offset)

This whole myentry thing is very confusing to me. I initially thought
myentry would be the entry passed in as an argument. Can we change the
naming here to make it more consistent with zswap_rb_search() naming?
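
Something like the untested sketch below is what I have in mind --
"new_entry" is just a placeholder name; the point is that "entry" keeps
meaning the node already in the tree, as it does in zswap_rb_search():

static int zswap_rb_insert(struct rb_root *root, struct zswap_entry *new_entry,
			   struct zswap_entry **dupentry)
{
	struct rb_node **link = &root->rb_node, *parent = NULL;
	struct zswap_entry *entry;
	pgoff_t entry_offset, new_entry_offset = swp_offset(new_entry->swpentry);

	while (*link) {
		parent = *link;
		/* "entry" is the node being walked, as in zswap_rb_search() */
		entry = rb_entry(parent, struct zswap_entry, rbnode);
		entry_offset = swp_offset(entry->swpentry);
		if (entry_offset > new_entry_offset)
			link = &(*link)->rb_left;
		else if (entry_offset < new_entry_offset)
			link = &(*link)->rb_right;
		else {
			*dupentry = entry;
			return -EEXIST;
		}
	}
	rb_link_node(&new_entry->rbnode, parent, link);
	rb_insert_color(&new_entry->rbnode, root);
	return 0;
}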

>                         link = &(*link)->rb_right;
>                 else {
>                         *dupentry = myentry;
> @@ -598,7 +598,6 @@ static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
>  static int zswap_shrink(struct zswap_pool *pool)
>  {
>         struct zswap_entry *lru_entry, *tree_entry = NULL;
> -       struct zswap_header *zhdr;
>         struct zswap_tree *tree;
>         int swpoffset;
>         int ret;
> @@ -611,15 +610,13 @@ static int zswap_shrink(struct zswap_pool *pool)
>         }
>         lru_entry = list_last_entry(&pool->lru, struct zswap_entry, lru);
>         list_del_init(&lru_entry->lru);
> -       zhdr = zpool_map_handle(pool->zpool, lru_entry->handle, ZPOOL_MM_RO);
> -       tree = zswap_trees[swp_type(zhdr->swpentry)];
> -       zpool_unmap_handle(pool->zpool, lru_entry->handle);
>         /*
>          * Once the pool lock is dropped, the lru_entry might get freed. The
>          * swpoffset is copied to the stack, and lru_entry isn't deref'd again
>          * until the entry is verified to still be alive in the tree.
>          */
> -       swpoffset = swp_offset(zhdr->swpentry);
> +       swpoffset = swp_offset(lru_entry->swpentry);
> +       tree = zswap_trees[swp_type(lru_entry->swpentry)];
>         spin_unlock(&pool->lru_lock);
>
>         /* hold a reference from tree so it won't be freed during writeback */
> @@ -633,7 +630,7 @@ static int zswap_shrink(struct zswap_pool *pool)
>         }
>         spin_unlock(&tree->lock);
>
> -       ret = zswap_writeback_entry(lru_entry, zhdr, tree);
> +       ret = zswap_writeback_entry(lru_entry, tree);
>
>         spin_lock(&tree->lock);
>         if (ret) {
> @@ -1046,10 +1043,10 @@ static int zswap_get_swap_cache_page(swp_entry_t entry,
>   * the swap cache, the compressed version stored by zswap can be
>   * freed.
>   */
> -static int zswap_writeback_entry(struct zswap_entry *entry, struct zswap_header *zhdr,
> +static int zswap_writeback_entry(struct zswap_entry *entry,
>                                  struct zswap_tree *tree)
>  {
> -       swp_entry_t swpentry = zhdr->swpentry;
> +       swp_entry_t swpentry = entry->swpentry;
>         struct page *page;
>         struct scatterlist input, output;
>         struct crypto_acomp_ctx *acomp_ctx;
> @@ -1089,7 +1086,7 @@ static int zswap_writeback_entry(struct zswap_entry *entry, struct zswap_header
>                  * writing.
>                  */
>                 spin_lock(&tree->lock);
> -               if (zswap_rb_search(&tree->rbroot, entry->offset) != entry) {
> +               if (zswap_rb_search(&tree->rbroot, swp_offset(entry->swpentry)) != entry) {
>                         spin_unlock(&tree->lock);
>                         delete_from_swap_cache(page_folio(page));
>                         ret = -ENOMEM;
> @@ -1101,8 +1098,7 @@ static int zswap_writeback_entry(struct zswap_entry *entry, struct zswap_header
>                 acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
>                 dlen = PAGE_SIZE;
>
> -               zhdr = zpool_map_handle(pool, entry->handle, ZPOOL_MM_RO);
> -               src = (u8 *)zhdr + sizeof(struct zswap_header);
> +               src = zpool_map_handle(pool, entry->handle, ZPOOL_MM_RO);
>                 if (!zpool_can_sleep_mapped(pool)) {
>                         memcpy(tmp, src, entry->length);
>                         src = tmp;
> @@ -1196,11 +1192,10 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
>         struct obj_cgroup *objcg = NULL;
>         struct zswap_pool *pool;
>         int ret;
> -       unsigned int hlen, dlen = PAGE_SIZE;
> +       unsigned int dlen = PAGE_SIZE;
>         unsigned long handle, value;
>         char *buf;
>         u8 *src, *dst;
> -       struct zswap_header zhdr = { .swpentry = swp_entry(type, offset) };
>         gfp_t gfp;
>
>         /* THP isn't supported */
> @@ -1245,7 +1240,7 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
>                 src = kmap_atomic(page);
>                 if (zswap_is_page_same_filled(src, &value)) {
>                         kunmap_atomic(src);
> -                       entry->offset = offset;
> +                       entry->swpentry = swp_entry(type, offset);
>                         entry->length = 0;
>                         entry->value = value;
>                         atomic_inc(&zswap_same_filled_pages);
> @@ -1299,11 +1294,10 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
>         }
>
>         /* store */
> -       hlen = sizeof(zhdr);
>         gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
>         if (zpool_malloc_support_movable(entry->pool->zpool))
>                 gfp |= __GFP_HIGHMEM | __GFP_MOVABLE;
> -       ret = zpool_malloc(entry->pool->zpool, hlen + dlen, gfp, &handle);
> +       ret = zpool_malloc(entry->pool->zpool, dlen, gfp, &handle);
>         if (ret == -ENOSPC) {
>                 zswap_reject_compress_poor++;
>                 goto put_dstmem;
> @@ -1313,13 +1307,12 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
>                 goto put_dstmem;
>         }
>         buf = zpool_map_handle(entry->pool->zpool, handle, ZPOOL_MM_WO);
> -       memcpy(buf, &zhdr, hlen);
> -       memcpy(buf + hlen, dst, dlen);
> +       memcpy(buf, dst, dlen);
>         zpool_unmap_handle(entry->pool->zpool, handle);
>         mutex_unlock(acomp_ctx->mutex);
>
>         /* populate entry */
> -       entry->offset = offset;
> +       entry->swpentry = swp_entry(type, offset);
>         entry->handle = handle;
>         entry->length = dlen;
>
> @@ -1418,7 +1411,6 @@ static int zswap_frontswap_load(unsigned type, pgoff_t offset,
>         /* decompress */
>         dlen = PAGE_SIZE;
>         src = zpool_map_handle(entry->pool->zpool, entry->handle, ZPOOL_MM_RO);
> -       src += sizeof(struct zswap_header);
>
>         if (!zpool_can_sleep_mapped(entry->pool->zpool)) {
>                 memcpy(tmp, src, entry->length);
> --
> 2.34.1
>
