Date:   Fri, 18 Nov 2022 11:32:01 -0800
From:   Minchan Kim <minchan@...nel.org>
To:     Nhat Pham <nphamcs@...il.com>
Cc:     akpm@...ux-foundation.org, hannes@...xchg.org, linux-mm@...ck.org,
        linux-kernel@...r.kernel.org, ngupta@...are.org,
        senozhatsky@...omium.org, sjenning@...hat.com, ddstreet@...e.org,
        vitaly.wool@...sulko.com
Subject: Re: [PATCH v5 4/6] zsmalloc: Add a LRU to zs_pool to keep track of
 zspages in LRU order

On Fri, Nov 18, 2022 at 10:24:05AM -0800, Nhat Pham wrote:
> This helps determine the coldest zspages as candidates for writeback.
> 
> Signed-off-by: Nhat Pham <nphamcs@...il.com>
> ---
>  mm/zsmalloc.c | 45 +++++++++++++++++++++++++++++++++++++++++++--
>  1 file changed, 43 insertions(+), 2 deletions(-)
> 
> diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
> index 326faa751f0a..9e7b54324181 100644
> --- a/mm/zsmalloc.c
> +++ b/mm/zsmalloc.c
> @@ -239,6 +239,11 @@ struct zs_pool {
>  	/* Compact classes */
>  	struct shrinker shrinker;
> 
> +#ifdef CONFIG_ZPOOL
> +	/* List tracking the zspages in LRU order by most recently added object */
> +	struct list_head lru;
> +#endif
> +
>  #ifdef CONFIG_ZSMALLOC_STAT
>  	struct dentry *stat_dentry;
>  #endif
> @@ -260,6 +265,12 @@ struct zspage {
>  	unsigned int freeobj;
>  	struct page *first_page;
>  	struct list_head list; /* fullness list */
> +
> +#ifdef CONFIG_ZPOOL
> +	/* links the zspage to the lru list in the pool */
> +	struct list_head lru;
> +#endif
> +
>  	struct zs_pool *pool;
>  #ifdef CONFIG_COMPACTION
>  	rwlock_t lock;
> @@ -352,6 +363,18 @@ static void cache_free_zspage(struct zs_pool *pool, struct zspage *zspage)
>  	kmem_cache_free(pool->zspage_cachep, zspage);
>  }
> 
> +#ifdef CONFIG_ZPOOL
> +/* Moves the zspage to the front of the zspool's LRU */
> +static void move_to_front(struct zs_pool *pool, struct zspage *zspage)
> +{
> +	assert_spin_locked(&pool->lock);
> +
> +	if (!list_empty(&zspage->lru))
> +		list_del(&zspage->lru);
> +	list_add(&zspage->lru, &pool->lru);
> +}
> +#endif
> +
>  /* pool->lock(which owns the handle) synchronizes races */
>  static void record_obj(unsigned long handle, unsigned long obj)
>  {
> @@ -953,6 +976,9 @@ static void free_zspage(struct zs_pool *pool, struct size_class *class,
>  	}
> 
>  	remove_zspage(class, zspage, ZS_EMPTY);
> +#ifdef CONFIG_ZPOOL
> +	list_del(&zspage->lru);
> +#endif
>  	__free_zspage(pool, class, zspage);
>  }
> 
> @@ -998,6 +1024,10 @@ static void init_zspage(struct size_class *class, struct zspage *zspage)
>  		off %= PAGE_SIZE;
>  	}
> 
> +#ifdef CONFIG_ZPOOL
> +	INIT_LIST_HEAD(&zspage->lru);
> +#endif
> +
>  	set_freeobj(zspage, 0);
>  }
> 
> @@ -1418,9 +1448,8 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
>  		fix_fullness_group(class, zspage);
>  		record_obj(handle, obj);
>  		class_stat_inc(class, OBJ_USED, 1);
> -		spin_unlock(&pool->lock);
> 
> -		return handle;
> +		goto out;
>  	}
> 
>  	spin_unlock(&pool->lock);
> @@ -1444,6 +1473,11 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
> 
>  	/* We completely set up zspage so mark them as movable */
>  	SetZsPageMovable(pool, zspage);
> +out:
> +#ifdef CONFIG_ZPOOL
> +	/* Move the zspage to front of pool's LRU */
> +	move_to_front(pool, zspage);
> +#endif
>  	spin_unlock(&pool->lock);

Please move the move_to_front() call into zs_map_object() for the
ZS_MM_WO case, with a comment explaining why we do it only for the WO
case.
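
Something like the below is what I have in mind. This is an untested
sketch just to illustrate the placement (the zs_map_object() context
lines are from memory, and the comment wording is only an example):

	void *zs_map_object(struct zs_pool *pool, unsigned long handle,
			    enum zs_mapmode mm)
	{
		...
		/* zspage has been looked up; pool->lock is still held */
	#ifdef CONFIG_ZPOOL
		/*
		 * Update the LRU only for the write mapping: that is the
		 * point where a new object is populated, i.e. where the
		 * zspage becomes "most recently used". Map is also called
		 * for read during writeback, and rotating the zspage to
		 * the front there would defeat the aging.
		 */
		if (mm == ZS_MM_WO)
			move_to_front(pool, zspage);
	#endif
		...
	}

With that, zs_malloc() can keep returning the handle early, and the
goto out / move_to_front() change there goes away.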
