Date: Tue, 30 Jan 2024 15:48:13 -0800
From: Nhat Pham <nphamcs@...il.com>
To: Johannes Weiner <hannes@...xchg.org>
Cc: Andrew Morton <akpm@...ux-foundation.org>, Yosry Ahmed <yosryahmed@...gle.com>, 
	Chengming Zhou <zhouchengming@...edance.com>, linux-mm@...ck.org, 
	linux-kernel@...r.kernel.org
Subject: Re: [PATCH 15/20] mm: zswap: function ordering: move entry sections
 out of LRU section

On Mon, Jan 29, 2024 at 5:42 PM Johannes Weiner <hannes@...xchg.org> wrote:
>
> This completes consolidation of the LRU section.
>
> Signed-off-by: Johannes Weiner <hannes@...xchg.org>

LGTM.
Reviewed-by: Nhat Pham <nphamcs@...il.com>

> ---
>  mm/zswap.c | 101 ++++++++++++++++++++++++++---------------------------
>  1 file changed, 49 insertions(+), 52 deletions(-)
>
> diff --git a/mm/zswap.c b/mm/zswap.c
> index 511bfafc1456..756d4d575efe 100644
> --- a/mm/zswap.c
> +++ b/mm/zswap.c
> @@ -768,58 +768,6 @@ static inline int entry_to_nid(struct zswap_entry *entry)
>         return page_to_nid(virt_to_page(entry));
>  }
>
> -void zswap_lruvec_state_init(struct lruvec *lruvec)
> -{
> -       atomic_long_set(&lruvec->zswap_lruvec_state.nr_zswap_protected, 0);
> -}
> -
> -void zswap_folio_swapin(struct folio *folio)
> -{
> -       struct lruvec *lruvec;
> -
> -       if (folio) {
> -               lruvec = folio_lruvec(folio);
> -               atomic_long_inc(&lruvec->zswap_lruvec_state.nr_zswap_protected);
> -       }
> -}
> -
> -void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg)
> -{
> -       struct zswap_pool *pool;
> -
> -       /* lock out zswap pools list modification */
> -       spin_lock(&zswap_pools_lock);
> -       list_for_each_entry(pool, &zswap_pools, list) {
> -               if (pool->next_shrink == memcg)
> -                       pool->next_shrink = mem_cgroup_iter(NULL, pool->next_shrink, NULL);
> -       }
> -       spin_unlock(&zswap_pools_lock);
> -}
> -
> -/*********************************
> -* zswap entry functions
> -**********************************/
> -static struct kmem_cache *zswap_entry_cache;
> -
> -static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp, int nid)
> -{
> -       struct zswap_entry *entry;
> -       entry = kmem_cache_alloc_node(zswap_entry_cache, gfp, nid);
> -       if (!entry)
> -               return NULL;
> -       entry->refcount = 1;
> -       RB_CLEAR_NODE(&entry->rbnode);
> -       return entry;
> -}
> -
> -static void zswap_entry_cache_free(struct zswap_entry *entry)
> -{
> -       kmem_cache_free(zswap_entry_cache, entry);
> -}
> -
> -/*********************************
> -* lru functions
> -**********************************/
>  static void zswap_lru_add(struct list_lru *list_lru, struct zswap_entry *entry)
>  {
>         atomic_long_t *nr_zswap_protected;
> @@ -872,6 +820,55 @@ static void zswap_lru_del(struct list_lru *list_lru, struct zswap_entry *entry)
>         rcu_read_unlock();
>  }
>
> +void zswap_lruvec_state_init(struct lruvec *lruvec)
> +{
> +       atomic_long_set(&lruvec->zswap_lruvec_state.nr_zswap_protected, 0);
> +}
> +
> +void zswap_folio_swapin(struct folio *folio)
> +{
> +       struct lruvec *lruvec;
> +
> +       if (folio) {
> +               lruvec = folio_lruvec(folio);
> +               atomic_long_inc(&lruvec->zswap_lruvec_state.nr_zswap_protected);
> +       }
> +}
> +
> +void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg)
> +{
> +       struct zswap_pool *pool;
> +
> +       /* lock out zswap pools list modification */
> +       spin_lock(&zswap_pools_lock);
> +       list_for_each_entry(pool, &zswap_pools, list) {
> +               if (pool->next_shrink == memcg)
> +                       pool->next_shrink = mem_cgroup_iter(NULL, pool->next_shrink, NULL);
> +       }
> +       spin_unlock(&zswap_pools_lock);
> +}
> +
> +/*********************************
> +* zswap entry functions
> +**********************************/
> +static struct kmem_cache *zswap_entry_cache;
> +
> +static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp, int nid)
> +{
> +       struct zswap_entry *entry;
> +       entry = kmem_cache_alloc_node(zswap_entry_cache, gfp, nid);
> +       if (!entry)
> +               return NULL;
> +       entry->refcount = 1;
> +       RB_CLEAR_NODE(&entry->rbnode);
> +       return entry;
> +}
> +
> +static void zswap_entry_cache_free(struct zswap_entry *entry)
> +{
> +       kmem_cache_free(zswap_entry_cache, entry);
> +}
> +
>  /*********************************
>  * rbtree functions
>  **********************************/
> --
> 2.43.0
>
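For reference, a sketch of the section layout in mm/zswap.c that this patch
arrives at, reconstructed from the hunks above (declarations only, bodies
elided; the sections preceding entry_to_nid() are untouched by this patch
and omitted here):

	static inline int entry_to_nid(struct zswap_entry *entry);

	/* LRU section, now fully consolidated */
	static void zswap_lru_add(struct list_lru *list_lru, struct zswap_entry *entry);
	static void zswap_lru_del(struct list_lru *list_lru, struct zswap_entry *entry);
	void zswap_lruvec_state_init(struct lruvec *lruvec);
	void zswap_folio_swapin(struct folio *folio);
	void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg);

	/*********************************
	* zswap entry functions
	**********************************/
	static struct kmem_cache *zswap_entry_cache;
	static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp, int nid);
	static void zswap_entry_cache_free(struct zswap_entry *entry);

	/*********************************
	* rbtree functions
	**********************************/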
