[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <CAB=+i9Sm7Q6rBm0eQ4gVeToc1Lx8RWSWTaiBUwK1vPSrksCgpg@mail.gmail.com>
Date: Sun, 3 Dec 2023 18:27:43 +0900
From: Hyeonggon Yoo <42.hyeyoo@...il.com>
To: chengming.zhou@...ux.dev
Cc: vbabka@...e.cz, cl@...ux.com, penberg@...nel.org,
rientjes@...gle.com, iamjoonsoo.kim@....com,
akpm@...ux-foundation.org, roman.gushchin@...ux.dev,
linux-mm@...ck.org, linux-kernel@...r.kernel.org,
Chengming Zhou <zhouchengming@...edance.com>
Subject: Re: [PATCH v5 8/9] slub: Rename all *unfreeze_partials* functions to *put_partials*
On Thu, Nov 2, 2023 at 12:25 PM <chengming.zhou@...ux.dev> wrote:
>
> From: Chengming Zhou <zhouchengming@...edance.com>
>
> Since all partial slabs on the CPU partial list are not frozen anymore,
> we don't unfreeze when moving cpu partial slabs to the node partial list,
> so it's better to rename these functions.
>
> Signed-off-by: Chengming Zhou <zhouchengming@...edance.com>
> Reviewed-by: Vlastimil Babka <vbabka@...e.cz>
> Tested-by: Hyeonggon Yoo <42.hyeyoo@...il.com>
> ---
> mm/slub.c | 34 +++++++++++++++++-----------------
> 1 file changed, 17 insertions(+), 17 deletions(-)
>
> diff --git a/mm/slub.c b/mm/slub.c
> index d137468fe4b9..c20bdf5dab0f 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -2546,7 +2546,7 @@ static void deactivate_slab(struct kmem_cache *s, struct slab *slab,
> }
>
> #ifdef CONFIG_SLUB_CPU_PARTIAL
> -static void __unfreeze_partials(struct kmem_cache *s, struct slab *partial_slab)
> +static void __put_partials(struct kmem_cache *s, struct slab *partial_slab)
> {
> struct kmem_cache_node *n = NULL, *n2 = NULL;
> struct slab *slab, *slab_to_discard = NULL;
> @@ -2588,9 +2588,9 @@ static void __unfreeze_partials(struct kmem_cache *s, struct slab *partial_slab)
> }
>
> /*
> - * Unfreeze all the cpu partial slabs.
> + * Put all the cpu partial slabs to the node partial list.
> */
> -static void unfreeze_partials(struct kmem_cache *s)
> +static void put_partials(struct kmem_cache *s)
> {
> struct slab *partial_slab;
> unsigned long flags;
> @@ -2601,11 +2601,11 @@ static void unfreeze_partials(struct kmem_cache *s)
> local_unlock_irqrestore(&s->cpu_slab->lock, flags);
>
> if (partial_slab)
> - __unfreeze_partials(s, partial_slab);
> + __put_partials(s, partial_slab);
> }
>
> -static void unfreeze_partials_cpu(struct kmem_cache *s,
> - struct kmem_cache_cpu *c)
> +static void put_partials_cpu(struct kmem_cache *s,
> + struct kmem_cache_cpu *c)
> {
> struct slab *partial_slab;
>
> @@ -2613,7 +2613,7 @@ static void unfreeze_partials_cpu(struct kmem_cache *s,
> c->partial = NULL;
>
> if (partial_slab)
> - __unfreeze_partials(s, partial_slab);
> + __put_partials(s, partial_slab);
> }
>
> /*
> @@ -2626,7 +2626,7 @@ static void unfreeze_partials_cpu(struct kmem_cache *s,
> static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain)
> {
> struct slab *oldslab;
> - struct slab *slab_to_unfreeze = NULL;
> + struct slab *slab_to_put = NULL;
> unsigned long flags;
> int slabs = 0;
>
> @@ -2641,7 +2641,7 @@ static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain)
> * per node partial list. Postpone the actual unfreezing
> * outside of the critical section.
> */
> - slab_to_unfreeze = oldslab;
> + slab_to_put = oldslab;
> oldslab = NULL;
> } else {
> slabs = oldslab->slabs;
> @@ -2657,17 +2657,17 @@ static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain)
>
> local_unlock_irqrestore(&s->cpu_slab->lock, flags);
>
> - if (slab_to_unfreeze) {
> - __unfreeze_partials(s, slab_to_unfreeze);
> + if (slab_to_put) {
> + __put_partials(s, slab_to_put);
> stat(s, CPU_PARTIAL_DRAIN);
> }
> }
>
> #else /* CONFIG_SLUB_CPU_PARTIAL */
>
> -static inline void unfreeze_partials(struct kmem_cache *s) { }
> -static inline void unfreeze_partials_cpu(struct kmem_cache *s,
> - struct kmem_cache_cpu *c) { }
> +static inline void put_partials(struct kmem_cache *s) { }
> +static inline void put_partials_cpu(struct kmem_cache *s,
> + struct kmem_cache_cpu *c) { }
>
> #endif /* CONFIG_SLUB_CPU_PARTIAL */
>
> @@ -2709,7 +2709,7 @@ static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
> stat(s, CPUSLAB_FLUSH);
> }
>
> - unfreeze_partials_cpu(s, c);
> + put_partials_cpu(s, c);
> }
>
> struct slub_flush_work {
> @@ -2737,7 +2737,7 @@ static void flush_cpu_slab(struct work_struct *w)
> if (c->slab)
> flush_slab(s, c);
>
> - unfreeze_partials(s);
> + put_partials(s);
> }
>
> static bool has_cpu_slab(int cpu, struct kmem_cache *s)
> @@ -3168,7 +3168,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
> if (unlikely(!node_match(slab, node) ||
> !pfmemalloc_match(slab, gfpflags))) {
> slab->next = NULL;
> - __unfreeze_partials(s, slab);
> + __put_partials(s, slab);
> continue;
> }
>
> --
Looks good to me,
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@...il.com>
Thanks!
> 2.20.1
>
Powered by blists - more mailing lists