Message-ID: <0d6c3e3b-c270-bb7d-c038-64ee3f0257cd@suse.cz>
Date:   Wed, 11 Aug 2021 14:40:36 +0200
From:   Vlastimil Babka <vbabka@...e.cz>
To:     Muchun Song <songmuchun@...edance.com>, cl@...ux.com,
        penberg@...nel.org, rientjes@...gle.com, iamjoonsoo.kim@....com,
        akpm@...ux-foundation.org
Cc:     linux-mm@...ck.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH] mm: slub: remove preemption disabling from
 put_cpu_partial

On 8/11/21 1:19 PM, Muchun Song wrote:
> The commit d6e0b7fa1186 ("slub: make dead caches discard free slabs
> immediately") introduced this logic to speed up the destruction of
> per-memcg kmem caches, because at that time kmem caches created for a
> memory cgroup were only destroyed after the last page charged to the
> cgroup was freed. But since commit 9855609bde03 ("mm: memcg/slab:
> use a single set of kmem_caches for all accounted allocations"), we
> no longer have per-memcg kmem caches. Is this code pointless now?
> No: kmem_cache->cpu_partial can still be set to zero via 'echo 0 >
> /sys/kernel/slab/*/cpu_partial'. In that case the slab page is put
> onto the cpu partial list and then immediately moved to the node
> list, because slub_cpu_partial() returns zero. We can skip putting
> the slab page on the cpu partial list and move it to the node list
> directly. To simplify this, change the kmem_cache_has_cpu_partial()
> check in __slab_free() to slub_cpu_partial() and remove the
> corresponding code from put_cpu_partial().
> 
> Signed-off-by: Muchun Song <songmuchun@...edance.com>

Please check whether this still applies against current mmotm/-next; I
think it shouldn't anymore. Thanks.
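
For context, a toy user-space sketch of the decision this patch changes
(all names are stand-ins modelled on the kernel's, not the real code;
cpu_partial == 0 corresponds to the sysfs write quoted above):

	#include <stdbool.h>
	#include <stdio.h>

	struct kmem_cache { unsigned int cpu_partial; };

	static bool slub_cpu_partial(const struct kmem_cache *s)
	{
		return s->cpu_partial != 0;
	}

	/* Before: a slab that was on no list is always frozen onto the
	 * cpu partial list; with cpu_partial == 0 it is then immediately
	 * unfrozen to the node list, i.e. two list moves where one
	 * would do. */
	static const char *free_path_before(const struct kmem_cache *s)
	{
		return slub_cpu_partial(s)
			? "cpu partial list"
			: "cpu partial list -> node list";
	}

	/* After: __slab_free() freezes only when cpu partial lists are
	 * actually in use; otherwise the slab goes straight to the node
	 * partial list. */
	static const char *free_path_after(const struct kmem_cache *s)
	{
		return slub_cpu_partial(s) ? "cpu partial list"
					   : "node list";
	}

	int main(void)
	{
		struct kmem_cache s = { .cpu_partial = 0 };
		printf("before: %s\nafter:  %s\n",
		       free_path_before(&s), free_path_after(&s));
		return 0;
	}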

> ---
>  mm/slub.c | 23 +++--------------------
>  1 file changed, 3 insertions(+), 20 deletions(-)
> 
> diff --git a/mm/slub.c b/mm/slub.c
> index b6c5205252eb..69c8ada322a0 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -2438,7 +2438,6 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
>  	int pages;
>  	int pobjects;
>  
> -	preempt_disable();
>  	do {
>  		pages = 0;
>  		pobjects = 0;
> @@ -2470,16 +2469,7 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
>  		page->pobjects = pobjects;
>  		page->next = oldpage;
>  
> -	} while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page)
> -								!= oldpage);
> -	if (unlikely(!slub_cpu_partial(s))) {
> -		unsigned long flags;
> -
> -		local_irq_save(flags);
> -		unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
> -		local_irq_restore(flags);
> -	}
> -	preempt_enable();
> +	} while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
>  #endif	/* CONFIG_SLUB_CPU_PARTIAL */
>  }
>  
> @@ -3059,9 +3049,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
>  		was_frozen = new.frozen;
>  		new.inuse -= cnt;
>  		if ((!new.inuse || !prior) && !was_frozen) {
> -
> -			if (kmem_cache_has_cpu_partial(s) && !prior) {
> -
> +			if (slub_cpu_partial(s) && !prior) {
>  				/*
>  				 * Slab was on no list before and will be
>  				 * partially empty
> @@ -3069,9 +3057,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
>  				 * freeze it.
>  				 */
>  				new.frozen = 1;
> -
>  			} else { /* Needs to be taken off a list */
> -
>  				n = get_node(s, page_to_nid(page));
>  				/*
>  				 * Speculatively acquire the list_lock.
> @@ -3082,17 +3068,14 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
>  				 * other processors updating the list of slabs.
>  				 */
>  				spin_lock_irqsave(&n->list_lock, flags);
> -
>  			}
>  		}
> -
>  	} while (!cmpxchg_double_slab(s, page,
>  		prior, counters,
>  		head, new.counters,
>  		"__slab_free"));
>  
>  	if (likely(!n)) {
> -
>  		if (likely(was_frozen)) {
>  			/*
>  			 * The list lock was not taken therefore no list
> @@ -3118,7 +3101,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
>  	 * Objects left in the slab. If it was not on the partial list before
>  	 * then add it.
>  	 */
> -	if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
> +	if (unlikely(!prior)) {
>  		remove_full(s, n, page);
>  		add_partial(n, page, DEACTIVATE_TO_TAIL);
>  		stat(s, FREE_ADD_PARTIAL);
> 
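
For completeness, the this_cpu_cmpxchg() loop the patch keeps tolerates
being preempted mid-update: a racing modification makes the cmpxchg fail
and the loop retry. A minimal stand-alone sketch of the same publish
pattern, simplified to a plain C11 atomic instead of a per-cpu variable:

	#include <stdatomic.h>

	struct page { struct page *next; };

	static _Atomic(struct page *) partial_head;

	/* Push a page onto the list head. If another context updated the
	 * head between the load and the compare-exchange, the CAS fails
	 * and we retry, so no explicit preemption disabling is needed
	 * around the loop. */
	static void push_partial(struct page *page)
	{
		struct page *oldpage;

		do {
			oldpage = atomic_load(&partial_head);
			page->next = oldpage;
		} while (!atomic_compare_exchange_weak(&partial_head,
						       &oldpage, page));
	}

	int main(void)
	{
		struct page a = { 0 }, b = { 0 };

		push_partial(&a);
		push_partial(&b);
		return 0;
	}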
