Message-ID: <a53f9039-5cba-955b-009e-12e8c5ffb345@suse.cz>
Date:   Thu, 6 Aug 2020 14:42:44 +0200
From:   Vlastimil Babka <vbabka@...e.cz>
To:     Xunlei Pang <xlpang@...ux.alibaba.com>,
        Christoph Lameter <cl@...ux.com>,
        Andrew Morton <akpm@...ux-foundation.org>,
        Wen Yang <wenyang@...ux.alibaba.com>,
        Yang Shi <yang.shi@...ux.alibaba.com>,
        Roman Gushchin <guro@...com>
Cc:     linux-mm@...ck.org, linux-kernel@...r.kernel.org,
        Konstantin Khlebnikov <khlebnikov@...dex-team.ru>,
        David Rientjes <rientjes@...gle.com>
Subject: Re: [PATCH 1/2] mm/slub: Introduce two counters for the partial
 objects

On 7/2/20 10:32 AM, Xunlei Pang wrote:
> count_partial() iterates the partial page lists while holding the node
> list_lock, which takes a long time when the lists are large. This can
> cause a thundering-herd effect on list_lock contention, e.g. it causes
> business response-time jitter when "/proc/slabinfo" is read in our
> production environments.
> 
> This patch introduces two counters to maintain the actual number
> of partial objects dynamically instead of iterating the partial
> page lists with list_lock held.
> 
> The new kmem_cache_node counters are pfree_objects and ptotal_objects.
> The updates happen under list_lock in the slow path, so the performance
> impact is minimal.
> 
> Co-developed-by: Wen Yang <wenyang@...ux.alibaba.com>
> Signed-off-by: Xunlei Pang <xlpang@...ux.alibaba.com>

This or similar things seem to be reported every few months now; the last time
was here [1], AFAIK. The solution there was to just stop counting at some point.
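
For reference, that direction basically amounts to bounding the walk. A rough
sketch of a capped count (hypothetical, not copied from [1]; MAX_PARTIAL_TO_SCAN
is a made-up limit) could look like:

static unsigned long count_partial_capped(struct kmem_cache_node *n,
					  int (*get_count)(struct page *))
{
	unsigned long flags;
	unsigned long x = 0;
	unsigned long scanned = 0;
	struct page *page;

	spin_lock_irqsave(&n->list_lock, flags);
	list_for_each_entry(page, &n->partial, slab_list) {
		x += get_count(page);
		/* Give up once the walk gets too expensive. */
		if (++scanned >= MAX_PARTIAL_TO_SCAN)
			break;
	}
	spin_unlock_irqrestore(&n->list_lock, flags);
	return x;
}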

Shall we perhaps add these counters under CONFIG_SLUB_DEBUG then and be done
with it? If anyone needs the extreme performance and builds without
CONFIG_SLUB_DEBUG, I'd assume they also don't have userspace programs reading
/proc/slabinfo periodically anyway?

[1]
https://lore.kernel.org/linux-mm/158860845968.33385.4165926113074799048.stgit@buzz/
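
Concretely, I'd imagine just moving the two new fields next to the existing
debug-only counters in struct kmem_cache_node; a sketch only (field
order/neighbours from memory, to be checked against mm/slab.h):

#ifdef CONFIG_SLUB
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	/* Maintained only when SLUB debugging support is built in. */
	atomic_long_t pfree_objects;	/* partial free objects */
	atomic_long_t ptotal_objects;	/* partial total objects */
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif

The __update_partial_free()/__update_partial_total() helpers would then
compile to empty stubs when CONFIG_SLUB_DEBUG is off.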

> ---
>  mm/slab.h |  2 ++
>  mm/slub.c | 38 +++++++++++++++++++++++++++++++++++++-
>  2 files changed, 39 insertions(+), 1 deletion(-)
> 
> diff --git a/mm/slab.h b/mm/slab.h
> index 7e94700..5935749 100644
> --- a/mm/slab.h
> +++ b/mm/slab.h
> @@ -616,6 +616,8 @@ struct kmem_cache_node {
>  #ifdef CONFIG_SLUB
>  	unsigned long nr_partial;
>  	struct list_head partial;
> +	atomic_long_t pfree_objects; /* partial free objects */
> +	atomic_long_t ptotal_objects; /* partial total objects */
>  #ifdef CONFIG_SLUB_DEBUG
>  	atomic_long_t nr_slabs;
>  	atomic_long_t total_objects;
> diff --git a/mm/slub.c b/mm/slub.c
> index 6589b41..53890f3 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -1775,10 +1775,24 @@ static void discard_slab(struct kmem_cache *s, struct page *page)
>  /*
>   * Management of partially allocated slabs.
>   */
> +
> +static inline void
> +__update_partial_free(struct kmem_cache_node *n, long delta)
> +{
> +	atomic_long_add(delta, &n->pfree_objects);
> +}
> +
> +static inline void
> +__update_partial_total(struct kmem_cache_node *n, long delta)
> +{
> +	atomic_long_add(delta, &n->ptotal_objects);
> +}
> +
>  static inline void
>  __add_partial(struct kmem_cache_node *n, struct page *page, int tail)
>  {
>  	n->nr_partial++;
> +	__update_partial_total(n, page->objects);
>  	if (tail == DEACTIVATE_TO_TAIL)
>  		list_add_tail(&page->slab_list, &n->partial);
>  	else
> @@ -1798,6 +1812,7 @@ static inline void remove_partial(struct kmem_cache_node *n,
>  	lockdep_assert_held(&n->list_lock);
>  	list_del(&page->slab_list);
>  	n->nr_partial--;
> +	__update_partial_total(n, -page->objects);
>  }
>  
>  /*
> @@ -1842,6 +1857,7 @@ static inline void *acquire_slab(struct kmem_cache *s,
>  		return NULL;
>  
>  	remove_partial(n, page);
> +	__update_partial_free(n, -*objects);
>  	WARN_ON(!freelist);
>  	return freelist;
>  }
> @@ -2174,8 +2190,11 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
>  				"unfreezing slab"))
>  		goto redo;
>  
> -	if (lock)
> +	if (lock) {
> +		if (m == M_PARTIAL)
> +			__update_partial_free(n, page->objects - page->inuse);
>  		spin_unlock(&n->list_lock);
> +	}
>  
>  	if (m == M_PARTIAL)
>  		stat(s, tail);
> @@ -2241,6 +2260,7 @@ static void unfreeze_partials(struct kmem_cache *s,
>  			discard_page = page;
>  		} else {
>  			add_partial(n, page, DEACTIVATE_TO_TAIL);
> +			__update_partial_free(n, page->objects - page->inuse);
>  			stat(s, FREE_ADD_PARTIAL);
>  		}
>  	}
> @@ -2915,6 +2935,14 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
>  		head, new.counters,
>  		"__slab_free"));
>  
> +	if (!was_frozen && prior) {
> +		if (n)
> +			__update_partial_free(n, cnt);
> +		else
> +			__update_partial_free(get_node(s, page_to_nid(page)),
> +					cnt);
> +	}
> +
>  	if (likely(!n)) {
>  
>  		/*
> @@ -2944,6 +2972,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
>  	if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
>  		remove_full(s, n, page);
>  		add_partial(n, page, DEACTIVATE_TO_TAIL);
> +		__update_partial_free(n, page->objects - page->inuse);
>  		stat(s, FREE_ADD_PARTIAL);
>  	}
>  	spin_unlock_irqrestore(&n->list_lock, flags);
> @@ -2955,6 +2984,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
>  		 * Slab on the partial list.
>  		 */
>  		remove_partial(n, page);
> +		__update_partial_free(n, page->inuse - page->objects);
>  		stat(s, FREE_REMOVE_PARTIAL);
>  	} else {
>  		/* Slab must be on the full list */
> @@ -3364,6 +3394,8 @@ static inline int calculate_order(unsigned int size)
>  	n->nr_partial = 0;
>  	spin_lock_init(&n->list_lock);
>  	INIT_LIST_HEAD(&n->partial);
> +	atomic_long_set(&n->pfree_objects, 0);
> +	atomic_long_set(&n->ptotal_objects, 0);
>  #ifdef CONFIG_SLUB_DEBUG
>  	atomic_long_set(&n->nr_slabs, 0);
>  	atomic_long_set(&n->total_objects, 0);
> @@ -3437,6 +3469,7 @@ static void early_kmem_cache_node_alloc(int node)
>  	 * initialized and there is no concurrent access.
>  	 */
>  	__add_partial(n, page, DEACTIVATE_TO_HEAD);
> +	__update_partial_free(n, page->objects - page->inuse);
>  }
>  
>  static void free_kmem_cache_nodes(struct kmem_cache *s)
> @@ -3747,6 +3780,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
>  	list_for_each_entry_safe(page, h, &n->partial, slab_list) {
>  		if (!page->inuse) {
>  			remove_partial(n, page);
> +			__update_partial_free(n, page->objects - page->inuse);
>  			list_add(&page->slab_list, &discard);
>  		} else {
>  			list_slab_objects(s, page,
> @@ -4045,6 +4079,8 @@ int __kmem_cache_shrink(struct kmem_cache *s)
>  			if (free == page->objects) {
>  				list_move(&page->slab_list, &discard);
>  				n->nr_partial--;
> +				__update_partial_free(n, -free);
> +				__update_partial_total(n, -free);
>  			} else if (free <= SHRINK_PROMOTE_MAX)
>  				list_move(&page->slab_list, promote + free - 1);
>  		}
> 
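
Presumably patch 2/2 is what actually consumes these counters; just to spell
out the intended benefit, the slabinfo path could then avoid the list walk
entirely. A hypothetical reader, not taken from this series:

static unsigned long partial_free_approx(struct kmem_cache_node *n)
{
	return atomic_long_read(&n->pfree_objects);
}

static unsigned long partial_total_approx(struct kmem_cache_node *n)
{
	return atomic_long_read(&n->ptotal_objects);
}

These reads are racy with respect to concurrent updates, but for
/proc/slabinfo an approximate count should be fine.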
