Message-ID: <9cb6a9a3-25fb-22ca-8b62-52c60519bee2@google.com>
Date: Thu, 21 Dec 2023 13:50:44 -0800 (PST)
From: David Rientjes <rientjes@...gle.com>
To: Ruipeng Qi <ruipengqi7@...il.com>
cc: cl@...ux.com, penberg@...nel.org, iamjoonsoo.kim@....com, 
    akpm@...ux-foundation.org, vbabka@...e.cz, roman.gushchin@...ux.dev, 
    42.hyeyoo@...il.com, linux-mm@...ck.org, linux-kernel@...r.kernel.org, 
    qiruipeng <qiruipeng@...iang.com>
Subject: Re: [RFC PATCH 6/7] mm/slub: make slab data more observable

On Thu, 21 Dec 2023, Ruipeng Qi wrote:

> From: qiruipeng <qiruipeng@...iang.com>
> 
> Osdump is interested in data stored within the slab subsystem. Add the
> full list back into the corresponding struct, and record full slabs in
> the respective functions instead of enabling SLUB_DEBUG outright, which
> would introduce noticeable overhead.
> 
> Signed-off-by: qiruipeng <qiruipeng@...iang.com>

Hi Ruipeng, please make sure to send your patch sets as a single email 
thread (all patches 1-7 should be a reply to your 0/7 cover letter).
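
(For reference, a series generated with git format-patch --cover-letter
and sent in a single git send-email run is threaded this way by default:
each patch mail carries an In-Reply-To header pointing at the cover
letter, so the archive shows one thread, roughly

  [PATCH 0/7] <cover letter>
   `-> [PATCH 1/7] ...
   `-> [PATCH 2/7] ...
       ...
   `-> [PATCH 7/7] ...

with the per-patch subjects elided here.)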

There is other feedback on earlier patches in this series that refers to 
alternatives, so I think the cover letter for the patch series will need 
to spell out why we need a brand new solution for this.

That said, from the slab perspective, this is basically splitting a part 
of SLUB_DEBUG out into a standalone feature.  It would likely be best to 
propose it as a feature that SLUB_DEBUG itself then enables directly, 
rather than duplicating the definitions.
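
A minimal sketch of that shape (SLAB_TRACK_FULL is a hypothetical name, 
not something in the tree):

	/*
	 * One helper symbol that both options turn on (in practice this
	 * would more likely be a hidden Kconfig symbol that SLUB_DEBUG
	 * and OS_MINIDUMP "select"), so each site tests one condition
	 * instead of repeating #elif defined(CONFIG_OS_MINIDUMP).
	 */
	#if defined(CONFIG_SLUB_DEBUG) || defined(CONFIG_OS_MINIDUMP)
	#define SLAB_TRACK_FULL
	#endif

	struct kmem_cache_node {
		...
	#ifdef SLAB_TRACK_FULL
		struct list_head full;	/* single definition */
	#endif
	};

The add_full()/remove_full() definitions, the INIT_LIST_HEAD() in 
init_kmem_cache_node() and the loop in bootstrap() would then all key 
off that one symbol.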

That is, assuming the question about why we need a new solution for this 
can be resolved in the cover letter of a v2.

> ---
>  mm/slab.h |  2 ++
>  mm/slub.c | 38 +++++++++++++++++++++++++++++++++++++-
>  2 files changed, 39 insertions(+), 1 deletion(-)
> 
> diff --git a/mm/slab.h b/mm/slab.h
> index 3d07fb428393..a42a54c9c5de 100644
> --- a/mm/slab.h
> +++ b/mm/slab.h
> @@ -799,6 +799,8 @@ struct kmem_cache_node {
>  	atomic_long_t nr_slabs;
>  	atomic_long_t total_objects;
>  	struct list_head full;
> +#elif defined(CONFIG_OS_MINIDUMP)
> +	struct list_head full;
>  #endif
>  #endif
>  
> diff --git a/mm/slub.c b/mm/slub.c
> index 63d281dfacdb..1a496ec945b6 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -1730,10 +1730,26 @@ static inline int check_object(struct kmem_cache *s, struct slab *slab,
>  static inline depot_stack_handle_t set_track_prepare(void) { return 0; }
>  static inline void set_track(struct kmem_cache *s, void *object,
>  			     enum track_item alloc, unsigned long addr) {}
> +#ifndef CONFIG_OS_MINIDUMP
>  static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
>  					struct slab *slab) {}
>  static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
>  					struct slab *slab) {}
> +#else
> +static inline void add_full(struct kmem_cache *s,
> +	struct kmem_cache_node *n, struct slab *slab)
> +{
> +	lockdep_assert_held(&n->list_lock);
> +	list_add(&slab->slab_list, &n->full);
> +}
> +
> +static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct slab *slab)
> +{
> +	lockdep_assert_held(&n->list_lock);
> +	list_del(&slab->slab_list);
> +}
> +#endif
> +
>  slab_flags_t kmem_cache_flags(unsigned int object_size,
>  	slab_flags_t flags, const char *name)
>  {
> @@ -2570,6 +2586,14 @@ static void deactivate_slab(struct kmem_cache *s, struct slab *slab,
>  		spin_lock_irqsave(&n->list_lock, flags);
>  	} else {
>  		mode = M_FULL_NOLIST;
> +#ifdef CONFIG_OS_MINIDUMP
> +		/*
> +		 * Taking the spinlock removes the possibility that
> +		 * acquire_slab() will see a slab that is frozen
> +		 */
> +		spin_lock_irqsave(&n->list_lock, flags);
> +
> +#endif
>  	}
>  
>  
> @@ -2577,7 +2601,11 @@ static void deactivate_slab(struct kmem_cache *s, struct slab *slab,
>  				old.freelist, old.counters,
>  				new.freelist, new.counters,
>  				"unfreezing slab")) {
> +#ifndef CONFIG_OS_MINIDUMP
>  		if (mode == M_PARTIAL)
> +#else
> +		if (mode != M_FREE)
> +#endif
>  			spin_unlock_irqrestore(&n->list_lock, flags);
>  		goto redo;
>  	}
> @@ -2592,6 +2620,10 @@ static void deactivate_slab(struct kmem_cache *s, struct slab *slab,
>  		discard_slab(s, slab);
>  		stat(s, FREE_SLAB);
>  	} else if (mode == M_FULL_NOLIST) {
> +#ifdef CONFIG_OS_MINIDUMP
> +		add_full(s, n, slab);
> +		spin_unlock_irqrestore(&n->list_lock, flags);
> +#endif
>  		stat(s, DEACTIVATE_FULL);
>  	}
>  }
> @@ -4202,6 +4234,9 @@ init_kmem_cache_node(struct kmem_cache_node *n)
>  	atomic_long_set(&n->nr_slabs, 0);
>  	atomic_long_set(&n->total_objects, 0);
>  	INIT_LIST_HEAD(&n->full);
> +#elif defined(CONFIG_OS_MINIDUMP)
> +	INIT_LIST_HEAD(&n->full);
> +
>  #endif
>  }
>  
> @@ -5009,7 +5044,8 @@ static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache)
>  		list_for_each_entry(p, &n->partial, slab_list)
>  			p->slab_cache = s;
>  
> -#ifdef CONFIG_SLUB_DEBUG
> +#if defined(CONFIG_SLUB_DEBUG) || \
> +	defined(CONFIG_OS_MINIDUMP)
>  		list_for_each_entry(p, &n->full, slab_list)
>  			p->slab_cache = s;
>  #endif
> -- 
> 2.17.1
> 
> 
