lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite for Android: free password hash cracker in your pocket
[<prev] [next>] [day] [month] [year] [list]
Date:   Wed, 17 Feb 2021 17:08:57 +0530
From:   Faiyaz Mohammed <faiyazm@...eaurora.org>
To:     cl@...ux.com, penberg@...nel.org, rientjes@...gle.com,
        iamjoonsoo.kim@....com, akpm@...ux-foundation.org, vbabka@...e.cz,
        willy@...radead.org, linux-kernel@...r.kernel.org,
        linux-mm@...ck.org
Cc:     vinmenon@...eaurora.org
Subject: Re: [PATCH v2] mm: slub: Convert sys slab alloc_calls, free_calls to
 bin attribute

+linux-mm, linux-kernel.

On 2/17/2021 12:01 PM, Faiyaz Mohammed wrote:
> Reading the sysfs slab alloc_calls and free_calls files returns the
> available object owners, but because sysfs attributes limit the file
> size to PAGE_SIZE, only partial owner info is returned. This is not
> sufficient to debug/account the slab memory, and the alloc_calls
> output does not match /proc/slabinfo.
> 
> To remove the PAGE_SIZE limitation, convert the sysfs slab
> alloc_calls and free_calls files to bin attributes.
> 
> Signed-off-by: Faiyaz Mohammed <faiyazm@...eaurora.org>
> ---
>  mm/slub.c | 84 +++++++++++++++++++++++++++++++++++++++++++++++----------------
>  1 file changed, 63 insertions(+), 21 deletions(-)
> 
> diff --git a/mm/slub.c b/mm/slub.c
> index b22a4b1..71cfe3b 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -37,6 +37,9 @@
>  
>  #include <trace/events/kmem.h>
>  
> +#define TRACE_ENTRY_MAX 80
> +#define TRACKS_PER_PAGE  ((PAGE_SIZE - KSYM_SYMBOL_LEN - 100) / TRACE_ENTRY_MAX)
> +
>  #include "internal.h"
>  
>  /*
> @@ -4748,6 +4751,7 @@ static int list_locations(struct kmem_cache *s, char *buf,
>  	struct loc_track t = { 0, 0, NULL };
>  	int node;
>  	struct kmem_cache_node *n;
> +	unsigned int previous_read_count = 0;
>  
>  	if (!alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
>  			     GFP_KERNEL)) {
> @@ -4756,6 +4760,11 @@ static int list_locations(struct kmem_cache *s, char *buf,
>  	/* Push back cpu slabs */
>  	flush_all(s);
>  
> +	if (offset != 0)
> +		previous_read_count = (offset / TRACE_ENTRY_MAX);
> +
> +	memset(buf, 0, PAGE_SIZE);
> +
>  	for_each_kmem_cache_node(s, node, n) {
>  		unsigned long flags;
>  		struct page *page;
> @@ -4771,48 +4780,62 @@ static int list_locations(struct kmem_cache *s, char *buf,
>  		spin_unlock_irqrestore(&n->list_lock, flags);
>  	}
>  
> -	for (i = 0; i < t.count; i++) {
> +	for (i = previous_read_count; i < t.count; i++) {
>  		struct location *l = &t.loc[i];
> +		unsigned int cur_len = 0;
>  
> -		len += sysfs_emit_at(buf, len, "%7ld ", l->count);
> +		cur_len += sysfs_emit_at(buf, cur_len + len, "%7ld ", l->count);
>  
>  		if (l->addr)
> -			len += sysfs_emit_at(buf, len, "%pS", (void *)l->addr);
> +			cur_len += sysfs_emit_at(buf, cur_len + len, "%pS", (void *)l->addr);
>  		else
> -			len += sysfs_emit_at(buf, len, "<not-available>");
> +			cur_len += sysfs_emit_at(buf, cur_len + len, "<not-available>");
>  
>  		if (l->sum_time != l->min_time)
> -			len += sysfs_emit_at(buf, len, " age=%ld/%ld/%ld",
> +			cur_len += sysfs_emit_at(buf, cur_len + len, " age=%ld/%ld/%ld",
>  					     l->min_time,
>  					     (long)div_u64(l->sum_time,
>  							   l->count),
>  					     l->max_time);
>  		else
> -			len += sysfs_emit_at(buf, len, " age=%ld", l->min_time);
> +			cur_len += sysfs_emit_at(buf, cur_len + len, " age=%ld", l->min_time);
>  
>  		if (l->min_pid != l->max_pid)
> -			len += sysfs_emit_at(buf, len, " pid=%ld-%ld",
> +			cur_len += sysfs_emit_at(buf, cur_len + len, " pid=%ld-%ld",
>  					     l->min_pid, l->max_pid);
>  		else
> -			len += sysfs_emit_at(buf, len, " pid=%ld",
> +			cur_len += sysfs_emit_at(buf, cur_len + len, " pid=%ld",
>  					     l->min_pid);
>  
>  		if (num_online_cpus() > 1 &&
>  		    !cpumask_empty(to_cpumask(l->cpus)))
> -			len += sysfs_emit_at(buf, len, " cpus=%*pbl",
> +			cur_len += sysfs_emit_at(buf, cur_len + len, " cpus=%*pbl",
>  					     cpumask_pr_args(to_cpumask(l->cpus)));
>  
>  		if (nr_online_nodes > 1 && !nodes_empty(l->nodes))
> -			len += sysfs_emit_at(buf, len, " nodes=%*pbl",
> +			cur_len += sysfs_emit_at(buf, cur_len + len, " nodes=%*pbl",
>  					     nodemask_pr_args(&l->nodes));
>  
> +		if (cur_len >= TRACE_ENTRY_MAX)
> +			cur_len -= (cur_len % TRACE_ENTRY_MAX) - 1;
> +		else if (cur_len < TRACE_ENTRY_MAX)
> +			cur_len += TRACE_ENTRY_MAX - (cur_len % TRACE_ENTRY_MAX) - 1;
> +
> +		len += cur_len;
> +
>  		len += sysfs_emit_at(buf, len, "\n");
> +
> +		if (i >= (previous_read_count + TRACKS_PER_PAGE))
> +			break;
> +
>  	}
>  
> -	free_loc_track(&t);
> -	if (!t.count)
> -		len += sysfs_emit_at(buf, len, "No data\n");
> +	if (((previous_read_count > t.count) | (i >= t.count)) && (offset != 0))
> +		len = 0;
> +	else if (!t.count)
> +		len += sprintf(buf, "No data\n");
>  
> +	free_loc_track(&t);
>  	return len;
>  }
>  #endif	/* CONFIG_SLUB_DEBUG */
> @@ -5280,21 +5303,33 @@ static ssize_t validate_store(struct kmem_cache *s,
>  }
>  SLAB_ATTR(validate);
>  
> -static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
> +static ssize_t alloc_calls_read(struct file *filp, struct kobject *kobj,
> +				struct bin_attribute *bin_attr, char *buf,
> +					loff_t offset, size_t count)
>  {
> +	struct kmem_cache *s;
> +
> +	s = to_slab(kobj);
>  	if (!(s->flags & SLAB_STORE_USER))
>  		return -ENOSYS;
> -	return list_locations(s, buf, TRACK_ALLOC);
> +
> +	return list_locations(s, buf, offset, TRACK_ALLOC);
>  }
> -SLAB_ATTR_RO(alloc_calls);
> +BIN_ATTR_RO(alloc_calls, 0);
>  
> -static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
> +static ssize_t free_calls_read(struct file *filp, struct kobject *kobj,
> +				struct bin_attribute *bin_attr, char *buf,
> +					loff_t offset, size_t count)
>  {
> +	struct kmem_cache *s;
> +
> +	s = to_slab(kobj);
>  	if (!(s->flags & SLAB_STORE_USER))
>  		return -ENOSYS;
> -	return list_locations(s, buf, TRACK_FREE);
> +
> +	return list_locations(s, buf, offset, TRACK_FREE);
>  }
> -SLAB_ATTR_RO(free_calls);
> +BIN_ATTR_RO(free_calls, 0);
>  #endif /* CONFIG_SLUB_DEBUG */
>  
>  #ifdef CONFIG_FAILSLAB
> @@ -5430,6 +5465,14 @@ STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node);
>  STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain);
>  #endif	/* CONFIG_SLUB_STATS */
>  
> +
> +static struct bin_attribute *slab_bin_attrs[] = {
> +#ifdef CONFIG_SLUB_DEBUG
> +	&bin_attr_alloc_calls,
> +	&bin_attr_free_calls,
> +#endif
> +};
> +
>  static struct attribute *slab_attrs[] = {
>  	&slab_size_attr.attr,
>  	&object_size_attr.attr,
> @@ -5458,8 +5501,6 @@ static struct attribute *slab_attrs[] = {
>  	&poison_attr.attr,
>  	&store_user_attr.attr,
>  	&validate_attr.attr,
> -	&alloc_calls_attr.attr,
> -	&free_calls_attr.attr,
>  #endif
>  #ifdef CONFIG_ZONE_DMA
>  	&cache_dma_attr.attr,
> @@ -5505,6 +5546,7 @@ static struct attribute *slab_attrs[] = {
>  
>  static const struct attribute_group slab_attr_group = {
>  	.attrs = slab_attrs,
> +	.bin_attrs = slab_bin_attrs,
>  };
>  
>  static ssize_t slab_attr_show(struct kobject *kobj,
> 

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ