lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <1217238098.7813.13.camel@penberg-laptop>
Date:	Mon, 28 Jul 2008 12:41:38 +0300
From:	Pekka Enberg <penberg@...helsinki.fi>
To:	Eduard - Gabriel Munteanu <eduard.munteanu@...ux360.ro>
Cc:	cl@...ux-foundation.org, linux-mm@...ck.org,
	linux-kernel@...r.kernel.org, rdunlap@...otime.net, mpm@...enic.com
Subject: Re: [RFC PATCH 4/4] kmemtrace: SLOB hooks.

On Tue, 2008-07-22 at 21:36 +0300, Eduard - Gabriel Munteanu wrote:
> This adds hooks for the SLOB allocator, to allow tracing with kmemtrace.
> 

The patch description could be a little less terse and maybe explain the
__always_inline changes but:

Reviewed-by: Pekka Enberg <penberg@...helsinki.fi>

> Signed-off-by: Eduard - Gabriel Munteanu <eduard.munteanu@...ux360.ro>
> ---
>  include/linux/slob_def.h |    9 +++++----
>  mm/slob.c                |   37 +++++++++++++++++++++++++++++++------
>  2 files changed, 36 insertions(+), 10 deletions(-)
> 
> diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
> index 59a3fa4..0ec00b3 100644
> --- a/include/linux/slob_def.h
> +++ b/include/linux/slob_def.h
> @@ -3,14 +3,15 @@
>  
>  void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
>  
> -static inline void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
> +static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
> +					      gfp_t flags)
>  {
>  	return kmem_cache_alloc_node(cachep, flags, -1);
>  }
>  
>  void *__kmalloc_node(size_t size, gfp_t flags, int node);
>  
> -static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
> +static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
>  {
>  	return __kmalloc_node(size, flags, node);
>  }
> @@ -23,12 +24,12 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
>   * kmalloc is the normal method of allocating memory
>   * in the kernel.
>   */
> -static inline void *kmalloc(size_t size, gfp_t flags)
> +static __always_inline void *kmalloc(size_t size, gfp_t flags)
>  {
>  	return __kmalloc_node(size, flags, -1);
>  }
>  
> -static inline void *__kmalloc(size_t size, gfp_t flags)
> +static __always_inline void *__kmalloc(size_t size, gfp_t flags)
>  {
>  	return kmalloc(size, flags);
>  }
> diff --git a/mm/slob.c b/mm/slob.c
> index a3ad667..23375ed 100644
> --- a/mm/slob.c
> +++ b/mm/slob.c
> @@ -65,6 +65,7 @@
>  #include <linux/module.h>
>  #include <linux/rcupdate.h>
>  #include <linux/list.h>
> +#include <linux/kmemtrace.h>
>  #include <asm/atomic.h>
>  
>  /*
> @@ -463,27 +464,38 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
>  {
>  	unsigned int *m;
>  	int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
> +	void *ret;
>  
>  	if (size < PAGE_SIZE - align) {
>  		if (!size)
>  			return ZERO_SIZE_PTR;
>  
>  		m = slob_alloc(size + align, gfp, align, node);
> +
>  		if (!m)
>  			return NULL;
>  		*m = size;
> -		return (void *)m + align;
> +		ret = (void *)m + align;
> +
> +		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
> +					  _RET_IP_, ret,
> +					  size, size + align, gfp, node);
>  	} else {
> -		void *ret;
> +		unsigned int order = get_order(size);
>  
> -		ret = slob_new_page(gfp | __GFP_COMP, get_order(size), node);
> +		ret = slob_new_page(gfp | __GFP_COMP, order, node);
>  		if (ret) {
>  			struct page *page;
>  			page = virt_to_page(ret);
>  			page->private = size;
>  		}
> -		return ret;
> +
> +		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
> +					  _RET_IP_, ret,
> +					  size, PAGE_SIZE << order, gfp, node);
>  	}
> +
> +	return ret;
>  }
>  EXPORT_SYMBOL(__kmalloc_node);
>  
> @@ -501,6 +513,8 @@ void kfree(const void *block)
>  		slob_free(m, *m + align);
>  	} else
>  		put_page(&sp->page);
> +
> +	kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, block);
>  }
>  EXPORT_SYMBOL(kfree);
>  
> @@ -569,10 +583,19 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
>  {
>  	void *b;
>  
> -	if (c->size < PAGE_SIZE)
> +	if (c->size < PAGE_SIZE) {
>  		b = slob_alloc(c->size, flags, c->align, node);
> -	else
> +		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE,
> +					  _RET_IP_, b, c->size,
> +					  SLOB_UNITS(c->size) * SLOB_UNIT,
> +					  flags, node);
> +	} else {
>  		b = slob_new_page(flags, get_order(c->size), node);
> +		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE,
> +					  _RET_IP_, b, c->size,
> +					  PAGE_SIZE << get_order(c->size),
> +					  flags, node);
> +	}
>  
>  	if (c->ctor)
>  		c->ctor(c, b);
> @@ -608,6 +631,8 @@ void kmem_cache_free(struct kmem_cache *c, void *b)
>  	} else {
>  		__kmem_cache_free(b, c->size);
>  	}
> +
> +	kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, b);
>  }
>  EXPORT_SYMBOL(kmem_cache_free);
>  

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ