Message-ID: <84144f020807170043w725769e5i7c24402613711690@mail.gmail.com>
Date:	Thu, 17 Jul 2008 10:43:14 +0300
From:	"Pekka Enberg" <penberg@...helsinki.fi>
To:	"Eduard - Gabriel Munteanu" <eduard.munteanu@...ux360.ro>
Cc:	cl@...ux-foundation.org, linux-mm@...ck.org,
	linux-kernel@...r.kernel.org, "Matt Mackall" <mpm@...enic.com>
Subject: Re: [RFC PATCH 4/4] kmemtrace: SLOB hooks.

Hi,

[Adding Matt as cc.]

On Thu, Jul 17, 2008 at 3:46 AM, Eduard - Gabriel Munteanu
<eduard.munteanu@...ux360.ro> wrote:
> This adds hooks for the SLOB allocator, to allow tracing with kmemtrace.
>
> Signed-off-by: Eduard - Gabriel Munteanu <eduard.munteanu@...ux360.ro>

Reviewed-by: Pekka Enberg <penberg@...helsinki.fi>

> ---
>  mm/slob.c |   37 +++++++++++++++++++++++++++++++------
>  1 files changed, 31 insertions(+), 6 deletions(-)
>
> diff --git a/mm/slob.c b/mm/slob.c
> index a3ad667..0335c01 100644
> --- a/mm/slob.c
> +++ b/mm/slob.c
> @@ -65,6 +65,7 @@
>  #include <linux/module.h>
>  #include <linux/rcupdate.h>
>  #include <linux/list.h>
> +#include <linux/kmemtrace.h>
>  #include <asm/atomic.h>
>
>  /*
> @@ -463,27 +464,38 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
>  {
>        unsigned int *m;
>        int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
> +       void *ret;
>
>        if (size < PAGE_SIZE - align) {
>                if (!size)
>                        return ZERO_SIZE_PTR;
>
>                m = slob_alloc(size + align, gfp, align, node);
> +
>                if (!m)
>                        return NULL;
>                *m = size;
> -               return (void *)m + align;
> +               ret = (void *)m + align;
> +
> +               kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KERNEL,
> +                                         _RET_IP_, ret,
> +                                         size, size + align, gfp, node);
>        } else {
> -               void *ret;
> +               unsigned int order = get_order(size);
>
> -               ret = slob_new_page(gfp | __GFP_COMP, get_order(size), node);
> +               ret = slob_new_page(gfp | __GFP_COMP, order, node);
>                if (ret) {
>                        struct page *page;
>                        page = virt_to_page(ret);
>                        page->private = size;
>                }
> -               return ret;
> +
> +               kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KERNEL,
> +                                         _RET_IP_, ret,
> +                                         size, PAGE_SIZE << order, gfp, node);
>        }
> +
> +       return ret;
>  }
>  EXPORT_SYMBOL(__kmalloc_node);
>
> @@ -501,6 +513,8 @@ void kfree(const void *block)
>                slob_free(m, *m + align);
>        } else
>                put_page(&sp->page);
> +
> +       kmemtrace_mark_free(KMEMTRACE_TYPE_KERNEL, _RET_IP_, block);
>  }
>  EXPORT_SYMBOL(kfree);
>
> @@ -569,10 +583,19 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
>  {
>        void *b;
>
> -       if (c->size < PAGE_SIZE)
> +       if (c->size < PAGE_SIZE) {
>                b = slob_alloc(c->size, flags, c->align, node);
> -       else
> +               kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE,
> +                                         _RET_IP_, b, c->size,
> +                                         SLOB_UNITS(c->size) * SLOB_UNIT,
> +                                         flags, node);
> +       } else {
>                b = slob_new_page(flags, get_order(c->size), node);
> +               kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE,
> +                                         _RET_IP_, b, c->size,
> +                                         PAGE_SIZE << get_order(c->size),
> +                                         flags, node);
> +       }
>
>        if (c->ctor)
>                c->ctor(c, b);
> @@ -608,6 +631,8 @@ void kmem_cache_free(struct kmem_cache *c, void *b)
>        } else {
>                __kmem_cache_free(b, c->size);
>        }
> +
> +       kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, b);
>  }
>  EXPORT_SYMBOL(kmem_cache_free);
>
> --
> 1.5.6.1
>
