Message-ID: <20160114130919.48254935@redhat.com>
Date: Thu, 14 Jan 2016 13:09:19 +0100
From: Jesper Dangaard Brouer <brouer@...hat.com>
To: Joonsoo Kim <js1304@...il.com>
Cc: Andrew Morton <akpm@...ux-foundation.org>,
Christoph Lameter <cl@...ux.com>,
Pekka Enberg <penberg@...nel.org>,
David Rientjes <rientjes@...gle.com>,
Joonsoo Kim <iamjoonsoo.kim@....com>, linux-mm@...ck.org,
linux-kernel@...r.kernel.org, brouer@...hat.com
Subject: Re: [PATCH 04/16] mm/slab: activate debug_pagealloc in SLAB when it
is actually enabled
On Thu, 14 Jan 2016 14:24:17 +0900
Joonsoo Kim <js1304@...il.com> wrote:
> Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@....com>
> ---
> mm/slab.c | 15 ++++++++++-----
> 1 file changed, 10 insertions(+), 5 deletions(-)
>
> diff --git a/mm/slab.c b/mm/slab.c
> index bbe4df2..4b55516 100644
> --- a/mm/slab.c
> +++ b/mm/slab.c
> @@ -1838,7 +1838,8 @@ static void slab_destroy_debugcheck(struct kmem_cache *cachep,
>
> if (cachep->flags & SLAB_POISON) {
> #ifdef CONFIG_DEBUG_PAGEALLOC
> - if (cachep->size % PAGE_SIZE == 0 &&
> + if (debug_pagealloc_enabled() &&
> + cachep->size % PAGE_SIZE == 0 &&
> OFF_SLAB(cachep))
> kernel_map_pages(virt_to_page(objp),
> cachep->size / PAGE_SIZE, 1);
> @@ -2176,7 +2177,8 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
> * to check size >= 256. It guarantees that all necessary small
> * sized slab is initialized in current slab initialization sequence.
> */
> - if (!slab_early_init && size >= kmalloc_size(INDEX_NODE) &&
> + if (debug_pagealloc_enabled() &&
> + !slab_early_init && size >= kmalloc_size(INDEX_NODE) &&
> size >= 256 && cachep->object_size > cache_line_size() &&
> ALIGN(size, cachep->align) < PAGE_SIZE) {
> cachep->obj_offset += PAGE_SIZE - ALIGN(size, cachep->align);
> @@ -2232,7 +2234,8 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
> * poisoning, then it's going to smash the contents of
> * the redzone and userword anyhow, so switch them off.
> */
> - if (size % PAGE_SIZE == 0 && flags & SLAB_POISON)
> + if (debug_pagealloc_enabled() &&
> + size % PAGE_SIZE == 0 && flags & SLAB_POISON)
> flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);

Sorry, but I dislike the indentation style here (when the if condition
covers several lines). The same goes for the other changes in this
patch. Looking around, there are several examples of this indentation
style in the existing mm/slab.c, so I don't know whether it is accepted
in the MM area (it is definitely not accepted in the NET area).
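
Just to illustrate what I mean (purely a style suggestion, reusing the
hunk quoted above): in the NET area the continuation line would be
aligned with the opening parenthesis, so the condition does not blend
into the body of the if statement:

	if (debug_pagealloc_enabled() &&
	    size % PAGE_SIZE == 0 && flags & SLAB_POISON)
		flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);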
> #endif
> }
> @@ -2716,7 +2719,8 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
> set_obj_status(page, objnr, OBJECT_FREE);
> if (cachep->flags & SLAB_POISON) {
> #ifdef CONFIG_DEBUG_PAGEALLOC
> - if ((cachep->size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
> + if (debug_pagealloc_enabled() &&
> + (cachep->size % PAGE_SIZE) == 0 && OFF_SLAB(cachep)) {
> store_stackinfo(cachep, objp, caller);
> kernel_map_pages(virt_to_page(objp),
> cachep->size / PAGE_SIZE, 0);
> @@ -2861,7 +2865,8 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
> return objp;
> if (cachep->flags & SLAB_POISON) {
> #ifdef CONFIG_DEBUG_PAGEALLOC
> - if ((cachep->size % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
> + if (debug_pagealloc_enabled() &&
> + (cachep->size % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
> kernel_map_pages(virt_to_page(objp),
> cachep->size / PAGE_SIZE, 1);
> else
--
Best regards,
Jesper Dangaard Brouer
MSc.CS, Principal Kernel Engineer at Red Hat
Author of http://www.iptv-analyzer.org
LinkedIn: http://www.linkedin.com/in/brouer