Message-Id: <20201108065758.1815-3-rppt@kernel.org>
Date: Sun, 8 Nov 2020 08:57:55 +0200
From: Mike Rapoport <rppt@...nel.org>
To: Andrew Morton <akpm@...ux-foundation.org>
Cc: Albert Ou <aou@...s.berkeley.edu>,
Andy Lutomirski <luto@...nel.org>,
Benjamin Herrenschmidt <benh@...nel.crashing.org>,
Borislav Petkov <bp@...en8.de>,
Catalin Marinas <catalin.marinas@....com>,
Christian Borntraeger <borntraeger@...ibm.com>,
Christoph Lameter <cl@...ux.com>,
"David S. Miller" <davem@...emloft.net>,
Dave Hansen <dave.hansen@...ux.intel.com>,
David Hildenbrand <david@...hat.com>,
David Rientjes <rientjes@...gle.com>,
"Edgecombe, Rick P" <rick.p.edgecombe@...el.com>,
"H. Peter Anvin" <hpa@...or.com>,
Heiko Carstens <hca@...ux.ibm.com>,
Ingo Molnar <mingo@...hat.com>,
Joonsoo Kim <iamjoonsoo.kim@....com>,
"Kirill A . Shutemov" <kirill.shutemov@...ux.intel.com>,
"Kirill A. Shutemov" <kirill@...temov.name>,
Len Brown <len.brown@...el.com>,
Michael Ellerman <mpe@...erman.id.au>,
Mike Rapoport <rppt@...nel.org>,
Mike Rapoport <rppt@...ux.ibm.com>,
Palmer Dabbelt <palmer@...belt.com>,
Paul Mackerras <paulus@...ba.org>,
Paul Walmsley <paul.walmsley@...ive.com>,
Pavel Machek <pavel@....cz>, Pekka Enberg <penberg@...nel.org>,
Peter Zijlstra <peterz@...radead.org>,
"Rafael J. Wysocki" <rjw@...ysocki.net>,
Thomas Gleixner <tglx@...utronix.de>,
Vasily Gorbik <gor@...ux.ibm.com>,
Vlastimil Babka <vbabka@...e.cz>,
Will Deacon <will@...nel.org>,
linux-arm-kernel@...ts.infradead.org, linux-kernel@...r.kernel.org,
linux-mm@...ck.org, linux-pm@...r.kernel.org,
linux-riscv@...ts.infradead.org, linux-s390@...r.kernel.org,
linuxppc-dev@...ts.ozlabs.org, sparclinux@...r.kernel.org,
x86@...nel.org
Subject: [PATCH v5 2/5] slab: debug: split slab_kernel_map() into map and unmap variants
From: Mike Rapoport <rppt@...ux.ibm.com>

Instead of calling slab_kernel_map() with a 'map' parameter to map or
unmap pages when DEBUG_PAGEALLOC is enabled, use the dedicated helpers
slab_kernel_map() and slab_kernel_unmap().
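
For reference, both helpers build on the debug_pagealloc_map_pages() /
debug_pagealloc_unmap_pages() functions introduced in patch 1/5 of this
series. A minimal sketch of their shape under CONFIG_DEBUG_PAGEALLOC
(the authoritative definitions and guards are in patch 1/5, not here):

static inline void debug_pagealloc_map_pages(struct page *page, int numpages)
{
	/* only touch the direct map when debug_pagealloc is enabled at boot */
	if (debug_pagealloc_enabled_static())
		__kernel_map_pages(page, numpages, 1);
}

static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages)
{
	if (debug_pagealloc_enabled_static())
		__kernel_map_pages(page, numpages, 0);
}

With !CONFIG_DEBUG_PAGEALLOC both helpers are expected to compile to empty
stubs, which is what keeps the split callers below free of #ifdefs.
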
Signed-off-by: Mike Rapoport <rppt@...ux.ibm.com>
---
mm/slab.c | 26 +++++++++++++++-----------
1 file changed, 15 insertions(+), 11 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 07317386e150..0719421d69f7 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1428,17 +1428,21 @@ static bool is_debug_pagealloc_cache(struct kmem_cache *cachep)
 	return false;
 }
 
-static void slab_kernel_map(struct kmem_cache *cachep, void *objp, int map)
+static void slab_kernel_map(struct kmem_cache *cachep, void *objp)
 {
 	if (!is_debug_pagealloc_cache(cachep))
 		return;
 
-	if (map)
-		debug_pagealloc_map_pages(virt_to_page(objp),
-					  cachep->size / PAGE_SIZE);
-	else
-		debug_pagealloc_unmap_pages(virt_to_page(objp),
-					    cachep->size / PAGE_SIZE);
+	debug_pagealloc_map_pages(virt_to_page(objp), cachep->size / PAGE_SIZE);
+}
+
+static void slab_kernel_unmap(struct kmem_cache *cachep, void *objp)
+{
+	if (!is_debug_pagealloc_cache(cachep))
+		return;
+
+	debug_pagealloc_unmap_pages(virt_to_page(objp),
+				    cachep->size / PAGE_SIZE);
 }
 
 static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
@@ -1585,7 +1589,7 @@ static void slab_destroy_debugcheck(struct kmem_cache *cachep,
 
 	if (cachep->flags & SLAB_POISON) {
 		check_poison_obj(cachep, objp);
-		slab_kernel_map(cachep, objp, 1);
+		slab_kernel_map(cachep, objp);
 	}
 	if (cachep->flags & SLAB_RED_ZONE) {
 		if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
@@ -2360,7 +2364,7 @@ static void cache_init_objs_debug(struct kmem_cache *cachep, struct page *page)
 		/* need to poison the objs? */
 		if (cachep->flags & SLAB_POISON) {
 			poison_obj(cachep, objp, POISON_FREE);
-			slab_kernel_map(cachep, objp, 0);
+			slab_kernel_unmap(cachep, objp);
 		}
 	}
 #endif
@@ -2728,7 +2732,7 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
 
 	if (cachep->flags & SLAB_POISON) {
 		poison_obj(cachep, objp, POISON_FREE);
-		slab_kernel_map(cachep, objp, 0);
+		slab_kernel_unmap(cachep, objp);
 	}
 	return objp;
 }
@@ -2993,7 +2997,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 		return objp;
 	if (cachep->flags & SLAB_POISON) {
 		check_poison_obj(cachep, objp);
-		slab_kernel_map(cachep, objp, 1);
+		slab_kernel_map(cachep, objp);
 		poison_obj(cachep, objp, POISON_INUSE);
 	}
 	if (cachep->flags & SLAB_STORE_USER)
--
2.28.0