Message-ID: <20200701151645.GA26223@lilong>
Date: Wed, 1 Jul 2020 15:16:45 +0000
From: Long Li <lonuxli.64@...il.com>
To: willy@...radead.org, cl@...ux.com, penberg@...nel.org,
rientjes@...gle.com, iamjoonsoo.kim@....com,
akpm@...ux-foundation.org
Cc: linux-mm@...ck.org, linux-kernel@...r.kernel.org
Subject: [PATCH v3] mm, slab: Check GFP_SLAB_BUG_MASK before alloc_pages in kmalloc_order
kmalloc cannot allocate memory from HIGHMEM. Large allocations,
however, go straight to the page allocator and thus bypass the
GFP_SLAB_BUG_MASK check; when page_address() then returns NULL, the
freshly allocated page is simply leaked. To fix this, factor the
GFP_SLAB_BUG_MASK check out of slab & slub and call it from
kmalloc_order() as well. To keep the code clear, the warning message
is issued from a single place.
Signed-off-by: Long Li <lonuxli.64@...il.com>
---
Changes in v3:
- Put the warning message in one place
- Update the change log for clarity
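
For reviewers, a minimal sketch (not part of the patch) of the
pre-patch failure path: the GFP_SLAB_BUG_MASK definition is quoted
from include/linux/gfp.h, and kmalloc_order() is condensed from
mm/slab_common.c (accounting and KASAN/kmemleak hooks omitted):

/* include/linux/gfp.h: flag bits no slab allocator can honour */
#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)

/* condensed pre-patch kmalloc_order(): the bad bits are never masked */
void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
{
	void *ret = NULL;
	struct page *page;

	flags |= __GFP_COMP;
	page = alloc_pages(flags, order);	/* may return a highmem page */
	if (likely(page))
		ret = page_address(page);	/* NULL for a highmem page */
	return ret;				/* page never freed: leaked */
}

So e.g. a kmalloc() larger than KMALLOC_MAX_CACHE_SIZE with
GFP_KERNEL | __GFP_HIGHMEM on a 32-bit highmem machine returns NULL
while the page stays allocated; after this patch the offending bits
are stripped (with a warning) before alloc_pages() is called.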
mm/slab.c | 10 +++-------
mm/slab.h | 1 +
mm/slab_common.c | 17 +++++++++++++++++
mm/slub.c | 9 ++-------
4 files changed, 23 insertions(+), 14 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index ac7a223d9ac3..2850fe3c5fb8 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2573,13 +2573,9 @@ static struct page *cache_grow_begin(struct kmem_cache *cachep,
* Be lazy and only check for valid flags here, keeping it out of the
* critical path in kmem_cache_alloc().
*/
- if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
- gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;
- flags &= ~GFP_SLAB_BUG_MASK;
- pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n",
- invalid_mask, &invalid_mask, flags, &flags);
- dump_stack();
- }
+ if (unlikely(flags & GFP_SLAB_BUG_MASK))
+ flags = kmalloc_invalid_flags(flags);
+
WARN_ON_ONCE(cachep->ctor && (flags & __GFP_ZERO));
local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
diff --git a/mm/slab.h b/mm/slab.h
index a06f3313e4a0..ab172dca8ce2 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -90,6 +90,7 @@ void create_kmalloc_caches(slab_flags_t);
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif
+gfp_t kmalloc_invalid_flags(gfp_t flags);
/* Functions provided by the slab allocators */
int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);
diff --git a/mm/slab_common.c b/mm/slab_common.c
index a143a8c8f874..85a16e323906 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -26,6 +26,8 @@
#define CREATE_TRACE_POINTS
#include <trace/events/kmem.h>
+#include "internal.h"
+
#include "slab.h"
enum slab_state slab_state;
@@ -805,6 +807,18 @@ void __init create_kmalloc_caches(slab_flags_t flags)
}
#endif /* !CONFIG_SLOB */
+gfp_t kmalloc_invalid_flags(gfp_t flags)
+{
+ gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;
+
+ flags &= ~GFP_SLAB_BUG_MASK;
+ pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n",
+ invalid_mask, &invalid_mask, flags, &flags);
+ dump_stack();
+
+ return flags;
+}
+
/*
* To avoid unnecessary overhead, we pass through large allocation requests
* directly to the page allocator. We use __GFP_COMP, because we will need to
@@ -815,6 +829,9 @@ void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
void *ret = NULL;
struct page *page;
+ if (unlikely(flags & GFP_SLAB_BUG_MASK))
+ flags = kmalloc_invalid_flags(flags);
+
flags |= __GFP_COMP;
page = alloc_pages(flags, order);
if (likely(page)) {
diff --git a/mm/slub.c b/mm/slub.c
index 62d2de56549e..039045211df9 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1817,13 +1817,8 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
{
- if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
- gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;
- flags &= ~GFP_SLAB_BUG_MASK;
- pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n",
- invalid_mask, &invalid_mask, flags, &flags);
- dump_stack();
- }
+ if (unlikely(flags & GFP_SLAB_BUG_MASK))
+ flags = kmalloc_invalid_flags(flags);
return allocate_slab(s,
flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
--
2.17.1