[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20210520013539.3733631-3-swboyd@chromium.org>
Date: Wed, 19 May 2021 18:35:38 -0700
From: Stephen Boyd <swboyd@...omium.org>
To: Andrew Morton <akpm@...ux-foundation.org>
Cc: linux-kernel@...r.kernel.org, Christoph Lameter <cl@...ux.com>,
Pekka Enberg <penberg@...nel.org>,
David Rientjes <rientjes@...gle.com>,
Joonsoo Kim <iamjoonsoo.kim@....com>,
Vlastimil Babka <vbabka@...e.cz>, linux-mm@...ck.org
Subject: [PATCH 2/3] slub: Print raw pointer addresses when debugging
Obscuring the pointers that slub shows when debugging makes for some
confusing slub debug messages:
Padding overwritten. 0x0000000079f0674a-0x000000000d4dce17
Those addresses are hashed for kernel security reasons. If we're trying
to be secure with slub_debug on the command line we have some big
problems given that we dump whole chunks of kernel memory to the kernel
logs. Let's use %px here and dump buffers with the actual address for
the buffer instead of the hashed version so that the logs are
meaningful. This also helps if a kernel address is in some slub debug
report so we can figure out that the object is referencing itself.
Signed-off-by: Stephen Boyd <swboyd@...omium.org>
---
mm/slub.c | 24 ++++++++++++------------
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index feda53ae62ba..87eeeed1f369 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -549,7 +549,7 @@ static void print_section(char *level, char *text, u8 *addr,
unsigned int length)
{
metadata_access_enable();
- print_hex_dump(level, kasan_reset_tag(text), DUMP_PREFIX_ADDRESS,
+ print_hex_dump(level, kasan_reset_tag(text), DUMP_PREFIX_RAW_ADDRESS,
16, 1, addr, length, 1);
metadata_access_disable();
}
@@ -650,7 +650,7 @@ void print_tracking(struct kmem_cache *s, void *object)
static void print_page_info(struct page *page)
{
- pr_err("Slab 0x%p objects=%u used=%u fp=0x%p flags=%#lx(%pGp)\n",
+ pr_err("Slab 0x%px objects=%u used=%u fp=0x%px flags=%#lx(%pGp)\n",
page, page->objects, page->inuse, page->freelist,
page->flags, &page->flags);
@@ -707,7 +707,7 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
print_page_info(page);
- pr_err("Object 0x%p @offset=%tu fp=0x%p\n\n",
+ pr_err("Object 0x%px @offset=%tu fp=0x%px\n\n",
p, p - addr, get_freepointer(s, p));
if (s->flags & SLAB_RED_ZONE)
@@ -777,7 +777,7 @@ static void init_object(struct kmem_cache *s, void *object, u8 val)
static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
void *from, void *to)
{
- slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
+ slab_fix(s, "Restoring 0x%px-0x%px=0x%x\n", from, to - 1, data);
memset(from, data, to - from);
}
@@ -800,7 +800,7 @@ static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
end--;
slab_bug(s, "%s overwritten", what);
- pr_err("0x%p-0x%p @offset=%tu. First byte 0x%x instead of 0x%x\n",
+ pr_err("0x%px-0x%px @offset=%tu. First byte 0x%x instead of 0x%x\n",
fault, end - 1, fault - addr,
fault[0], value);
print_trailer(s, page, object);
@@ -893,7 +893,7 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
while (end > fault && end[-1] == POISON_INUSE)
end--;
- slab_err(s, page, "Padding overwritten. 0x%p-0x%p @offset=%tu",
+ slab_err(s, page, "Padding overwritten. 0x%px-0x%px @offset=%tu",
fault, end - 1, fault - start);
print_section(KERN_ERR, "Padding ", pad, remainder);
@@ -1041,7 +1041,7 @@ static void trace(struct kmem_cache *s, struct page *page, void *object,
int alloc)
{
if (s->flags & SLAB_TRACE) {
- pr_info("TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
+ pr_info("TRACE %s %s 0x%px inuse=%d fp=0x%px\n",
s->name,
alloc ? "alloc" : "free",
object, page->inuse,
@@ -1186,7 +1186,7 @@ static inline int free_consistency_checks(struct kmem_cache *s,
struct page *page, void *object, unsigned long addr)
{
if (!check_valid_pointer(s, page, object)) {
- slab_err(s, page, "Invalid object pointer 0x%p", object);
+ slab_err(s, page, "Invalid object pointer 0x%px", object);
return 0;
}
@@ -1200,10 +1200,10 @@ static inline int free_consistency_checks(struct kmem_cache *s,
if (unlikely(s != page->slab_cache)) {
if (!PageSlab(page)) {
- slab_err(s, page, "Attempt to free object(0x%p) outside of slab",
+ slab_err(s, page, "Attempt to free object(0x%px) outside of slab",
object);
} else if (!page->slab_cache) {
- pr_err("SLUB <none>: no slab for object 0x%p.\n",
+ pr_err("SLUB <none>: no slab for object 0x%px.\n",
object);
dump_stack();
} else
@@ -1263,7 +1263,7 @@ static noinline int free_debug_processing(
slab_unlock(page);
spin_unlock_irqrestore(&n->list_lock, flags);
if (!ret)
- slab_fix(s, "Object at 0x%p not freed", object);
+ slab_fix(s, "Object at 0x%px not freed", object);
return ret;
}
@@ -3908,7 +3908,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
for_each_object(p, s, addr, page->objects) {
if (!test_bit(__obj_to_index(s, addr, p), map)) {
- pr_err("Object 0x%p @offset=%tu\n", p, p - addr);
+ pr_err("Object 0x%px @offset=%tu\n", p, p - addr);
print_tracking(s, p);
}
}
--
https://chromeos.dev
Powered by blists - more mailing lists