Date:   Tue, 13 Aug 2019 13:06:44 +0100
From:   Will Deacon <will@...nel.org>
To:     Qian Cai <cai@....pw>
Cc:     Catalin Marinas <catalin.marinas@....com>,
        Andrey Konovalov <andreyknvl@...gle.com>,
        linux-arm-kernel@...ts.infradead.org, linux-kernel@...r.kernel.org,
        steve.capper@....com
Subject: Re: "arm64/for-next/core" causes boot panic

[+Steve]

On Tue, Aug 13, 2019 at 11:58:52AM +0100, Will Deacon wrote:
> On Tue, Aug 13, 2019 at 10:02:01AM +0100, Will Deacon wrote:
> > On Mon, Aug 12, 2019 at 05:51:35PM -0400, Qian Cai wrote:
> > > Booting today's linux-next on an arm64 server triggers a panic with
> > > CONFIG_KASAN_SW_TAGS=y pointing to this line,
> > 
> > Is this the only change on top of defconfig? If not, please can you share
> > your full .config?
> > 
> > > kfree()->virt_to_head_page()->compound_head()
> > > 
> > > unsigned long head = READ_ONCE(page->compound_head);
> > > 
> > > The bisect so far indicates one of those could be bad,
> > 
> > I guess that means the issue is reproducible on the arm64 for-next/core
> > branch. Once I have your .config, I'll give it a go.
> 
> FWIW, I've managed to reproduce this using defconfig + SW_TAGS on
> for-next/core, so I'll keep investigating.

Right, the hacky diff below seems to resolve this, so I'll split it up into
proper patches, as there is more than one bug here.
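
For anyone following along: with CONFIG_KASAN_SW_TAGS the top byte of a
kernel pointer carries the KASAN tag, so macros that do arithmetic on the
raw pointer value (e.g. __virt_to_phys_nodebug()) compute a bogus physical
address unless the tag is stripped first, which is what the extra
__tag_reset() calls in the diff are about. A rough, self-contained
userspace sketch of the effect, using made-up PAGE_OFFSET/PHYS_OFFSET
values and a simplified tag_reset() rather than the kernel's real
definitions:

/*
 * Userspace sketch only: PAGE_OFFSET, PHYS_OFFSET and the tag handling
 * here are illustrative stand-ins, not the kernel's real definitions.
 */
#include <stdint.h>
#include <stdio.h>

#define TAG_SHIFT	56
#define PAGE_OFFSET	0xffff000000000000ULL	/* pretend linear-map base */
#define PHYS_OFFSET	0x80000000ULL		/* pretend start of RAM    */

/* Put a KASAN-style tag in the top byte (cf. __tag_set()). */
static uint64_t tag_set(uint64_t addr, uint8_t tag)
{
	return (addr & ~(0xffULL << TAG_SHIFT)) | ((uint64_t)tag << TAG_SHIFT);
}

/*
 * Strip the tag again (cf. __tag_reset()). For a kernel address the
 * canonical top byte is 0xff, so just put that back here; the kernel
 * does this properly with a sign extension from bit 55.
 */
static uint64_t tag_reset(uint64_t addr)
{
	return addr | (0xffULL << TAG_SHIFT);
}

/* Simplified linear-map virt-to-phys (cf. __lm_to_phys()). */
static uint64_t lm_to_phys(uint64_t addr)
{
	return (addr - PAGE_OFFSET) + PHYS_OFFSET;
}

int main(void)
{
	uint64_t va = PAGE_OFFSET + 0x12345000ULL;
	uint64_t tagged = tag_set(va, 0x5a);	/* as left by KASAN */

	printf("untagged      -> pa 0x%016llx\n",
	       (unsigned long long)lm_to_phys(va));
	printf("tagged, raw   -> pa 0x%016llx\n",	/* bogus */
	       (unsigned long long)lm_to_phys(tagged));
	printf("tagged, reset -> pa 0x%016llx\n",
	       (unsigned long long)lm_to_phys(tag_reset(tagged)));
	return 0;
}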

Thanks,

Will

--->8

diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index afaf512c0e1b..541e8dcb5ab3 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -244,7 +244,7 @@ static inline const void *__tag_set(const void *addr, u8 tag)
 /*
  * The linear kernel range starts in the middle of the virtual adddress
  * space. Testing the top bit for the start of the region is a
- * sufficient check.
+ * sufficient check and avoids having to worry about the tag.
  */
 #define __is_lm_address(addr)	(!((addr) & BIT(vabits_actual - 1)))
 
@@ -252,7 +252,7 @@ static inline const void *__tag_set(const void *addr, u8 tag)
 #define __kimg_to_phys(addr)	((addr) - kimage_voffset)
 
 #define __virt_to_phys_nodebug(x) ({					\
-	phys_addr_t __x = (phys_addr_t)(x);				\
+	phys_addr_t __x = (phys_addr_t)(__tag_reset(x));		\
 	__is_lm_address(__x) ? __lm_to_phys(__x) :			\
 			       __kimg_to_phys(__x);			\
 })
@@ -301,8 +301,8 @@ static inline void *phys_to_virt(phys_addr_t x)
 #define __pa_nodebug(x)		__virt_to_phys_nodebug((unsigned long)(x))
 #define __va(x)			((void *)__phys_to_virt((phys_addr_t)(x)))
 #define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
-#define virt_to_pfn(x)      __phys_to_pfn(__virt_to_phys((unsigned long)(x)))
-#define sym_to_pfn(x)	    __phys_to_pfn(__pa_symbol(x))
+#define virt_to_pfn(x)		__phys_to_pfn(__virt_to_phys((unsigned long)(x)))
+#define sym_to_pfn(x)		__phys_to_pfn(__pa_symbol(x))
 
 /*
  *  virt_to_page(k)	convert a _valid_ virtual address to struct page *
@@ -311,7 +311,7 @@ static inline void *phys_to_virt(phys_addr_t x)
 #define ARCH_PFN_OFFSET		((unsigned long)PHYS_PFN_OFFSET)
 
 #if !defined(CONFIG_SPARSEMEM_VMEMMAP) || defined(CONFIG_DEBUG_VIRTUAL)
-#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+#define virt_to_page(kaddr)	pfn_to_page(virt_to_pfn(kaddr))
 #else
 #define __virt_to_pgoff(kaddr)	(((u64)(kaddr) - PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page))
 #define __page_to_voff(kaddr)	(((u64)(kaddr) - VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))
@@ -324,15 +324,17 @@ static inline void *phys_to_virt(phys_addr_t x)
 	((void *)__addr_tag);						\
 })
 
-#define virt_to_page(vaddr)	((struct page *)((__virt_to_pgoff(vaddr)) + VMEMMAP_START))
+#define virt_to_page(kaddr)	({					\
+	unsigned long __addr = __tag_reset((unsigned long)kaddr);	\
+	(struct page *)((__virt_to_pgoff(__addr) + VMEMMAP_START));	\
+})
 #endif
 #endif
 
-#define _virt_addr_is_linear(kaddr)	\
-	(__tag_reset((u64)(kaddr)) >= PAGE_OFFSET)
-
-#define virt_addr_valid(kaddr)		\
-	(_virt_addr_is_linear(kaddr) && pfn_valid(virt_to_pfn(kaddr)))
+#define virt_addr_valid(kaddr)	({					\
+	unsigned long __addr = (unsigned long)kaddr;			\
+	__is_lm_address(__addr) && pfn_valid(virt_to_pfn(__addr));	\
+})
 
 /*
  * Given that the GIC architecture permits ITS implementations that can only be

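As a sanity check on the updated __is_lm_address() comment: the tag sits in
bits 56-63, whereas the check only looks at bit (vabits_actual - 1), so it
gives the same answer for tagged and untagged pointers. Another rough
userspace sketch, with example values rather than the branch's real macros:

/*
 * Example values only (the VA width, tag shift and sample addresses are
 * not taken from the branch); the point is just that the tag lives in
 * bits 56-63 while the check looks at bit (vabits_actual - 1).
 */
#include <assert.h>
#include <stdint.h>

#define VABITS_ACTUAL	48
#define TAG_SHIFT	56

static int is_lm_address(uint64_t addr)
{
	return !(addr & (1ULL << (VABITS_ACTUAL - 1)));
}

static uint64_t with_tag(uint64_t addr, uint8_t tag)
{
	return (addr & ~(0xffULL << TAG_SHIFT)) | ((uint64_t)tag << TAG_SHIFT);
}

int main(void)
{
	uint64_t lm   = 0xffff000012345000ULL;	/* lower half: linear map    */
	uint64_t kimg = 0xffff800010000000ULL;	/* upper half: image/vmalloc */

	/* Tagging the pointer does not change the result of the check. */
	assert(is_lm_address(lm)   == is_lm_address(with_tag(lm,   0x5a)));
	assert(is_lm_address(kimg) == is_lm_address(with_tag(kimg, 0x5a)));
	assert(is_lm_address(lm)  && !is_lm_address(kimg));
	return 0;
}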