Message-ID: <df30e873-93e4-3288-7135-24954b0f29b7@csgroup.eu>
Date: Thu, 20 Jan 2022 10:56:53 +0000
From: Christophe Leroy <christophe.leroy@...roup.eu>
To: Michael Ellerman <mpe@...erman.id.au>
CC: Benjamin Herrenschmidt <benh@...nel.crashing.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
"linuxppc-dev@...ts.ozlabs.org" <linuxppc-dev@...ts.ozlabs.org>,
Maxime Bizon <mbizon@...ebox.fr>,
Paul Mackerras <paulus@...ba.org>
Subject: Re: [PATCH] powerpc/fixmap: Fix VM debug warning on unmap
Hi Michael,
ping?
On 06/12/2021 at 12:11, Christophe Leroy wrote:
> Unmapping a fixmap entry is done by calling __set_fixmap()
> with FIXMAP_PAGE_CLEAR as flags.
>
> Today, powerpc __set_fixmap() calls map_kernel_page().
>
> map_kernel_page() is not happy when called a second time
> for the same page.
>
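For context, the clear request comes from the generic early ioremap code:
at teardown, early_iounmap() in mm/early_ioremap.c does roughly the
following for each mapped slot (simplified sketch, error checking and the
after_paging_init case omitted):

	/* mm/early_ioremap.c, early_iounmap(), simplified */
	while (nrpages > 0) {
		__early_set_fixmap(idx, 0, FIXMAP_PAGE_CLEAR);
		--idx;
		--nrpages;
	}

On powerpc, __early_set_fixmap is #defined to __set_fixmap (see the
asm/fixmap.h hunk below) and FIXMAP_PAGE_CLEAR is __pgprot(0), so the
clear request reaches map_kernel_page() with empty protection bits while
the PTE installed by the original mapping is still valid, which is what
triggers the warning below.
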
> WARNING: CPU: 0 PID: 1 at arch/powerpc/mm/pgtable.c:194 set_pte_at+0xc/0x1e8
> CPU: 0 PID: 1 Comm: swapper Not tainted 5.16.0-rc3-s3k-dev-01993-g350ff07feb7d-dirty #682
> NIP: c0017cd4 LR: c00187f0 CTR: 00000010
> REGS: e1011d50 TRAP: 0700 Not tainted (5.16.0-rc3-s3k-dev-01993-g350ff07feb7d-dirty)
> MSR: 00029032 <EE,ME,IR,DR,RI> CR: 42000208 XER: 00000000
>
> GPR00: c0165fec e1011e10 c14c0000 c0ee2550 ff800000 c0f3d000 00000000 c001686c
> GPR08: 00001000 b00045a9 00000001 c0f58460 c0f50000 00000000 c0007e10 00000000
> GPR16: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
> GPR24: 00000000 00000000 c0ee2550 00000000 c0f57000 00000ff8 00000000 ff800000
> NIP [c0017cd4] set_pte_at+0xc/0x1e8
> LR [c00187f0] map_kernel_page+0x9c/0x100
> Call Trace:
> [e1011e10] [c0736c68] vsnprintf+0x358/0x6c8 (unreliable)
> [e1011e30] [c0165fec] __set_fixmap+0x30/0x44
> [e1011e40] [c0c13bdc] early_iounmap+0x11c/0x170
> [e1011e70] [c0c06cb0] ioremap_legacy_serial_console+0x88/0xc0
> [e1011e90] [c0c03634] do_one_initcall+0x80/0x178
> [e1011ef0] [c0c0385c] kernel_init_freeable+0xb4/0x250
> [e1011f20] [c0007e34] kernel_init+0x24/0x140
> [e1011f30] [c0016268] ret_from_kernel_thread+0x5c/0x64
> Instruction dump:
> 7fe3fb78 48019689 80010014 7c630034 83e1000c 5463d97e 7c0803a6 38210010
> 4e800020 81250000 712a0001 41820008 <0fe00000> 9421ffe0 93e1001c 48000030
>
> Implement unmap_kernel_page() which clears an existing pte.
>
> Reported-by: Maxime Bizon <mbizon@...ebox.fr>
> Signed-off-by: Christophe Leroy <christophe.leroy@...roup.eu>
> ---
> arch/powerpc/include/asm/book3s/32/pgtable.h | 1 +
> arch/powerpc/include/asm/book3s/64/pgtable.h | 2 ++
> arch/powerpc/include/asm/fixmap.h | 6 ++++--
> arch/powerpc/include/asm/nohash/32/pgtable.h | 1 +
> arch/powerpc/include/asm/nohash/64/pgtable.h | 1 +
> arch/powerpc/mm/pgtable.c | 9 +++++++++
> 6 files changed, 18 insertions(+), 2 deletions(-)
>
> diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
> index 609c80f67194..f8b94f78403f 100644
> --- a/arch/powerpc/include/asm/book3s/32/pgtable.h
> +++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
> @@ -178,6 +178,7 @@ static inline bool pte_user(pte_t pte)
> #ifndef __ASSEMBLY__
>
> int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
> +void unmap_kernel_page(unsigned long va);
>
> #endif /* !__ASSEMBLY__ */
>
> diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
> index 33e073d6b0c4..875730d5af40 100644
> --- a/arch/powerpc/include/asm/book3s/64/pgtable.h
> +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
> @@ -1082,6 +1082,8 @@ static inline int map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t p
> return hash__map_kernel_page(ea, pa, prot);
> }
>
> +void unmap_kernel_page(unsigned long va);
> +
> static inline int __meminit vmemmap_create_mapping(unsigned long start,
> unsigned long page_size,
> unsigned long phys)
> diff --git a/arch/powerpc/include/asm/fixmap.h b/arch/powerpc/include/asm/fixmap.h
> index 947b5b9c4424..a832aeafe560 100644
> --- a/arch/powerpc/include/asm/fixmap.h
> +++ b/arch/powerpc/include/asm/fixmap.h
> @@ -111,8 +111,10 @@ static inline void __set_fixmap(enum fixed_addresses idx,
> BUILD_BUG_ON(idx >= __end_of_fixed_addresses);
> else if (WARN_ON(idx >= __end_of_fixed_addresses))
> return;
> -
> - map_kernel_page(__fix_to_virt(idx), phys, flags);
> + if (pgprot_val(flags))
> + map_kernel_page(__fix_to_virt(idx), phys, flags);
> + else
> + unmap_kernel_page(__fix_to_virt(idx));
> }
>
> #define __early_set_fixmap __set_fixmap
> diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
> index b67742e2a9b2..d959c2a73fbf 100644
> --- a/arch/powerpc/include/asm/nohash/32/pgtable.h
> +++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
> @@ -64,6 +64,7 @@ extern int icache_44x_need_flush;
> #ifndef __ASSEMBLY__
>
> int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
> +void unmap_kernel_page(unsigned long va);
>
> #endif /* !__ASSEMBLY__ */
>
> diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
> index 9d2905a47410..2225991c69b5 100644
> --- a/arch/powerpc/include/asm/nohash/64/pgtable.h
> +++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
> @@ -308,6 +308,7 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
> #define __swp_entry_to_pte(x) __pte((x).val)
>
> int map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot);
> +void unmap_kernel_page(unsigned long va);
> extern int __meminit vmemmap_create_mapping(unsigned long start,
> unsigned long page_size,
> unsigned long phys);
> diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
> index ce9482383144..b7385e637e3e 100644
> --- a/arch/powerpc/mm/pgtable.c
> +++ b/arch/powerpc/mm/pgtable.c
> @@ -203,6 +203,15 @@ void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
> __set_pte_at(mm, addr, ptep, pte, 0);
> }
>
> +void unmap_kernel_page(unsigned long va)
> +{
> + pmd_t *pmdp = pmd_off_k(va);
> + pte_t *ptep = pte_offset_kernel(pmdp, va);
> +
> + pte_clear(&init_mm, va, ptep);
> + flush_tlb_kernel_range(va, va + PAGE_SIZE);
> +}
> +
> /*
> * This is called when relaxing access to a PTE. It's also called in the page
> * fault path when we don't hit any of the major fault cases, ie, a minor
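
With the patch applied, the teardown path from the trace above becomes,
as a call-chain sketch (for illustration only):

	ioremap_legacy_serial_console()
	  early_iounmap()
	    __early_set_fixmap(idx, 0, FIXMAP_PAGE_CLEAR)	/* == __set_fixmap() on powerpc */
	      unmap_kernel_page(__fix_to_virt(idx))		/* taken because pgprot_val(flags) == 0 */
	        pte_clear(&init_mm, va, ptep);
	        flush_tlb_kernel_range(va, va + PAGE_SIZE);

so clearing a fixmap entry no longer goes through map_kernel_page() and
set_pte_at() at all.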