lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <02b15e82-5da8-468b-f51a-195594746839@csgroup.eu>
Date:   Wed, 19 Jan 2022 12:28:09 +0000
From:   Christophe Leroy <christophe.leroy@...roup.eu>
To:     Michael Ellerman <mpe@...erman.id.au>
CC:     "linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
        "linuxppc-dev@...ts.ozlabs.org" <linuxppc-dev@...ts.ozlabs.org>,
        Maxime Bizon <mbizon@...ebox.fr>,
        "stable@...r.kernel.org" <stable@...r.kernel.org>,
        Russell Currey <ruscur@...sell.cc>,
        Paul Mackerras <paulus@...ba.org>,
        Benjamin Herrenschmidt <benh@...nel.crashing.org>
Subject: Re: [PATCH v3 2/2] powerpc: Add set_memory_{p/np}() and remove
 set_memory_attr()

Hi Michael,

Can we get this series in fixes as well ?

Thanks
Christophe

Le 24/12/2021 à 12:07, Christophe Leroy a écrit :
> set_memory_attr() was implemented by commit 4d1755b6a762 ("powerpc/mm:
> implement set_memory_attr()") because the set_memory_xx() couldn't
> be used at that time to modify memory "on the fly" as explained in
> the commit.
> 
> But set_memory_attr() uses set_pte_at() which leads to warnings when
> CONFIG_DEBUG_VM is selected, because set_pte_at() is unexpected for
> updating existing page table entries.
> 
> The check could be bypassed by using __set_pte_at() instead,
> as it was the case before commit c988cfd38e48 ("powerpc/32:
> use set_memory_attr()") but since commit 9f7853d7609d ("powerpc/mm:
> Fix set_memory_*() against concurrent accesses") it is now possible
> to use set_memory_xx() functions to update page table entries
> "on the fly" because the update is now atomic.
> 
> For DEBUG_PAGEALLOC we need to clear and set back _PAGE_PRESENT.
> Add set_memory_np() and set_memory_p() for that.
> 
> Replace all uses of set_memory_attr() by the relevant set_memory_xx()
> and remove set_memory_attr().
> 
> Reported-by: Maxime Bizon <mbizon@...ebox.fr>
> Fixes: c988cfd38e48 ("powerpc/32: use set_memory_attr()")
> Cc: stable@...r.kernel.org
> Depends-on: 9f7853d7609d ("powerpc/mm: Fix set_memory_*() against concurrent accesses")
> Signed-off-by: Christophe Leroy <christophe.leroy@...roup.eu>
> Reviewed-by: Russell Currey <ruscur@...sell.cc>
> Tested-by: Maxime Bizon <mbizon@...ebox.fr>
> ---
> v3: Use _PAGE_PRESENT directly as all platforms have the bit
> 
> v2: Add comment to SET_MEMORY_P and SET_MEMORY_NP
> ---
>   arch/powerpc/include/asm/set_memory.h | 12 ++++++++-
>   arch/powerpc/mm/pageattr.c            | 39 +++++----------------------
>   arch/powerpc/mm/pgtable_32.c          | 24 ++++++++---------
>   3 files changed, 28 insertions(+), 47 deletions(-)
> 
> diff --git a/arch/powerpc/include/asm/set_memory.h b/arch/powerpc/include/asm/set_memory.h
> index b040094f7920..7ebc807aa8cc 100644
> --- a/arch/powerpc/include/asm/set_memory.h
> +++ b/arch/powerpc/include/asm/set_memory.h
> @@ -6,6 +6,8 @@
>   #define SET_MEMORY_RW	1
>   #define SET_MEMORY_NX	2
>   #define SET_MEMORY_X	3
> +#define SET_MEMORY_NP	4	/* Set memory non present */
> +#define SET_MEMORY_P	5	/* Set memory present */
>   
>   int change_memory_attr(unsigned long addr, int numpages, long action);
>   
> @@ -29,6 +31,14 @@ static inline int set_memory_x(unsigned long addr, int numpages)
>   	return change_memory_attr(addr, numpages, SET_MEMORY_X);
>   }
>   
> -int set_memory_attr(unsigned long addr, int numpages, pgprot_t prot);
> +static inline int set_memory_np(unsigned long addr, int numpages)
> +{
> +	return change_memory_attr(addr, numpages, SET_MEMORY_NP);
> +}
> +
> +static inline int set_memory_p(unsigned long addr, int numpages)
> +{
> +	return change_memory_attr(addr, numpages, SET_MEMORY_P);
> +}
>   
>   #endif
> diff --git a/arch/powerpc/mm/pageattr.c b/arch/powerpc/mm/pageattr.c
> index 8812454e70ff..85753e32a4de 100644
> --- a/arch/powerpc/mm/pageattr.c
> +++ b/arch/powerpc/mm/pageattr.c
> @@ -46,6 +46,12 @@ static int change_page_attr(pte_t *ptep, unsigned long addr, void *data)
>   	case SET_MEMORY_X:
>   		pte_update_delta(ptep, addr, _PAGE_KERNEL_RO, _PAGE_KERNEL_ROX);
>   		break;
> +	case SET_MEMORY_NP:
> +		pte_update(&init_mm, addr, ptep, _PAGE_PRESENT, 0, 0);
> +		break;
> +	case SET_MEMORY_P:
> +		pte_update(&init_mm, addr, ptep, 0, _PAGE_PRESENT, 0);
> +		break;
>   	default:
>   		WARN_ON_ONCE(1);
>   		break;
> @@ -90,36 +96,3 @@ int change_memory_attr(unsigned long addr, int numpages, long action)
>   	return apply_to_existing_page_range(&init_mm, start, size,
>   					    change_page_attr, (void *)action);
>   }
> -
> -/*
> - * Set the attributes of a page:
> - *
> - * This function is used by PPC32 at the end of init to set final kernel memory
> - * protection. It includes changing the maping of the page it is executing from
> - * and data pages it is using.
> - */
> -static int set_page_attr(pte_t *ptep, unsigned long addr, void *data)
> -{
> -	pgprot_t prot = __pgprot((unsigned long)data);
> -
> -	spin_lock(&init_mm.page_table_lock);
> -
> -	set_pte_at(&init_mm, addr, ptep, pte_modify(*ptep, prot));
> -	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
> -
> -	spin_unlock(&init_mm.page_table_lock);
> -
> -	return 0;
> -}
> -
> -int set_memory_attr(unsigned long addr, int numpages, pgprot_t prot)
> -{
> -	unsigned long start = ALIGN_DOWN(addr, PAGE_SIZE);
> -	unsigned long sz = numpages * PAGE_SIZE;
> -
> -	if (numpages <= 0)
> -		return 0;
> -
> -	return apply_to_existing_page_range(&init_mm, start, sz, set_page_attr,
> -					    (void *)pgprot_val(prot));
> -}
> diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
> index 906e4e4328b2..f71ededdc02a 100644
> --- a/arch/powerpc/mm/pgtable_32.c
> +++ b/arch/powerpc/mm/pgtable_32.c
> @@ -135,10 +135,12 @@ void mark_initmem_nx(void)
>   	unsigned long numpages = PFN_UP((unsigned long)_einittext) -
>   				 PFN_DOWN((unsigned long)_sinittext);
>   
> -	if (v_block_mapped((unsigned long)_sinittext))
> +	if (v_block_mapped((unsigned long)_sinittext)) {
>   		mmu_mark_initmem_nx();
> -	else
> -		set_memory_attr((unsigned long)_sinittext, numpages, PAGE_KERNEL);
> +	} else {
> +		set_memory_nx((unsigned long)_sinittext, numpages);
> +		set_memory_rw((unsigned long)_sinittext, numpages);
> +	}
>   }
>   
>   #ifdef CONFIG_STRICT_KERNEL_RWX
> @@ -152,18 +154,14 @@ void mark_rodata_ro(void)
>   		return;
>   	}
>   
> -	numpages = PFN_UP((unsigned long)_etext) -
> -		   PFN_DOWN((unsigned long)_stext);
> -
> -	set_memory_attr((unsigned long)_stext, numpages, PAGE_KERNEL_ROX);
>   	/*
> -	 * mark .rodata as read only. Use __init_begin rather than __end_rodata
> -	 * to cover NOTES and EXCEPTION_TABLE.
> +	 * mark .text and .rodata as read only. Use __init_begin rather than
> +	 * __end_rodata to cover NOTES and EXCEPTION_TABLE.
>   	 */
>   	numpages = PFN_UP((unsigned long)__init_begin) -
> -		   PFN_DOWN((unsigned long)__start_rodata);
> +		   PFN_DOWN((unsigned long)_stext);
>   
> -	set_memory_attr((unsigned long)__start_rodata, numpages, PAGE_KERNEL_RO);
> +	set_memory_ro((unsigned long)_stext, numpages);
>   
>   	// mark_initmem_nx() should have already run by now
>   	ptdump_check_wx();
> @@ -179,8 +177,8 @@ void __kernel_map_pages(struct page *page, int numpages, int enable)
>   		return;
>   
>   	if (enable)
> -		set_memory_attr(addr, numpages, PAGE_KERNEL);
> +		set_memory_p(addr, numpages);
>   	else
> -		set_memory_attr(addr, numpages, __pgprot(0));
> +		set_memory_np(addr, numpages);
>   }
>   #endif /* CONFIG_DEBUG_PAGEALLOC */

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ