Date:   Fri, 24 Jun 2022 13:36:03 +0100
From:   Will Deacon <will@...nel.org>
To:     Ard Biesheuvel <ardb@...nel.org>
Cc:     linux-arm-kernel@...ts.infradead.org,
        linux-hardening@...r.kernel.org, Marc Zyngier <maz@...nel.org>,
        Mark Rutland <mark.rutland@....com>,
        Kees Cook <keescook@...omium.org>,
        Catalin Marinas <catalin.marinas@....com>,
        Mark Brown <broonie@...nel.org>,
        Anshuman Khandual <anshuman.khandual@....com>
Subject: Re: [PATCH v4 03/26] arm64: head: move assignment of idmap_t0sz to C code

On Mon, Jun 13, 2022 at 04:45:27PM +0200, Ard Biesheuvel wrote:
> Setting idmap_t0sz involves fiddling with the caches if done with the
> MMU off. Since we will be creating an initial ID map with the MMU and
> caches off, and the permanent ID map with the MMU and caches on, let's
> move this assignment of idmap_t0sz out of the startup code, and replace
> it with a macro that simply issues the three instructions needed to
> calculate the value wherever it is needed before the MMU is turned on.
> 
> Signed-off-by: Ard Biesheuvel <ardb@...nel.org>
> ---
>  arch/arm64/include/asm/assembler.h   | 14 ++++++++++++++
>  arch/arm64/include/asm/mmu_context.h |  2 +-
>  arch/arm64/kernel/head.S             | 13 +------------
>  arch/arm64/mm/mmu.c                  |  5 ++++-
>  arch/arm64/mm/proc.S                 |  2 +-
>  5 files changed, 21 insertions(+), 15 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
> index 8c5a61aeaf8e..9468f45c07a6 100644
> --- a/arch/arm64/include/asm/assembler.h
> +++ b/arch/arm64/include/asm/assembler.h
> @@ -359,6 +359,20 @@ alternative_cb_end
>  	bfi	\valreg, \t1sz, #TCR_T1SZ_OFFSET, #TCR_TxSZ_WIDTH
>  	.endm
>  
> +/*
> + * idmap_get_t0sz - get the T0SZ value needed to cover the ID map
> + *
> + * Calculate the maximum allowed value for TCR_EL1.T0SZ so that the
> + * entire ID map region can be mapped. As T0SZ == (64 - #bits used),
> + * this number conveniently equals the number of leading zeroes in
> + * the physical address of _end.
> + */
> +	.macro	idmap_get_t0sz, reg
> +	adrp	\reg, _end
> +	orr	\reg, \reg, #(1 << VA_BITS_MIN) - 1
> +	clz	\reg, \reg
> +	.endm
> +
>  /*
>   * tcr_compute_pa_size - set TCR.(I)PS to the highest supported
>   * ID_AA64MMFR0_EL1.PARange value
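
For reference, a C model of what this macro computes (illustrative only,
not code from the patch): the orr sets the low VA_BITS_MIN bits first, so
the clz result is capped at 64 - VA_BITS_MIN and T0SZ can never end up
larger than the default.

	/*
	 * Hypothetical C equivalent of idmap_get_t0sz, for illustration.
	 * The value is never zero after the orr, so __builtin_clzll() is
	 * well defined here.
	 */
	static inline unsigned int idmap_get_t0sz_model(u64 pa_of_end)
	{
		/* orr  \reg, \reg, #(1 << VA_BITS_MIN) - 1 */
		u64 v = pa_of_end | ((1UL << VA_BITS_MIN) - 1);

		/* clz  \reg, \reg: leading zeroes == 64 - #bits used == T0SZ */
		return __builtin_clzll(v);
	}

E.g. with VA_BITS_MIN == 48 and _end at PA 0x80000000, the orr leaves
bit 47 as the highest set bit, so clz returns 16 == TCR_T0SZ(48); with
_end up at PA (1UL << 54) instead, clz returns 9, i.e. a 55-bit range.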
> diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
> index 6770667b34a3..6ac0086ebb1a 100644
> --- a/arch/arm64/include/asm/mmu_context.h
> +++ b/arch/arm64/include/asm/mmu_context.h
> @@ -60,7 +60,7 @@ static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm)
>   * TCR_T0SZ(VA_BITS), unless system RAM is positioned very high in
>   * physical memory, in which case it will be smaller.
>   */
> -extern u64 idmap_t0sz;
> +extern int idmap_t0sz;
>  extern u64 idmap_ptrs_per_pgd;
>  
>  /*
> diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
> index dc07858eb673..7f361bc72d12 100644
> --- a/arch/arm64/kernel/head.S
> +++ b/arch/arm64/kernel/head.S
> @@ -299,22 +299,11 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
>  	 * physical address space. So for the ID map, use an extended virtual
>  	 * range in that case, and configure an additional translation level
>  	 * if needed.
> -	 *
> -	 * Calculate the maximum allowed value for TCR_EL1.T0SZ so that the
> -	 * entire ID map region can be mapped. As T0SZ == (64 - #bits used),
> -	 * this number conveniently equals the number of leading zeroes in
> -	 * the physical address of __idmap_text_end.
>  	 */
> -	adrp	x5, __idmap_text_end
> -	clz	x5, x5
> +	idmap_get_t0sz x5
>  	cmp	x5, TCR_T0SZ(VA_BITS_MIN) // default T0SZ small enough?
>  	b.ge	1f			// .. then skip VA range extension
>  
> -	adr_l	x6, idmap_t0sz
> -	str	x5, [x6]
> -	dmb	sy
> -	dc	ivac, x6		// Invalidate potentially stale cache line
> -
>  #if (VA_BITS < 48)
>  #define EXTRA_SHIFT	(PGDIR_SHIFT + PAGE_SHIFT - 3)
>  #define EXTRA_PTRS	(1 << (PHYS_MASK_SHIFT - EXTRA_SHIFT))
> diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
> index 17b339c1a326..103bf4ae408d 100644
> --- a/arch/arm64/mm/mmu.c
> +++ b/arch/arm64/mm/mmu.c
> @@ -43,7 +43,7 @@
>  #define NO_CONT_MAPPINGS	BIT(1)
>  #define NO_EXEC_MAPPINGS	BIT(2)	/* assumes FEAT_HPDS is not used */
>  
> -u64 idmap_t0sz = TCR_T0SZ(VA_BITS_MIN);
> +int idmap_t0sz __ro_after_init;
>  u64 idmap_ptrs_per_pgd = PTRS_PER_PGD;
>  
>  #if VA_BITS > 48
> @@ -785,6 +785,9 @@ void __init paging_init(void)
>  			       (u64)&vabits_actual + sizeof(vabits_actual));
>  #endif
>  
> +	idmap_t0sz = min(63UL - __fls(__pa_symbol(_end)),
> +			 TCR_T0SZ(VA_BITS_MIN));

nit: TCR_T0SZ shifts by TCR_T0SZ_OFFSET, so this is a bit confusing and
only works out because that offset happens to be zero. Maybe it would be
clearer to calculate the maximum of fls(__pa_symbol(_end)) and
VA_BITS_MIN, and then subtract that from 64?
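
i.e. something like this (untested; fls64() rather than fls(), since
__pa_symbol() returns a 64-bit physical address):

	/*
	 * Sketch of the suggested alternative: take the larger of the two
	 * bit counts first, then subtract from 64, so TCR_T0SZ() and its
	 * (currently zero) TCR_T0SZ_OFFSET shift don't appear at all.
	 */
	idmap_t0sz = 64 - max_t(u64, fls64(__pa_symbol(_end)), VA_BITS_MIN);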

Will
