lists.openwall.net | lists / announce owl-users owl-dev john-users john-dev passwdqc-users yescrypt popa3d-users / oss-security kernel-hardening musl sabotage tlsify passwords / crypt-dev xvendor / Bugtraq Full-Disclosure linux-kernel linux-netdev linux-ext4 linux-hardening PHC | |
Open Source and information security mailing list archives
| ||
|
Date: Fri, 27 Apr 2018 16:18:06 +0100 From: Suzuki K Poulose <Suzuki.Poulose@....com> To: Julien Grall <julien.grall@....com>, linux-arm-kernel@...ts.infradead.org Cc: ard.biesheuvel@...aro.org, kvm@...r.kernel.org, marc.zyngier@....com, catalin.marinas@....com, punit.agrawal@....com, will.deacon@....com, linux-kernel@...r.kernel.org, kristina.martsenko@....com, pbonzini@...hat.com, kvmarm@...ts.cs.columbia.edu Subject: Re: [PATCH v2 05/17] arm64: Helper for parange to PASize On 26/04/18 11:58, Julien Grall wrote: > Hi Suzuki, > > On 27/03/18 14:15, Suzuki K Poulose wrote: >> Add a helper to convert ID_AA64MMFR0_EL1:PARange to the physical >> size shift. Limit the size to the maximum supported by the kernel. >> We are about to move the user of this code and this helps to >> keep the changes cleaner. > > It is probably worth mentioning that you are also adding 52-bit support in the patch. Sure, will do. Can I take that as a Reviewed-by with the fixed commit description ? Cheers Suzuki > > Cheers, > >> >> Cc: Mark Rutland <mark.rutland@....com> >> Cc: Catalin Marinas <catalin.marinas@....com> >> Cc: Will Deacon <will.deacon@....com> >> Cc: Marc Zyngier <marc.zyngier@....com> >> Cc: Christoffer Dall <cdall@...nel.org> >> Signed-off-by: Suzuki K Poulose <suzuki.poulose@....com> >> --- >> arch/arm64/include/asm/cpufeature.h | 16 ++++++++++++++++ >> arch/arm64/kvm/hyp/s2-setup.c | 28 +++++----------------------- >> 2 files changed, 21 insertions(+), 23 deletions(-) >> >> diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h >> index fbf0aab..1f2a5dd 100644 >> --- a/arch/arm64/include/asm/cpufeature.h >> +++ b/arch/arm64/include/asm/cpufeature.h >> @@ -311,6 +311,22 @@ static inline u64 read_zcr_features(void) >> return zcr; >> } >> +static inline u32 id_aa64mmfr0_parange_to_phys_shift(int parange) >> +{ >> + switch (parange) { >> + case 0: return 32; >> + case 1: return 36; >> + case 2: return 40; >> + case 3: return 42; >> + case 4: 
return 44; >> + /* Report 48 bit if the kernel doesn't support 52bit */ >> + default: >> + case 5: return 48; >> +#ifdef CONFIG_ARM64_PA_BITS_52 >> + case 6: return 52; >> +#endif >> + } >> +} >> #endif /* __ASSEMBLY__ */ >> #endif >> diff --git a/arch/arm64/kvm/hyp/s2-setup.c b/arch/arm64/kvm/hyp/s2-setup.c >> index 603e1ee..b1129c8 100644 >> --- a/arch/arm64/kvm/hyp/s2-setup.c >> +++ b/arch/arm64/kvm/hyp/s2-setup.c >> @@ -19,11 +19,13 @@ >> #include <asm/kvm_arm.h> >> #include <asm/kvm_asm.h> >> #include <asm/kvm_hyp.h> >> +#include <asm/cpufeature.h> >> u32 __hyp_text __init_stage2_translation(void) >> { >> u64 val = VTCR_EL2_FLAGS; >> u64 parange; >> + u32 phys_shift; >> u64 tmp; >> /* >> @@ -37,27 +39,7 @@ u32 __hyp_text __init_stage2_translation(void) >> val |= parange << 16; >> /* Compute the actual PARange... */ >> - switch (parange) { >> - case 0: >> - parange = 32; >> - break; >> - case 1: >> - parange = 36; >> - break; >> - case 2: >> - parange = 40; >> - break; >> - case 3: >> - parange = 42; >> - break; >> - case 4: >> - parange = 44; >> - break; >> - case 5: >> - default: >> - parange = 48; >> - break; >> - } >> + phys_shift = id_aa64mmfr0_parange_to_phys_shift(parange); >> /* >> * ... and clamp it to 40 bits, unless we have some braindead >> @@ -65,7 +47,7 @@ u32 __hyp_text __init_stage2_translation(void) >> * return that value for the rest of the kernel to decide what >> * to do. >> */ >> - val |= 64 - (parange > 40 ? 40 : parange); >> + val |= 64 - (phys_shift > 40 ? 40 : phys_shift); >> /* >> * Check the availability of Hardware Access Flag / Dirty Bit >> @@ -86,5 +68,5 @@ u32 __hyp_text __init_stage2_translation(void) >> write_sysreg(val, vtcr_el2); >> - return parange; >> + return phys_shift; >> } >> >
Powered by blists - more mailing lists