[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20210118112516.6a7lnrtbjvey4iiv@google.com>
Date: Mon, 18 Jan 2021 11:25:16 +0000
From: David Brazdil <dbrazdil@...gle.com>
To: Marc Zyngier <maz@...nel.org>
Cc: linux-arm-kernel@...ts.infradead.org, kvmarm@...ts.cs.columbia.edu,
linux-kernel@...r.kernel.org,
Catalin Marinas <catalin.marinas@....com>,
Will Deacon <will@...nel.org>,
Mark Rutland <mark.rutland@....com>,
Alexandru Elisei <alexandru.elisei@....com>,
Ard Biesheuvel <ardb@...nel.org>,
Jing Zhang <jingzhangos@...gle.com>,
Ajay Patil <pajay@....qualcomm.com>,
Prasad Sodagudi <psodagud@...eaurora.org>,
Srinivas Ramana <sramana@...eaurora.org>,
James Morse <james.morse@....com>,
Julien Thierry <julien.thierry.kdev@...il.com>,
Suzuki K Poulose <suzuki.poulose@....com>,
kernel-team@...roid.com
Subject: Re: [PATCH v4 04/21] arm64: Provide an 'upgrade to VHE' stub
hypercall
On Mon, Jan 18, 2021 at 09:45:16AM +0000, Marc Zyngier wrote:
> As we are about to change the way a VHE system boots, let's
> provide the core helper, in the form of a stub hypercall that
> enables VHE and replicates the full EL1 context at EL2, thanks
> to EL1 and VHE-EL2 being extremely similar.
>
> On exception return, the kernel carries on at EL2. Fancy!
>
> Nothing calls this new hypercall yet, so no functional change.
>
> Signed-off-by: Marc Zyngier <maz@...nel.org>
> ---
> arch/arm64/include/asm/virt.h | 7 +++-
> arch/arm64/kernel/hyp-stub.S | 67 +++++++++++++++++++++++++++++++++--
> 2 files changed, 71 insertions(+), 3 deletions(-)
>
> diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
> index ee6a48df89d9..7379f35ae2c6 100644
> --- a/arch/arm64/include/asm/virt.h
> +++ b/arch/arm64/include/asm/virt.h
> @@ -35,8 +35,13 @@
> */
> #define HVC_RESET_VECTORS 2
>
> +/*
> + * HVC_VHE_RESTART - Upgrade the CPU from EL1 to EL2, if possible
> + */
> +#define HVC_VHE_RESTART 3
> +
> /* Max number of HYP stub hypercalls */
> -#define HVC_STUB_HCALL_NR 3
> +#define HVC_STUB_HCALL_NR 4
>
> /* Error returned when an invalid stub number is passed into x0 */
> #define HVC_STUB_ERR 0xbadca11
> diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
> index 160f5881a0b7..fb12398b5c28 100644
> --- a/arch/arm64/kernel/hyp-stub.S
> +++ b/arch/arm64/kernel/hyp-stub.S
> @@ -8,9 +8,9 @@
>
> #include <linux/init.h>
> #include <linux/linkage.h>
> -#include <linux/irqchip/arm-gic-v3.h>
>
> #include <asm/assembler.h>
> +#include <asm/el2_setup.h>
> #include <asm/kvm_arm.h>
> #include <asm/kvm_asm.h>
> #include <asm/ptrace.h>
> @@ -47,10 +47,13 @@ SYM_CODE_END(__hyp_stub_vectors)
>
> SYM_CODE_START_LOCAL(el1_sync)
> cmp x0, #HVC_SET_VECTORS
> - b.ne 2f
> + b.ne 1f
> msr vbar_el2, x1
> b 9f
>
> +1: cmp x0, #HVC_VHE_RESTART
> + b.eq mutate_to_vhe
> +
> 2: cmp x0, #HVC_SOFT_RESTART
> b.ne 3f
> mov x0, x2
> @@ -70,6 +73,66 @@ SYM_CODE_START_LOCAL(el1_sync)
> eret
> SYM_CODE_END(el1_sync)
>
> +// nVHE? No way! Give me the real thing!
> +SYM_CODE_START_LOCAL(mutate_to_vhe)
> + // Sanity check: MMU *must* be off
> + mrs x0, sctlr_el2
> + tbnz x0, #0, 1f
> +
> + // Needs to be VHE capable, obviously
> + mrs x0, id_aa64mmfr1_el1
> + ubfx x0, x0, #ID_AA64MMFR1_VHE_SHIFT, #4
> + cbz x0, 1f
nit: There is an HVC_STUB_ERR value that you could return if these sanity checks fail.
The documentation also states that it should be returned on error.
> +
> + // Engage the VHE magic!
> + mov_q x0, HCR_HOST_VHE_FLAGS
> + msr hcr_el2, x0
> + isb
> +
> + // Doesn't do much on VHE, but still, worth a shot
> + init_el2_state vhe
> +
> + // Use the EL1 allocated stack, per-cpu offset
> + mrs x0, sp_el1
> + mov sp, x0
> + mrs x0, tpidr_el1
> + msr tpidr_el2, x0
> +
> + // FP configuration, vectors
> + mrs_s x0, SYS_CPACR_EL12
> + msr cpacr_el1, x0
> + mrs_s x0, SYS_VBAR_EL12
> + msr vbar_el1, x0
> +
> + // Transfer the MM state from EL1 to EL2
> + mrs_s x0, SYS_TCR_EL12
> + msr tcr_el1, x0
> + mrs_s x0, SYS_TTBR0_EL12
> + msr ttbr0_el1, x0
> + mrs_s x0, SYS_TTBR1_EL12
> + msr ttbr1_el1, x0
> + mrs_s x0, SYS_MAIR_EL12
> + msr mair_el1, x0
> + isb
> +
> + // Invalidate TLBs before enabling the MMU
> + tlbi vmalle1
> + dsb nsh
> +
> + // Enable the EL2 S1 MMU, as set up from EL1
> + mrs_s x0, SYS_SCTLR_EL12
> + set_sctlr_el1 x0
> +
> + // Hack the exception return to stay at EL2
> + mrs x0, spsr_el1
> + and x0, x0, #~PSR_MODE_MASK
> + mov x1, #PSR_MODE_EL2h
> + orr x0, x0, x1
> + msr spsr_el1, x0
> +
> +1: eret
> +SYM_CODE_END(mutate_to_vhe)
> +
> .macro invalid_vector label
> SYM_CODE_START_LOCAL(\label)
> b \label
> --
> 2.29.2
>
Powered by blists - more mailing lists