Message-ID: <20170730200029.GN5176@cbox>
Date: Sun, 30 Jul 2017 22:00:29 +0200
From: Christoffer Dall <cdall@...aro.org>
To: Jintack Lim <jintack.lim@...aro.org>
Cc: kvmarm@...ts.cs.columbia.edu, christoffer.dall@...aro.org,
marc.zyngier@....com, corbet@....net, pbonzini@...hat.com,
rkrcmar@...hat.com, linux@...linux.org.uk, catalin.marinas@....com,
will.deacon@....com, akpm@...ux-foundation.org, mchehab@...nel.org,
cov@...eaurora.org, daniel.lezcano@...aro.org,
david.daney@...ium.com, mark.rutland@....com,
suzuki.poulose@....com, stefan@...lo-penguin.com,
andy.gross@...aro.org, wcohen@...hat.com,
ard.biesheuvel@...aro.org, shankerd@...eaurora.org,
vladimir.murzin@....com, james.morse@....com,
linux-doc@...r.kernel.org, linux-kernel@...r.kernel.org,
kvm@...r.kernel.org, linux-arm-kernel@...ts.infradead.org
Subject: Re: [RFC PATCH v2 25/38] KVM: arm64: Respect virtual CPTR_EL2.TFP setting

On Tue, Jul 18, 2017 at 11:58:51AM -0500, Jintack Lim wrote:
> Forward traps due to FP/ASIMD register accesses to the virtual EL2 if
> virtual CPTR_EL2.TFP is set. Note that if TFP bit is set, then even
> accesses to FP/ASIMD register from EL2 as well as NS EL0/1 will trap to
> EL2. So, we don't check the VM's exception level.
>
> Signed-off-by: Jintack Lim <jintack.lim@...aro.org>
> ---
> arch/arm64/kernel/asm-offsets.c | 1 +
> arch/arm64/kvm/handle_exit.c | 15 +++++++++++----
> arch/arm64/kvm/hyp/entry.S | 13 +++++++++++++
> arch/arm64/kvm/hyp/hyp-entry.S | 2 +-
> 4 files changed, 26 insertions(+), 5 deletions(-)
>
> diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
> index b3bb7ef..f5117a3 100644
> --- a/arch/arm64/kernel/asm-offsets.c
> +++ b/arch/arm64/kernel/asm-offsets.c
> @@ -134,6 +134,7 @@ int main(void)
>    DEFINE(CPU_FP_REGS,		offsetof(struct kvm_regs, fp_regs));
>    DEFINE(VCPU_FPEXC32_EL2,	offsetof(struct kvm_vcpu, arch.ctxt.sys_regs[FPEXC32_EL2]));
>    DEFINE(VCPU_HOST_CONTEXT,	offsetof(struct kvm_vcpu, arch.host_cpu_context));
> +  DEFINE(VIRTUAL_CPTR_EL2,	offsetof(struct kvm_vcpu, arch.ctxt.sys_regs[CPTR_EL2]));
>  #endif
>  #ifdef CONFIG_CPU_PM
>    DEFINE(CPU_SUSPEND_SZ,	sizeof(struct cpu_suspend_ctx));
> diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
> index 25ec824..d4e7b2b 100644
> --- a/arch/arm64/kvm/handle_exit.c
> +++ b/arch/arm64/kvm/handle_exit.c
> @@ -84,11 +84,18 @@ static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
> }
>
> /*
> - * Guest access to FP/ASIMD registers are routed to this handler only
> - * when the system doesn't support FP/ASIMD.
> + * When the system supports FP/ASMID and we are NOT running nested
> + * virtualization, FP/ASMID traps are handled in EL2 directly.
> + * This handler handles the cases those are not belong to the above case.
The parser parses the cases where the sentence are not belong to the
above sentence, and then my head exploded ;)
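Maybe something along these lines instead (just a suggestion, trying to
say the same thing the patch already does):

/*
 * When the system supports FP/ASIMD and we are not running nested
 * virtualization, FP/ASIMD traps are handled directly in EL2.
 * This handler covers the remaining cases: the trap is forwarded to
 * the virtual EL2 if the virtual CPTR_EL2.TFP is set, and an
 * undefined exception is injected if the system has no FP/ASIMD
 * support at all.
 */
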
> */
> -static int handle_no_fpsimd(struct kvm_vcpu *vcpu, struct kvm_run *run)
> +static int kvm_handle_fpasimd(struct kvm_vcpu *vcpu, struct kvm_run *run)
>  {
> +
> +	/* This is for nested virtualization */
> +	if (vcpu_sys_reg(vcpu, CPTR_EL2) & CPTR_EL2_TFP)
> +		return kvm_inject_nested_sync(vcpu, kvm_vcpu_get_hsr(vcpu));
> +
> +	/* This is the case when the system doesn't support FP/ASIMD. */
>  	kvm_inject_undefined(vcpu);
>  	return 1;
>  }
> @@ -220,7 +227,7 @@ static int kvm_handle_eret(struct kvm_vcpu *vcpu, struct kvm_run *run)
>  	[ESR_ELx_EC_BREAKPT_LOW]= kvm_handle_guest_debug,
>  	[ESR_ELx_EC_BKPT32]	= kvm_handle_guest_debug,
>  	[ESR_ELx_EC_BRK64]	= kvm_handle_guest_debug,
> -	[ESR_ELx_EC_FP_ASIMD]	= handle_no_fpsimd,
> +	[ESR_ELx_EC_FP_ASIMD]	= kvm_handle_fpasimd,
> };
>
> static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
> diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
> index 12ee62d..95af673 100644
> --- a/arch/arm64/kvm/hyp/entry.S
> +++ b/arch/arm64/kvm/hyp/entry.S
> @@ -158,6 +158,19 @@ abort_guest_exit_end:
> 1: ret
> ENDPROC(__guest_exit)
>
> +ENTRY(__fpsimd_guest_trap)
> +	// If virtual CPTR_EL2.TFP is set, then forward the trap to the
> +	// virtual EL2. For the non-nested case, this bit is always 0.
> +	mrs	x1, tpidr_el2
> +	ldr	x0, [x1, #VIRTUAL_CPTR_EL2]
> +	and	x0, x0, #CPTR_EL2_TFP
> +	cbnz	x0, 1f
> +	b	__fpsimd_guest_restore
> +1:
> +	mov	x0, #ARM_EXCEPTION_TRAP
> +	b	__guest_exit
> +ENDPROC(__fpsimd_guest_trap)
> +
>  ENTRY(__fpsimd_guest_restore)
>  	stp	x2, x3, [sp, #-16]!
>  	stp	x4, lr, [sp, #-16]!
> diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
> index 5170ce1..ab169fd 100644
> --- a/arch/arm64/kvm/hyp/hyp-entry.S
> +++ b/arch/arm64/kvm/hyp/hyp-entry.S
> @@ -113,7 +113,7 @@ el1_trap:
> */
>  alternative_if_not ARM64_HAS_NO_FPSIMD
>  	cmp	x0, #ESR_ELx_EC_FP_ASIMD
> -	b.eq	__fpsimd_guest_restore
> +	b.eq	__fpsimd_guest_trap
>  alternative_else_nop_endif
> 
>  	mrs	x1, tpidr_el2
> --
> 1.9.1
>
Otherwise, I think this is correct.

Have you subjected your L1 and L2 VMs to a nice round of paranoia FP
testing?
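
Something along the lines of the sketch below, run for a while in both
the L1 and the L2 guest while the host has other FP users, would already
tell us whether FP state survives the new trap path (this is just a
made-up illustration of the kind of check I mean, not the actual
paranoia suite):

/* Minimal FP sanity loop to run inside an L1/L2 guest: keep FP/ASIMD
 * state live and check that it is preserved across traps, preemption
 * and world switches.  Made-up sketch, not the paranoia program.
 */
#include <stdio.h>

int main(void)
{
	volatile double a = 1.0, b = 3.0;
	const double expect = 1.0 / 3.0;
	long i;

	for (i = 0; i < 100000000L; i++) {
		double x = a / b;	/* keeps the FP registers in use */

		if (x != expect) {
			fprintf(stderr, "FP mismatch at iteration %ld\n", i);
			return 1;
		}
	}
	printf("FP state looks consistent\n");
	return 0;
}

Running a few instances of it concurrently, and letting them migrate
between CPUs, should exercise the __fpsimd_guest_trap and
__fpsimd_guest_restore paths reasonably well.
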
Thanks,
-Christoffer