Message-ID: <e391df93-8afd-08fa-2e0a-48515d154a99@arm.com>
Date: Thu, 11 May 2017 08:39:08 +0100
From: Marc Zyngier <marc.zyngier@....com>
To: Alex Bennée <alex.bennee@...aro.org>,
christoffer.dall@...aro.org
Cc: kvm@...r.kernel.org, linux-arm-kernel@...ts.infradead.org,
kvmarm@...ts.cs.columbia.edu,
Zhichao Huang <zhichao.huang@...aro.org>,
Paolo Bonzini <pbonzini@...hat.com>,
Radim Krčmář <rkrcmar@...hat.com>,
Russell King <linux@...linux.org.uk>,
Vladimir Murzin <vladimir.murzin@....com>,
open list <linux-kernel@...r.kernel.org>
Subject: Re: [PATCH v1 1/2] KVM: arm: plug guest debug exploit

Hi Alex,

On 10/05/17 18:01, Alex Bennée wrote:
> From: Zhichao Huang <zhichao.huang@...aro.org>
>
> Hardware debugging in guests is not currently intercepted, which means
> that a malicious guest can bring down the entire machine by writing
> to the debug registers.
>
> This patch enables trapping of all debug registers, preventing the
> guest from accessing them. This includes trapping access to the debug
> mode (DBGDSCR) in the guest world at all times, which could otherwise
> mess with the host state. Reads return 0 and writes are ignored.
>
> The result is that the guest cannot detect any working hardware-based
> debug support. As debug exceptions are still routed to the guest,
> normal debug using software-based breakpoints still works.
>
> To support debugging using hardware registers we need to implement a
> debug-register-aware world switch as well as special trapping for
> registers that may affect the host state.
>
> Signed-off-by: Zhichao Huang <zhichao.huang@...aro.org>
> Signed-off-by: Alex Bennée <alex.bennee@...aro.org>
>
> ---
> ajb:
> - convert to C world switch
> - reword commit message
> ---
> arch/arm/include/asm/kvm_coproc.h | 3 +-
> arch/arm/kvm/coproc.c | 82 +++++++++++++++++++++++++++++----------
> arch/arm/kvm/handle_exit.c | 4 +-
> arch/arm/kvm/hyp/switch.c | 4 +-
> 4 files changed, 69 insertions(+), 24 deletions(-)
>
> diff --git a/arch/arm/include/asm/kvm_coproc.h b/arch/arm/include/asm/kvm_coproc.h
> index 4917c2f7e459..e74ab0fbab79 100644
> --- a/arch/arm/include/asm/kvm_coproc.h
> +++ b/arch/arm/include/asm/kvm_coproc.h
> @@ -31,7 +31,8 @@ void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table);
> int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run);
> int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
> int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run);
> -int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
> +int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
> +int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
> int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
> int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
>
> diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
> index 3e5e4194ef86..b2053393bb1f 100644
> --- a/arch/arm/kvm/coproc.c
> +++ b/arch/arm/kvm/coproc.c
> @@ -93,12 +93,6 @@ int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
>  	return 1;
>  }
>  
> -int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
> -{
> -	kvm_inject_undefined(vcpu);
> -	return 1;
> -}
> -
>  static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
>  {
>  	/*
> @@ -514,12 +508,8 @@ static int emulate_cp15(struct kvm_vcpu *vcpu,
>  	return 1;
>  }
>  
> -/**
> - * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access
> - * @vcpu: The VCPU pointer
> - * @run: The kvm_run struct
> - */
> -int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
> +static int kvm_handle_cp_64(struct kvm_vcpu *vcpu, struct kvm_run *run,
> +			    bool cp15)
>  {
>  	struct coproc_params params;
>  
> @@ -533,7 +523,35 @@ int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
>  	params.Rt2 = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
>  	params.CRm = 0;
>  
> -	return emulate_cp15(vcpu, &params);
> +	if (cp15)
> +		return emulate_cp15(vcpu, &params);
> +
> +	/* raz_wi cp14 */
> +	(void)pm_fake(vcpu, &params, NULL);
Why this (void) cast?
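If the return value really is of no interest here, wouldn't a plain

	pm_fake(vcpu, &params, NULL);

read just as well? As far as I can tell the cast doesn't buy us
anything, but maybe I'm missing the intent.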
> +
> +	/* handled */
> +	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
> +	return 1;
> +}
> +
> +/**
> + * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access
> + * @vcpu: The VCPU pointer
> + * @run: The kvm_run struct
> + */
> +int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
> +{
> +	return kvm_handle_cp_64(vcpu, run, 1);
true instead of 1?
> +}
> +
> +/**
> + * kvm_handle_cp14_64 -- handles a mrrc/mcrr trap on a guest CP14 access
> + * @vcpu: The VCPU pointer
> + * @run: The kvm_run struct
> + */
> +int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
> +{
> +	return kvm_handle_cp_64(vcpu, run, 0);
false?
>  }
>  
>  static void reset_coproc_regs(struct kvm_vcpu *vcpu,
> @@ -546,12 +564,8 @@ static void reset_coproc_regs(struct kvm_vcpu *vcpu,
>  			table[i].reset(vcpu, &table[i]);
>  }
>  
> -/**
> - * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
> - * @vcpu: The VCPU pointer
> - * @run: The kvm_run struct
> - */
> -int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
> +static int kvm_handle_cp_32(struct kvm_vcpu *vcpu, struct kvm_run *run,
> +			    bool cp15)
>  {
>  	struct coproc_params params;
>
> @@ -565,7 +579,35 @@ int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
>  	params.Op2 = (kvm_vcpu_get_hsr(vcpu) >> 17) & 0x7;
>  	params.Rt2 = 0;
>  
> -	return emulate_cp15(vcpu, &params);
> +	if (cp15)
> +		return emulate_cp15(vcpu, &params);
> +
> +	/* raz_wi cp14 */
> +	(void)pm_fake(vcpu, &params, NULL);
> +
> +	/* handled */
> +	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
> +	return 1;
> +}
> +
> +/**
> + * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
> + * @vcpu: The VCPU pointer
> + * @run: The kvm_run struct
> + */
> +int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
> +{
> +	return kvm_handle_cp_32(vcpu, run, 1);
> +}
> +
> +/**
> + * kvm_handle_cp14_32 -- handles a mrc/mcr trap on a guest CP14 access
> + * @vcpu: The VCPU pointer
> + * @run: The kvm_run struct
> + */
> +int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
> +{
> +	return kvm_handle_cp_32(vcpu, run, 0);
>  }
>  
>  /******************************************************************************
> diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c
> index 96af65a30d78..42f5daf715d0 100644
> --- a/arch/arm/kvm/handle_exit.c
> +++ b/arch/arm/kvm/handle_exit.c
> @@ -95,9 +95,9 @@ static exit_handle_fn arm_exit_handlers[] = {
>  	[HSR_EC_WFI]		= kvm_handle_wfx,
>  	[HSR_EC_CP15_32]	= kvm_handle_cp15_32,
>  	[HSR_EC_CP15_64]	= kvm_handle_cp15_64,
> -	[HSR_EC_CP14_MR]	= kvm_handle_cp14_access,
> +	[HSR_EC_CP14_MR]	= kvm_handle_cp14_32,
>  	[HSR_EC_CP14_LS]	= kvm_handle_cp14_load_store,
> -	[HSR_EC_CP14_64]	= kvm_handle_cp14_access,
> +	[HSR_EC_CP14_64]	= kvm_handle_cp14_64,
It feels a bit odd to have separate methods at this level, converging
onto a single one that has to apply separate treatments in
kvm_handle_cp_{32,64} depending on the CP number. I wonder if it
wouldn't be nicer to either keep the paths separate entirely, or have a
single access method that does it all.
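For the second option, something like the sketch below is roughly what
I have in mind (completely untested, and decode_cp_params() is a
made-up helper that would fold the common HSR decoding out of the two
existing paths):

static int kvm_handle_cp_access(struct kvm_vcpu *vcpu, struct kvm_run *run,
				bool is_64bit, bool is_cp15)
{
	/* hypothetical helper: pull Op1/Op2/CRn/CRm/Rt* out of the HSR */
	struct coproc_params params = decode_cp_params(vcpu, is_64bit);

	if (is_cp15)
		return emulate_cp15(vcpu, &params);

	/* CP14: RAZ/WI until we grow a proper debug register switch */
	pm_fake(vcpu, &params, NULL);
	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
	return 1;
}

The four exit handlers would then be thin wrappers around this, and the
RAZ/WI fallback would live in exactly one place.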
>  	[HSR_EC_CP_0_13]	= kvm_handle_cp_0_13_access,
>  	[HSR_EC_CP10_ID]	= kvm_handle_cp10_id,
>  	[HSR_EC_HVC]		= handle_hvc,
> diff --git a/arch/arm/kvm/hyp/switch.c b/arch/arm/kvm/hyp/switch.c
> index 92678b7bd046..624a510d31df 100644
> --- a/arch/arm/kvm/hyp/switch.c
> +++ b/arch/arm/kvm/hyp/switch.c
> @@ -48,7 +48,9 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu, u32 *fpexc_host)
>  	write_sysreg(HSTR_T(15), HSTR);
>  	write_sysreg(HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11), HCPTR);
>  	val = read_sysreg(HDCR);
> -	write_sysreg(val | HDCR_TPM | HDCR_TPMCR, HDCR);
> +	val |= HDCR_TPM | HDCR_TPMCR; /* trap performance monitors */
> +	val |= HDCR_TDRA | HDCR_TDOSA | HDCR_TDA; /* trap debug regs */
> +	write_sysreg(val, HDCR);
>  }
>  
>  static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
>

Thanks,
M.
--
Jazz is not dead. It just smells funny...