Message-ID: <CA+EHjTxpHxojNdRm21hYgcWqFJCzPx3jch2bdVqZ4+2NQqNvDA@mail.gmail.com>
Date: Thu, 21 Jul 2022 10:58:32 +0100
From: Fuad Tabba <tabba@...gle.com>
To: Kalesh Singh <kaleshsingh@...gle.com>
Cc: maz@...nel.org, mark.rutland@....com, broonie@...nel.org,
madvenka@...ux.microsoft.com, will@...nel.org, qperret@...gle.com,
james.morse@....com, alexandru.elisei@....com,
suzuki.poulose@....com, catalin.marinas@....com,
andreyknvl@...il.com, vincenzo.frascino@....com,
mhiramat@...nel.org, ast@...nel.org, wangkefeng.wang@...wei.com,
elver@...gle.com, keirf@...gle.com, yuzenghui@...wei.com,
ardb@...nel.org, oupton@...gle.com,
linux-arm-kernel@...ts.infradead.org, kvmarm@...ts.cs.columbia.edu,
linux-kernel@...r.kernel.org, kernel-team@...roid.com
Subject: Re: [PATCH v5 13/17] KVM: arm64: Prepare non-protected nVHE
hypervisor stacktrace

Hi Kalesh,

On Thu, Jul 21, 2022 at 6:58 AM Kalesh Singh <kaleshsingh@...gle.com> wrote:
>
> In non-protected nVHE mode (non-pKVM) the host can directly access
> hypervisor memory, and unwinding of the hypervisor stacktrace is
> done from EL1 to save on memory for shared buffers.
>
> To unwind the hypervisor stack from EL1, the host needs to know the
> starting point for the unwind and information that will allow it to
> translate hypervisor stack addresses to the corresponding kernel
> addresses. This patch sets up this bookkeeping; it is made use of
> later in the series.
>
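Just to spell out the translation idea for other readers: the host keeps
its own mapping of the hyp stack page, so a hyp VA on that stack can be
converted by preserving the offset within the page. A minimal sketch,
assuming a kern_stack_base input the host would have to obtain separately
(hypothetical helper, not the one this series adds later):

/*
 * Illustrative only: translate a hyp VA that falls on the (one page)
 * hyp stack into the host's own mapping of that page. kern_stack_base
 * is an assumed input here; the real translation helper arrives later
 * in the series.
 */
static unsigned long hyp_stack_kern_va(unsigned long hyp_va,
                                       unsigned long hyp_stack_base,
                                       unsigned long kern_stack_base)
{
        unsigned long offset = hyp_va - hyp_stack_base;

        if (offset >= PAGE_SIZE)
                return 0;       /* not on the hyp stack */

        return kern_stack_base + offset;
}
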
> Signed-off-by: Kalesh Singh <kaleshsingh@...gle.com>
> ---

Reviewed-by: Fuad Tabba <tabba@...gle.com>

Cheers,
/fuad

>
> Changes in v5:
> - Use regular comments instead of doc comments, per Fuad
>
> arch/arm64/include/asm/kvm_asm.h | 16 ++++++++++++++++
> arch/arm64/include/asm/stacktrace/nvhe.h | 4 ++++
> arch/arm64/kvm/hyp/nvhe/stacktrace.c | 24 ++++++++++++++++++++++++
> 3 files changed, 44 insertions(+)
>
> diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
> index 2e277f2ed671..53035763e48e 100644
> --- a/arch/arm64/include/asm/kvm_asm.h
> +++ b/arch/arm64/include/asm/kvm_asm.h
> @@ -176,6 +176,22 @@ struct kvm_nvhe_init_params {
> unsigned long vtcr;
> };
>
> +/*
> + * Used by the host in EL1 to dump the nVHE hypervisor backtrace on
> + * hyp_panic() in non-protected mode.
> + *
> + * @stack_base: hyp VA of the hyp_stack base.
> + * @overflow_stack_base: hyp VA of the hyp_overflow_stack base.
> + * @fp: hyp FP where the backtrace begins.
> + * @pc: hyp PC where the backtrace begins.
> + */
> +struct kvm_nvhe_stacktrace_info {
> + unsigned long stack_base;
> + unsigned long overflow_stack_base;
> + unsigned long fp;
> + unsigned long pc;
> +};
> +
> /* Translate a kernel address @ptr into its equivalent linear mapping */
> #define kvm_ksym_ref(ptr) \
> ({ \
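
With the four fields above, the host can also sanity-check that an address
actually lies on one of the two hyp stacks before trying to translate it.
A purely illustrative sketch (the real range handling goes through the
stack_info machinery in stacktrace/common.h):

/*
 * Illustrative only: does a hyp VA fall within the (one page) hyp stack
 * or the overflow stack described by kvm_nvhe_stacktrace_info?
 */
static bool on_hyp_stacks(const struct kvm_nvhe_stacktrace_info *info,
                          unsigned long addr)
{
        unsigned long stack_end = info->stack_base + PAGE_SIZE;
        unsigned long ovf_end = info->overflow_stack_base + OVERFLOW_STACK_SIZE;

        return (addr >= info->stack_base && addr < stack_end) ||
               (addr >= info->overflow_stack_base && addr < ovf_end);
}
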
> diff --git a/arch/arm64/include/asm/stacktrace/nvhe.h b/arch/arm64/include/asm/stacktrace/nvhe.h
> index 05d7e03e0a8c..8f02803a005f 100644
> --- a/arch/arm64/include/asm/stacktrace/nvhe.h
> +++ b/arch/arm64/include/asm/stacktrace/nvhe.h
> @@ -19,6 +19,7 @@
> #ifndef __ASM_STACKTRACE_NVHE_H
> #define __ASM_STACKTRACE_NVHE_H
>
> +#include <asm/kvm_asm.h>
> #include <asm/stacktrace/common.h>
>
> /*
> @@ -52,6 +53,9 @@ static inline bool on_accessible_stack(const struct task_struct *tsk,
> * In protected mode, the unwinding is done by the hypervisor in EL2.
> */
>
> +DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);
> +DECLARE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
> +
> #ifdef CONFIG_PROTECTED_NVHE_STACKTRACE
> static inline bool on_overflow_stack(unsigned long sp, unsigned long size,
> struct stack_info *info)
> diff --git a/arch/arm64/kvm/hyp/nvhe/stacktrace.c b/arch/arm64/kvm/hyp/nvhe/stacktrace.c
> index 60461c033a04..cbd365f4f26a 100644
> --- a/arch/arm64/kvm/hyp/nvhe/stacktrace.c
> +++ b/arch/arm64/kvm/hyp/nvhe/stacktrace.c
> @@ -9,6 +9,28 @@
> DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack)
> __aligned(16);
>
> +DEFINE_PER_CPU(struct kvm_nvhe_stacktrace_info, kvm_stacktrace_info);
> +
> +/*
> + * hyp_prepare_backtrace - Prepare non-protected nVHE backtrace.
> + *
> + * @fp : frame pointer at which to start the unwinding.
> + * @pc : program counter at which to start the unwinding.
> + *
> + * Save the information needed by the host to unwind the non-protected
> + * nVHE hypervisor stack in EL1.
> + */
> +static void hyp_prepare_backtrace(unsigned long fp, unsigned long pc)
> +{
> + struct kvm_nvhe_stacktrace_info *stacktrace_info = this_cpu_ptr(&kvm_stacktrace_info);
> + struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);
> +
> + stacktrace_info->stack_base = (unsigned long)(params->stack_hyp_va - PAGE_SIZE);
> + stacktrace_info->overflow_stack_base = (unsigned long)this_cpu_ptr(overflow_stack);
> + stacktrace_info->fp = fp;
> + stacktrace_info->pc = pc;
> +}
> +
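
And once fp/pc are stashed here, the EL1 side of the unwind (later in the
series) is just the usual AArch64 frame-record walk, with each hyp address
converted before it is dereferenced. Rough sketch, where hyp_to_kern() is
a hypothetical stand-in for that conversion and consume_entry() for the
reporting callback:

/* Hypothetical hyp-VA to kernel-VA conversion, returns 0 on failure. */
unsigned long hyp_to_kern(unsigned long hyp_va);

/*
 * Illustrative only: walk the hyp frame records from the saved fp/pc.
 * Each record holds the previous fp at offset 0 and the saved lr at
 * offset 8.
 */
static void walk_hyp_frames(unsigned long fp, unsigned long pc,
                            bool (*consume_entry)(unsigned long pc))
{
        while (consume_entry(pc) && fp) {
                unsigned long *frame = (unsigned long *)hyp_to_kern(fp);

                if (!frame)
                        break;

                fp = frame[0];  /* previous frame record */
                pc = frame[1];  /* return address (lr) */
        }
}
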
> #ifdef CONFIG_PROTECTED_NVHE_STACKTRACE
> DEFINE_PER_CPU(unsigned long [NVHE_STACKTRACE_SIZE/sizeof(long)], pkvm_stacktrace);
>
> @@ -89,4 +111,6 @@ void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc)
> {
> if (is_protected_kvm_enabled())
> pkvm_save_backtrace(fp, pc);
> + else
> + hyp_prepare_backtrace(fp, pc);
> }
> --
> 2.37.0.170.g444d1eabd0-goog
>