Message-ID: <CA+EHjTw8VwFHYHzpyt+sqwo7CDhGm+F+h2+AX=5FtYcXPECGUA@mail.gmail.com>
Date: Mon, 18 Jul 2022 11:07:00 +0100
From: Fuad Tabba <tabba@...gle.com>
To: Kalesh Singh <kaleshsingh@...gle.com>
Cc: maz@...nel.org, mark.rutland@....com, broonie@...nel.org,
madvenka@...ux.microsoft.com, will@...nel.org, qperret@...gle.com,
james.morse@....com, alexandru.elisei@....com,
suzuki.poulose@....com, catalin.marinas@....com,
andreyknvl@...il.com, russell.king@...cle.com,
vincenzo.frascino@....com, mhiramat@...nel.org, ast@...nel.org,
wangkefeng.wang@...wei.com, elver@...gle.com, keirf@...gle.com,
yuzenghui@...wei.com, ardb@...nel.org, oupton@...gle.com,
linux-arm-kernel@...ts.infradead.org, kvmarm@...ts.cs.columbia.edu,
linux-kernel@...r.kernel.org, kernel-team@...roid.com
Subject: Re: [PATCH v4 12/18] KVM: arm64: Save protected-nVHE (pKVM) hyp stacktrace
Hi Kalesh,
On Fri, Jul 15, 2022 at 7:11 AM Kalesh Singh <kaleshsingh@...gle.com> wrote:
>
> In protected nVHE mode, the host cannot access privately owned
> hypervisor memory. Also, the hypervisor aims to remain simple to
> reduce the attack surface and does not provide any printk support.
>
> For the above reasons, the approach taken to provide hypervisor stacktraces
> in protected mode is:
> 1) Unwind and save the hyp stack addresses in EL2 to a shared buffer
> with the host (done in this patch).
> 2) Delegate the dumping and symbolization of the addresses to the
> host in EL1 (later patch in the series).
>
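Just to check my understanding of the overall flow: I take it the host
side (in the later patch) ends up walking the zero-terminated per-CPU
buffer roughly along these lines? A rough sketch only; the function
name and the printing are made up:

static void hyp_dump_backtrace(unsigned long *stacktrace)
{
	unsigned long *entry = stacktrace;
	unsigned long *end = stacktrace + NVHE_STACKTRACE_SIZE / sizeof(long);

	/* Stop at the end of the buffer or at the zero delimiter. */
	while (entry < end && *entry) {
		/* %pB symbolizes the saved return address */
		kvm_err(" [<%016lx>] %pB\n", *entry, (void *)*entry);
		entry++;
	}
}

If so, it might be worth noting in the commit message that the zero
delimiter is what the host relies on to find the end of the trace.
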
> Signed-off-by: Kalesh Singh <kaleshsingh@...gle.com>
> ---
> arch/arm64/include/asm/stacktrace/nvhe.h | 18 ++++++
> arch/arm64/kvm/hyp/nvhe/stacktrace.c | 70 ++++++++++++++++++++++++
> 2 files changed, 88 insertions(+)
>
> diff --git a/arch/arm64/include/asm/stacktrace/nvhe.h b/arch/arm64/include/asm/stacktrace/nvhe.h
> index 36cf7858ddd8..456a6ae08433 100644
> --- a/arch/arm64/include/asm/stacktrace/nvhe.h
> +++ b/arch/arm64/include/asm/stacktrace/nvhe.h
> @@ -21,6 +21,22 @@
>
> #include <asm/stacktrace/common.h>
>
> +/**
> + * kvm_nvhe_unwind_init - Start an unwind from the given nVHE HYP fp and pc
> + *
> + * @state : unwind state to initialize.
> + * @fp : frame pointer at which to start the unwinding.
> + * @pc : program counter at which to start the unwinding.
> + */
> +static __always_inline void kvm_nvhe_unwind_init(struct unwind_state *state,
> + unsigned long fp,
> + unsigned long pc)
> +{
> + unwind_init_common(state, NULL);
> +
> + state->fp = fp;
> + state->pc = pc;
> +}
> +
> static inline bool on_accessible_stack(const struct task_struct *tsk,
> unsigned long sp, unsigned long size,
> struct stack_info *info)
> @@ -33,6 +49,8 @@ static inline bool on_accessible_stack(const struct task_struct *tsk,
> */
> #ifdef __KVM_NVHE_HYPERVISOR__
>
> +extern void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc);
> +
> #ifdef CONFIG_PROTECTED_NVHE_STACKTRACE
> static inline bool on_overflow_stack(unsigned long sp, unsigned long size,
> struct stack_info *info)
> diff --git a/arch/arm64/kvm/hyp/nvhe/stacktrace.c b/arch/arm64/kvm/hyp/nvhe/stacktrace.c
> index 96c8b93320eb..832a536e440f 100644
> --- a/arch/arm64/kvm/hyp/nvhe/stacktrace.c
> +++ b/arch/arm64/kvm/hyp/nvhe/stacktrace.c
> @@ -11,4 +11,74 @@ DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack)
>
> #ifdef CONFIG_PROTECTED_NVHE_STACKTRACE
> DEFINE_PER_CPU(unsigned long [NVHE_STACKTRACE_SIZE/sizeof(long)], pkvm_stacktrace);
> +
> +/**
> + * pkvm_save_backtrace_entry - Saves a protected nVHE HYP stacktrace entry
> + *
> + * @arg : pointer to the current position in the stacktrace buffer
> + * @where : the program counter corresponding to the stack frame
> + *
> + * Save the return address of a stack frame to the shared stacktrace buffer.
> + * The host can access this shared buffer from EL1 to dump the backtrace.
> + */
> +static bool pkvm_save_backtrace_entry(void *arg, unsigned long where)
> +{
> + unsigned long **stacktrace_pos = (unsigned long **)arg;
> + unsigned long stacktrace_start, stacktrace_end;
> +
> + stacktrace_start = (unsigned long)this_cpu_ptr(pkvm_stacktrace);
> + stacktrace_end = stacktrace_start + NVHE_STACKTRACE_SIZE - (2 * sizeof(long));
I guess this is related to my comment in patch 9, but why does the end
stop at 2 * sizeof(long) before the actual end of the buffer, rather
than just 1 *? I guess it's because the bounds check is inclusive. That
said, a comment here would be helpful.
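To spell out my reading, here is a standalone sketch of just the
arithmetic, nothing to do with the actual patch, using a made-up
4-slot buffer:

#include <assert.h>

#define BUF_LONGS 4	/* pretend NVHE_STACKTRACE_SIZE == 4 * sizeof(long) */

static unsigned long buf[BUF_LONGS];

/* Mirrors the bounds check: entry plus zero delimiter must both fit. */
static int save_entry(unsigned long **pos, unsigned long where)
{
	unsigned long end = (unsigned long)buf + sizeof(buf) - 2 * sizeof(long);

	if ((unsigned long)*pos > end)
		return 0;

	**pos = where;		/* saved return address */
	*(*pos + 1) = 0UL;	/* zero delimiter, overwritten by the next save */
	++*pos;
	return 1;
}

int main(void)
{
	unsigned long *pos = buf;
	int i, saved = 0;

	for (i = 0; i < 10; i++)
		saved += save_entry(&pos, 0x1000 + i);

	assert(saved == BUF_LONGS - 1);		/* 3 entries fit ... */
	assert(buf[BUF_LONGS - 1] == 0);	/* ... last slot keeps the delimiter */
	return 0;
}

So, if I have it right, the "- (2 * sizeof(long))" reserves the final
slot for the delimiter, and the ">" (rather than ">=") makes
stacktrace_end itself a valid position.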
Thanks,
/fuad
> +
> + if ((unsigned long) *stacktrace_pos > stacktrace_end)
> + return false;
> +
> + /* Save the entry to the current pos in stacktrace buffer */
> + **stacktrace_pos = where;
> +
> + /* A zero entry delimits the end of the stacktrace. */
> + *(*stacktrace_pos + 1) = 0UL;
> +
> + /* Increment the current pos */
> + ++*stacktrace_pos;
> +
> + return true;
> +}
> +
> +/**
> + * pkvm_save_backtrace - Saves the protected nVHE HYP stacktrace
> + *
> + * @fp : frame pointer at which to start the unwinding.
> + * @pc : program counter at which to start the unwinding.
> + *
> + * Save the unwound stack addresses to the shared stacktrace buffer.
> + * The host can access this shared buffer from EL1 to dump the backtrace.
> + */
> +static void pkvm_save_backtrace(unsigned long fp, unsigned long pc)
> +{
> + void *stacktrace_start = (void *)this_cpu_ptr(pkvm_stacktrace);
> + struct unwind_state state;
> +
> + kvm_nvhe_unwind_init(&state, fp, pc);
> +
> + unwind(&state, pkvm_save_backtrace_entry, &stacktrace_start);
> +}
> +#else /* !CONFIG_PROTECTED_NVHE_STACKTRACE */
> +static void pkvm_save_backtrace(unsigned long fp, unsigned long pc)
> +{
> +}
> #endif /* CONFIG_PROTECTED_NVHE_STACKTRACE */
> +
> +/**
> + * kvm_nvhe_prepare_backtrace - prepare to dump the nVHE backtrace
> + *
> + * @fp : frame pointer at which to start the unwinding.
> + * @pc : program counter at which to start the unwinding.
> + *
> + * Saves the information needed by the host to dump the nVHE hypervisor
> + * backtrace.
> + */
> +void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc)
> +{
> + if (is_protected_kvm_enabled())
> + pkvm_save_backtrace(fp, pc);
> +}
> --
> 2.37.0.170.g444d1eabd0-goog
>