[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <CA+EHjTxJoaeSom51fGZCtFdOo0D16GYC_QUuQ26jLKgo_pfEFw@mail.gmail.com>
Date: Thu, 21 Jul 2022 10:58:04 +0100
From: Fuad Tabba <tabba@...gle.com>
To: Kalesh Singh <kaleshsingh@...gle.com>
Cc: maz@...nel.org, mark.rutland@....com, broonie@...nel.org,
madvenka@...ux.microsoft.com, will@...nel.org, qperret@...gle.com,
james.morse@....com, alexandru.elisei@....com,
suzuki.poulose@....com, catalin.marinas@....com,
andreyknvl@...il.com, vincenzo.frascino@....com,
mhiramat@...nel.org, ast@...nel.org, wangkefeng.wang@...wei.com,
elver@...gle.com, keirf@...gle.com, yuzenghui@...wei.com,
ardb@...nel.org, oupton@...gle.com,
linux-arm-kernel@...ts.infradead.org, kvmarm@...ts.cs.columbia.edu,
linux-kernel@...r.kernel.org, kernel-team@...roid.com
Subject: Re: [PATCH v5 10/17] KVM: arm64: Stub implementation of pKVM HYP
stack unwinder
Hi Kalesh,
On Thu, Jul 21, 2022 at 6:57 AM Kalesh Singh <kaleshsingh@...gle.com> wrote:
>
> Add stub implementations of the protected nVHE stack unwinder, for
> building. These are implemented later in this series.
>
> Signed-off-by: Kalesh Singh <kaleshsingh@...gle.com>
> ---
Reviewed-by: Fuad Tabba <tabba@...gle.com>
Cheers,
/fuad
>
> Changes in v5:
> - Mark unwind_next() as inline, per Marc
>
> arch/arm64/include/asm/stacktrace/nvhe.h | 59 ++++++++++++++++++++++++
> arch/arm64/kvm/hyp/nvhe/stacktrace.c | 3 +-
> 2 files changed, 60 insertions(+), 2 deletions(-)
> create mode 100644 arch/arm64/include/asm/stacktrace/nvhe.h
>
> diff --git a/arch/arm64/include/asm/stacktrace/nvhe.h b/arch/arm64/include/asm/stacktrace/nvhe.h
> new file mode 100644
> index 000000000000..80d71932afff
> --- /dev/null
> +++ b/arch/arm64/include/asm/stacktrace/nvhe.h
> @@ -0,0 +1,59 @@
> +/* SPDX-License-Identifier: GPL-2.0-only */
> +/*
> + * KVM nVHE hypervisor stack tracing support.
> + *
> + * The unwinder implementation depends on the nVHE mode:
> + *
> + * 1) pKVM (protected nVHE) mode - the host cannot directly access
> + *   the HYP memory. The stack is unwound in EL2 and dumped to a shared
> + * buffer where the host can read and print the stacktrace.
> + *
> + * Copyright (C) 2022 Google LLC
> + */
> +#ifndef __ASM_STACKTRACE_NVHE_H
> +#define __ASM_STACKTRACE_NVHE_H
> +
> +#include <asm/stacktrace/common.h>
> +
> +static inline bool on_accessible_stack(const struct task_struct *tsk,
> + unsigned long sp, unsigned long size,
> + struct stack_info *info)
> +{
> + return false;
> +}
> +
> +#ifdef __KVM_NVHE_HYPERVISOR__
> +/*
> + * Protected nVHE HYP stack unwinder
> + *
> + * In protected mode, the unwinding is done by the hypervisor in EL2.
> + */
> +
> +#ifdef CONFIG_PROTECTED_NVHE_STACKTRACE
> +static inline bool on_overflow_stack(unsigned long sp, unsigned long size,
> + struct stack_info *info)
> +{
> + return false;
> +}
> +
> +static inline int notrace unwind_next(struct unwind_state *state)
> +{
> + return 0;
> +}
> +NOKPROBE_SYMBOL(unwind_next);
> +#else /* !CONFIG_PROTECTED_NVHE_STACKTRACE */
> +static inline bool on_overflow_stack(unsigned long sp, unsigned long size,
> + struct stack_info *info)
> +{
> + return false;
> +}
> +
> +static inline int notrace unwind_next(struct unwind_state *state)
> +{
> + return 0;
> +}
> +NOKPROBE_SYMBOL(unwind_next);
> +#endif /* CONFIG_PROTECTED_NVHE_STACKTRACE */
> +
> +#endif /* __KVM_NVHE_HYPERVISOR__ */
> +#endif /* __ASM_STACKTRACE_NVHE_H */
> diff --git a/arch/arm64/kvm/hyp/nvhe/stacktrace.c b/arch/arm64/kvm/hyp/nvhe/stacktrace.c
> index 69e65b457f1c..96c8b93320eb 100644
> --- a/arch/arm64/kvm/hyp/nvhe/stacktrace.c
> +++ b/arch/arm64/kvm/hyp/nvhe/stacktrace.c
> @@ -4,8 +4,7 @@
> *
> * Copyright (C) 2022 Google LLC
> */
> -#include <asm/memory.h>
> -#include <asm/percpu.h>
> +#include <asm/stacktrace/nvhe.h>
>
> DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack)
> __aligned(16);
> --
> 2.37.0.170.g444d1eabd0-goog
>
Powered by blists - more mailing lists