Message-Id: <20220427184716.1949239-4-kaleshsingh@google.com>
Date: Wed, 27 Apr 2022 11:46:58 -0700
From: Kalesh Singh <kaleshsingh@...gle.com>
To: unlisted-recipients:; (no To-header on input)
Cc: mark.rutland@....com, will@...nel.org, maz@...nel.org,
qperret@...gle.com, tabba@...gle.com, surenb@...gle.com,
kernel-team@...roid.com, Kalesh Singh <kaleshsingh@...gle.com>,
James Morse <james.morse@....com>,
Alexandru Elisei <alexandru.elisei@....com>,
Suzuki K Poulose <suzuki.poulose@....com>,
Catalin Marinas <catalin.marinas@....com>,
Masami Hiramatsu <mhiramat@...nel.org>,
Mark Brown <broonie@...nel.org>,
Peter Collingbourne <pcc@...gle.com>,
Alexei Starovoitov <ast@...nel.org>,
"Madhavan T. Venkataraman" <madvenka@...ux.microsoft.com>,
Andrew Jones <drjones@...hat.com>,
Marco Elver <elver@...gle.com>, Keir Fraser <keirf@...gle.com>,
Kefeng Wang <wangkefeng.wang@...wei.com>,
Zenghui Yu <yuzenghui@...wei.com>,
linux-arm-kernel@...ts.infradead.org, kvmarm@...ts.cs.columbia.edu,
linux-kernel@...r.kernel.org
Subject: [PATCH 3/4] KVM: arm64: Allocate shared stacktrace pages
Allocate a page for each possible CPU to share the hypervisor's stacktrace
with the host. The nVHE hypervisor can use this shared area to dump its
stacktrace addresses on hyp_panic(). Symbolizing and printing the stacktrace
can then be handled by the host in EL1 (done in a later patch in this series).
Signed-off-by: Kalesh Singh <kaleshsingh@...gle.com>
---
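Notes:

To make the intended flow concrete for reviewers, here is a minimal,
hypothetical sketch of the EL2-side producer that would fill the shared
page on hyp_panic(). It is not part of this patch (the actual dumping is
added in a later patch in this series); hyp_dump_backtrace() is a made-up
name, and a real unwinder must validate frame records rather than trust
them as done here:

/*
 * Illustrative only. Assumes AAPCS64 frame records, where fp points to
 * a pair: fp[0] = previous frame pointer, fp[1] = link register.
 */
static void hyp_dump_backtrace(unsigned long fp)
{
	struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);
	unsigned long *stacktrace = (unsigned long *)params->stacktrace_hyp_va;
	unsigned int i = 0;

	/* Record each return address in the shared page. */
	while (fp && i < (PAGE_SIZE / sizeof(unsigned long)) - 1) {
		stacktrace[i++] = ((unsigned long *)fp)[1];
		fp = ((unsigned long *)fp)[0];
	}

	/* Zero-terminate so the host knows where the trace ends. */
	stacktrace[i] = 0;
}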
 arch/arm64/include/asm/kvm_asm.h |  1 +
 arch/arm64/kvm/arm.c             | 34 ++++++++++++++++++++++++++++++++
 arch/arm64/kvm/hyp/nvhe/setup.c  | 11 +++++++++++
 3 files changed, 46 insertions(+)

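For symmetry, a similarly hypothetical sketch of the EL1-side consumer
described in the commit message (the real version is a later patch in
this series). kvm_nvhe_dump_backtrace() is a made-up name, and the real
code must first convert the recorded hyp VAs back to kernel VAs before
%pS can symbolize them:

/*
 * Illustrative only. Assumes the hyp side zero-terminated the array and
 * that entries were already translated to kernel VAs.
 */
static void kvm_nvhe_dump_backtrace(int cpu)
{
	unsigned long *entries =
		(unsigned long *)per_cpu(kvm_arm_hyp_stacktrace_page, cpu);
	unsigned int i;

	kvm_err("nVHE hyp backtrace:\n");
	for (i = 0; i < PAGE_SIZE / sizeof(unsigned long) && entries[i]; i++)
		kvm_err(" [<%016lx>] %pS\n", entries[i], (void *)entries[i]);
}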
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 2e277f2ed671..ad31ac68264f 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -174,6 +174,7 @@ struct kvm_nvhe_init_params {
 	unsigned long hcr_el2;
 	unsigned long vttbr;
 	unsigned long vtcr;
+	unsigned long stacktrace_hyp_va;
 };
 
 /* Translate a kernel address @ptr into its equivalent linear mapping */
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index dd257d9f21a2..1b21d5a99bfc 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -50,6 +50,7 @@ DEFINE_STATIC_KEY_FALSE(kvm_protected_mode_initialized);
 DECLARE_KVM_HYP_PER_CPU(unsigned long, kvm_hyp_vector);
 
 static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
+DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stacktrace_page);
 unsigned long kvm_arm_hyp_percpu_base[NR_CPUS];
 
 DECLARE_KVM_NVHE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
@@ -1483,6 +1484,7 @@ static void cpu_prepare_hyp_mode(int cpu)
 	tcr |= (idmap_t0sz & GENMASK(TCR_TxSZ_WIDTH - 1, 0)) << TCR_T0SZ_OFFSET;
 	params->tcr_el2 = tcr;
 
+	params->stacktrace_hyp_va = kern_hyp_va(per_cpu(kvm_arm_hyp_stacktrace_page, cpu));
 	params->pgd_pa = kvm_mmu_get_httbr();
 	if (is_protected_kvm_enabled())
 		params->hcr_el2 = HCR_HOST_NVHE_PROTECTED_FLAGS;
@@ -1776,6 +1778,7 @@ static void teardown_hyp_mode(void)
 	free_hyp_pgds();
 	for_each_possible_cpu(cpu) {
 		free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
+		free_page(per_cpu(kvm_arm_hyp_stacktrace_page, cpu));
 		free_pages(kvm_arm_hyp_percpu_base[cpu], nvhe_percpu_order());
 	}
 }
@@ -1867,6 +1870,23 @@ static int init_hyp_mode(void)
 		per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page;
 	}
 
+	/*
+	 * Allocate stacktrace pages for Hypervisor-mode.
+	 * This is used by the hypervisor to share its stacktrace
+	 * with the host on a hyp_panic().
+	 */
+	for_each_possible_cpu(cpu) {
+		unsigned long stacktrace_page;
+
+		stacktrace_page = __get_free_page(GFP_KERNEL);
+		if (!stacktrace_page) {
+			err = -ENOMEM;
+			goto out_err;
+		}
+
+		per_cpu(kvm_arm_hyp_stacktrace_page, cpu) = stacktrace_page;
+	}
+
 	/*
 	 * Allocate and initialize pages for Hypervisor-mode percpu regions.
 	 */
@@ -1974,6 +1994,20 @@ static int init_hyp_mode(void)
 		params->stack_hyp_va = hyp_addr + (2 * PAGE_SIZE);
 	}
 
+	/*
+	 * Map the hyp stacktrace pages.
+	 */
+	for_each_possible_cpu(cpu) {
+		char *stacktrace_page = (char *)per_cpu(kvm_arm_hyp_stacktrace_page, cpu);
+
+		err = create_hyp_mappings(stacktrace_page, stacktrace_page + PAGE_SIZE,
+					  PAGE_HYP);
+		if (err) {
+			kvm_err("Cannot map hyp stacktrace page\n");
+			goto out_err;
+		}
+	}
+
 	for_each_possible_cpu(cpu) {
 		char *percpu_begin = (char *)kvm_arm_hyp_percpu_base[cpu];
 		char *percpu_end = percpu_begin + nvhe_percpu_size();
diff --git a/arch/arm64/kvm/hyp/nvhe/setup.c b/arch/arm64/kvm/hyp/nvhe/setup.c
index e8d4ea2fcfa0..9b81bf2d40d7 100644
--- a/arch/arm64/kvm/hyp/nvhe/setup.c
+++ b/arch/arm64/kvm/hyp/nvhe/setup.c
@@ -135,6 +135,17 @@ static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
 
 		/* Update stack_hyp_va to end of the stack's private VA range */
 		params->stack_hyp_va = hyp_addr + (2 * PAGE_SIZE);
+
+		/*
+		 * Map the stacktrace pages as shared and transfer ownership to
+		 * the hypervisor.
+		 */
+		prot = pkvm_mkstate(PAGE_HYP, PKVM_PAGE_SHARED_OWNED);
+		start = (void *)params->stacktrace_hyp_va;
+		end = start + PAGE_SIZE;
+		ret = pkvm_create_mappings(start, end, prot);
+		if (ret)
+			return ret;
 	}
 
 	/*
--
2.36.0.rc2.479.g8af0fa9b8e-goog