Message-ID: <20200908123517.GA3764@8bytes.org>
Date: Tue, 8 Sep 2020 14:35:17 +0200
From: Joerg Roedel <joro@...tes.org>
To: x86@...nel.org
Cc: Joerg Roedel <jroedel@...e.de>, hpa@...or.com,
Andy Lutomirski <luto@...nel.org>,
Dave Hansen <dave.hansen@...ux.intel.com>,
Peter Zijlstra <peterz@...radead.org>,
Jiri Slaby <jslaby@...e.cz>,
Dan Williams <dan.j.williams@...el.com>,
Tom Lendacky <thomas.lendacky@....com>,
Juergen Gross <jgross@...e.com>,
Kees Cook <keescook@...omium.org>,
David Rientjes <rientjes@...gle.com>,
Cfir Cohen <cfir@...gle.com>,
Erdem Aktas <erdemaktas@...gle.com>,
Masami Hiramatsu <mhiramat@...nel.org>,
Mike Stunes <mstunes@...are.com>,
Sean Christopherson <sean.j.christopherson@...el.com>,
Martin Radev <martin.b.radev@...il.com>,
linux-kernel@...r.kernel.org, kvm@...r.kernel.org,
virtualization@...ts.linux-foundation.org
Subject: [PATCH v7.2 39/74] x86/sev-es: Setup early #VC handler
From: Joerg Roedel <jroedel@...e.de>
Set up an early handler for #VC exceptions. There is no GHCB mapped
yet, so just reuse the vc_no_ghcb handler. It can only handle CPUID
exit-codes, but that should be enough to get the kernel through
verify_cpu() and __startup_64() until it runs on virtual addresses.
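
For readers following the flow: with no GHCB page mapped, the only channel
to the hypervisor is the GHCB MSR protocol, so each CPUID register has to be
requested individually through MSR 0xC0010130 followed by a VMGEXIT. Below
is a minimal, self-contained sketch of that handshake, assuming the
request/response encoding from the GHCB specification; the helper names and
the standalone form are made up here for illustration and are not quoted
from this series. In the patch itself the work is done by do_vc_no_ghcb(),
to which the vc_no_ghcb stub below hands the pt_regs and the exit code.

#include <linux/types.h>
#include <linux/errno.h>

/*
 * Illustrative sketch only - not part of this patch. Fetches a single
 * CPUID register from the hypervisor over the GHCB MSR protocol while
 * no GHCB page is mapped. Encoding per the GHCB specification:
 *   request : GHCBData[11:0] = 0x004, [31:30] = register index,
 *             [63:32] = CPUID function
 *   response: GHCBData[11:0] = 0x005, [63:32] = register value
 */
#define MSR_AMD64_SEV_ES_GHCB	0xc0010130
#define GHCB_MSR_CPUID_REQ	0x004UL
#define GHCB_MSR_CPUID_RESP	0x005UL
#define GHCB_MSR_INFO_MASK	0xfffUL

#define GHCB_CPUID_REQ(fn, reg)				\
	(GHCB_MSR_CPUID_REQ |				\
	 (((u64)(reg) & 3) << 30) |			\
	 ((u64)(fn) << 32))

static u64 sev_es_rd_ghcb_msr(void)
{
	u32 lo, hi;

	asm volatile("rdmsr" : "=a" (lo), "=d" (hi)
			     : "c" (MSR_AMD64_SEV_ES_GHCB));

	return ((u64)hi << 32) | lo;
}

static void sev_es_wr_ghcb_msr(u64 val)
{
	asm volatile("wrmsr" : : "c" (MSR_AMD64_SEV_ES_GHCB),
				 "a" ((u32)val), "d" ((u32)(val >> 32)));
}

/* Ask the hypervisor for one CPUID register (0=EAX, 1=EBX, 2=ECX, 3=EDX) */
static int msr_proto_cpuid(u32 fn, int reg, u32 *value)
{
	u64 resp;

	sev_es_wr_ghcb_msr(GHCB_CPUID_REQ(fn, reg));
	asm volatile("rep; vmmcall");		/* VMGEXIT */
	resp = sev_es_rd_ghcb_msr();

	if ((resp & GHCB_MSR_INFO_MASK) != GHCB_MSR_CPUID_RESP)
		return -EIO;

	*value = (u32)(resp >> 32);

	return 0;
}

Any exit code other than CPUID cannot be serviced without a real GHCB at
this stage, which is why the commit message limits the early handler's job
to getting CPUID through verify_cpu() and __startup_64().
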
Signed-off-by: Joerg Roedel <jroedel@...e.de>
---
arch/x86/include/asm/sev-es.h | 3 +++
arch/x86/kernel/head64.c | 25 ++++++++++++++++++++++++-
arch/x86/kernel/head_64.S | 30 ++++++++++++++++++++++++++++++
3 files changed, 57 insertions(+), 1 deletion(-)
diff --git a/arch/x86/include/asm/sev-es.h b/arch/x86/include/asm/sev-es.h
index 6dc52440c4b4..7175d432ebfe 100644
--- a/arch/x86/include/asm/sev-es.h
+++ b/arch/x86/include/asm/sev-es.h
@@ -73,4 +73,7 @@ static inline u64 lower_bits(u64 val, unsigned int bits)
 	return (val & mask);
 }
 
+/* Early IDT entry points for #VC handler */
+extern void vc_no_ghcb(void);
+
 #endif
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 4282dac694c3..fc55cc9ccb0f 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -40,6 +40,7 @@
 #include <asm/desc.h>
 #include <asm/extable.h>
 #include <asm/trapnr.h>
+#include <asm/sev-es.h>
 
 /*
  * Manage page tables very early on.
@@ -540,12 +541,34 @@ static struct desc_ptr bringup_idt_descr = {
 	.address	= 0, /* Set at runtime */
 };
 
+static void set_bringup_idt_handler(gate_desc *idt, int n, void *handler)
+{
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+	struct idt_data data;
+	gate_desc desc;
+
+	init_idt_data(&data, n, handler);
+	idt_init_desc(&desc, &data);
+	native_write_idt_entry(idt, n, &desc);
+#endif
+}
+
 /* This runs while still in the direct mapping */
 static void startup_64_load_idt(unsigned long physbase)
 {
 	struct desc_ptr *desc = fixup_pointer(&bringup_idt_descr, physbase);
+	gate_desc *idt = fixup_pointer(bringup_idt_table, physbase);
+
+
+	if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT)) {
+		void *handler;
+
+		/* VMM Communication Exception */
+		handler = fixup_pointer(vc_no_ghcb, physbase);
+		set_bringup_idt_handler(idt, X86_TRAP_VC, handler);
+	}
 
-	desc->address = (unsigned long)fixup_pointer(bringup_idt_table, physbase);
+	desc->address = (unsigned long)idt;
 	native_load_idt(desc);
 }
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 3b40ec44a67d..6e68bca64ae4 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -348,6 +348,36 @@ SYM_CODE_START_LOCAL(early_idt_handler_common)
 	jmp restore_regs_and_return_to_kernel
 SYM_CODE_END(early_idt_handler_common)
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+/*
+ * VC Exception handler used during very early boot. The
+ * early_idt_handler_array can't be used because it returns via the
+ * paravirtualized INTERRUPT_RETURN and pv-ops don't work that early.
+ *
+ * This handler will end up in the .init.text section and not be
+ * available to boot secondary CPUs.
+ */
+SYM_CODE_START_NOALIGN(vc_no_ghcb)
+	UNWIND_HINT_IRET_REGS offset=8
+
+	/* Build pt_regs */
+	PUSH_AND_CLEAR_REGS
+
+	/* Call C handler */
+	movq	%rsp, %rdi
+	movq	ORIG_RAX(%rsp), %rsi
+	call	do_vc_no_ghcb
+
+	/* Unwind pt_regs */
+	POP_REGS
+
+	/* Remove Error Code */
+	addq	$8, %rsp
+
+	/* Pure iret required here - don't use INTERRUPT_RETURN */
+	iretq
+SYM_CODE_END(vc_no_ghcb)
+#endif
 #define SYM_DATA_START_PAGE_ALIGNED(name)			\
 	SYM_START(name, SYM_L_GLOBAL, .balign PAGE_SIZE)
--
2.28.0