Message-Id: <20240226143630.33643-63-jiangshanlai@gmail.com>
Date: Mon, 26 Feb 2024 22:36:19 +0800
From: Lai Jiangshan <jiangshanlai@...il.com>
To: linux-kernel@...r.kernel.org
Cc: Hou Wenlong <houwenlong.hwl@...group.com>,
	Lai Jiangshan <jiangshan.ljs@...group.com>,
	Linus Torvalds <torvalds@...ux-foundation.org>,
	Peter Zijlstra <peterz@...radead.org>,
	Sean Christopherson <seanjc@...gle.com>,
	Thomas Gleixner <tglx@...utronix.de>,
	Borislav Petkov <bp@...en8.de>,
	Ingo Molnar <mingo@...hat.com>,
	kvm@...r.kernel.org,
	Paolo Bonzini <pbonzini@...hat.com>,
	x86@...nel.org,
	Kees Cook <keescook@...omium.org>,
	Juergen Gross <jgross@...e.com>,
	Dave Hansen <dave.hansen@...ux.intel.com>,
	"H. Peter Anvin" <hpa@...or.com>,
	David Woodhouse <dwmw@...zon.co.uk>,
	Brian Gerst <brgerst@...il.com>,
	Josh Poimboeuf <jpoimboe@...nel.org>,
	Thomas Garnier <thgarnie@...omium.org>,
	Ard Biesheuvel <ardb@...nel.org>,
	Tom Lendacky <thomas.lendacky@....com>
Subject: [RFC PATCH 62/73] x86/pvm: Add early kernel event entry and dispatch code

From: Hou Wenlong <houwenlong.hwl@...group.com>

Since PVM doesn't support IDT-based event delivery, it needs to handle
early kernel events during booting. Currently, there are two stages
before the final IDT setup. Firstly, all exception handlers are set to
do_early_exception() in idt_setup_early_handlers(). Later, #DB, #BP,
and #PF get their real handlers in idt_setup_early_traps() and
idt_setup_early_pf(). Therefore, add the early kernel event entry and
dispatch code for PVM: pvm_early_event() forwards #DB, #BP, and #PF to
their real handlers once pvm_setup_early_traps() has been called, and
falls back to do_early_exception() otherwise.

Signed-off-by: Hou Wenlong <houwenlong.hwl@...group.com>
Signed-off-by: Lai Jiangshan <jiangshan.ljs@...group.com>
---
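Note for context (not part of this patch): a minimal sketch of how the
new pvm_setup_early_traps() hook is expected to slot into the early
boot flow. The caller shown here, early_trap_init_example(), is
hypothetical; the real call site is introduced elsewhere in the
series. Because the !CONFIG_PVM_GUEST stub is an empty inline, the
call can be made unconditionally.

#include <linux/init.h>
#include <asm/desc.h>		/* idt_setup_early_traps() */
#include <asm/pvm_para.h>	/* pvm_setup_early_traps() */

/* Hypothetical caller, for illustration only. */
static void __init early_trap_init_example(void)
{
	/* Native path: install the real early #DB/#BP IDT entries. */
	idt_setup_early_traps();

	/*
	 * PVM path: switch pvm_early_event() over from
	 * do_early_exception() to the real #DB/#BP/#PF handlers
	 * (a no-op stub when CONFIG_PVM_GUEST is off).
	 */
	pvm_setup_early_traps();
}
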
 arch/x86/include/asm/pvm_para.h |  5 +++++
 arch/x86/kernel/head_64.S       | 21 +++++++++++++++++++++
 arch/x86/kernel/pvm.c           | 33 +++++++++++++++++++++++++++++++++
 3 files changed, 59 insertions(+)

diff --git a/arch/x86/include/asm/pvm_para.h b/arch/x86/include/asm/pvm_para.h
index 9216e539fea8..bfb08f0ea293 100644
--- a/arch/x86/include/asm/pvm_para.h
+++ b/arch/x86/include/asm/pvm_para.h
@@ -13,6 +13,7 @@ typedef void (*idtentry_t)(struct pt_regs *regs);
 #include <uapi/asm/kvm_para.h>
 
 void __init pvm_early_setup(void);
+void __init pvm_setup_early_traps(void);
 void __init pvm_install_sysvec(unsigned int sysvec, idtentry_t handler);
 bool __init pvm_kernel_layout_relocate(void);
 
@@ -70,6 +71,10 @@ static inline void pvm_early_setup(void)
 {
 }
 
+static inline void pvm_setup_early_traps(void)
+{
+}
+
 static inline void pvm_install_sysvec(unsigned int sysvec, idtentry_t handler)
 {
 }
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 1d931bab4393..6ad3aedca7da 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -633,6 +633,27 @@ SYM_CODE_START_NOALIGN(vc_no_ghcb)
 SYM_CODE_END(vc_no_ghcb)
 #endif
 
+#ifdef CONFIG_PVM_GUEST
+	.align 256
+SYM_CODE_START_NOALIGN(pvm_early_kernel_event_entry)
+	UNWIND_HINT_ENTRY
+	ENDBR
+
+	incl	early_recursion_flag(%rip)
+
+	/* set %rcx, %r11 per PVM event handling specification */
+	movq	6*8(%rsp), %rcx
+	movq	7*8(%rsp), %r11
+
+	PUSH_AND_CLEAR_REGS
+	movq	%rsp, %rdi	/* %rdi -> pt_regs */
+	call	pvm_early_event
+
+	decl	early_recursion_flag(%rip)
+	jmp	pvm_restore_regs_and_return_to_kernel
+SYM_CODE_END(pvm_early_kernel_event_entry)
+#endif
+
 #define SYM_DATA_START_PAGE_ALIGNED(name)			\
 	SYM_START(name, SYM_L_GLOBAL, .balign PAGE_SIZE)
 
diff --git a/arch/x86/kernel/pvm.c b/arch/x86/kernel/pvm.c
index 88b013185ecd..b3b4ff0bbc91 100644
--- a/arch/x86/kernel/pvm.c
+++ b/arch/x86/kernel/pvm.c
@@ -17,6 +17,7 @@
 #include <asm/cpu_entry_area.h>
 #include <asm/desc.h>
 #include <asm/pvm_para.h>
+#include <asm/setup.h>
 #include <asm/traps.h>
 
 DEFINE_PER_CPU_PAGE_ALIGNED(struct pvm_vcpu_struct, pvm_vcpu_struct);
@@ -24,6 +25,38 @@ DEFINE_PER_CPU_PAGE_ALIGNED(struct pvm_vcpu_struct, pvm_vcpu_struct);
 unsigned long pvm_range_start __initdata;
 unsigned long pvm_range_end __initdata;
 
+static bool early_traps_setup __initdata;
+
+void __init pvm_early_event(struct pt_regs *regs)
+{
+	int vector = regs->orig_ax >> 32;
+
+	if (!early_traps_setup) {
+		do_early_exception(regs, vector);
+		return;
+	}
+
+	switch (vector) {
+	case X86_TRAP_DB:
+		exc_debug(regs);
+		return;
+	case X86_TRAP_BP:
+		exc_int3(regs);
+		return;
+	case X86_TRAP_PF:
+		exc_page_fault(regs, regs->orig_ax);
+		return;
+	default:
+		do_early_exception(regs, vector);
+		return;
+	}
+}
+
+void __init pvm_setup_early_traps(void)
+{
+	early_traps_setup = true;
+}
+
 static noinstr void pvm_bad_event(struct pt_regs *regs, unsigned long vector,
 				  unsigned long error_code)
 {
-- 
2.19.1.6.gb485710b

