Message-ID: <20251110180131.28264-3-chang.seok.bae@intel.com>
Date: Mon, 10 Nov 2025 18:01:13 +0000
From: "Chang S. Bae" <chang.seok.bae@...el.com>
To: kvm@...r.kernel.org,
	linux-kernel@...r.kernel.org
Cc: pbonzini@...hat.com,
	seanjc@...gle.com,
	chao.gao@...el.com,
	zhao1.liu@...el.com,
	chang.seok.bae@...el.com
Subject: [PATCH RFC v1 02/20] KVM: x86: Refactor GPR accessors to differentiate register access types

Refactor the GPR accessors by introducing internal helpers that
distinguish between legacy and extended registers.

Newer x86 CPUs introduce additional GPRs, but those registers will
initially remain unused in the kernel and will not be saved in the KVM
register cache on every VM exit. Guest state is expected to remain live
in the hardware registers.

This abstraction layer centralizes the choice of access method behind a
unified interface. For now, the EGPR accessors are placeholders to be
implemented later.
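
For illustration only (not part of this patch), a hypothetical caller
that has decoded a register index 'reg' would go through the unified
accessors and stay unaware of whether the index names a legacy GPR or
an EGPR; the dispatch happens inside the internal helpers:

	/*
	 * Hypothetical example, assuming x86.h is included: 'reg' may be
	 * anything from VCPU_REGS_RAX..VCPU_REGS_R15 or, on 64-bit,
	 * VCPU_XREG_R16..VCPU_XREG_R31. The choice between
	 * kvm_register_read_raw()/kvm_register_write_raw() and the EGPR
	 * accessors is made inside _kvm_gpr_read()/_kvm_gpr_write().
	 */
	static void example_increment_gpr(struct kvm_vcpu *vcpu, int reg)
	{
		unsigned long val = kvm_gpr_read(vcpu, reg);

		kvm_gpr_write(vcpu, reg, val + 1);
	}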

Signed-off-by: Chang S. Bae <chang.seok.bae@...el.com>
---
 arch/x86/include/asm/kvm_host.h      | 18 ++++++++++++
 arch/x86/include/asm/kvm_vcpu_regs.h | 16 ++++++++++
 arch/x86/kvm/fpu.h                   |  6 ++++
 arch/x86/kvm/x86.h                   | 44 ++++++++++++++++++++++++++--
 4 files changed, 82 insertions(+), 2 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 48598d017d6f..940f83c121cf 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -212,6 +212,24 @@ enum {
 	VCPU_SREG_GS,
 	VCPU_SREG_TR,
 	VCPU_SREG_LDTR,
+#ifdef CONFIG_X86_64
+	VCPU_XREG_R16 = __VCPU_XREG_R16,
+	VCPU_XREG_R17 = __VCPU_XREG_R17,
+	VCPU_XREG_R18 = __VCPU_XREG_R18,
+	VCPU_XREG_R19 = __VCPU_XREG_R19,
+	VCPU_XREG_R20 = __VCPU_XREG_R20,
+	VCPU_XREG_R21 = __VCPU_XREG_R21,
+	VCPU_XREG_R22 = __VCPU_XREG_R22,
+	VCPU_XREG_R23 = __VCPU_XREG_R23,
+	VCPU_XREG_R24 = __VCPU_XREG_R24,
+	VCPU_XREG_R25 = __VCPU_XREG_R25,
+	VCPU_XREG_R26 = __VCPU_XREG_R26,
+	VCPU_XREG_R27 = __VCPU_XREG_R27,
+	VCPU_XREG_R28 = __VCPU_XREG_R28,
+	VCPU_XREG_R29 = __VCPU_XREG_R29,
+	VCPU_XREG_R30 = __VCPU_XREG_R30,
+	VCPU_XREG_R31 = __VCPU_XREG_R31,
+#endif
 };
 
 enum exit_fastpath_completion {
diff --git a/arch/x86/include/asm/kvm_vcpu_regs.h b/arch/x86/include/asm/kvm_vcpu_regs.h
index 1af2cb59233b..dd0cc171f405 100644
--- a/arch/x86/include/asm/kvm_vcpu_regs.h
+++ b/arch/x86/include/asm/kvm_vcpu_regs.h
@@ -20,6 +20,22 @@
 #define __VCPU_REGS_R13 13
 #define __VCPU_REGS_R14 14
 #define __VCPU_REGS_R15 15
+#define __VCPU_XREG_R16 16
+#define __VCPU_XREG_R17 17
+#define __VCPU_XREG_R18 18
+#define __VCPU_XREG_R19 19
+#define __VCPU_XREG_R20 20
+#define __VCPU_XREG_R21 21
+#define __VCPU_XREG_R22 22
+#define __VCPU_XREG_R23 23
+#define __VCPU_XREG_R24 24
+#define __VCPU_XREG_R25 25
+#define __VCPU_XREG_R26 26
+#define __VCPU_XREG_R27 27
+#define __VCPU_XREG_R28 28
+#define __VCPU_XREG_R29 29
+#define __VCPU_XREG_R30 30
+#define __VCPU_XREG_R31 31
 #endif
 
 #endif /* _ASM_X86_KVM_VCPU_REGS_H */
diff --git a/arch/x86/kvm/fpu.h b/arch/x86/kvm/fpu.h
index 3ba12888bf66..159239b3a651 100644
--- a/arch/x86/kvm/fpu.h
+++ b/arch/x86/kvm/fpu.h
@@ -4,6 +4,7 @@
 #define __KVM_FPU_H_
 
 #include <asm/fpu/api.h>
+#include <asm/kvm_vcpu_regs.h>
 
 typedef u32		__attribute__((vector_size(16))) sse128_t;
 #define __sse128_u	union { sse128_t vec; u64 as_u64[2]; u32 as_u32[4]; }
@@ -137,4 +138,9 @@ static inline void kvm_write_mmx_reg(int reg, const u64 *data)
 	kvm_fpu_put();
 }
 
+#ifdef CONFIG_X86_64
+static inline unsigned long kvm_read_egpr(int reg) { return 0; }
+static inline void kvm_write_egpr(int reg, unsigned long data) { }
+#endif
+
 #endif
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 4edadd64d3d5..74ae8f12b5a1 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -400,9 +400,49 @@ static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
 	return false;
 }
 
+#ifdef CONFIG_X86_64
+static inline unsigned long _kvm_gpr_read(struct kvm_vcpu *vcpu, int reg)
+{
+	switch (reg) {
+	case VCPU_REGS_RAX ... VCPU_REGS_R15:
+		return kvm_register_read_raw(vcpu, reg);
+	case VCPU_XREG_R16 ... VCPU_XREG_R31:
+		return kvm_read_egpr(reg);
+	default:
+		WARN_ON_ONCE(1);
+	}
+
+	return 0;
+}
+
+static inline void _kvm_gpr_write(struct kvm_vcpu *vcpu, int reg, unsigned long val)
+{
+	switch (reg) {
+	case VCPU_REGS_RAX ... VCPU_REGS_R15:
+		kvm_register_write_raw(vcpu, reg, val);
+		break;
+	case VCPU_XREG_R16 ... VCPU_XREG_R31:
+		kvm_write_egpr(reg, val);
+		break;
+	default:
+		WARN_ON_ONCE(1);
+	}
+}
+#else
+static inline unsigned long _kvm_gpr_read(struct kvm_vcpu *vcpu, int reg)
+{
+	return kvm_register_read_raw(vcpu, reg);
+}
+
+static inline void _kvm_gpr_write(struct kvm_vcpu *vcpu, int reg, unsigned long val)
+{
+	kvm_register_write_raw(vcpu, reg, val);
+}
+#endif
+
 static inline unsigned long kvm_gpr_read(struct kvm_vcpu *vcpu, int reg)
 {
-	unsigned long val = kvm_register_read_raw(vcpu, reg);
+	unsigned long val = _kvm_gpr_read(vcpu, reg);
 
 	return is_64_bit_mode(vcpu) ? val : (u32)val;
 }
@@ -411,7 +451,7 @@ static inline void kvm_gpr_write(struct kvm_vcpu *vcpu, int reg, unsigned long v
 {
 	if (!is_64_bit_mode(vcpu))
 		val = (u32)val;
-	return kvm_register_write_raw(vcpu, reg, val);
+	_kvm_gpr_write(vcpu, reg, val);
 }
 
 static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
-- 
2.51.0

