Message-ID: <20251110180131.28264-18-chang.seok.bae@intel.com>
Date: Mon, 10 Nov 2025 18:01:28 +0000
From: "Chang S. Bae" <chang.seok.bae@...el.com>
To: kvm@...r.kernel.org,
linux-kernel@...r.kernel.org
Cc: pbonzini@...hat.com,
seanjc@...gle.com,
chao.gao@...el.com,
zhao1.liu@...el.com,
chang.seok.bae@...el.com
Subject: [PATCH RFC v1 17/20] KVM: x86: Prepare APX state setting in XCR0
Prepare for enabling the APX state in XCR0 by implementing the
previously added placeholders and ensuring readiness.
APX introduces EGPRs, tracked as XSTATE component 19. As with other
XSAVE-managed states, EGPR availability is controlled through XCR0, and
the registers are accessible only in 64-bit mode.
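For illustration, a guest-side probe of this gate could look like the
sketch below. This is a minimal example, not code from this series: a
real check would first confirm OSXSAVE and CPUID-enumerated APX support,
and _xgetbv() assumes a GCC/Clang toolchain built with -mxsave.

  #include <stdbool.h>
  #include <stdint.h>
  #include <immintrin.h>              /* _xgetbv() */

  #define XFEATURE_APX_BIT  19        /* XSTATE component 19: EGPRs */
  #define XFEATURE_MASK_APX (1ULL << XFEATURE_APX_BIT)

  /* True once the kernel (or VMM, for a guest) has set APX in XCR0. */
  static bool apx_state_enabled(void)
  {
          uint64_t xcr0 = _xgetbv(0); /* 0 == XCR_XFEATURE_ENABLED_MASK */

          return xcr0 & XFEATURE_MASK_APX;
  }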
At this point, only VMX supports EGPRs. SVM will require corresponding
extensions to handle EGPR indices.
The addition to the supported XCR0 mask should accompany guest CPUID
exposure, which will be done separately.
Signed-off-by: Chang S. Bae <chang.seok.bae@...el.com>
---
RFC note
Not all callers may need to validate the XCR0 bit -- a capability bit
might suffice instead. However, every exit associated with EGPRs should
already have that control bit set in the first place. Checking it
explicitly incurs no additional cost, so I kept it for consistency.
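To make the consistency point concrete, a hypothetical exit-path caller
would follow the pattern below; decode_egpr_index() is an illustrative
name, not a function from this series:

  /*
   * Sketch only: even though this exit cannot occur unless the EGPR
   * enabling control is set, the handler still gates decoding on the
   * architectural XCR0 check for consistency.
   */
  if (vmx_egpr_enabled(vcpu))
          reg = decode_egpr_index(vmx_get_insn_info(vcpu));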
---
arch/x86/kvm/emulate.c | 9 +++++++--
arch/x86/kvm/kvm_cache_regs.h | 1 +
arch/x86/kvm/kvm_emulate.h | 1 +
arch/x86/kvm/svm/svm.c | 7 ++++++-
arch/x86/kvm/vmx/vmx.h | 9 ++++++++-
arch/x86/kvm/x86.c | 11 +++++++++++
6 files changed, 34 insertions(+), 4 deletions(-)
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index f9381a4055d6..ba3020e6f469 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -4787,9 +4787,14 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
return rc;
}
-static inline bool emul_egpr_enabled(struct x86_emulate_ctxt *ctxt __maybe_unused)
+/* EGPR availability is controlled by the APX feature bit in XCR0. */
+static inline bool emul_egpr_enabled(struct x86_emulate_ctxt *ctxt)
{
- return false;
+ u64 xcr0;
+
+ ctxt->ops->get_xcr(ctxt, XCR_XFEATURE_ENABLED_MASK, &xcr0);
+
+ return xcr0 & XFEATURE_MASK_APX;
}
int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len, int emulation_type)
diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h
index 8ddb01191d6f..acdb3751317c 100644
--- a/arch/x86/kvm/kvm_cache_regs.h
+++ b/arch/x86/kvm/kvm_cache_regs.h
@@ -3,6 +3,7 @@
#define ASM_KVM_CACHE_REGS_H
#include <linux/kvm_host.h>
+#include <asm/fpu/xcr.h>
#define KVM_POSSIBLE_CR0_GUEST_BITS (X86_CR0_TS | X86_CR0_WP)
#define KVM_POSSIBLE_CR4_GUEST_BITS \
diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h
index cc16211d61f6..673a82532c78 100644
--- a/arch/x86/kvm/kvm_emulate.h
+++ b/arch/x86/kvm/kvm_emulate.h
@@ -237,6 +237,7 @@ struct x86_emulate_ops {
bool (*is_smm)(struct x86_emulate_ctxt *ctxt);
int (*leave_smm)(struct x86_emulate_ctxt *ctxt);
void (*triple_fault)(struct x86_emulate_ctxt *ctxt);
+ int (*get_xcr)(struct x86_emulate_ctxt *ctxt, u32 index, u64 *xcr);
int (*set_xcr)(struct x86_emulate_ctxt *ctxt, u32 index, u64 xcr);
gva_t (*get_untagged_addr)(struct x86_emulate_ctxt *ctxt, gva_t addr,
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 3aa2c37754ef..e6a082686000 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -5288,8 +5288,13 @@ static __init int svm_hardware_setup(void)
}
kvm_enable_efer_bits(EFER_NX);
+ /*
+ * APX introduces EGPRs, which require additional VMCB support.
+ * Disable APX until the necessary extensions are handled.
+ */
kvm_caps.supported_xcr0 &= ~(XFEATURE_MASK_BNDREGS |
- XFEATURE_MASK_BNDCSR);
+ XFEATURE_MASK_BNDCSR |
+ XFEATURE_MASK_APX);
if (boot_cpu_has(X86_FEATURE_FXSR_OPT))
kvm_enable_efer_bits(EFER_FFXSR);
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 6cf1eb739caf..784aa0504dce 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -372,7 +372,14 @@ struct vmx_insn_info {
union insn_info info;
};
-static inline bool vmx_egpr_enabled(struct kvm_vcpu *vcpu __maybe_unused) { return false; }
+/*
+ * EGPR availability is controlled by the APX xfeature bit in XCR0; the
+ * registers themselves are accessible only in 64-bit mode.
+ */
+static inline bool vmx_egpr_enabled(struct kvm_vcpu *vcpu)
+{
+	return (vcpu->arch.xcr0 & XFEATURE_MASK_APX) && is_64_bit_mode(vcpu);
+}
static inline struct vmx_insn_info vmx_get_insn_info(struct kvm_vcpu *vcpu)
{
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 4c8c2fc3bda6..e087db0f4153 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -8843,6 +8843,16 @@ static void emulator_triple_fault(struct x86_emulate_ctxt *ctxt)
kvm_make_request(KVM_REQ_TRIPLE_FAULT, emul_to_vcpu(ctxt));
}
+static int emulator_get_xcr(struct x86_emulate_ctxt *ctxt, u32 index, u64 *xcr)
+{
+	/* Only XCR_XFEATURE_ENABLED_MASK is supported for now. */
+ if (index != XCR_XFEATURE_ENABLED_MASK)
+ return 1;
+
+ *xcr = emul_to_vcpu(ctxt)->arch.xcr0;
+ return 0;
+}
+
static int emulator_set_xcr(struct x86_emulate_ctxt *ctxt, u32 index, u64 xcr)
{
return __kvm_set_xcr(emul_to_vcpu(ctxt), index, xcr);
@@ -8915,6 +8925,7 @@ static const struct x86_emulate_ops emulate_ops = {
.is_smm = emulator_is_smm,
.leave_smm = emulator_leave_smm,
.triple_fault = emulator_triple_fault,
+ .get_xcr = emulator_get_xcr,
.set_xcr = emulator_set_xcr,
.get_untagged_addr = emulator_get_untagged_addr,
.is_canonical_addr = emulator_is_canonical_addr,
--
2.51.0