[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <aS6H/vIdKA/rLOxu@intel.com>
Date: Tue, 2 Dec 2025 14:32:30 +0800
From: Chao Gao <chao.gao@...el.com>
To: "Xin Li (Intel)" <xin@...or.com>
CC: <linux-kernel@...r.kernel.org>, <kvm@...r.kernel.org>,
<linux-doc@...r.kernel.org>, <pbonzini@...hat.com>, <seanjc@...gle.com>,
<corbet@....net>, <tglx@...utronix.de>, <mingo@...hat.com>, <bp@...en8.de>,
<dave.hansen@...ux.intel.com>, <x86@...nel.org>, <hpa@...or.com>,
<luto@...nel.org>, <peterz@...radead.org>, <andrew.cooper3@...rix.com>,
<hch@...radead.org>, <sohil.mehta@...el.com>
Subject: Re: [PATCH v9 19/22] KVM: nVMX: Handle FRED VMCS fields in nested
VMX context
>diff --git a/arch/x86/kvm/vmx/capabilities.h b/arch/x86/kvm/vmx/capabilities.h
>index f390f9f883c3..5eba2530ffb4 100644
>--- a/arch/x86/kvm/vmx/capabilities.h
>+++ b/arch/x86/kvm/vmx/capabilities.h
>@@ -80,6 +80,11 @@ static inline bool cpu_has_vmx_basic_no_hw_errcode_cc(void)
> return vmcs_config.basic & VMX_BASIC_NO_HW_ERROR_CODE_CC;
> }
>
>+static inline bool cpu_has_vmx_nested_exception(void)
>+{
>+ return vmcs_config.basic & VMX_BASIC_NESTED_EXCEPTION;
>+}
>+
> static inline bool cpu_has_virtual_nmis(void)
> {
> return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS &&
>diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
>index cbb682424a5b..63cdfffba58b 100644
>--- a/arch/x86/kvm/vmx/nested.c
>+++ b/arch/x86/kvm/vmx/nested.c
>@@ -708,6 +708,9 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
>
> nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
> MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
>+
>+ nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
>+ MSR_IA32_FRED_RSP0, MSR_TYPE_RW);
Why does only MSR_IA32_FRED_RSP0 get this intercept treatment here? What about the other FRED MSRs?
> #endif
> nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
> MSR_IA32_SPEC_CTRL, MSR_TYPE_RW);
>@@ -1294,9 +1297,11 @@ static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data)
> const u64 feature_bits = VMX_BASIC_DUAL_MONITOR_TREATMENT |
> VMX_BASIC_INOUT |
> VMX_BASIC_TRUE_CTLS |
>- VMX_BASIC_NO_HW_ERROR_CODE_CC;
>+ VMX_BASIC_NO_HW_ERROR_CODE_CC |
>+ VMX_BASIC_NESTED_EXCEPTION;
>
>- const u64 reserved_bits = GENMASK_ULL(63, 57) |
>+ const u64 reserved_bits = GENMASK_ULL(63, 59) |
>+ BIT_ULL(57) |
> GENMASK_ULL(47, 45) |
> BIT_ULL(31);
>
>@@ -2539,6 +2544,8 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct loaded_vmcs *vmcs0
> vmcs12->vm_entry_instruction_len);
> vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
> vmcs12->guest_interruptibility_info);
>+ if (cpu_has_vmx_fred())
>+ vmcs_write64(INJECTED_EVENT_DATA, vmcs12->injected_event_data);
> vmx->loaded_vmcs->nmi_known_unmasked =
> !(vmcs12->guest_interruptibility_info & GUEST_INTR_STATE_NMI);
> } else {
>@@ -2693,6 +2700,18 @@ static void prepare_vmcs02_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
> vmcs12->guest_ssp, vmcs12->guest_ssp_tbl);
>
> set_cr4_guest_host_mask(vmx);
>+
>+ if (guest_cpu_cap_has(&vmx->vcpu, X86_FEATURE_FRED) &&
>+ nested_cpu_load_guest_fred_state(vmcs12)) {
>+ vmcs_write64(GUEST_IA32_FRED_CONFIG, vmcs12->guest_ia32_fred_config);
>+ vmcs_write64(GUEST_IA32_FRED_RSP1, vmcs12->guest_ia32_fred_rsp1);
>+ vmcs_write64(GUEST_IA32_FRED_RSP2, vmcs12->guest_ia32_fred_rsp2);
>+ vmcs_write64(GUEST_IA32_FRED_RSP3, vmcs12->guest_ia32_fred_rsp3);
>+ vmcs_write64(GUEST_IA32_FRED_STKLVLS, vmcs12->guest_ia32_fred_stklvls);
>+ vmcs_write64(GUEST_IA32_FRED_SSP1, vmcs12->guest_ia32_fred_ssp1);
>+ vmcs_write64(GUEST_IA32_FRED_SSP2, vmcs12->guest_ia32_fred_ssp2);
>+ vmcs_write64(GUEST_IA32_FRED_SSP3, vmcs12->guest_ia32_fred_ssp3);
...
>+ }
> }
>
> /*
>@@ -2759,6 +2778,18 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
> vmcs_write64(GUEST_IA32_PAT, vcpu->arch.pat);
> }
>
>+ if (guest_cpu_cap_has(vcpu, X86_FEATURE_FRED) &&
>+ (!vmx->nested.nested_run_pending || !nested_cpu_load_guest_fred_state(vmcs12))) {
>+ vmcs_write64(GUEST_IA32_FRED_CONFIG, vmx->nested.pre_vmenter_fred_config);
>+ vmcs_write64(GUEST_IA32_FRED_RSP1, vmx->nested.pre_vmenter_fred_rsp1);
>+ vmcs_write64(GUEST_IA32_FRED_RSP2, vmx->nested.pre_vmenter_fred_rsp2);
>+ vmcs_write64(GUEST_IA32_FRED_RSP3, vmx->nested.pre_vmenter_fred_rsp3);
>+ vmcs_write64(GUEST_IA32_FRED_STKLVLS, vmx->nested.pre_vmenter_fred_stklvls);
>+ vmcs_write64(GUEST_IA32_FRED_SSP1, vmx->nested.pre_vmenter_fred_ssp1);
>+ vmcs_write64(GUEST_IA32_FRED_SSP2, vmx->nested.pre_vmenter_fred_ssp2);
>+ vmcs_write64(GUEST_IA32_FRED_SSP3, vmx->nested.pre_vmenter_fred_ssp3);
Would it be clearer to add two helpers to read/write the FRED VMCS fields? e.g., (compile-tested only)
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index c8edbe9c7e00..b709f4cdcba3 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -2614,6 +2614,30 @@ static void vmcs_write_cet_state(struct kvm_vcpu *vcpu, u64 s_cet,
}
}
+static void vmcs_read_fred_msrs(struct fred_msrs *msrs)
+{
+ msrs->fred_config = vmcs_read64(GUEST_IA32_FRED_CONFIG);
+ msrs->fred_rsp1 = vmcs_read64(GUEST_IA32_FRED_RSP1);
+ msrs->fred_rsp2 = vmcs_read64(GUEST_IA32_FRED_RSP2);
+ msrs->fred_rsp3 = vmcs_read64(GUEST_IA32_FRED_RSP3);
+ msrs->fred_stklvls = vmcs_read64(GUEST_IA32_FRED_STKLVLS);
+ msrs->fred_ssp1 = vmcs_read64(GUEST_IA32_FRED_SSP1);
+ msrs->fred_ssp2 = vmcs_read64(GUEST_IA32_FRED_SSP2);
+ msrs->fred_ssp3 = vmcs_read64(GUEST_IA32_FRED_SSP3);
+}
+
+static void vmcs_write_fred_msrs(struct fred_msrs *msrs)
+{
+ vmcs_write64(GUEST_IA32_FRED_CONFIG, msrs->fred_config);
+ vmcs_write64(GUEST_IA32_FRED_RSP1, msrs->fred_rsp1);
+ vmcs_write64(GUEST_IA32_FRED_RSP2, msrs->fred_rsp2);
+ vmcs_write64(GUEST_IA32_FRED_RSP3, msrs->fred_rsp3);
+ vmcs_write64(GUEST_IA32_FRED_STKLVLS, msrs->fred_stklvls);
+ vmcs_write64(GUEST_IA32_FRED_SSP1, msrs->fred_ssp1);
+ vmcs_write64(GUEST_IA32_FRED_SSP2, msrs->fred_ssp2);
+ vmcs_write64(GUEST_IA32_FRED_SSP3, msrs->fred_ssp3);
+}
+
static void prepare_vmcs02_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
{
struct hv_enlightened_vmcs *hv_evmcs = nested_vmx_evmcs(vmx);
@@ -2736,16 +2760,8 @@ static void prepare_vmcs02_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
set_cr4_guest_host_mask(vmx);
- if (nested_cpu_load_guest_fred_state(vmcs12)) {
- vmcs_write64(GUEST_IA32_FRED_CONFIG, vmcs12->guest_ia32_fred_config);
- vmcs_write64(GUEST_IA32_FRED_RSP1, vmcs12->guest_ia32_fred_rsp1);
- vmcs_write64(GUEST_IA32_FRED_RSP2, vmcs12->guest_ia32_fred_rsp2);
- vmcs_write64(GUEST_IA32_FRED_RSP3, vmcs12->guest_ia32_fred_rsp3);
- vmcs_write64(GUEST_IA32_FRED_STKLVLS, vmcs12->guest_ia32_fred_stklvls);
- vmcs_write64(GUEST_IA32_FRED_SSP1, vmcs12->guest_ia32_fred_ssp1);
- vmcs_write64(GUEST_IA32_FRED_SSP2, vmcs12->guest_ia32_fred_ssp2);
- vmcs_write64(GUEST_IA32_FRED_SSP3, vmcs12->guest_ia32_fred_ssp3);
- }
+ if (nested_cpu_load_guest_fred_state(vmcs12))
+ vmcs_write_fred_msrs((struct fred_msrs *)&vmcs12->guest_ia32_fred_config);
}
/*
@@ -2813,16 +2829,8 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
}
if (!vmx->nested.nested_run_pending ||
- !nested_cpu_load_guest_fred_state(vmcs12)) {
- vmcs_write64(GUEST_IA32_FRED_CONFIG, vmx->nested.pre_vmenter_fred_config);
- vmcs_write64(GUEST_IA32_FRED_RSP1, vmx->nested.pre_vmenter_fred_rsp1);
- vmcs_write64(GUEST_IA32_FRED_RSP2, vmx->nested.pre_vmenter_fred_rsp2);
- vmcs_write64(GUEST_IA32_FRED_RSP3, vmx->nested.pre_vmenter_fred_rsp3);
- vmcs_write64(GUEST_IA32_FRED_STKLVLS, vmx->nested.pre_vmenter_fred_stklvls);
- vmcs_write64(GUEST_IA32_FRED_SSP1, vmx->nested.pre_vmenter_fred_ssp1);
- vmcs_write64(GUEST_IA32_FRED_SSP2, vmx->nested.pre_vmenter_fred_ssp2);
- vmcs_write64(GUEST_IA32_FRED_SSP3, vmx->nested.pre_vmenter_fred_ssp3);
- }
+ !nested_cpu_load_guest_fred_state(vmcs12))
+ vmcs_write_fred_msrs(&vmx->nested.fred_msrs_pre_vmenter);
vcpu->arch.tsc_offset = kvm_calc_nested_tsc_offset(
vcpu->arch.l1_tsc_offset,
@@ -3830,16 +3838,8 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
&vmx->nested.pre_vmenter_ssp_tbl);
if (!vmx->nested.nested_run_pending ||
- !nested_cpu_load_guest_fred_state(vmcs12)) {
- vmx->nested.pre_vmenter_fred_config = vmcs_read64(GUEST_IA32_FRED_CONFIG);
- vmx->nested.pre_vmenter_fred_rsp1 = vmcs_read64(GUEST_IA32_FRED_RSP1);
- vmx->nested.pre_vmenter_fred_rsp2 = vmcs_read64(GUEST_IA32_FRED_RSP2);
- vmx->nested.pre_vmenter_fred_rsp3 = vmcs_read64(GUEST_IA32_FRED_RSP3);
- vmx->nested.pre_vmenter_fred_stklvls = vmcs_read64(GUEST_IA32_FRED_STKLVLS);
- vmx->nested.pre_vmenter_fred_ssp1 = vmcs_read64(GUEST_IA32_FRED_SSP1);
- vmx->nested.pre_vmenter_fred_ssp2 = vmcs_read64(GUEST_IA32_FRED_SSP2);
- vmx->nested.pre_vmenter_fred_ssp3 = vmcs_read64(GUEST_IA32_FRED_SSP3);
- }
+ !nested_cpu_load_guest_fred_state(vmcs12))
+ vmcs_read_fred_msrs(&vmx->nested.fred_msrs_pre_vmenter);
/*
* Overwrite vmcs01.GUEST_CR3 with L1's CR3 if EPT is disabled *and*
@@ -4938,25 +4938,10 @@ static void sync_vmcs02_to_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
&vmcs12->guest_ssp,
&vmcs12->guest_ssp_tbl);
- vmx->nested.fred_msr_at_vmexit.fred_config = vmcs_read64(GUEST_IA32_FRED_CONFIG);
- vmx->nested.fred_msr_at_vmexit.fred_rsp1 = vmcs_read64(GUEST_IA32_FRED_RSP1);
- vmx->nested.fred_msr_at_vmexit.fred_rsp2 = vmcs_read64(GUEST_IA32_FRED_RSP2);
- vmx->nested.fred_msr_at_vmexit.fred_rsp3 = vmcs_read64(GUEST_IA32_FRED_RSP3);
- vmx->nested.fred_msr_at_vmexit.fred_stklvls = vmcs_read64(GUEST_IA32_FRED_STKLVLS);
- vmx->nested.fred_msr_at_vmexit.fred_ssp1 = vmcs_read64(GUEST_IA32_FRED_SSP1);
- vmx->nested.fred_msr_at_vmexit.fred_ssp2 = vmcs_read64(GUEST_IA32_FRED_SSP2);
- vmx->nested.fred_msr_at_vmexit.fred_ssp3 = vmcs_read64(GUEST_IA32_FRED_SSP3);
+ vmcs_read_fred_msrs(&vmx->nested.fred_msrs_at_vmexit);
- if (nested_cpu_save_guest_fred_state(vmcs12)) {
- vmcs12->guest_ia32_fred_config = vmx->nested.fred_msr_at_vmexit.fred_config;
- vmcs12->guest_ia32_fred_rsp1 = vmx->nested.fred_msr_at_vmexit.fred_rsp1;
- vmcs12->guest_ia32_fred_rsp2 = vmx->nested.fred_msr_at_vmexit.fred_rsp2;
- vmcs12->guest_ia32_fred_rsp3 = vmx->nested.fred_msr_at_vmexit.fred_rsp3;
- vmcs12->guest_ia32_fred_stklvls = vmx->nested.fred_msr_at_vmexit.fred_stklvls;
- vmcs12->guest_ia32_fred_ssp1 = vmx->nested.fred_msr_at_vmexit.fred_ssp1;
- vmcs12->guest_ia32_fred_ssp2 = vmx->nested.fred_msr_at_vmexit.fred_ssp2;
- vmcs12->guest_ia32_fred_ssp3 = vmx->nested.fred_msr_at_vmexit.fred_ssp3;
- }
+ if (nested_cpu_save_guest_fred_state(vmcs12))
+ memcpy(&vmcs12->guest_ia32_fred_config, &vmx->nested.fred_msrs_at_vmexit, sizeof(struct fred_msrs));
}
/*
@@ -5119,25 +5104,10 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
WARN_ON_ONCE(__kvm_emulate_msr_write(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
vmcs12->host_ia32_perf_global_ctrl));
- if (nested_cpu_load_host_fred_state(vmcs12)) {
- vmcs_write64(GUEST_IA32_FRED_CONFIG, vmcs12->host_ia32_fred_config);
- vmcs_write64(GUEST_IA32_FRED_RSP1, vmcs12->host_ia32_fred_rsp1);
- vmcs_write64(GUEST_IA32_FRED_RSP2, vmcs12->host_ia32_fred_rsp2);
- vmcs_write64(GUEST_IA32_FRED_RSP3, vmcs12->host_ia32_fred_rsp3);
- vmcs_write64(GUEST_IA32_FRED_STKLVLS, vmcs12->host_ia32_fred_stklvls);
- vmcs_write64(GUEST_IA32_FRED_SSP1, vmcs12->host_ia32_fred_ssp1);
- vmcs_write64(GUEST_IA32_FRED_SSP2, vmcs12->host_ia32_fred_ssp2);
- vmcs_write64(GUEST_IA32_FRED_SSP3, vmcs12->host_ia32_fred_ssp3);
- } else {
- vmcs_write64(GUEST_IA32_FRED_CONFIG, vmx->nested.fred_msr_at_vmexit.fred_config);
- vmcs_write64(GUEST_IA32_FRED_RSP1, vmx->nested.fred_msr_at_vmexit.fred_rsp1);
- vmcs_write64(GUEST_IA32_FRED_RSP2, vmx->nested.fred_msr_at_vmexit.fred_rsp2);
- vmcs_write64(GUEST_IA32_FRED_RSP3, vmx->nested.fred_msr_at_vmexit.fred_rsp3);
- vmcs_write64(GUEST_IA32_FRED_STKLVLS, vmx->nested.fred_msr_at_vmexit.fred_stklvls);
- vmcs_write64(GUEST_IA32_FRED_SSP1, vmx->nested.fred_msr_at_vmexit.fred_ssp1);
- vmcs_write64(GUEST_IA32_FRED_SSP2, vmx->nested.fred_msr_at_vmexit.fred_ssp2);
- vmcs_write64(GUEST_IA32_FRED_SSP3, vmx->nested.fred_msr_at_vmexit.fred_ssp3);
- }
+ if (nested_cpu_load_host_fred_state(vmcs12))
+ vmcs_write_fred_msrs((struct fred_msrs *)&vmcs12->host_ia32_fred_config);
+ else
+ vmcs_write_fred_msrs(&vmx->nested.fred_msrs_at_vmexit);
/* Set L1 segment info according to Intel SDM
27.5.2 Loading Host Segment and Descriptor-Table Registers */
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 36dcc888e5c6..c1c32e8ae068 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -87,7 +87,7 @@ struct pt_desc {
* running the L1 VMM if SECONDARY_VM_EXIT_LOAD_IA32_FRED is cleared in
* vmcs12.
*/
-struct fred_msr_at_vmexit {
+struct fred_msrs {
u64 fred_config;
u64 fred_rsp1;
u64 fred_rsp2;
@@ -215,16 +215,8 @@ struct nested_vmx {
u64 pre_vmenter_s_cet;
u64 pre_vmenter_ssp;
u64 pre_vmenter_ssp_tbl;
- u64 pre_vmenter_fred_config;
- u64 pre_vmenter_fred_rsp1;
- u64 pre_vmenter_fred_rsp2;
- u64 pre_vmenter_fred_rsp3;
- u64 pre_vmenter_fred_stklvls;
- u64 pre_vmenter_fred_ssp1;
- u64 pre_vmenter_fred_ssp2;
- u64 pre_vmenter_fred_ssp3;
-
- struct fred_msr_at_vmexit fred_msr_at_vmexit;
+
+ struct fred_msrs fred_msrs_at_vmexit, fred_msrs_pre_vmenter;
/* to migrate it to L1 if L2 writes to L1's CR8 directly */
int l1_tpr_threshold;
Powered by blists - more mailing lists