[<prev] [next>] [<thread-prev] [day] [month] [year] [list]
Message-ID: <aXA7B9bbMNGBocTC@intel.com>
Date: Wed, 21 Jan 2026 10:33:43 +0800
From: Chao Gao <chao.gao@...el.com>
To: Xin Li <xin@...or.com>
CC: <linux-kernel@...r.kernel.org>, <kvm@...r.kernel.org>,
<linux-doc@...r.kernel.org>, <pbonzini@...hat.com>, <seanjc@...gle.com>,
<corbet@....net>, <tglx@...utronix.de>, <mingo@...hat.com>, <bp@...en8.de>,
<dave.hansen@...ux.intel.com>, <x86@...nel.org>, <hpa@...or.com>,
<luto@...nel.org>, <peterz@...radead.org>, <andrew.cooper3@...rix.com>,
<hch@...radead.org>, <sohil.mehta@...el.com>
Subject: Re: [PATCH v9 20/22] KVM: nVMX: Validate FRED-related VMCS fields
On Tue, Jan 20, 2026 at 01:19:55AM -0800, Xin Li wrote:
>>> @@ -3047,22 +3049,11 @@ static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu,
>>> u8 vector = intr_info & INTR_INFO_VECTOR_MASK;
>>> u32 intr_type = intr_info & INTR_INFO_INTR_TYPE_MASK;
>>> bool has_error_code = intr_info & INTR_INFO_DELIVER_CODE_MASK;
>>> + bool has_nested_exception = vmx->nested.msrs.basic & VMX_BASIC_NESTED_EXCEPTION;
>>
>> has_error_code reflects whether the to-be-injected event has an error code.
>> Using has_nested_exception for CPU capabilities here is a bit confusing.
>
>Looks better to just remove has_error_code.
>
>>
>>> bool urg = nested_cpu_has2(vmcs12,
>>> SECONDARY_EXEC_UNRESTRICTED_GUEST);
>>> bool prot_mode = !urg || vmcs12->guest_cr0 & X86_CR0_PE;
>>>
>>> - /* VM-entry interruption-info field: interruption type */
>>> - if (CC(intr_type == INTR_TYPE_RESERVED) ||
>>> - CC(intr_type == INTR_TYPE_OTHER_EVENT &&
>>> - !nested_cpu_supports_monitor_trap_flag(vcpu)))
>>> - return -EINVAL;
>>> -
>>> - /* VM-entry interruption-info field: vector */
>>> - if (CC(intr_type == INTR_TYPE_NMI_INTR && vector != NMI_VECTOR) ||
>>> - CC(intr_type == INTR_TYPE_HARD_EXCEPTION && vector > 31) ||
>>> - CC(intr_type == INTR_TYPE_OTHER_EVENT && vector != 0))
>>> - return -EINVAL;
>>> -
>>> /*
>>> * Cannot deliver error code in real mode or if the interrupt
>>> * type is not hardware exception. For other cases, do the
>>> @@ -3086,8 +3077,28 @@ static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu,
>>> if (CC(intr_info & INTR_INFO_RESVD_BITS_MASK))
>>> return -EINVAL;
>>>
>>> - /* VM-entry instruction length */
>>> + /*
>>> + * When the CPU enumerates VMX nested-exception support, bit 13
>>> + * (set to indicate a nested exception) of the intr info field
>>> + * may have value 1. Otherwise bit 13 is reserved.
>>> + */
>>> + if (CC(!(has_nested_exception && intr_type == INTR_TYPE_HARD_EXCEPTION) &&
>>> + intr_info & INTR_INFO_NESTED_EXCEPTION_MASK))
>>> + return -EINVAL;
>>> +
>>> switch (intr_type) {
>>> + case INTR_TYPE_EXT_INTR:
>>> + break;
>>
>> This can be dropped, as the "default" case will handle it.
>
>We don’t have a default case, as all 8 cases are listed (INTR_INFO_INTR_TYPE_MASK is 0x700).
>
>>
>>> + case INTR_TYPE_RESERVED:
>>> + return -EINVAL;
>>
>> I think we need to add a CC() statement to make it easier to correlate a
>> VM-entry failure with a specific consistency check.
>
>What do you want me to put in CC()?
>
>CC(intr_type == INTR_TYPE_RESERVED)?
how about this incremental change?
I prefer to make has_error_code and has_nested_exception consistent, and add a
CC() statement before all "return -EINVAL" statements for debugging.
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 8682709d8759..f13df70405d9 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -3049,7 +3049,7 @@ static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu,
u8 vector = intr_info & INTR_INFO_VECTOR_MASK;
u32 intr_type = intr_info & INTR_INFO_INTR_TYPE_MASK;
bool has_error_code = intr_info & INTR_INFO_DELIVER_CODE_MASK;
- bool has_nested_exception = vmx->nested.msrs.basic & VMX_BASIC_NESTED_EXCEPTION;
+ bool has_nested_exception = intr_info & INTR_INFO_NESTED_EXCEPTION_MASK;
bool urg = nested_cpu_has2(vmcs12,
SECONDARY_EXEC_UNRESTRICTED_GUEST);
bool prot_mode = !urg || vmcs12->guest_cr0 & X86_CR0_PE;
@@ -3077,20 +3077,10 @@ static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu,
if (CC(intr_info & INTR_INFO_RESVD_BITS_MASK))
return -EINVAL;
- /*
- * When the CPU enumerates VMX nested-exception support, bit 13
- * (set to indicate a nested exception) of the intr info field
- * may have value 1. Otherwise bit 13 is reserved.
- */
- if (CC(!(has_nested_exception && intr_type == INTR_TYPE_HARD_EXCEPTION) &&
- intr_info & INTR_INFO_NESTED_EXCEPTION_MASK))
+ if (CC(intr_type == INTR_TYPE_RESERVED))
return -EINVAL;
switch (intr_type) {
- case INTR_TYPE_EXT_INTR:
- break;
- case INTR_TYPE_RESERVED:
- return -EINVAL;
case INTR_TYPE_NMI_INTR:
if (CC(vector != NMI_VECTOR))
return -EINVAL;
@@ -3098,6 +3088,13 @@ static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu,
case INTR_TYPE_HARD_EXCEPTION:
if (CC(vector > 31))
return -EINVAL;
+ /*
+ * When the CPU enumerates VMX nested-exception support, bit 13
+ * (set to indicate a nested exception) of the intr info field
+ * may have value 1. Otherwise bit 13 is reserved.
+ */
+ if (CC(has_nested_exception && !(vmx->nested.msrs.basic & VMX_BASIC_NESTED_EXCEPTION)))
+ return -EINVAL;
break;
case INTR_TYPE_SOFT_EXCEPTION:
case INTR_TYPE_SOFT_INTR:
@@ -3108,6 +3105,9 @@ static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu,
return -EINVAL;
break;
case INTR_TYPE_OTHER_EVENT:
+ if (CC(vector > 3))
+ return -EINVAL;
+
switch (vector) {
case 0:
if (CC(!nested_cpu_supports_monitor_trap_flag(vcpu)))
@@ -3121,7 +3121,7 @@ static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu,
return -EINVAL;
break;
default:
- return -EINVAL;
+ break;
}
break;
}
@@ -3454,14 +3454,15 @@ static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
}
if (vmcs12->guest_cr4 & X86_CR4_FRED) {
unsigned int ss_dpl = VMX_AR_DPL(vmcs12->guest_ss_ar_bytes);
+
+ if (CC(ss_dpl == 1 || ss_dpl == 2))
+ return -EINVAL;
+
switch (ss_dpl) {
case 0:
if (CC(!(vmcs12->guest_cs_ar_bytes & VMX_AR_L_MASK)))
return -EINVAL;
break;
- case 1:
- case 2:
- return -EINVAL;
case 3:
if (CC(vmcs12->guest_rflags & X86_EFLAGS_IOPL))
return -EINVAL;
Powered by blists - more mailing lists