Message-ID: <20260115011312.3675857-26-yosry.ahmed@linux.dev>
Date: Thu, 15 Jan 2026 01:13:11 +0000
From: Yosry Ahmed <yosry.ahmed@...ux.dev>
To: Sean Christopherson <seanjc@...gle.com>
Cc: Paolo Bonzini <pbonzini@...hat.com>,
kvm@...r.kernel.org,
linux-kernel@...r.kernel.org,
Yosry Ahmed <yosry.ahmed@...ux.dev>,
Jim Mattson <jmattson@...gle.com>
Subject: [PATCH v4 25/26] KVM: nSVM: Sanitize control fields copied from VMCB12

Make sure all fields used from VMCB12 in creating the VMCB02 are
sanitized, such that no unhandled or reserved bits end up in the VMCB02.

The following control fields are read from VMCB12 and have bits that are
either reserved or not handled/advertised by KVM: tlb_ctl, int_ctl,
int_state, int_vector, event_inj, misc_ctl, and misc_ctl2.

The following fields do not require any extra sanitizing:
- int_ctl: bits from VMCB12 are copied bit-by-bit as needed (see the
  sketch after this list).
- misc_ctl: only used in consistency checks (particularly NP_ENABLE).
- misc_ctl2: bits from VMCB12 are copied bit-by-bit as needed.
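
To make the bit-by-bit copy concrete, here is a stand-alone user-space
sketch of the pattern (the masks and variable names are illustrative
only, not KVM's exact int_ctl handling):

  #include <stdint.h>
  #include <stdio.h>

  /* Illustrative subset of int_ctl bits. */
  #define V_TPR_MASK            0x0fU
  #define V_IRQ_MASK            (1u << 8)
  #define V_INTR_MASKING_MASK   (1u << 24)

  int main(void)
  {
      uint32_t vmcb12_int_ctl = 0xdeadbeef;          /* whatever L1 wrote, junk bits included */
      uint32_t vmcb01_int_ctl = V_INTR_MASKING_MASK;

      /* Bits L1 may control vs. bits inherited from vmcb01. */
      uint32_t vmcb12_bits = V_TPR_MASK | V_IRQ_MASK;
      uint32_t vmcb01_bits = V_INTR_MASKING_MASK;

      uint32_t vmcb02_int_ctl = (vmcb12_int_ctl & vmcb12_bits) |
                                (vmcb01_int_ctl & vmcb01_bits);

      printf("vmcb02 int_ctl: 0x%08x\n", vmcb02_int_ctl);  /* undefined bits are gone */
      return 0;
  }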

For the remaining fields, make sure only defined bits are copied from
L1's VMCB12 into KVM's cache by defining appropriate masks where needed.
The only exception is tlb_ctl, which is unused, so remove it.
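
For illustration, a minimal user-space sketch of how such a mask is
built and applied (the SVM_EVTINJ_* values mirror the definitions
touched by this patch; BIT()/GENMASK() are simplified 32-bit stand-ins
for the kernel macros):

  #include <stdint.h>
  #include <stdio.h>

  #define BIT(n)                  (1u << (n))
  #define GENMASK(h, l)           ((~0u >> (31 - (h))) & (~0u << (l)))

  #define SVM_EVTINJ_VEC_MASK     GENMASK(7, 0)
  #define SVM_EVTINJ_TYPE_MASK    GENMASK(10, 8)
  #define SVM_EVTINJ_VALID_ERR    BIT(11)
  #define SVM_EVTINJ_VALID        BIT(31)
  #define SVM_EVTINJ_RESERVED_BITS ~(SVM_EVTINJ_VEC_MASK | SVM_EVTINJ_TYPE_MASK | \
                                     SVM_EVTINJ_VALID_ERR | SVM_EVTINJ_VALID)

  int main(void)
  {
      /* A #GP injection with a reserved bit (bit 12) set on top. */
      uint32_t event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_VALID_ERR |
                           (3u << 8) | 13 | BIT(12);

      /* Keep only the architecturally defined bits, as the cache copy does. */
      uint32_t cached = event_inj & ~SVM_EVTINJ_RESERVED_BITS;

      printf("raw 0x%08x -> cached 0x%08x\n", event_inj, cached);
      return 0;
  }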

Opportunistically clean up how the lower bits of {io/msr}pm_base_pa are
ignored in __nested_copy_vmcb_control_to_cache() by using PAGE_MASK.
Also, move the ASID copy up next to the other special cases, and expand
the comment about the ASID being copied only for consistency checks.
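
And a quick stand-alone check of the PAGE_MASK cleanup (assuming the
usual 4K x86 PAGE_SIZE rather than the kernel's own definitions):

  #include <assert.h>
  #include <stdint.h>

  #define PAGE_SIZE 4096ULL
  #define PAGE_MASK (~(PAGE_SIZE - 1))

  int main(void)
  {
      uint64_t msrpm_base_pa = 0xabcdef123ULL;  /* low 12 bits are ignored by hardware */

      /* Masking with PAGE_MASK and with ~0x0fffULL keeps exactly the same bits. */
      assert((msrpm_base_pa & PAGE_MASK) == (msrpm_base_pa & ~0x0fffULL));
      return 0;
  }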

Suggested-by: Jim Mattson <jmattson@...gle.com>
Signed-off-by: Yosry Ahmed <yosry.ahmed@...ux.dev>
---
arch/x86/include/asm/svm.h | 5 +++++
arch/x86/kvm/svm/nested.c | 28 +++++++++++++++-------------
arch/x86/kvm/svm/svm.h | 1 -
3 files changed, 20 insertions(+), 14 deletions(-)

diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 0bc26b2b3fd7..d3632fbb80be 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -223,6 +223,8 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
#define AVIC_ENABLE_SHIFT 31
#define AVIC_ENABLE_MASK BIT(AVIC_ENABLE_SHIFT)
+#define SVM_INT_VECTOR_MASK GENMASK(7, 0)
+
#define SVM_INTERRUPT_SHADOW_MASK BIT_ULL(0)
#define SVM_GUEST_INTERRUPT_MASK BIT_ULL(1)
@@ -636,6 +638,9 @@ static inline void __unused_size_checks(void)
#define SVM_EVTINJ_VALID_ERR BIT(11)
#define SVM_EVTINJ_VALID BIT(31)
+#define SVM_EVTINJ_RESERVED_BITS ~(SVM_EVTINJ_VEC_MASK | SVM_EVTINJ_TYPE_MASK | \
+ SVM_EVTINJ_VALID_ERR | SVM_EVTINJ_VALID)
+
#define SVM_EXITINTINFO_VEC_MASK SVM_EVTINJ_VEC_MASK
#define SVM_EXITINTINFO_TYPE_MASK SVM_EVTINJ_TYPE_MASK
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index ffb741f401d0..e62fd6524feb 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -500,33 +500,36 @@ void __nested_copy_vmcb_control_to_cache(struct kvm_vcpu *vcpu,
if (!guest_cpu_cap_has(vcpu, X86_FEATURE_NPT))
to->misc_ctl &= ~SVM_MISC_CTL_NP_ENABLE;
- to->iopm_base_pa = from->iopm_base_pa;
- to->msrpm_base_pa = from->msrpm_base_pa;
+ /*
+ * Copy the ASID here because nested_vmcb_check_controls() will check
+ * it. The ASID could be invalid, or conflict with another VM's ASID,
+ * so it should never be used directly to run L2.
+ */
+ to->asid = from->asid;
+
+ /* Lower bits of IOPM_BASE_PA and MSRPM_BASE_PA are ignored */
+ to->iopm_base_pa = from->iopm_base_pa & PAGE_MASK;
+ to->msrpm_base_pa = from->msrpm_base_pa & PAGE_MASK;
+
to->tsc_offset = from->tsc_offset;
- to->tlb_ctl = from->tlb_ctl;
to->erap_ctl = from->erap_ctl;
to->int_ctl = from->int_ctl;
- to->int_vector = from->int_vector;
- to->int_state = from->int_state;
+ to->int_vector = from->int_vector & SVM_INT_VECTOR_MASK;
+ to->int_state = from->int_state & SVM_INTERRUPT_SHADOW_MASK;
to->exit_code = from->exit_code;
to->exit_code_hi = from->exit_code_hi;
to->exit_info_1 = from->exit_info_1;
to->exit_info_2 = from->exit_info_2;
to->exit_int_info = from->exit_int_info;
to->exit_int_info_err = from->exit_int_info_err;
- to->event_inj = from->event_inj;
+ to->event_inj = from->event_inj & ~SVM_EVTINJ_RESERVED_BITS;
to->event_inj_err = from->event_inj_err;
to->next_rip = from->next_rip;
to->nested_cr3 = from->nested_cr3;
- to->misc_ctl2 = from->misc_ctl2;
+ to->misc_ctl2 = from->misc_ctl2;
to->pause_filter_count = from->pause_filter_count;
to->pause_filter_thresh = from->pause_filter_thresh;
- /* Copy asid here because nested_vmcb_check_controls() will check it */
- to->asid = from->asid;
- to->msrpm_base_pa &= ~0x0fffULL;
- to->iopm_base_pa &= ~0x0fffULL;
-
#ifdef CONFIG_KVM_HYPERV
/* Hyper-V extensions (Enlightened VMCB) */
if (kvm_hv_hypercall_enabled(vcpu)) {
@@ -1836,7 +1839,6 @@ static void nested_copy_vmcb_cache_to_control(struct vmcb_control_area *dst,
dst->msrpm_base_pa = from->msrpm_base_pa;
dst->tsc_offset = from->tsc_offset;
dst->asid = from->asid;
- dst->tlb_ctl = from->tlb_ctl;
dst->erap_ctl = from->erap_ctl;
dst->int_ctl = from->int_ctl;
dst->int_vector = from->int_vector;
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 2fc25803d0c7..4bc69e0d7e0a 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -175,7 +175,6 @@ struct vmcb_ctrl_area_cached {
u64 msrpm_base_pa;
u64 tsc_offset;
u32 asid;
- u8 tlb_ctl;
u8 erap_ctl;
u32 int_ctl;
u32 int_vector;
--
2.52.0.457.g6b5491de43-goog