Message-ID: <3d79f3816536abb0e81a19aac60bb5a213b67803.1770116051.git.isaku.yamahata@intel.com>
Date: Tue,  3 Feb 2026 10:16:52 -0800
From: isaku.yamahata@...el.com
To: kvm@...r.kernel.org
Cc: isaku.yamahata@...el.com,
	isaku.yamahata@...il.com,
	Paolo Bonzini <pbonzini@...hat.com>,
	Sean Christopherson <seanjc@...gle.com>,
	linux-kernel@...r.kernel.org
Subject: [PATCH 09/32] KVM: nVMX: Pass struct msr_data to VMX MSRs emulation

From: Isaku Yamahata <isaku.yamahata@...el.com>

Pass struct msr_data, which carries the host_initiated member in addition
to msr_index and data, to vmx_set/get_vmx_msr().

Access to the processor-based tertiary controls depends on whether the
host or the guest initiated the operation. For host-initiated access
(KVM_GET_MSRS, KVM_SET_MSRS), allow access if the host supports
processor-based tertiary controls. For guest-initiated access (emulation
of guest RDMSR/WRMSR), allow or disallow access based on whether the
tertiary controls are advertised to the guest (guest processor-based
controls high & CPU_BASED_ACTIVATE_TERTIARY_CONTROLS). Prepare to add
that check.

No functional change intended.

Signed-off-by: Isaku Yamahata <isaku.yamahata@...el.com>
---
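Note (not part of the patch): as a rough illustration of the follow-up check
described above, a later patch could gate guest-initiated access on the
advertised tertiary-controls bit along these lines. The helper name and its
placement are assumptions for illustration only; struct msr_data.host_initiated,
struct nested_vmx_msrs.procbased_ctls_high, and
CPU_BASED_ACTIVATE_TERTIARY_CONTROLS are the existing pieces it builds on.

/*
 * Hypothetical sketch (not in this series): how access to the
 * tertiary-controls MSR might be gated once host_initiated is available.
 */
static bool nested_vmx_tertiary_ctls_accessible(struct nested_vmx_msrs *msrs,
						struct msr_data *msr_info)
{
	/* Host-initiated (KVM_GET_MSRS/KVM_SET_MSRS); host-support check elided. */
	if (msr_info->host_initiated)
		return true;

	/* Guest RDMSR/WRMSR: allow only if advertised to the guest. */
	return msrs->procbased_ctls_high & CPU_BASED_ACTIVATE_TERTIARY_CONTROLS;
}

The actual check belongs to a later patch; this patch only plumbs
host_initiated through to the VMX MSR emulation.
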
 arch/x86/kvm/vmx/nested.c |  9 +++++++--
 arch/x86/kvm/vmx/nested.h |  4 ++--
 arch/x86/kvm/vmx/vmx.c    | 18 ++++++++++++++----
 3 files changed, 23 insertions(+), 8 deletions(-)

diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 77521e37cfc6..b1b8f0c88ca5 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -1479,9 +1479,11 @@ static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
  *
  * Returns 0 on success, non-0 otherwise.
  */
-int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
+int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	u32 msr_index = msr_info->index;
+	u64 data = msr_info->data;
 
 	/*
 	 * Don't allow changes to the VMX capability MSRs while the vCPU
@@ -1544,8 +1546,11 @@ int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 }
 
 /* Returns 0 on success, non-0 otherwise. */
-int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata)
+int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, struct msr_data *msr_info)
 {
+	u32 msr_index = msr_info->index;
+	u64 *pdata = &msr_info->data;
+
 	switch (msr_index) {
 	case MSR_IA32_VMX_BASIC:
 		*pdata = msrs->basic;
diff --git a/arch/x86/kvm/vmx/nested.h b/arch/x86/kvm/vmx/nested.h
index 983484d42ebf..f51d7cac8a58 100644
--- a/arch/x86/kvm/vmx/nested.h
+++ b/arch/x86/kvm/vmx/nested.h
@@ -47,8 +47,8 @@ static inline void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
 }
 
 void nested_sync_vmcs12_to_shadow(struct kvm_vcpu *vcpu);
-int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
-int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata);
+int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);
+int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, struct msr_data *msr);
 int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
 			u32 vmx_instruction_info, bool wr, int len, gva_t *ret);
 void nested_mark_vmcs12_pages_dirty(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index c625c46658dc..dc6b6659a093 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -2065,11 +2065,22 @@ static inline bool is_vmx_feature_control_msr_valid(struct vcpu_vmx *vmx,
 
 int vmx_get_feature_msr(u32 msr, u64 *data)
 {
+	struct msr_data msr_info;
+	int r;
+
 	switch (msr) {
 	case KVM_FIRST_EMULATED_VMX_MSR ... KVM_LAST_EMULATED_VMX_MSR:
 		if (!nested)
 			return 1;
-		return vmx_get_vmx_msr(&vmcs_config.nested, msr, data);
+
+		msr_info = (struct msr_data) {
+			.index = msr,
+			.host_initiated = true,
+		};
+		r = vmx_get_vmx_msr(&vmcs_config.nested, &msr_info);
+		if (!r)
+			*data = msr_info.data;
+		return r;
 	default:
 		return KVM_MSR_RET_UNSUPPORTED;
 	}
@@ -2154,8 +2165,7 @@ int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case KVM_FIRST_EMULATED_VMX_MSR ... KVM_LAST_EMULATED_VMX_MSR:
 		if (!guest_cpu_cap_has(vcpu, X86_FEATURE_VMX))
 			return 1;
-		if (vmx_get_vmx_msr(&vmx->nested.msrs, msr_info->index,
-				    &msr_info->data))
+		if (vmx_get_vmx_msr(&vmx->nested.msrs, msr_info))
 			return 1;
 #ifdef CONFIG_KVM_HYPERV
 		/*
@@ -2482,7 +2492,7 @@ int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 			return 1; /* they are read-only */
 		if (!guest_cpu_cap_has(vcpu, X86_FEATURE_VMX))
 			return 1;
-		return vmx_set_vmx_msr(vcpu, msr_index, data);
+		return vmx_set_vmx_msr(vcpu, msr_info);
 	case MSR_IA32_RTIT_CTL:
 		if (!vmx_pt_mode_is_host_guest() ||
 			vmx_rtit_ctl_check(vcpu, data) ||
-- 
2.45.2

