Message-ID: <93344a29-0231-0e38-0951-1519ff6979a8@redhat.com>
Date:   Fri, 26 Nov 2021 18:39:49 +0100
From:   Paolo Bonzini <pbonzini@...hat.com>
To:     Hou Wenlong <houwenlong93@...ux.alibaba.com>, kvm@...r.kernel.org
Cc:     Sean Christopherson <seanjc@...gle.com>,
        Vitaly Kuznetsov <vkuznets@...hat.com>,
        Wanpeng Li <wanpengli@...cent.com>,
        Jim Mattson <jmattson@...gle.com>,
        Joerg Roedel <joro@...tes.org>,
        Thomas Gleixner <tglx@...utronix.de>,
        Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
        x86@...nel.org, "H. Peter Anvin" <hpa@...or.com>,
        linux-kernel@...r.kernel.org
Subject: Re: [PATCH v2 3/4] KVM: x86: Use different callback if msr access
 comes from the emulator

On 11/2/21 10:15, Hou Wenlong wrote:
> If an MSR access triggers an exit to userspace, the
> complete_userspace_io callback skips the instruction via the
> vendor callback, i.e. kvm_skip_emulated_instruction(). However,
> when the MSR access comes from the emulator, e.g. when
> kvm.force_emulation_prefix is enabled and the guest uses
> rdmsr/wrmsr with the "kvm" prefix, VM_EXIT_INSTRUCTION_LEN in the
> VMCS is invalid and kvm_emulate_instruction() should be used to
> skip the instruction instead.
> 
> As Sean noted, unlike the previous case, there is no #UD if
> unrestricted guest is disabled and the guest accesses an MSR in
> Big Real Mode. So the correct way to fix this is to attach a
> different callback when the MSR access comes from the emulator.
> 
> Suggested-by: Sean Christopherson <seanjc@...gle.com>
> Signed-off-by: Hou Wenlong <houwenlong93@...ux.alibaba.com>
> ---

Queued with a small tweak: the body of complete_emulated_msr_access is
really a version of kvm_complete_insn_gp for emulated instructions, so
I factored it out into complete_emulated_insn_gp, which takes an err
argument.

I also renamed __complete_emulated_rdmsr to complete_userspace_rdmsr,
since it applies to the "fast" case as well.
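
For reference, this is how the completion callbacks end up wired after
the tweak (a summary of the diff below, not additional code):

  Emulator path:
    complete_emulated_msr_access() -> complete_emulated_insn_gp(vcpu, err)
    complete_emulated_rdmsr()      -> complete_userspace_rdmsr(), then
                                      complete_emulated_msr_access()

  Fast path:
    complete_fast_msr_access()     -> kvm_x86_complete_emulated_msr(vcpu, err)
    complete_fast_rdmsr()          -> complete_userspace_rdmsr(), then
                                      complete_fast_msr_access()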

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index e651ff56b4ad..3928c96d28be 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -711,6 +711,17 @@ int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
  }
  EXPORT_SYMBOL_GPL(kvm_complete_insn_gp);
  
+static int complete_emulated_insn_gp(struct kvm_vcpu *vcpu, int err)
+{
+	if (err) {
+		kvm_inject_gp(vcpu, 0);
+		return 1;
+	}
+
+	return kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE | EMULTYPE_SKIP |
+				       EMULTYPE_COMPLETE_USER_EXIT);
+}
+
  void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
  {
  	++vcpu->stat.pf_guest;
@@ -1816,7 +1827,7 @@ int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data)
  }
  EXPORT_SYMBOL_GPL(kvm_set_msr);
  
-static void __complete_emulated_rdmsr(struct kvm_vcpu *vcpu)
+static void complete_userspace_rdmsr(struct kvm_vcpu *vcpu)
  {
  	if (!vcpu->run->msr.error) {
  		kvm_rax_write(vcpu, (u32)vcpu->run->msr.data);
@@ -1826,37 +1837,24 @@ static void __complete_emulated_rdmsr(struct kvm_vcpu *vcpu)
  
  static int complete_emulated_msr_access(struct kvm_vcpu *vcpu)
  {
-	if (vcpu->run->msr.error) {
-		kvm_inject_gp(vcpu, 0);
-		return 1;
-	}
-
-	return kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE | EMULTYPE_SKIP |
-				       EMULTYPE_COMPLETE_USER_EXIT);
+	return complete_emulated_insn_gp(vcpu, vcpu->run->msr.error);
  }
  
  static int complete_emulated_rdmsr(struct kvm_vcpu *vcpu)
  {
-	__complete_emulated_rdmsr(vcpu);
-
+	complete_userspace_rdmsr(vcpu);
  	return complete_emulated_msr_access(vcpu);
  }
  
-static int complete_emulated_wrmsr(struct kvm_vcpu *vcpu)
+static int complete_fast_msr_access(struct kvm_vcpu *vcpu)
  {
-	return complete_emulated_msr_access(vcpu);
+	return static_call(kvm_x86_complete_emulated_msr)(vcpu, vcpu->run->msr.error);
  }
  
  static int complete_fast_rdmsr(struct kvm_vcpu *vcpu)
  {
-	__complete_emulated_rdmsr(vcpu);
-
-	return static_call(kvm_x86_complete_emulated_msr)(vcpu, vcpu->run->msr.error);
-}
-
-static int complete_fast_wrmsr(struct kvm_vcpu *vcpu)
-{
-	return static_call(kvm_x86_complete_emulated_msr)(vcpu, vcpu->run->msr.error);
+	complete_userspace_rdmsr(vcpu);
+	return complete_fast_msr_access(vcpu);
  }
  
  static u64 kvm_msr_reason(int r)
@@ -1931,7 +1929,7 @@ int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu)
  	} else {
  		/* MSR write failed? See if we should ask user space */
  		if (kvm_msr_user_space(vcpu, ecx, KVM_EXIT_X86_WRMSR, data,
-				       complete_fast_wrmsr, r))
+				       complete_fast_msr_access, r))
  			return 0;
  		/* Signal all other negative errors to userspace */
  		if (r < 0)
@@ -7429,7 +7427,7 @@ static int emulator_set_msr(struct x86_emulate_ctxt *ctxt,
  	r = kvm_set_msr(vcpu, msr_index, data);
  
  	if (r && kvm_msr_user_space(vcpu, msr_index, KVM_EXIT_X86_WRMSR, data,
-				    complete_emulated_wrmsr, r)) {
+				    complete_emulated_msr_access, r)) {
  		/* Bounce to user space */
  		return X86EMUL_IO_NEEDED;
  	}
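
For completeness, the emulator path fixed here can be exercised from a
guest when the host runs with kvm.force_emulation_prefix=1. A minimal
guest-side sketch, using the forced-emulation prefix bytes from the KVM
selftests (the fep_rdmsr helper is made up for illustration):

/*
 * Prefix that makes KVM emulate the next instruction when
 * kvm.force_emulation_prefix is enabled on the host (same bytes as the
 * KVM selftests' KVM_FEP macro).
 */
#define KVM_FEP "ud2; .byte 'k', 'v', 'm';"

/*
 * RDMSR forced through the emulator: if the access exits to userspace,
 * completion goes through complete_emulated_rdmsr() above rather than
 * the fast path.
 */
static inline unsigned long long fep_rdmsr(unsigned int msr)
{
	unsigned int lo, hi;

	asm volatile(KVM_FEP "rdmsr" : "=a"(lo), "=d"(hi) : "c"(msr));
	return ((unsigned long long)hi << 32) | lo;
}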
