Message-ID: <163724608104.11128.12039087886235952211.tip-bot2@tip-bot2>
Date: Thu, 18 Nov 2021 14:34:41 -0000
From: "tip-bot2 for Sean Christopherson" <tip-bot2@...utronix.de>
To: linux-tip-commits@...r.kernel.org
Cc: Sean Christopherson <seanjc@...gle.com>,
"Peter Zijlstra (Intel)" <peterz@...radead.org>,
Paolo Bonzini <pbonzini@...hat.com>, x86@...nel.org,
linux-kernel@...r.kernel.org
Subject: [tip: perf/core] KVM: x86: More precisely identify NMI from guest when handling PMI

The following commit has been merged into the perf/core branch of tip:

Commit-ID: db215756ae5970aec8ad50257d2eb1678b552b91
Gitweb: https://git.kernel.org/tip/db215756ae5970aec8ad50257d2eb1678b552b91
Author: Sean Christopherson <seanjc@...gle.com>
AuthorDate: Thu, 11 Nov 2021 02:07:32
Committer: Peter Zijlstra <peterz@...radead.org>
CommitterDate: Wed, 17 Nov 2021 14:49:09 +01:00

KVM: x86: More precisely identify NMI from guest when handling PMI

Differentiate between IRQ and NMI for KVM's PMC overflow callback, which
was originally invoked in response to an NMI that arrived while the guest
was running, but was inadvertently changed to fire on IRQs as well when
support for perf without PMU/NMI was added to KVM. In practice, this
should be a nop as the PMC overflow callback shouldn't be reached, but
it's a cheap and easy fix that also better documents the situation.

Note, this also doesn't completely prevent false positives if perf
somehow ends up calling into KVM, e.g. an NMI can arrive in the host after
KVM sets its flag.
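
To make the flag protocol concrete, here is a minimal, self-contained
user-space C sketch of the pattern the patch implements. The struct and
helpers below are invented stand-ins for vcpu->arch.handling_intr_from_guest
and the kvm_before_interrupt()/kvm_after_interrupt() helpers, not the
kernel code itself:

#include <stdio.h>

enum kvm_intr_type {
	/* Values are arbitrary, but must be non-zero. */
	KVM_HANDLING_IRQ = 1,
	KVM_HANDLING_NMI,
};

/* Invented stand-in for the relevant field of struct kvm_vcpu_arch. */
struct vcpu_model {
	unsigned char handling_intr_from_guest;
};

/* Record *why* the host is handling an interrupt that arrived in the guest. */
static void before_interrupt(struct vcpu_model *vcpu, enum kvm_intr_type intr)
{
	vcpu->handling_intr_from_guest = (unsigned char)intr;
}

static void after_interrupt(struct vcpu_model *vcpu)
{
	vcpu->handling_intr_from_guest = 0;
}

/*
 * The PMC overflow path should treat the event as a guest NMI only when
 * the recorded reason is an NMI, not merely "some interrupt".
 */
static int handling_nmi_from_guest(struct vcpu_model *vcpu)
{
	return vcpu->handling_intr_from_guest == KVM_HANDLING_NMI;
}

int main(void)
{
	struct vcpu_model vcpu = { 0 };

	before_interrupt(&vcpu, KVM_HANDLING_IRQ);
	printf("IRQ window: NMI from guest? %d\n", handling_nmi_from_guest(&vcpu));
	after_interrupt(&vcpu);

	before_interrupt(&vcpu, KVM_HANDLING_NMI);
	printf("NMI window: NMI from guest? %d\n", handling_nmi_from_guest(&vcpu));
	after_interrupt(&vcpu);

	return 0;
}
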
Fixes: dd60d217062f ("KVM: x86: Fix perf timer mode IP reporting")
Signed-off-by: Sean Christopherson <seanjc@...gle.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
Reviewed-by: Paolo Bonzini <pbonzini@...hat.com>
Link: https://lore.kernel.org/r/20211111020738.2512932-12-seanjc@google.com
---
 arch/x86/kvm/svm/svm.c |  2 +-
 arch/x86/kvm/vmx/vmx.c |  4 +++-
 arch/x86/kvm/x86.c     |  2 +-
 arch/x86/kvm/x86.h     | 13 ++++++++++---
 4 files changed, 15 insertions(+), 6 deletions(-)
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 5630c24..b2f0c6c 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3931,7 +3931,7 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
 	}
 
 	if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
-		kvm_before_interrupt(vcpu);
+		kvm_before_interrupt(vcpu, KVM_HANDLING_NMI);
 
 	kvm_load_host_xsave_state(vcpu);
 	stgi();
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 7d90c8d..a0c2497 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6317,7 +6317,9 @@ void vmx_do_interrupt_nmi_irqoff(unsigned long entry);
 static void handle_interrupt_nmi_irqoff(struct kvm_vcpu *vcpu,
 					unsigned long entry)
 {
-	kvm_before_interrupt(vcpu);
+	bool is_nmi = entry == (unsigned long)asm_exc_nmi_noist;
+
+	kvm_before_interrupt(vcpu, is_nmi ? KVM_HANDLING_NMI : KVM_HANDLING_IRQ);
 	vmx_do_interrupt_nmi_irqoff(entry);
 	kvm_after_interrupt(vcpu);
 }
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index bb71e10..ab032ef 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -9896,7 +9896,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	 * interrupts on processors that implement an interrupt shadow, the
 	 * stat.exits increment will do nicely.
 	 */
-	kvm_before_interrupt(vcpu);
+	kvm_before_interrupt(vcpu, KVM_HANDLING_IRQ);
 	local_irq_enable();
 	++vcpu->stat.exits;
 	local_irq_disable();
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index d070043..f8d2c58 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -385,9 +385,16 @@ static inline bool kvm_cstate_in_guest(struct kvm *kvm)
 	return kvm->arch.cstate_in_guest;
 }
 
-static inline void kvm_before_interrupt(struct kvm_vcpu *vcpu)
+enum kvm_intr_type {
+	/* Values are arbitrary, but must be non-zero. */
+	KVM_HANDLING_IRQ = 1,
+	KVM_HANDLING_NMI,
+};
+
+static inline void kvm_before_interrupt(struct kvm_vcpu *vcpu,
+					enum kvm_intr_type intr)
 {
-	WRITE_ONCE(vcpu->arch.handling_intr_from_guest, 1);
+	WRITE_ONCE(vcpu->arch.handling_intr_from_guest, (u8)intr);
 }
 
 static inline void kvm_after_interrupt(struct kvm_vcpu *vcpu)
@@ -397,7 +404,7 @@ static inline void kvm_after_interrupt(struct kvm_vcpu *vcpu)
 
 static inline bool kvm_handling_nmi_from_guest(struct kvm_vcpu *vcpu)
 {
-	return !!vcpu->arch.handling_intr_from_guest;
+	return vcpu->arch.handling_intr_from_guest == KVM_HANDLING_NMI;
 }
 
 static inline bool kvm_pat_valid(u64 data)
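
A note on the vmx.c hunk above: the NMI/IRQ distinction there is made by
comparing the entry address against the noist NMI entry point. The
following self-contained C sketch models that dispatch-by-entry-address
idea; the two entry functions are invented placeholders, not the kernel's
asm_exc_nmi_noist:

#include <stdio.h>

/* Invented placeholders for the kernel's IDT entry points. */
static void nmi_entry(void) { }
static void irq_entry(void) { }

/*
 * Classify the event by its entry address, mirroring in spirit the vmx
 * check "entry == (unsigned long)asm_exc_nmi_noist".
 */
static const char *intr_kind(void (*entry)(void))
{
	return entry == nmi_entry ? "KVM_HANDLING_NMI" : "KVM_HANDLING_IRQ";
}

int main(void)
{
	printf("nmi_entry -> %s\n", intr_kind(nmi_entry));
	printf("irq_entry -> %s\n", intr_kind(irq_entry));
	return 0;
}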