Message-Id: <20220207155447.840194-28-mlevitsk@redhat.com>
Date: Mon, 7 Feb 2022 17:54:44 +0200
From: Maxim Levitsky <mlevitsk@...hat.com>
To: kvm@...r.kernel.org
Cc: Tony Luck <tony.luck@...el.com>,
"Chang S. Bae" <chang.seok.bae@...el.com>,
Thomas Gleixner <tglx@...utronix.de>,
Wanpeng Li <wanpengli@...cent.com>,
Ingo Molnar <mingo@...hat.com>,
Vitaly Kuznetsov <vkuznets@...hat.com>,
Pawan Gupta <pawan.kumar.gupta@...ux.intel.com>,
Dave Hansen <dave.hansen@...ux.intel.com>,
Paolo Bonzini <pbonzini@...hat.com>,
linux-kernel@...r.kernel.org,
Rodrigo Vivi <rodrigo.vivi@...el.com>,
"H. Peter Anvin" <hpa@...or.com>,
intel-gvt-dev@...ts.freedesktop.org,
Joonas Lahtinen <joonas.lahtinen@...ux.intel.com>,
Joerg Roedel <joro@...tes.org>,
Sean Christopherson <seanjc@...gle.com>,
David Airlie <airlied@...ux.ie>,
Zhi Wang <zhi.a.wang@...el.com>,
Brijesh Singh <brijesh.singh@....com>,
Jim Mattson <jmattson@...gle.com>, x86@...nel.org,
Daniel Vetter <daniel@...ll.ch>,
Borislav Petkov <bp@...en8.de>,
Zhenyu Wang <zhenyuw@...ux.intel.com>,
Kan Liang <kan.liang@...ux.intel.com>,
Jani Nikula <jani.nikula@...ux.intel.com>,
Maxim Levitsky <mlevitsk@...hat.com>,
Borislav Petkov <bp@...e.de>
Subject: [PATCH RESEND 27/30] KVM: x86: add force_intercept_exceptions_mask

This parameter will be used by the VMX and SVM code to force
interception of a set of exceptions, given by a bitmask, for
guest debug and/or KVM debug.

This is based on an idea first shown here:
https://patchwork.kernel.org/project/kvm/patch/20160301192822.GD22677@pd.tnic/
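For example (illustrative only: the *_VECTOR macros below come from
arch/x86/include/asm/kvm_host.h, and the sysfs path follows from the
module parameter definition in this patch), forcing interception of
#UD and #PF means setting bits 6 and 14 of the mask:

	/* Bit N of the mask corresponds to exception vector N. */
	u32 mask = BIT(UD_VECTOR) | BIT(PF_VECTOR);	/* == 0x4040 */

	/*
	 * The parameter is writable at runtime, but its value is
	 * snapshotted when a VM is created, so set it beforehand:
	 * echo 0x4040 > /sys/module/kvm/parameters/force_intercept_exceptions_mask
	 */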
CC: Borislav Petkov <bp@...e.de>
Signed-off-by: Maxim Levitsky <mlevitsk@...hat.com>
---
 arch/x86/include/asm/kvm_host.h | 7 +++++++
 arch/x86/kvm/x86.c              | 9 +++++++++
 arch/x86/kvm/x86.h              | 5 +++++
 3 files changed, 21 insertions(+)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 428ab1cc7dd34..fa498612839a0 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1168,6 +1168,13 @@ struct kvm_arch {
struct kvm_pmu_event_filter __rcu *pmu_event_filter;
struct task_struct *nx_lpage_recovery_thread;

+ /*
+ * Bitmask of exceptions that KVM will intercept and forward to the
+ * guest, even when interception is not needed for normal operation.
+ * This is a debug feature.
+ */
+ u32 force_intercept_exceptions_bitmask;
+
#ifdef CONFIG_X86_64
/*
* Whether the TDP MMU is enabled for this VM. This contains a
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 63d84c373e465..202c34697852f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -193,6 +193,13 @@ module_param(enable_pmu, bool, 0444);
bool __read_mostly eager_page_split = true;
module_param(eager_page_split, bool, 0644);

+/*
+ * force_intercept_exceptions_mask is a writable module parameter whose
+ * value is snapshotted when a VM is created.
+ */
+static uint force_intercept_exceptions_mask;
+module_param(force_intercept_exceptions_mask, uint, 0644);
+
/*
* Restoring the host value for MSRs that are only consumed when running in
* usermode, e.g. SYSCALL MSRs and TSC_AUX, can be deferred until the CPU
@@ -11646,6 +11653,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);

kvm->arch.guest_can_read_msr_platform_info = true;
+ kvm->arch.force_intercept_exceptions_bitmask = force_intercept_exceptions_mask;

#if IS_ENABLED(CONFIG_HYPERV)
spin_lock_init(&kvm->arch.hv_root_tdp_lock);
@@ -12886,6 +12894,7 @@ int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, unsigned int size,
}
EXPORT_SYMBOL_GPL(kvm_sev_es_string_io);

+
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_entry);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_fast_mmio);
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index e9b303b21f173..34f96f483c7e5 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -91,6 +91,11 @@ static inline bool kvm_exception_is_soft(unsigned int nr)
return (nr == BP_VECTOR) || (nr == OF_VECTOR);
}

+static inline bool kvm_is_exception_force_intercepted(struct kvm *kvm, int exception)
+{
+ return kvm->arch.force_intercept_exceptions_bitmask & BIT(exception);
+}
+
static inline bool is_protmode(struct kvm_vcpu *vcpu)
{
return kvm_read_cr0_bits(vcpu, X86_CR0_PE);
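As a sketch of how the VMX/SVM patches later in this series are
expected to consume the new helper (fold_forced_intercepts() is a
hypothetical name, not code from this series), a vendor-side exception
bitmap update could do:

	/*
	 * Sketch only: OR the forced vectors into a VMX-style exception
	 * bitmap, where bit N intercepts vector N.
	 */
	static u32 fold_forced_intercepts(struct kvm *kvm, u32 exception_bitmap)
	{
		int nr;

		for (nr = 0; nr < 32; nr++)
			if (kvm_is_exception_force_intercepted(kvm, nr))
				exception_bitmap |= BIT(nr);

		return exception_bitmap;
	}

Since kvm_arch_init_vm() snapshots the module parameter into
kvm->arch.force_intercept_exceptions_bitmask, changing the parameter
only affects VMs created afterwards.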
--
2.26.3