Message-ID: <DS0PR11MB6373E404A16BD8CC55128FDFDC172@DS0PR11MB6373.namprd11.prod.outlook.com>
Date: Thu, 25 Apr 2024 11:17:10 +0000
From: "Wang, Wei W" <wei.w.wang@...el.com>
To: Sean Christopherson <seanjc@...gle.com>, Paolo Bonzini
<pbonzini@...hat.com>
CC: "kvm@...r.kernel.org" <kvm@...r.kernel.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>
Subject: RE: [PATCH 0/4] KVM: x86: Collect host state snapshots into a struct
On Wednesday, April 24, 2024 6:15 AM, Sean Christopherson wrote:
> Add a global "kvm_host" structure to hold various host values, e.g. for EFER,
> XCR0, raw MAXPHYADDR etc., instead of having a bunch of one-off variables
> that inevitably need to be exported, or in the case of shadow_phys_bits, are
> buried in a random location and are awkward to use, leading to duplicate
> code.
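
If I'm reading the description right, the end state is roughly the sketch
below (field names are my guess from the text above; the actual patches are
authoritative):

struct kvm_host_values {
	u64 efer;		/* host EFER snapshot */
	u64 xcr0;		/* host XCR0 snapshot */
	u8  maxphyaddr;		/* raw MAXPHYADDR */
};
extern struct kvm_host_values kvm_host;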
Looks good. How about applying similar improvements to the module
parameters as well? I've converted the "enable_pmu" parameter as an example below:
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 77352a4abd87..a221ba7b546f 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -1013,7 +1013,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
union cpuid10_eax eax;
union cpuid10_edx edx;
- if (!enable_pmu || !static_cpu_has(X86_FEATURE_ARCH_PERFMON)) {
+ if (!kvm_caps.enable_pmu || !static_cpu_has(X86_FEATURE_ARCH_PERFMON)) {
entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
break;
}
@@ -1306,7 +1306,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
union cpuid_0x80000022_ebx ebx;
entry->ecx = entry->edx = 0;
- if (!enable_pmu || !kvm_cpu_cap_has(X86_FEATURE_PERFMON_V2)) {
+ if (!kvm_caps.enable_pmu || !kvm_cpu_cap_has(X86_FEATURE_PERFMON_V2)) {
entry->eax = entry->ebx;
break;
}
diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
index 4d52b0b539ba..7e359db64dbd 100644
--- a/arch/x86/kvm/pmu.h
+++ b/arch/x86/kvm/pmu.h
@@ -190,9 +190,9 @@ static inline void kvm_init_pmu_capability(const struct kvm_pmu_ops *pmu_ops)
* for hybrid PMUs until KVM gains a way to let userspace opt-in.
*/
if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU))
- enable_pmu = false;
+ kvm_caps.enable_pmu = false;
- if (enable_pmu) {
+ if (kvm_caps.enable_pmu) {
perf_get_x86_pmu_capability(&kvm_pmu_cap);
/*
@@ -203,12 +203,12 @@ static inline void kvm_init_pmu_capability(const struct kvm_pmu_ops *pmu_ops)
*/
if (!kvm_pmu_cap.num_counters_gp ||
WARN_ON_ONCE(kvm_pmu_cap.num_counters_gp < min_nr_gp_ctrs))
- enable_pmu = false;
+ kvm_caps.enable_pmu = false;
else if (is_intel && !kvm_pmu_cap.version)
- enable_pmu = false;
+ kvm_caps.enable_pmu = false;
}
- if (!enable_pmu) {
+ if (!kvm_caps.enable_pmu) {
memset(&kvm_pmu_cap, 0, sizeof(kvm_pmu_cap));
return;
}
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 7ea0e7f13da4..4ed8c73f88e4 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -7869,7 +7869,7 @@ static __init u64 vmx_get_perf_capabilities(void)
u64 perf_cap = PMU_CAP_FW_WRITES;
u64 host_perf_cap = 0;
- if (!enable_pmu)
+ if (!kvm_caps.enable_pmu)
return 0;
if (boot_cpu_has(X86_FEATURE_PDCM))
@@ -7938,7 +7938,7 @@ static __init void vmx_set_cpu_caps(void)
kvm_cpu_cap_check_and_set(X86_FEATURE_DTES64);
}
- if (!enable_pmu)
+ if (!kvm_caps.enable_pmu)
kvm_cpu_cap_clear(X86_FEATURE_PDCM);
kvm_caps.supported_perf_cap = vmx_get_perf_capabilities();
@@ -8683,7 +8683,7 @@ static __init int hardware_setup(void)
if (pt_mode != PT_MODE_SYSTEM && pt_mode != PT_MODE_HOST_GUEST)
return -EINVAL;
- if (!enable_ept || !enable_pmu || !cpu_has_vmx_intel_pt())
+ if (!enable_ept || !kvm_caps.enable_pmu || !cpu_has_vmx_intel_pt())
pt_mode = PT_MODE_SYSTEM;
if (pt_mode == PT_MODE_HOST_GUEST)
vmx_init_ops.handle_intel_pt_intr = vmx_handle_intel_pt_intr;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index cdcda1bbf5a3..36d471a7af87 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -94,6 +94,7 @@
struct kvm_caps kvm_caps __read_mostly = {
.supported_mce_cap = MCG_CTL_P | MCG_SER_P,
+ .enable_pmu = true,
};
EXPORT_SYMBOL_GPL(kvm_caps);
@@ -192,9 +193,7 @@ int __read_mostly pi_inject_timer = -1;
module_param(pi_inject_timer, bint, 0644);
/* Enable/disable PMU virtualization */
-bool __read_mostly enable_pmu = true;
-EXPORT_SYMBOL_GPL(enable_pmu);
-module_param(enable_pmu, bool, 0444);
+module_param_named(enable_pmu, kvm_caps.enable_pmu, bool, 0444);
bool __read_mostly eager_page_split = true;
module_param(eager_page_split, bool, 0644);
@@ -4815,7 +4814,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
break;
}
case KVM_CAP_PMU_CAPABILITY:
- r = enable_pmu ? KVM_CAP_PMU_VALID_MASK : 0;
+ r = kvm_caps.enable_pmu ? KVM_CAP_PMU_VALID_MASK : 0;
break;
case KVM_CAP_DISABLE_QUIRKS2:
r = KVM_X86_VALID_QUIRKS;
@@ -6652,7 +6651,7 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
break;
case KVM_CAP_PMU_CAPABILITY:
r = -EINVAL;
- if (!enable_pmu || (cap->args[0] & ~KVM_CAP_PMU_VALID_MASK))
+ if (!kvm_caps.enable_pmu || (cap->args[0] & ~KVM_CAP_PMU_VALID_MASK))
break;
mutex_lock(&kvm->lock);
@@ -7438,7 +7437,7 @@ static void kvm_init_msr_lists(void)
for (i = 0; i < ARRAY_SIZE(msrs_to_save_base); i++)
kvm_probe_msr_to_save(msrs_to_save_base[i]);
- if (enable_pmu) {
+ if (kvm_caps.enable_pmu) {
for (i = 0; i < ARRAY_SIZE(msrs_to_save_pmu); i++)
kvm_probe_msr_to_save(msrs_to_save_pmu[i]);
}
@@ -12555,7 +12554,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
kvm->arch.default_tsc_khz = max_tsc_khz ? : tsc_khz;
kvm->arch.guest_can_read_msr_platform_info = true;
- kvm->arch.enable_pmu = enable_pmu;
+ kvm->arch.enable_pmu = kvm_caps.enable_pmu;
#if IS_ENABLED(CONFIG_HYPERV)
spin_lock_init(&kvm->arch.hv_root_tdp_lock);
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 102754dc85bc..c4d99338aaa1 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -29,6 +29,9 @@ struct kvm_caps {
u64 supported_xcr0;
u64 supported_xss;
u64 supported_perf_cap;
+
+ /* KVM module parameters */
+ bool enable_pmu;
};
struct kvm_host_values {
@@ -340,8 +343,6 @@ fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu);
extern struct kvm_caps kvm_caps;
extern struct kvm_host_values kvm_host;
-extern bool enable_pmu;
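
One note on the module_param_named() conversion, in case it helps review: it
only changes which variable backs the parameter, so the user-visible name and
the 0444 permissions stay the same, i.e. "modprobe kvm enable_pmu=0" and
/sys/module/kvm/parameters/enable_pmu should behave exactly as before, with
only the storage moving into kvm_caps. At least that is my reading of
module_param_named().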