Message-Id: <20220531175450.295552-3-pbonzini@redhat.com>
Date: Tue, 31 May 2022 13:54:50 -0400
From: Paolo Bonzini <pbonzini@...hat.com>
To: linux-kernel@...r.kernel.org, kvm@...r.kernel.org
Cc: likexu@...cent.com
Subject: [PATCH 2/2] KVM: x86: always allow host-initiated writes to PMU MSRs

Whenever an MSR is part of KVM_GET_MSR_INDEX_LIST, it must always be
retrievable and settable with KVM_GET_MSR and KVM_SET_MSR. Accept
the PMU MSRs unconditionally in intel_is_valid_msr when the access is
host-initiated.

Signed-off-by: Paolo Bonzini <pbonzini@...hat.com>
---
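For context, an illustrative sketch of the userspace pattern that
relies on this invariant (the helper names are made up, not taken from
any real VMM): during save/restore, VMMs such as QEMU fetch
KVM_GET_MSR_INDEX_LIST once and then read and write back every listed
MSR without filtering, so any listed MSR that rejects a host-initiated
access breaks migration.

#include <linux/kvm.h>
#include <stdlib.h>
#include <sys/ioctl.h>

/* Save every MSR that KVM_GET_MSR_INDEX_LIST reports. */
static struct kvm_msrs *save_all_msrs(int kvm_fd, int vcpu_fd)
{
	struct kvm_msr_list *list;
	struct kvm_msrs *msrs;
	__u32 i, n;

	/* With nmsrs == 0 the ioctl fails with E2BIG and fills in nmsrs. */
	list = calloc(1, sizeof(*list));
	ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, list);
	n = list->nmsrs;
	list = realloc(list, sizeof(*list) + n * sizeof(__u32));
	if (ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, list) < 0)
		exit(1);

	msrs = calloc(1, sizeof(*msrs) + n * sizeof(struct kvm_msr_entry));
	msrs->nmsrs = n;
	for (i = 0; i < n; i++)
		msrs->entries[i].index = list->indices[i];

	/* Host-initiated reads: every listed index must succeed. */
	if (ioctl(vcpu_fd, KVM_GET_MSRS, msrs) != (int)n)
		exit(1);
	free(list);
	return msrs;
}

/* Restore leg: host-initiated writes must likewise be accepted. */
static void restore_all_msrs(int vcpu_fd, struct kvm_msrs *msrs)
{
	if (ioctl(vcpu_fd, KVM_SET_MSRS, msrs) != (int)msrs->nmsrs)
		exit(1);
}
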
arch/x86/kvm/pmu.c | 4 ++--
arch/x86/kvm/pmu.h | 4 ++--
arch/x86/kvm/svm/pmu.c | 2 +-
arch/x86/kvm/vmx/pmu_intel.c | 27 +++++++++++++++++----------
arch/x86/kvm/x86.c | 10 +++++-----
5 files changed, 27 insertions(+), 20 deletions(-)
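
For reference, a condensed (not verbatim) look at where the flag
originates in arch/x86/kvm/x86.c: __kvm_set_msr() records it in
struct msr_data, the KVM_SET_MSRS ioctl path passes true, and guest
WRMSR emulation passes false, which is what lets the code below pass
msr_info->host_initiated down to the vendor callbacks.

static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data,
			 bool host_initiated)
{
	struct msr_data msr;

	msr.data = data;
	msr.index = index;
	msr.host_initiated = host_initiated;
	return static_call(kvm_x86_set_msr)(vcpu, &msr);
}

/* KVM_SET_MSRS ioctl -> do_set_msr()  -> host_initiated == true  */
/* guest WRMSR exit   -> kvm_set_msr() -> host_initiated == false */
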
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index a2eaae85d97b..c6e57367f009 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -455,10 +455,10 @@ void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
}
}
-bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
+bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr, bool host_initiated)
{
return static_call(kvm_x86_pmu_msr_idx_to_pmc)(vcpu, msr) ||
- static_call(kvm_x86_pmu_is_valid_msr)(vcpu, msr);
+ static_call(kvm_x86_pmu_is_valid_msr)(vcpu, msr, host_initiated);
}
static void kvm_pmu_mark_pmc_in_use(struct kvm_vcpu *vcpu, u32 msr)
diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
index 7824bdd8626e..cea52d1bcc76 100644
--- a/arch/x86/kvm/pmu.h
+++ b/arch/x86/kvm/pmu.h
@@ -29,7 +29,7 @@ struct kvm_pmu_ops {
unsigned int idx, u64 *mask);
struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, u32 msr);
bool (*is_valid_rdpmc_ecx)(struct kvm_vcpu *vcpu, unsigned int idx);
- bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr);
+ bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr, bool host_initiated);
int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
void (*refresh)(struct kvm_vcpu *vcpu);
@@ -186,7 +186,7 @@ void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu);
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu);
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
bool kvm_pmu_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx);
-bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr);
+bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr, bool host_initiated);
int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
void kvm_pmu_refresh(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
index 136039fc6d01..0e5784371ac0 100644
--- a/arch/x86/kvm/svm/pmu.c
+++ b/arch/x86/kvm/svm/pmu.c
@@ -229,7 +229,7 @@ static struct kvm_pmc *amd_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
return &counters[idx];
}
-static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
+static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr, bool host_initiated)
{
/* All MSRs refer to exactly one PMC, so msr_idx_to_pmc is enough. */
return false;
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index 66496cb41494..c8c3f55630ea 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -207,38 +207,45 @@ static bool intel_pmu_is_valid_lbr_msr(struct kvm_vcpu *vcpu, u32 index)
return false;
}
-static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
+static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr, bool host_initiated)
{
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
u64 perf_capabilities = vcpu->arch.perf_capabilities;
- int ret;
switch (msr) {
case MSR_CORE_PERF_FIXED_CTR_CTRL:
case MSR_CORE_PERF_GLOBAL_STATUS:
case MSR_CORE_PERF_GLOBAL_CTRL:
case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
- ret = pmu->version > 1;
+ if (host_initiated)
+ return true;
+ return pmu->version > 1;
break;
case MSR_IA32_PEBS_ENABLE:
- ret = perf_capabilities & PERF_CAP_PEBS_FORMAT;
+ if (host_initiated)
+ return true;
+ return perf_capabilities & PERF_CAP_PEBS_FORMAT;
break;
case MSR_IA32_DS_AREA:
- ret = guest_cpuid_has(vcpu, X86_FEATURE_DS);
+ if (host_initiated)
+ return true;
+ return guest_cpuid_has(vcpu, X86_FEATURE_DS);
break;
case MSR_PEBS_DATA_CFG:
- ret = (perf_capabilities & PERF_CAP_PEBS_BASELINE) &&
+ if (host_initiated)
+ return true;
+ return (perf_capabilities & PERF_CAP_PEBS_BASELINE) &&
((perf_capabilities & PERF_CAP_PEBS_FORMAT) > 3);
break;
default:
- ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
+ if (host_initiated)
+ return true;
+ return get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) ||
get_fixed_pmc(pmu, msr) || get_fw_gp_pmc(pmu, msr) ||
intel_pmu_is_valid_lbr_msr(vcpu, msr);
break;
}
-
- return ret;
}
static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
@@ -688,7 +695,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
INTEL_PMC_MAX_GENERIC, pmu->nr_arch_fixed_counters);
nested_vmx_pmu_refresh(vcpu,
- intel_is_valid_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL));
+ intel_is_valid_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL, false));
if (cpuid_model_is_consistent(vcpu)) {
x86_perf_get_lbr(&lbr_desc->records);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index a291236b4695..7460b9a77d9a 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3725,7 +3725,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
fallthrough;
case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL1:
- if (kvm_pmu_is_valid_msr(vcpu, msr))
+ if (kvm_pmu_is_valid_msr(vcpu, msr, msr_info->host_initiated))
return kvm_pmu_set_msr(vcpu, msr_info);
if (pr || data != 0)
@@ -3808,7 +3808,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
break;
#endif
default:
- if (kvm_pmu_is_valid_msr(vcpu, msr))
+ if (kvm_pmu_is_valid_msr(vcpu, msr, msr_info->host_initiated))
return kvm_pmu_set_msr(vcpu, msr_info);
return KVM_MSR_RET_INVALID;
}
@@ -3888,7 +3888,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
msr_info->data = 0;
break;
case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5:
- if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
+ if (kvm_pmu_is_valid_msr(vcpu, msr_info->index, msr_info->host_initiated))
return kvm_pmu_get_msr(vcpu, msr_info);
if (!msr_info->host_initiated)
return 1;
@@ -3898,7 +3898,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1:
case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL1:
- if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
+ if (kvm_pmu_is_valid_msr(vcpu, msr_info->index, msr_info->host_initiated))
return kvm_pmu_get_msr(vcpu, msr_info);
msr_info->data = 0;
break;
@@ -4144,7 +4144,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
break;
#endif
default:
- if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
+ if (kvm_pmu_is_valid_msr(vcpu, msr_info->index, msr_info->host_initiated))
return kvm_pmu_get_msr(vcpu, msr_info);
return KVM_MSR_RET_INVALID;
}
--
2.31.1