Message-Id: <20220217083601.24829-1-likexu@tencent.com>
Date: Thu, 17 Feb 2022 16:36:00 +0800
From: Like Xu <like.xu.linux@...il.com>
To: Paolo Bonzini <pbonzini@...hat.com>
Cc: Sean Christopherson <seanjc@...gle.com>,
Eric Hankland <ehankland@...gle.com>,
Jim Mattson <jmattson@...gle.com>,
Wanpeng Li <wanpengli@...cent.com>,
Vitaly Kuznetsov <vkuznets@...hat.com>,
Joerg Roedel <joro@...tes.org>, kvm@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: [PATCH 1/2] KVM: x86/pmu: Extract check_pmu_event_filter() handling both GP and fixed counters
From: Like Xu <likexu@...cent.com>
Checking the kvm->arch.pmu_event_filter policy in both the gp and fixed
counter code paths is largely redundant. Extract the common parts into a
new helper, check_pmu_event_filter(), which reduces the code footprint,
improves readability, and keeps the SRCU logic for pmu_event_filter in
one place, making it easier to maintain.

Signed-off-by: Like Xu <likexu@...cent.com>
Reviewed-by: Wanpeng Li <wanpengli@...cent.com>
---
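For context (not part of the patch): the filter consulted by the new
helper is installed from userspace with the KVM_SET_PMU_EVENT_FILTER
vm ioctl. A minimal sketch of such a caller is below; the vm_fd
parameter and the deny_one_event() wrapper are illustrative names for
this example, not existing KVM code.

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdlib.h>

/* Deny a single raw event (event select + unit mask) for this VM. */
static int deny_one_event(int vm_fd, __u64 raw_event)
{
	struct kvm_pmu_event_filter *filter;
	int ret;

	/* One flexible-array slot for the single event; calloc() also
	 * zeroes the flags/pad fields, which must be zero.
	 */
	filter = calloc(1, sizeof(*filter) + sizeof(__u64));
	if (!filter)
		return -1;

	filter->action = KVM_PMU_EVENT_DENY;
	filter->nevents = 1;
	/* DENY plus an empty bitmap leaves all fixed counters allowed. */
	filter->fixed_counter_bitmap = 0;
	filter->events[0] = raw_event;

	ret = ioctl(vm_fd, KVM_SET_PMU_EVENT_FILTER, filter);
	free(filter);
	return ret;
}

Whichever counter type the guest then programs, both reprogram paths
funnel into the same check_pmu_event_filter() call under a single
srcu_dereference().
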
arch/x86/kvm/pmu.c | 61 +++++++++++++++++++++++++++-------------------
1 file changed, 36 insertions(+), 25 deletions(-)
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index f614f95acc6b..af2a3dd22dd9 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -179,12 +179,42 @@ static int cmp_u64(const void *a, const void *b)
 	return *(__u64 *)a - *(__u64 *)b;
 }
 
+static bool check_pmu_event_filter(struct kvm_pmc *pmc)
+{
+	struct kvm_pmu_event_filter *filter;
+	struct kvm *kvm = pmc->vcpu->kvm;
+	bool allow_event = true;
+	__u64 key;
+	int idx;
+
+	filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);
+	if (!filter)
+		goto out;
+
+	if (pmc_is_gp(pmc)) {
+		key = pmc->eventsel & AMD64_RAW_EVENT_MASK_NB;
+		if (bsearch(&key, filter->events, filter->nevents,
+			    sizeof(__u64), cmp_u64))
+			allow_event = filter->action == KVM_PMU_EVENT_ALLOW;
+		else
+			allow_event = filter->action == KVM_PMU_EVENT_DENY;
+	} else {
+		idx = pmc->idx - INTEL_PMC_IDX_FIXED;
+		if (filter->action == KVM_PMU_EVENT_DENY &&
+		    test_bit(idx, (ulong *)&filter->fixed_counter_bitmap))
+			allow_event = false;
+		if (filter->action == KVM_PMU_EVENT_ALLOW &&
+		    !test_bit(idx, (ulong *)&filter->fixed_counter_bitmap))
+			allow_event = false;
+	}
+
+out:
+	return allow_event;
+}
+
 void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
 {
 	unsigned config, type = PERF_TYPE_RAW;
-	struct kvm *kvm = pmc->vcpu->kvm;
-	struct kvm_pmu_event_filter *filter;
-	bool allow_event = true;
 
 	if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL)
 		printk_once("kvm pmu: pin control bit is ignored\n");
@@ -196,17 +226,7 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
 	if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_is_enabled(pmc))
 		return;
 
-	filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);
-	if (filter) {
-		__u64 key = eventsel & AMD64_RAW_EVENT_MASK_NB;
-
-		if (bsearch(&key, filter->events, filter->nevents,
-			    sizeof(__u64), cmp_u64))
-			allow_event = filter->action == KVM_PMU_EVENT_ALLOW;
-		else
-			allow_event = filter->action == KVM_PMU_EVENT_DENY;
-	}
-	if (!allow_event)
+	if (!check_pmu_event_filter(pmc))
 		return;
 
 	if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE |
@@ -241,23 +261,14 @@ void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int idx)
 {
 	unsigned en_field = ctrl & 0x3;
 	bool pmi = ctrl & 0x8;
-	struct kvm_pmu_event_filter *filter;
-	struct kvm *kvm = pmc->vcpu->kvm;
 
 	pmc_pause_counter(pmc);
 
 	if (!en_field || !pmc_is_enabled(pmc))
 		return;
 
-	filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);
-	if (filter) {
-		if (filter->action == KVM_PMU_EVENT_DENY &&
-		    test_bit(idx, (ulong *)&filter->fixed_counter_bitmap))
-			return;
-		if (filter->action == KVM_PMU_EVENT_ALLOW &&
-		    !test_bit(idx, (ulong *)&filter->fixed_counter_bitmap))
-			return;
-	}
+	if (!check_pmu_event_filter(pmc))
+		return;
 
 	if (pmc->current_config == (u64)ctrl && pmc_resume_counter(pmc))
 		return;
--
2.35.0