Date:   Fri, 20 Sep 2019 17:25:05 -0400
From:   Andrea Arcangeli <aarcange@...hat.com>
To:     Paolo Bonzini <pbonzini@...hat.com>
Cc:     Vitaly Kuznetsov <vkuznets@...hat.com>,
        "Dr. David Alan Gilbert" <dgilbert@...hat.com>,
        Marcelo Tosatti <mtosatti@...hat.com>,
        Peter Xu <peterx@...hat.com>, kvm@...r.kernel.org,
        linux-kernel@...r.kernel.org
Subject: [PATCH 13/17] KVM: monolithic: x86: drop the kvm_pmu_ops structure

Clean up now that the previous patches have left the kvm_pmu_ops structure fully unused.
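To illustrate the idea behind the removal, here is a standalone, hypothetical sketch (not kernel code; all names are made up for the example) of what dropping an ops-structure indirection in favor of a direct call looks like when only one implementation can ever be linked into the binary, as in the monolithic build:

	/*
	 * Standalone model of the pattern, compiled in user space.
	 * Names like vendor_pmu_refresh() are illustrative only.
	 */
	#include <stdio.h>

	struct vcpu { int id; };

	/* Before: vendor hooks reached through a pointer table. */
	struct pmu_ops {
		void (*refresh)(struct vcpu *vcpu);
	};

	static void vendor_pmu_refresh(struct vcpu *vcpu)
	{
		printf("refresh vcpu %d\n", vcpu->id);
	}

	static const struct pmu_ops vendor_ops = {
		.refresh = vendor_pmu_refresh,
	};
	static const struct pmu_ops *pmu_ops = &vendor_ops;

	static void pmu_refresh_indirect(struct vcpu *vcpu)
	{
		pmu_ops->refresh(vcpu);	/* extra load + indirect branch */
	}

	/*
	 * After: the single vendor implementation is compiled in, so the
	 * wrapper calls it directly and the ops structure becomes dead code.
	 */
	static void pmu_refresh_direct(struct vcpu *vcpu)
	{
		vendor_pmu_refresh(vcpu);	/* direct call */
	}

	int main(void)
	{
		struct vcpu v = { .id = 0 };

		pmu_refresh_indirect(&v);
		pmu_refresh_direct(&v);
		return 0;
	}

In the patch below, the same thing happens at the kvm_pmu_ops level: the vendor callbacks are already invoked directly (via the included pmu_*_ops.c files from the earlier patches in the series), so the intel_pmu_ops/amd_pmu_ops tables and the pmu_ops pointer in kvm_x86_ops can be deleted.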

Signed-off-by: Andrea Arcangeli <aarcange@...hat.com>
---
 arch/x86/include/asm/kvm_host.h |  3 ---
 arch/x86/kvm/pmu.h              | 19 -------------------
 arch/x86/kvm/pmu_amd.c          | 15 ---------------
 arch/x86/kvm/svm.c              |  1 -
 arch/x86/kvm/vmx/pmu_intel.c    | 15 ---------------
 arch/x86/kvm/vmx/vmx.c          |  2 --
 6 files changed, 55 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index bd5f4f900288..000d4e5a5664 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1160,9 +1160,6 @@ struct kvm_x86_ops {
 					   gfn_t offset, unsigned long mask);
 	int (*write_log_dirty)(struct kvm_vcpu *vcpu);
 
-	/* pmu operations of sub-arch */
-	const struct kvm_pmu_ops *pmu_ops;
-
 	/*
 	 * Architecture specific hooks for vCPU blocking due to
 	 * HLT instruction.
diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
index 09e80e8ee21a..513366c8b794 100644
--- a/arch/x86/kvm/pmu.h
+++ b/arch/x86/kvm/pmu.h
@@ -21,23 +21,6 @@ struct kvm_event_hw_type_mapping {
 
 #include "pmu_ops.h"
 
-struct kvm_pmu_ops {
-	unsigned (*find_arch_event)(struct kvm_pmu *pmu, u8 event_select,
-				    u8 unit_mask);
-	unsigned (*find_fixed_event)(int idx);
-	bool (*pmc_is_enabled)(struct kvm_pmc *pmc);
-	struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);
-	struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, unsigned idx,
-					  u64 *mask);
-	int (*is_valid_msr_idx)(struct kvm_vcpu *vcpu, unsigned idx);
-	bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr);
-	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
-	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
-	void (*refresh)(struct kvm_vcpu *vcpu);
-	void (*init)(struct kvm_vcpu *vcpu);
-	void (*reset)(struct kvm_vcpu *vcpu);
-};
-
 static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
 {
 	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
@@ -124,6 +107,4 @@ int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp);
 
 bool is_vmware_backdoor_pmc(u32 pmc_idx);
 
-extern struct kvm_pmu_ops intel_pmu_ops;
-extern struct kvm_pmu_ops amd_pmu_ops;
 #endif /* __KVM_X86_PMU_H */
diff --git a/arch/x86/kvm/pmu_amd.c b/arch/x86/kvm/pmu_amd.c
index 12d1fa3ba35a..0e01b48b3b0b 100644
--- a/arch/x86/kvm/pmu_amd.c
+++ b/arch/x86/kvm/pmu_amd.c
@@ -302,18 +302,3 @@ static void amd_pmu_reset(struct kvm_vcpu *vcpu)
 }
 
 #include "pmu_amd_ops.c"
-
-struct kvm_pmu_ops amd_pmu_ops = {
-	.find_arch_event = amd_find_arch_event,
-	.find_fixed_event = amd_find_fixed_event,
-	.pmc_is_enabled = amd_pmc_is_enabled,
-	.pmc_idx_to_pmc = amd_pmc_idx_to_pmc,
-	.msr_idx_to_pmc = amd_msr_idx_to_pmc,
-	.is_valid_msr_idx = amd_is_valid_msr_idx,
-	.is_valid_msr = amd_is_valid_msr,
-	.get_msr = amd_pmu_get_msr,
-	.set_msr = amd_pmu_set_msr,
-	.refresh = amd_pmu_refresh,
-	.init = amd_pmu_init,
-	.reset = amd_pmu_reset,
-};
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 5a48beb58083..ca9fd1762519 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -7298,7 +7298,6 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
 
 	.sched_in = svm_sched_in,
 
-	.pmu_ops = &amd_pmu_ops,
 	.deliver_posted_interrupt = svm_deliver_avic_intr,
 	.dy_apicv_has_pending_interrupt = svm_dy_apicv_has_pending_interrupt,
 	.update_pi_irte = svm_update_pi_irte,
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index bfa765842772..e7c1253f47a2 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -359,18 +359,3 @@ static void intel_pmu_reset(struct kvm_vcpu *vcpu)
 }
 
 #include "pmu_intel_ops.c"
-
-struct kvm_pmu_ops intel_pmu_ops = {
-	.find_arch_event = intel_find_arch_event,
-	.find_fixed_event = intel_find_fixed_event,
-	.pmc_is_enabled = intel_pmc_is_enabled,
-	.pmc_idx_to_pmc = intel_pmc_idx_to_pmc,
-	.msr_idx_to_pmc = intel_msr_idx_to_pmc,
-	.is_valid_msr_idx = intel_is_valid_msr_idx,
-	.is_valid_msr = intel_is_valid_msr,
-	.get_msr = intel_pmu_get_msr,
-	.set_msr = intel_pmu_set_msr,
-	.refresh = intel_pmu_refresh,
-	.init = intel_pmu_init,
-	.reset = intel_pmu_reset,
-};
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 09e6a477e06f..ff46008dc514 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -7779,8 +7779,6 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
 	.pre_block = vmx_pre_block,
 	.post_block = vmx_post_block,
 
-	.pmu_ops = &intel_pmu_ops,
-
 	.update_pi_irte = vmx_update_pi_irte,
 
 #ifdef CONFIG_X86_64
