Message-Id: <1584628430-23220-5-git-send-email-luwei.kang@intel.com>
Date:   Thu, 19 Mar 2020 22:33:49 +0800
From:   Luwei Kang <luwei.kang@...el.com>
To:     x86@...nel.org, linux-kernel@...r.kernel.org, kvm@...r.kernel.org
Cc:     peterz@...radead.org, mingo@...hat.com, acme@...nel.org,
        mark.rutland@....com, alexander.shishkin@...ux.intel.com,
        jolsa@...hat.com, namhyung@...nel.org, tglx@...utronix.de,
        bp@...en8.de, hpa@...or.com, pbonzini@...hat.com,
        sean.j.christopherson@...el.com, vkuznets@...hat.com,
        wanpengli@...cent.com, jmattson@...gle.com, joro@...tes.org,
        pawan.kumar.gupta@...ux.intel.com, ak@...ux.intel.com,
        thomas.lendacky@....com, fenghua.yu@...el.com,
        kan.liang@...ux.intel.com, Luwei Kang <luwei.kang@...el.com>
Subject: [PATCH v2 4/5] KVM: x86/pmu: Add counter reload register to MSR list

The guest counter reload registers need to be loaded into the real
HW before VM-entry. This patch adds the counter reload registers to
the MSR-load list when the corresponding counter is enabled, and
removes them when the counter is disabled.
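
(For context: the MSR-load list here is the VMX atomic-switch/autoload
area that the CPU processes on VM-entry and VM-exit; the patch reuses
the existing add_atomic_switch_msr()/clear_atomic_switch_msr() helpers.
Below is only an illustrative sketch of what appending one guest/host
pair amounts to, with field names assumed from struct vcpu_vmx and the
vmx_msrs definition touched by this patch; it is not the actual vmx.c
code.)

  /*
   * Illustrative sketch, not the real add_atomic_switch_msr():
   * append one MSR so that guest_val is loaded on VM-entry and
   * host_val is restored on VM-exit.
   */
  static void autoload_msr_sketch(struct vcpu_vmx *vmx, u32 msr,
				  u64 guest_val, u64 host_val)
  {
	struct vmx_msrs *g = &vmx->msr_autoload.guest;
	struct vmx_msrs *h = &vmx->msr_autoload.host;

	if (WARN_ON_ONCE(g->nr >= NR_LOADSTORE_MSRS))
		return;

	g->val[g->nr].index = msr;
	g->val[g->nr].value = guest_val;	/* loaded on VM-entry */
	h->val[h->nr].index = msr;
	h->val[h->nr].value = host_val;		/* loaded on VM-exit */
	g->nr++;
	h->nr++;

	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, g->nr);
	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, h->nr);
  }

(The real helper also updates an existing slot for the same MSR rather
than appending a duplicate, and special-cases a few MSRs such as
IA32_EFER via dedicated VMCS controls; the sketch omits that.)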

According to the SDM, the Tremont microarchitecture has 3 fixed
counters and 4 general-purpose counters per core. This patch extends
NR_LOADSTORE_MSRS from 8 to 16 because 7 counter reload registers need
to be added to the MSR-load list when all the counters are enabled.
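
(For illustration, the 7 comes from 4 general-purpose plus 3 fixed
counters. A sketch of the counter-index-to-reload-MSR mapping this
relies on, using the MSR names from msr-index.h; the fixed-counter
arithmetic is an assumption based on the SDM layout:)

  /*
   * Illustration only: map a counter index (bit position in
   * IA32_PEBS_ENABLE / GLOBAL_CTRL) to its reload MSR.  On a
   * Tremont-class core: 4 GP counters -> MSR_RELOAD_PMC0..3,
   * 3 fixed counters -> MSR_RELOAD_FIXED_CTR0..2, i.e. at most
   * 4 + 3 = 7 reload MSRs on the load list.
   */
  static u32 reload_msr_for_idx(unsigned int idx)
  {
	if (idx < INTEL_PMC_IDX_FIXED)
		return MSR_RELOAD_PMC0 + idx;

	return MSR_RELOAD_FIXED_CTR0 + (idx - INTEL_PMC_IDX_FIXED);
  }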

Signed-off-by: Luwei Kang <luwei.kang@...el.com>
---
 arch/x86/kvm/vmx/pmu_intel.c | 41 ++++++++++++++++++++++++++++++++++++++++-
 arch/x86/kvm/vmx/vmx.h       |  2 +-
 2 files changed, 41 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index a8b0a8d..75e1d2c 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -68,12 +68,42 @@ static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
 		reprogram_counter(pmu, bit);
 }
 
+static void intel_pmu_set_reload_counter(struct kvm_vcpu *vcpu, u64 data,
+								bool add)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+	struct kvm_pmc *pmc;
+	unsigned long bit;
+	u64 set, host_reload_ctr;
+	u32 msr;
+
+	set = data & ~pmu->global_ctrl_mask;
+
+	for_each_set_bit(bit, (unsigned long *)&set, X86_PMC_IDX_MAX) {
+		if (bit < INTEL_PMC_IDX_FIXED) {
+			msr = MSR_RELOAD_PMC0 + bit;
+			pmc = &pmu->gp_counters[bit];
+		} else {
+			msr = MSR_RELOAD_FIXED_CTR0 + bit - INTEL_PMC_IDX_FIXED;
+			pmc = &pmu->fixed_counters[bit - INTEL_PMC_IDX_FIXED];
+		}
+
+		rdmsrl_safe(msr, &host_reload_ctr);
+		if (add)
+			add_atomic_switch_msr(vmx, msr,
+				pmc->reload_cnt, host_reload_ctr, false);
+		else
+			clear_atomic_switch_msr(vmx, msr);
+	}
+}
+
 static void pebs_enable_changed(struct kvm_pmu *pmu, u64 data)
 {
 	struct vcpu_vmx *vmx = to_vmx(pmu_to_vcpu(pmu));
 	u64 host_ds_area, host_pebs_data_cfg;
 
-	if (data) {
+	if (data && ((data & PEBS_OUTPUT_MASK) == 0)) {
 		rdmsrl_safe(MSR_IA32_DS_AREA, &host_ds_area);
 		add_atomic_switch_msr(vmx, MSR_IA32_DS_AREA,
 			pmu->ds_area, host_ds_area, false);
@@ -81,10 +111,19 @@ static void pebs_enable_changed(struct kvm_pmu *pmu, u64 data)
 		rdmsrl_safe(MSR_PEBS_DATA_CFG, &host_pebs_data_cfg);
 		add_atomic_switch_msr(vmx, MSR_PEBS_DATA_CFG,
 			pmu->pebs_data_cfg, host_pebs_data_cfg, false);
+	} else if (data && ((data & PEBS_OUTPUT_MASK) == PEBS_OUTPUT_PT)) {
+		intel_pmu_set_reload_counter(pmu_to_vcpu(pmu), data, true);
 
+		rdmsrl_safe(MSR_PEBS_DATA_CFG, &host_pebs_data_cfg);
+		add_atomic_switch_msr(vmx, MSR_PEBS_DATA_CFG,
+			pmu->pebs_data_cfg, host_pebs_data_cfg, false);
 	} else {
 		clear_atomic_switch_msr(vmx, MSR_IA32_DS_AREA);
 		clear_atomic_switch_msr(vmx, MSR_PEBS_DATA_CFG);
+
+		if (pmu->has_pebs_via_pt)
+			intel_pmu_set_reload_counter(pmu_to_vcpu(pmu),
+							data, false);
 	}
 
 	pmu->pebs_enable = data;
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index ea899e7..f185144 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -28,7 +28,7 @@
 #define NR_SHARED_MSRS	4
 #endif
 
-#define NR_LOADSTORE_MSRS 8
+#define NR_LOADSTORE_MSRS 16
 
 struct vmx_msrs {
 	unsigned int		nr;
-- 
1.8.3.1
