Message-ID: <20220221073140.10618-2-ravi.bangoria@amd.com>
Date:   Mon, 21 Feb 2022 13:01:38 +0530
From:   Ravi Bangoria <ravi.bangoria@....com>
To:     <pbonzini@...hat.com>
CC:     <ravi.bangoria@....com>, <seanjc@...gle.com>,
        <jmattson@...gle.com>, <dave.hansen@...ux.intel.com>,
        <peterz@...radead.org>, <alexander.shishkin@...ux.intel.com>,
        <eranian@...gle.com>, <daviddunn@...gle.com>, <ak@...ux.intel.com>,
        <kan.liang@...ux.intel.com>, <like.xu.linux@...il.com>,
        <x86@...nel.org>, <kvm@...r.kernel.org>,
        <linux-kernel@...r.kernel.org>, <kim.phillips@....com>,
        <santosh.shukla@....com>
Subject: [PATCH 1/3] x86/pmu: Add INTEL_ prefix to some Intel-specific macros

Replace:
  s/HSW_IN_TX/INTEL_HSW_IN_TX/
  s/HSW_IN_TX_CHECKPOINTED/INTEL_HSW_IN_TX_CHECKPOINTED/
  s/ICL_EVENTSEL_ADAPTIVE/INTEL_ICL_EVENTSEL_ADAPTIVE/
  s/ICL_FIXED_0_ADAPTIVE/INTEL_ICL_FIXED_0_ADAPTIVE/

No functional changes.

Signed-off-by: Ravi Bangoria <ravi.bangoria@....com>
---
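
Note: for illustration, the renamed macros are plain event-config bit
definitions. Below is a minimal standalone sketch, not part of this
patch (the config_is_checkpointed() helper is hypothetical), showing
how one of the renamed bits is tested, modeled on
event_is_checkpointed() in the diff:

	#include <stdint.h>

	/* Same values as in arch/x86/include/asm/perf_event.h after the rename. */
	#define INTEL_HSW_IN_TX				(1ULL << 32)
	#define INTEL_HSW_IN_TX_CHECKPOINTED		(1ULL << 33)

	/* Nonzero if the raw event config requests checkpointed
	 * in-transaction (Haswell TSX) counting. */
	static inline int config_is_checkpointed(uint64_t config)
	{
		return (config & INTEL_HSW_IN_TX_CHECKPOINTED) != 0;
	}
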
 arch/x86/events/intel/core.c      | 12 ++++++------
 arch/x86/events/intel/ds.c        |  2 +-
 arch/x86/events/perf_event.h      |  2 +-
 arch/x86/include/asm/perf_event.h | 12 ++++++------
 arch/x86/kvm/pmu.c                | 14 +++++++-------
 arch/x86/kvm/vmx/pmu_intel.c      |  2 +-
 6 files changed, 22 insertions(+), 22 deletions(-)

diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index a3c7ca876aeb..9a72fd8ddab9 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -2359,7 +2359,7 @@ static inline void intel_pmu_ack_status(u64 ack)
 
 static inline bool event_is_checkpointed(struct perf_event *event)
 {
-	return unlikely(event->hw.config & HSW_IN_TX_CHECKPOINTED) != 0;
+	return unlikely(event->hw.config & INTEL_HSW_IN_TX_CHECKPOINTED) != 0;
 }
 
 static inline void intel_set_masks(struct perf_event *event, int idx)
@@ -2717,8 +2717,8 @@ static void intel_pmu_enable_fixed(struct perf_event *event)
 	mask = 0xfULL << (idx * 4);
 
 	if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip) {
-		bits |= ICL_FIXED_0_ADAPTIVE << (idx * 4);
-		mask |= ICL_FIXED_0_ADAPTIVE << (idx * 4);
+		bits |= INTEL_ICL_FIXED_0_ADAPTIVE << (idx * 4);
+		mask |= INTEL_ICL_FIXED_0_ADAPTIVE << (idx * 4);
 	}
 
 	rdmsrl(hwc->config_base, ctrl_val);
@@ -4000,14 +4000,14 @@ static int hsw_hw_config(struct perf_event *event)
 		return ret;
 	if (!boot_cpu_has(X86_FEATURE_RTM) && !boot_cpu_has(X86_FEATURE_HLE))
 		return 0;
-	event->hw.config |= event->attr.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);
+	event->hw.config |= event->attr.config & (INTEL_HSW_IN_TX|INTEL_HSW_IN_TX_CHECKPOINTED);
 
 	/*
 	 * IN_TX/IN_TX-CP filters are not supported by the Haswell PMU with
 	 * PEBS or in ANY thread mode. Since the results are non-sensical forbid
 	 * this combination.
 	 */
-	if ((event->hw.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)) &&
+	if ((event->hw.config & (INTEL_HSW_IN_TX|INTEL_HSW_IN_TX_CHECKPOINTED)) &&
 	     ((event->hw.config & ARCH_PERFMON_EVENTSEL_ANY) ||
 	      event->attr.precise_ip > 0))
 		return -EOPNOTSUPP;
@@ -4050,7 +4050,7 @@ hsw_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
 	c = intel_get_event_constraints(cpuc, idx, event);
 
 	/* Handle special quirk on in_tx_checkpointed only in counter 2 */
-	if (event->hw.config & HSW_IN_TX_CHECKPOINTED) {
+	if (event->hw.config & INTEL_HSW_IN_TX_CHECKPOINTED) {
 		if (c->idxmsk64 & (1U << 2))
 			return &counter2_constraint;
 		return &emptyconstraint;
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 2e215369df4a..9f1c419f401d 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -1225,7 +1225,7 @@ void intel_pmu_pebs_enable(struct perf_event *event)
 		cpuc->pebs_enabled |= 1ULL << 63;
 
 	if (x86_pmu.intel_cap.pebs_baseline) {
-		hwc->config |= ICL_EVENTSEL_ADAPTIVE;
+		hwc->config |= INTEL_ICL_EVENTSEL_ADAPTIVE;
 		if (cpuc->pebs_data_cfg != cpuc->active_pebs_data_cfg) {
 			wrmsrl(MSR_PEBS_DATA_CFG, cpuc->pebs_data_cfg);
 			cpuc->active_pebs_data_cfg = cpuc->pebs_data_cfg;
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 150261d929b9..e789b390d90c 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -410,7 +410,7 @@ struct cpu_hw_events {
  *  The other filters are supported by fixed counters.
  *  The any-thread option is supported starting with v3.
  */
-#define FIXED_EVENT_FLAGS (X86_RAW_EVENT_MASK|HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)
+#define FIXED_EVENT_FLAGS (X86_RAW_EVENT_MASK|INTEL_HSW_IN_TX|INTEL_HSW_IN_TX_CHECKPOINTED)
 #define FIXED_EVENT_CONSTRAINT(c, n)	\
 	EVENT_CONSTRAINT(c, (1ULL << (32+n)), FIXED_EVENT_FLAGS)
 
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 8fc1b5003713..002e67661330 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -30,10 +30,10 @@
 #define ARCH_PERFMON_EVENTSEL_INV			(1ULL << 23)
 #define ARCH_PERFMON_EVENTSEL_CMASK			0xFF000000ULL
 
-#define HSW_IN_TX					(1ULL << 32)
-#define HSW_IN_TX_CHECKPOINTED				(1ULL << 33)
-#define ICL_EVENTSEL_ADAPTIVE				(1ULL << 34)
-#define ICL_FIXED_0_ADAPTIVE				(1ULL << 32)
+#define INTEL_HSW_IN_TX					(1ULL << 32)
+#define INTEL_HSW_IN_TX_CHECKPOINTED			(1ULL << 33)
+#define INTEL_ICL_EVENTSEL_ADAPTIVE			(1ULL << 34)
+#define INTEL_ICL_FIXED_0_ADAPTIVE			(1ULL << 32)
 
 #define AMD64_EVENTSEL_INT_CORE_ENABLE			(1ULL << 36)
 #define AMD64_EVENTSEL_GUESTONLY			(1ULL << 40)
@@ -79,8 +79,8 @@
 	 ARCH_PERFMON_EVENTSEL_CMASK | 		\
 	 ARCH_PERFMON_EVENTSEL_ANY | 		\
 	 ARCH_PERFMON_EVENTSEL_PIN_CONTROL | 	\
-	 HSW_IN_TX | 				\
-	 HSW_IN_TX_CHECKPOINTED)
+	 INTEL_HSW_IN_TX |			\
+	 INTEL_HSW_IN_TX_CHECKPOINTED)
 #define AMD64_RAW_EVENT_MASK		\
 	(X86_RAW_EVENT_MASK          |  \
 	 AMD64_EVENTSEL_EVENT)
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index b1a02993782b..4a70380f2287 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -117,15 +117,15 @@ static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
 	attr.sample_period = get_sample_period(pmc, pmc->counter);
 
 	if (in_tx)
-		attr.config |= HSW_IN_TX;
+		attr.config |= INTEL_HSW_IN_TX;
 	if (in_tx_cp) {
 		/*
-		 * HSW_IN_TX_CHECKPOINTED is not supported with nonzero
+		 * INTEL_HSW_IN_TX_CHECKPOINTED is not supported with nonzero
 		 * period. Just clear the sample period so at least
 		 * allocating the counter doesn't fail.
 		 */
 		attr.sample_period = 0;
-		attr.config |= HSW_IN_TX_CHECKPOINTED;
+		attr.config |= INTEL_HSW_IN_TX_CHECKPOINTED;
 	}
 
 	event = perf_event_create_kernel_counter(&attr, -1, current,
@@ -213,8 +213,8 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
 	if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE |
 			  ARCH_PERFMON_EVENTSEL_INV |
 			  ARCH_PERFMON_EVENTSEL_CMASK |
-			  HSW_IN_TX |
-			  HSW_IN_TX_CHECKPOINTED))) {
+			  INTEL_HSW_IN_TX |
+			  INTEL_HSW_IN_TX_CHECKPOINTED))) {
 		config = kvm_x86_ops.pmu_ops->pmc_perf_hw_id(pmc);
 		if (config != PERF_COUNT_HW_MAX)
 			type = PERF_TYPE_HARDWARE;
@@ -233,8 +233,8 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
 			      !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
 			      !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
 			      eventsel & ARCH_PERFMON_EVENTSEL_INT,
-			      (eventsel & HSW_IN_TX),
-			      (eventsel & HSW_IN_TX_CHECKPOINTED));
+			      (eventsel & INTEL_HSW_IN_TX),
+			      (eventsel & INTEL_HSW_IN_TX_CHECKPOINTED));
 }
 EXPORT_SYMBOL_GPL(reprogram_gp_counter);
 
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index 466d18fc0c5d..7c64792a9506 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -534,7 +534,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 	if (entry &&
 	    (boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
 	    (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM)))
-		pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED;
+		pmu->reserved_bits ^= INTEL_HSW_IN_TX|INTEL_HSW_IN_TX_CHECKPOINTED;
 
 	bitmap_set(pmu->all_valid_pmc_idx,
 		0, pmu->nr_arch_gp_counters);
-- 
2.27.0