Message-Id: <1455152873-30467-5-git-send-email-vikas.shivappa@linux.intel.com>
Date: Wed, 10 Feb 2016 17:07:52 -0800
From: Vikas Shivappa <vikas.shivappa@...ux.intel.com>
To: vikas.shivappa@...el.com
Cc: x86@...nel.org, linux-kernel@...r.kernel.org, hpa@...or.com,
tglx@...utronix.de, vikas.shivappa@...ux.intel.com,
mingo@...nel.org, peterz@...radead.org, ravi.v.shankar@...el.com,
tony.luck@...el.com, fenghua.yu@...el.com,
kanaka.d.juvva@...el.com, h.peter.anvin@...el.com
Subject: [PATCH 4/5] x86/mbm: RMID Recycling MBM changes
An RMID can be allocated or deallocated as part of RMID recycling.
When an RMID is allocated for an mbm event, the mbm counter needs to be
initialized, because on the next read of the counter we need the
previous value in order to account for the total bytes that went to the
memory controller. Similarly, when an RMID is deallocated we need to
update the ->count variable.
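
For readers unfamiliar with the MBM counters, here is a stand-alone C
sketch (illustrative only; the struct and helper names are made up and
only loosely modeled on the driver, and read_hw_counter() stands in for
the real MSR read) of why the first access after allocation must be a
pure initialization: the hardware counter is free-running, so without a
saved previous value no meaningful delta can be computed.

/*
 * Illustrative sketch only, not kernel code.
 */
#include <stdint.h>
#include <stdio.h>

struct mbm_state {
	uint64_t prev_msr;	/* raw counter value at init/last read */
	uint64_t total_bytes;	/* bytes accounted to this RMID so far */
};

/* Fake free-running hardware counter: keeps growing on every read. */
static uint64_t read_hw_counter(uint32_t rmid)
{
	static uint64_t raw = 1000;

	(void)rmid;
	raw += 512;		/* pretend memory traffic keeps flowing */
	return raw;
}

/* On RMID allocation: remember the current raw value, count nothing. */
static void init_sample(struct mbm_state *s, uint32_t rmid)
{
	s->prev_msr = read_hw_counter(rmid);
	s->total_bytes = 0;
}

/* On a later read: only the delta since the previous read is new. */
static uint64_t update_sample(struct mbm_state *s, uint32_t rmid)
{
	uint64_t now = read_hw_counter(rmid);

	s->total_bytes += now - s->prev_msr;
	s->prev_msr = now;
	return s->total_bytes;
}

int main(void)
{
	struct mbm_state s;

	init_sample(&s, 3);
	printf("bytes since allocation: %llu\n",
	       (unsigned long long)update_sample(&s, 3));
	return 0;
}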
Signed-off-by: Vikas Shivappa <vikas.shivappa@...ux.intel.com>
---
arch/x86/kernel/cpu/perf_event_intel_cqm.c | 27 +++++++++++++++++++++++++--
1 file changed, 25 insertions(+), 2 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event_intel_cqm.c b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
index b1c9663..fea22c8 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_cqm.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
@@ -454,6 +454,7 @@ struct rmid_read {
static void __intel_cqm_event_count(void *info);
static void init_mbm_sample(u32 rmid, enum mbm_evt_type evt_type);
+static void __intel_mbm_event_count(void *info);
static bool is_mbm_event(int e)
{
@@ -480,8 +481,14 @@ static u32 intel_cqm_xchg_rmid(struct perf_event *group, u32 rmid)
.rmid = old_rmid,
};
- on_each_cpu_mask(&cqm_cpumask, __intel_cqm_event_count,
- &rr, 1);
+ if (is_mbm_event(group->attr.config)) {
+ rr.evt_type = group->attr.config;
+ on_each_cpu_mask(&cqm_cpumask, __intel_mbm_event_count,
+ &rr, 1);
+ } else {
+ on_each_cpu_mask(&cqm_cpumask, __intel_cqm_event_count,
+ &rr, 1);
+ }
local64_set(&group->count, atomic64_read(&rr.value));
}
@@ -493,6 +500,22 @@ static u32 intel_cqm_xchg_rmid(struct perf_event *group, u32 rmid)
raw_spin_unlock_irq(&cache_lock);
+ /*
+ * If the allocation is for mbm, init the mbm stats.
+ * Need to check if each event in the group is an mbm event
+ * because there could be multiple types of events in the same group.
+ */
+ if (__rmid_valid(rmid)) {
+ event = group;
+ if (is_mbm_event(event->attr.config))
+ init_mbm_sample(rmid, event->attr.config);
+
+ list_for_each_entry(event, head, hw.cqm_group_entry) {
+ if (is_mbm_event(event->attr.config))
+ init_mbm_sample(rmid, event->attr.config);
+ }
+ }
+
return old_rmid;
}
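
Below is a similarly hedged user-space sketch of the deallocation path
changed in the first hunk: before the old RMID is handed back to the
recycler, the counter is read once more (on every CPU in the real
driver, via __intel_mbm_event_count) and the result is stored in the
group's ->count so the bytes already measured are not lost.  All names
other than the ones quoted from the patch are hypothetical and the flow
is simplified.

/*
 * Illustrative sketch only, not kernel code.  The loop over fake CPUs
 * stands in for on_each_cpu_mask() + __intel_mbm_event_count().
 */
#include <stdint.h>
#include <stdio.h>

#define NR_FAKE_CPUS	4

struct fake_event {
	uint64_t count;		/* stands in for local64_t group->count */
	uint32_t rmid;
};

/* Pretend per-CPU read of the MBM counter for a given RMID. */
static uint64_t mbm_read_on_cpu(uint32_t rmid, int cpu)
{
	return rmid ? 1024ULL * (cpu + 1) : 0;
}

/* Swap the event's RMID, banking the old RMID's bytes into ->count. */
static void xchg_rmid(struct fake_event *ev, uint32_t new_rmid)
{
	uint64_t total = 0;
	int cpu;

	/* mirrors on_each_cpu_mask(..., __intel_mbm_event_count, &rr, 1) */
	for (cpu = 0; cpu < NR_FAKE_CPUS; cpu++)
		total += mbm_read_on_cpu(ev->rmid, cpu);

	/* mirrors local64_set(&group->count, atomic64_read(&rr.value)) */
	ev->count = total;

	ev->rmid = new_rmid;
	/* with a valid new_rmid, the real patch also re-inits the sample */
}

int main(void)
{
	struct fake_event ev = { .count = 0, .rmid = 7 };

	xchg_rmid(&ev, 0);	/* 0 = "no RMID", i.e. deallocation */
	printf("bytes preserved in ->count: %llu\n",
	       (unsigned long long)ev.count);
	return 0;
}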
--
1.9.1