Message-Id: <1461776456-25246-4-git-send-email-vikas.shivappa@linux.intel.com>
Date: Wed, 27 Apr 2016 10:00:56 -0700
From: Vikas Shivappa <vikas.shivappa@...ux.intel.com>
To: vikas.shivappa@...el.com, x86@...nel.org,
linux-kernel@...r.kernel.org, hpa@...or.com, tglx@...utronix.de
Cc: mingo@...nel.org, peterz@...radead.org, ravi.v.shankar@...el.com,
tony.luck@...el.com, fenghua.yu@...el.com,
vikas.shivappa@...ux.intel.com
Subject: [PATCH 3/3] perf/x86/mbm: Fix mbm counting when RMIDs are reused
When multiple instances of perf reuse an RMID for the same PID, we need
to start counting from zero for each new event rather than reporting
the current RMID count. This patch adds an st_count (start count)
'per event' to track this.
Note that this is different from the 'per rmid' start count. The first
time an RMID is used, we store the start_count 'per RMID'. The
'per rmid' start_count lives in the RMID data structure, whereas the
'per event' start count lives in the perf_event.
u64 read_sample(rmid) // for each rmid
{
	start: // first call for the rmid
		'per rmid' prev = read_hw_counter();
	count:
		cur_count = read_hw_counter();
		delta = cur_count - prev;
		prev = cur_count;
		total_bytes += delta;
		return total_bytes;
}
for each event -
	start:
		if rmid is reused
			'per event' prev = read_sample(rmid);
		else
			prev = 0;
	count:
		tmp = read_sample(rmid);
		count = tmp - prev;
For example:
1. event1 gets RMID1. We read the hw_counter and store it as RMID1's
   start_count.
2. RMID1's total_bytes (current hw_count - RMID start_count) reaches
   100MB for event1 (PID1).
3. Another perf instance starts measuring the same PID1 with event2. We
   reuse RMID1 since PID1 is already being counted.
4. event2 stores 100MB as its st_count.
5. Later, when the user reads event2 and RMID1's current total_bytes is
   110MB, we report 110MB - 100MB = 10MB.
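For illustration, below is a minimal, self-contained user-space sketch
of this accounting (hypothetical names such as rmid_state and
event_start, with a fake counter standing in for the hardware MSR; this
is not the kernel code). read_sample() keeps the 'per rmid' prev and
total_bytes, and event_start() snapshots the RMID's running total as
the 'per event' st_count when the RMID is reused:

#include <stdio.h>
#include <stdint.h>

static uint64_t fake_hw_counter;	/* stands in for the hardware counter */

static uint64_t read_hw_counter(void)
{
	return fake_hw_counter;
}

/* 'per rmid' state, as in the read_sample() pseudocode above. */
struct rmid_state {
	uint64_t prev;
	uint64_t total_bytes;
};

/* start: first call for the rmid */
static void rmid_start(struct rmid_state *r)
{
	r->prev = read_hw_counter();
	r->total_bytes = 0;
}

/* count: accumulate the delta since the last read. */
static uint64_t read_sample(struct rmid_state *r)
{
	uint64_t cur_count = read_hw_counter();

	r->total_bytes += cur_count - r->prev;
	r->prev = cur_count;
	return r->total_bytes;
}

/* 'per event' state: st_count mirrors hw.st_count in the patch. */
struct event_state {
	struct rmid_state *rmid;
	uint64_t st_count;
};

static void event_start(struct event_state *ev, struct rmid_state *r,
			int rmid_reused)
{
	ev->rmid = r;
	/* Reused RMID: remember its current total so we count from zero. */
	ev->st_count = rmid_reused ? read_sample(r) : 0;
}

static uint64_t event_count(struct event_state *ev)
{
	return read_sample(ev->rmid) - ev->st_count;
}

int main(void)
{
	struct rmid_state rmid1;
	struct event_state event1, event2;

	rmid_start(&rmid1);			/* 1. RMID1's start_count */
	event_start(&event1, &rmid1, 0);	/* first user, st_count = 0 */

	fake_hw_counter += 100ULL << 20;	/* 2. RMID1 counts 100MB */
	event_start(&event2, &rmid1, 1);	/* 3./4. st_count = 100MB */

	fake_hw_counter += 10ULL << 20;		/* RMID1 total becomes 110MB */

	printf("event1: %lluMB\n",
	       (unsigned long long)(event_count(&event1) >> 20));	/* 110 */
	printf("event2: %lluMB\n",
	       (unsigned long long)(event_count(&event2) >> 20));	/* 10 */
	return 0;
}

Compiled and run, this reproduces steps 1-5 above: event2 reports 10MB
even though RMID1's total_bytes is 110MB.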
Signed-off-by: Vikas Shivappa <vikas.shivappa@...ux.intel.com>
---
arch/x86/events/intel/cqm.c | 71 ++++++++++++++++++++++++++++++++++++++++++---
include/linux/perf_event.h | 1 +
2 files changed, 68 insertions(+), 4 deletions(-)
diff --git a/arch/x86/events/intel/cqm.c b/arch/x86/events/intel/cqm.c
index 320af26..8129959 100644
--- a/arch/x86/events/intel/cqm.c
+++ b/arch/x86/events/intel/cqm.c
@@ -484,8 +484,18 @@ mbm_set_rccount(struct perf_event *event, struct rmid_read *rr)
{
u64 tmpval;
- tmpval = local64_read(&event->hw.rc_count) + atomic64_read(&rr->value);
+ tmpval = local64_read(&event->hw.rc_count) + atomic64_read(&rr->value) -
+ local64_read(&event->hw.st_count);
+
local64_set(&event->hw.rc_count, tmpval);
+
+ /*
+ * The st_count (start count) is meant to store the starting bytes
+ * for an event which is reusing an RMID which already
+ * had bytes measured. Once we start using the rc_count
+ * to keep the history bytes, reset the start bytes.
+ */
+ local64_set(&event->hw.st_count, 0UL);
local64_set(&event->count, tmpval);
}
@@ -1025,6 +1035,58 @@ static void init_mbm_sample(u32 rmid, u32 evt_type)
on_each_cpu_mask(&cqm_cpumask, __intel_mbm_event_init, &rr, 1);
}
+static inline bool first_event_ingroup(struct perf_event *group,
+ struct perf_event *event)
+{
+ struct list_head *head = &group->hw.cqm_group_entry;
+ u32 evt_type = event->attr.config;
+
+ if (evt_type == group->attr.config)
+ return false;
+ list_for_each_entry(event, head, hw.cqm_group_entry) {
+ if (evt_type == event->attr.config)
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * mbm_setup_event - Do the mbm specific count initialization
+ *                   when multiple events share an RMID.
+ *
+ * If this is the first mbm event using the RMID, initialize the
+ * total_bytes in the RMID and the prev_count.
+ * Otherwise only initialize the start count of the event, which is
+ * the current count of the RMID.
+ * In other words, if the RMID has already counted say 100MB because
+ * another event was using it, we start from zero for the new event.
+ * That way, if the user checks the count after 1s, we report what
+ * was counted during that 1s and not the entire duration the RMID
+ * has been counted.
+ */
+static inline void mbm_setup_event(u32 rmid, struct perf_event *group,
+ struct perf_event *event)
+{
+ u32 evt_type = event->attr.config;
+ struct rmid_read rr;
+
+ if (first_event_ingroup(group, event)) {
+ init_mbm_sample(rmid, evt_type);
+ } else {
+ rr = __init_rr(rmid, evt_type, 0);
+ cqm_mask_call(&rr);
+ local64_set(&event->hw.st_count, atomic64_read(&rr.value));
+ }
+}
+
+static inline void mbm_setup_event_init(struct perf_event *event)
+{
+ event->hw.is_group_event = false;
+ local64_set(&event->hw.rc_count, 0UL);
+ local64_set(&event->hw.st_count, 0UL);
+}
+
/*
* Find a group and setup RMID.
*
@@ -1037,7 +1099,7 @@ static void intel_cqm_setup_event(struct perf_event *event,
bool conflict = false;
u32 rmid;
- event->hw.is_group_event = false;
+ mbm_setup_event_init(event);
list_for_each_entry(iter, &cache_groups, hw.cqm_groups_entry) {
rmid = iter->hw.cqm_rmid;
@@ -1046,7 +1108,7 @@ static void intel_cqm_setup_event(struct perf_event *event,
event->hw.cqm_rmid = rmid;
*group = iter;
if (is_mbm_event(event->attr.config) && __rmid_valid(rmid))
- init_mbm_sample(rmid, event->attr.config);
+ mbm_setup_event(rmid, iter, event);
return;
}
@@ -1278,7 +1340,8 @@ static u64 intel_cqm_event_count(struct perf_event *event)
if (event->hw.cqm_rmid == rr.rmid) {
if (is_mbm_event(event->attr.config)) {
tmpval = atomic64_read(&rr.value) +
- local64_read(&event->hw.rc_count);
+ local64_read(&event->hw.rc_count) -
+ local64_read(&event->hw.st_count);
local64_set(&event->count, tmpval);
} else {
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index ec7772a..44a7f0c 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -123,6 +123,7 @@ struct hw_perf_event {
u32 cqm_rmid;
int is_group_event;
local64_t rc_count;
+ local64_t st_count;
struct list_head cqm_events_entry;
struct list_head cqm_groups_entry;
struct list_head cqm_group_entry;
--
1.9.1