Message-Id: <1481929988-31569-11-git-send-email-vikas.shivappa@linux.intel.com>
Date: Fri, 16 Dec 2016 15:13:04 -0800
From: Vikas Shivappa <vikas.shivappa@...ux.intel.com>
To: vikas.shivappa@...el.com, vikas.shivappa@...ux.intel.com
Cc: linux-kernel@...r.kernel.org, x86@...nel.org, tglx@...utronix.de,
peterz@...radead.org, ravi.v.shankar@...el.com,
tony.luck@...el.com, fenghua.yu@...el.com, andi.kleen@...el.com,
davidcc@...gle.com, eranian@...gle.com, hpa@...or.com
Subject: [PATCH 10/14] x86/cqm: Add RMID reuse
When an RMID is freed by an event, it cannot be reused immediately because
the RMID may still have some cache occupancy. Hence, when an RMID is freed
it goes onto a limbo list rather than the free list. This patch adds
support to periodically check the occupancy values of such RMIDs and move
them to the free list once their occupancy drops below the threshold
occupancy value. The threshold occupancy value can be modified by the user
as needed.
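
As a rough illustration of the recycling scheme described above, here is a
minimal standalone C sketch (userspace, not kernel code). All names in it
(rmid_entry, read_occupancy, recycle_rmids) are made up for the example and
do not correspond to the symbols in this patch, which walks
cqm_rmid_limbo_lru under cache_lock and compares __rmid_read() against
__intel_cqm_threshold:

/*
 * Illustrative userspace sketch only: RMIDs parked on a limbo list are
 * moved to a free list once their occupancy drops below a threshold.
 * Names are hypothetical; the real patch uses per-package limbo/free
 * LRU lists protected by cache_lock.
 */
#include <stdio.h>

struct rmid_entry {
	unsigned int rmid;
	unsigned long long occupancy;	/* stand-in for a __rmid_read()-style value */
	struct rmid_entry *next;
};

/* Hypothetical stand-in for reading the LLC occupancy of an RMID. */
static unsigned long long read_occupancy(const struct rmid_entry *e)
{
	return e->occupancy;
}

/*
 * Walk the limbo list; entries whose occupancy has dropped below the
 * threshold are unlinked and pushed onto the free list, so their RMIDs
 * become reusable. Entries still above the threshold stay in limbo and
 * are checked again on the next periodic pass.
 */
static void recycle_rmids(struct rmid_entry **limbo,
			  struct rmid_entry **free_list,
			  unsigned long long threshold)
{
	struct rmid_entry **pp = limbo;

	while (*pp) {
		struct rmid_entry *e = *pp;

		if (read_occupancy(e) < threshold) {
			*pp = e->next;		/* unlink from limbo */
			e->next = *free_list;	/* push onto free list */
			*free_list = e;
		} else {
			pp = &e->next;		/* still "dirty", keep in limbo */
		}
	}
}

int main(void)
{
	struct rmid_entry a = { 1, 100, NULL };		/* mostly drained */
	struct rmid_entry b = { 2, 5000, &a };		/* still occupied */
	struct rmid_entry *limbo = &b, *free_list = NULL;

	recycle_rmids(&limbo, &free_list, 1024);

	printf("free list head: %u\n", free_list ? free_list->rmid : 0);	/* 1 */
	printf("limbo list head: %u\n", limbo ? limbo->rmid : 0);		/* 2 */
	return 0;
}

The kernel version below performs the equivalent walk from a deferrable
delayed work item, rescheduling itself until the limbo list drains.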
Tests: Before this patch, task monitoring would simply return an error
once all RMIDs had been used up during the lifetime of a system boot.
After this patch, RMIDs that are freed can be reused.
Signed-off-by: Vikas Shivappa <vikas.shivappa@...ux.intel.com>
---
arch/x86/events/intel/cqm.c | 107 +++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 106 insertions(+), 1 deletion(-)
diff --git a/arch/x86/events/intel/cqm.c b/arch/x86/events/intel/cqm.c
index 73f566a..85162aa 100644
--- a/arch/x86/events/intel/cqm.c
+++ b/arch/x86/events/intel/cqm.c
@@ -173,6 +173,13 @@ u32 __get_rmid(int domain)
 	return entry->rmid;
 }
 
+static void cqm_schedule_rmidwork(int domain);
+
+static inline bool is_first_cqmwork(int domain)
+{
+	return (!atomic_cmpxchg(&cqm_pkgs_data[domain]->reuse_scheduled, 0, 1));
+}
+
 static void __put_rmid(u32 rmid, int domain)
 {
 	struct cqm_rmid_entry *entry;
@@ -293,6 +300,93 @@ static void cqm_mask_call(struct rmid_read *rr)
 static unsigned int __intel_cqm_threshold;
 static unsigned int __intel_cqm_max_threshold;
 
+/*
+ * Test whether an RMID has a zero occupancy value on this cpu.
+ */
+static void intel_cqm_stable(void)
+{
+	struct cqm_rmid_entry *entry;
+	struct list_head *llist;
+
+	llist = &cqm_pkgs_data[pkg_id]->cqm_rmid_limbo_lru;
+	list_for_each_entry(entry, llist, list) {
+
+		if (__rmid_read(entry->rmid) < __intel_cqm_threshold)
+			entry->state = RMID_AVAILABLE;
+	}
+}
+
+static void __intel_cqm_rmid_reuse(void)
+{
+	struct cqm_rmid_entry *entry, *tmp;
+	struct list_head *llist, *flist;
+	struct pkg_data *pdata;
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&cache_lock, flags);
+	pdata = cqm_pkgs_data[pkg_id];
+	llist = &pdata->cqm_rmid_limbo_lru;
+	flist = &pdata->cqm_rmid_free_lru;
+
+	if (list_empty(llist))
+		goto end;
+	/*
+	 * Test whether an RMID is free
+	 */
+	intel_cqm_stable();
+
+	list_for_each_entry_safe(entry, tmp, llist, list) {
+
+		if (entry->state == RMID_DIRTY)
+			continue;
+		/*
+		 * Otherwise remove from limbo and place it onto the free list.
+		 */
+		list_del(&entry->list);
+		list_add_tail(&entry->list, flist);
+	}
+
+end:
+	raw_spin_unlock_irqrestore(&cache_lock, flags);
+}
+
+static bool reschedule_cqm_work(void)
+{
+	unsigned long flags;
+	bool nwork = false;
+
+	raw_spin_lock_irqsave(&cache_lock, flags);
+
+	if (!list_empty(&cqm_pkgs_data[pkg_id]->cqm_rmid_limbo_lru))
+		nwork = true;
+	else
+		atomic_set(&cqm_pkgs_data[pkg_id]->reuse_scheduled, 0U);
+
+	raw_spin_unlock_irqrestore(&cache_lock, flags);
+
+	return nwork;
+}
+
+static void cqm_schedule_rmidwork(int domain)
+{
+	struct delayed_work *dwork;
+	unsigned long delay;
+
+	dwork = &cqm_pkgs_data[domain]->intel_cqm_rmid_work;
+	delay = msecs_to_jiffies(RMID_DEFAULT_QUEUE_TIME);
+
+	schedule_delayed_work_on(cqm_pkgs_data[domain]->rmid_work_cpu,
+				 dwork, delay);
+}
+
+static void intel_cqm_rmid_reuse(struct work_struct *work)
+{
+	__intel_cqm_rmid_reuse();
+
+	if (reschedule_cqm_work())
+		cqm_schedule_rmidwork(pkg_id);
+}
+
 static struct pmu intel_cqm_pmu;
 
 static u64 update_sample(unsigned int rmid, u32 evt_type, int first)
@@ -540,7 +634,7 @@ static int intel_cqm_setup_event(struct perf_event *event,
 	}
 #ifdef CONFIG_CGROUP_PERF
 	/*
-	 * For continously monitored cgroups, *rmid is allocated already.
+	 * For continuously monitored cgroups, rmid is allocated already.
 	 */
 	if (event->cgrp) {
 		cqm_info = cgrp_to_cqm_info(event->cgrp);
@@ -882,6 +976,7 @@ static void intel_cqm_event_terminate(struct perf_event *event)
 {
 	struct perf_event *group_other = NULL;
 	unsigned long flags;
+	int d;
 
 	mutex_lock(&cache_mutex);
 	/*
@@ -924,6 +1019,13 @@ static void intel_cqm_event_terminate(struct perf_event *event)
 		mbm_stop_timers();
 
 	mutex_unlock(&cache_mutex);
+
+	for (d = 0; d < cqm_socket_max; d++) {
+
+		if (cqm_pkgs_data[d] != NULL && is_first_cqmwork(d)) {
+			cqm_schedule_rmidwork(d);
+		}
+	}
 }
 
 static int intel_cqm_event_init(struct perf_event *event)
@@ -1430,6 +1532,9 @@ static int pkg_data_init_cpu(int cpu)
 	mutex_init(&pkg_data->pkg_data_mutex);
 	raw_spin_lock_init(&pkg_data->pkg_data_lock);
 
+	INIT_DEFERRABLE_WORK(
+		&pkg_data->intel_cqm_rmid_work, intel_cqm_rmid_reuse);
+
 	pkg_data->rmid_work_cpu = cpu;
 
 	nr_rmids = cqm_max_rmid + 1;
--
1.9.1