Message-Id: <1498503368-20173-9-git-send-email-vikas.shivappa@linux.intel.com>
Date: Mon, 26 Jun 2017 11:55:55 -0700
From: Vikas Shivappa <vikas.shivappa@...ux.intel.com>
To: x86@...nel.org, linux-kernel@...r.kernel.org, tglx@...utronix.de
Cc: hpa@...or.com, peterz@...radead.org, ravi.v.shankar@...el.com,
vikas.shivappa@...el.com, tony.luck@...el.com,
fenghua.yu@...el.com, andi.kleen@...el.com
Subject: [PATCH 08/21] x86/intel_rdt/cqm: Add RMID (Resource monitoring ID) management

Hardware uses an RMID (Resource Monitoring ID) to keep track of the RDT
events associated with tasks. The number of RMIDs depends on the SKU and
is enumerated via CPUID. Add support to manage RMIDs: allocating and
freeing RMIDs, and reading the LLC occupancy for an RMID.
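
For illustration only (not part of this patch), a minimal user-space
sketch of the CPUID enumeration via leaf 0xF. Note the reported values
are zero-based maxima, so the number of RMIDs is the value plus one:

	#include <cpuid.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int eax, ebx, ecx, edx;

		/* CPUID.(EAX=0FH, ECX=0): EBX = max RMID over all resources */
		__cpuid_count(0x0f, 0, eax, ebx, ecx, edx);
		printf("max RMID (all resources): %u\n", ebx);

		/*
		 * CPUID.(EAX=0FH, ECX=1): ECX = max RMID for L3 monitoring,
		 * EBX = factor to convert IA32_QM_CTR counts to bytes.
		 */
		__cpuid_count(0x0f, 1, eax, ebx, ecx, edx);
		printf("max RMID (L3): %u, upscale: %u bytes\n", ecx, ebx);

		return 0;
	}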

RMID allocation is managed with a free list which is initialized to all
available RMIDs except for RMID 0, which is always reserved for the root
group. Freed RMIDs go onto a limbo list, since they are still tagged to
cache lines of the tasks which were using them and thereby still carry
some occupancy. They stay on the limbo list until their occupancy drops
below the threshold occupancy, which is a user configurable value.
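
A caller-side usage sketch (the rdtgrp->rmid field is illustrative
here; wiring RMIDs to groups happens later in this series):

	/* must be called with rdtgroup_mutex held */
	int rmid = alloc_rmid();

	if (rmid < 0)
		/* -ENOSPC: nothing left to free; -EBUSY: limbo may drain */
		return rmid;
	rdtgrp->rmid = rmid;

	/* ... when the group is torn down ... */
	free_rmid(rdtgrp->rmid);	/* parked on the limbo list until clean */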

The OS reads the occupancy associated with an RMID from the IA32_QM_CTR
MSR after programming the IA32_QM_EVTSEL MSR with the RMID and event ID.
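
A sketch of how a consumer of __rmid_read() would interpret the result
(the surrounding code is illustrative; the error bits are the
RMID_VAL_* flags defined below):

	u64 val = __rmid_read(rmid, QOS_L3_OCCUP_EVENT_ID);

	if (val & RMID_VAL_ERROR)	/* unsupported RMID/event programmed */
		return -EIO;
	if (val & RMID_VAL_UNAVAIL)	/* no data available, retry later */
		return -EAGAIN;
	occupancy = val;		/* remaining bits hold the event count */
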
Signed-off-by: Vikas Shivappa <vikas.shivappa@...ux.intel.com>
---
arch/x86/kernel/cpu/intel_rdt.h | 2 +
arch/x86/kernel/cpu/intel_rdt_monitor.c | 121 ++++++++++++++++++++++++++++++++
2 files changed, 123 insertions(+)
diff --git a/arch/x86/kernel/cpu/intel_rdt.h b/arch/x86/kernel/cpu/intel_rdt.h
index 285f106..cf25b6c 100644
--- a/arch/x86/kernel/cpu/intel_rdt.h
+++ b/arch/x86/kernel/cpu/intel_rdt.h
@@ -20,6 +20,8 @@
#define QOS_L3_MBM_TOTAL_EVENT_ID 0x02
#define QOS_L3_MBM_LOCAL_EVENT_ID 0x03
+#define RMID_VAL_ERROR (1ULL << 63)
+#define RMID_VAL_UNAVAIL (1ULL << 62)
/**
* struct mon_evt - Entry in the event list of a resource
* @evtid: event id
diff --git a/arch/x86/kernel/cpu/intel_rdt_monitor.c b/arch/x86/kernel/cpu/intel_rdt_monitor.c
index a418854..4f4221a 100644
--- a/arch/x86/kernel/cpu/intel_rdt_monitor.c
+++ b/arch/x86/kernel/cpu/intel_rdt_monitor.c
@@ -28,6 +28,9 @@
#include <asm/cpu_device_id.h>
#include "intel_rdt.h"
+#define MSR_IA32_QM_CTR 0x0c8e
+#define MSR_IA32_QM_EVTSEL 0x0c8d
+
enum rmid_recycle_state {
RMID_CHECK = 0,
RMID_DIRTY,
@@ -82,6 +85,124 @@ static inline struct rmid_entry *__rmid_entry(u32 rmid)
return entry;
}
+static u64 __rmid_read(u32 rmid, u32 eventid)
+{
+ u64 val;
+
+ wrmsr(MSR_IA32_QM_EVTSEL, eventid, rmid);
+ rdmsrl(MSR_IA32_QM_CTR, val);
+
+ /*
+ * Aside from the ERROR and UNAVAIL bits, the return value is the
+ * count for this @eventid tagged with @rmid.
+ */
+ return val;
+}
+
+/*
+ * Test whether an RMID is dirty (occupancy > intel_cqm_threshold)
+ */
+static void intel_cqm_stable(void *arg)
+{
+ struct rmid_entry *entry;
+ u64 val;
+
+ /*
+ * Since we are already in the IPI handler, mark all the
+ * RMIDs whose occupancy is above the threshold as dirty.
+ */
+ list_for_each_entry(entry, &rmid_limbo_lru, list) {
+ val = __rmid_read(entry->rmid, QOS_L3_OCCUP_EVENT_ID);
+ if (val > intel_cqm_threshold)
+ entry->state = RMID_DIRTY;
+ }
+}
+
+/*
+ * Scan the limbo list and move all entries that are below the
+ * intel_cqm_threshold to the free list.
+ * Return "true" if the limbo list is empty, "false" if there are
+ * still some RMIDs there.
+ */
+static bool try_freeing_limbo_rmid(void)
+{
+ struct rmid_entry *entry, *tmp;
+ struct rdt_resource *r;
+ cpumask_var_t cpu_mask;
+ struct rdt_domain *d;
+ bool ret = true;
+
+ if (list_empty(&rmid_limbo_lru))
+ return ret;
+
+ if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
+ return false;
+
+ r = &rdt_resources_all[RDT_RESOURCE_L3];
+
+ list_for_each_entry(d, &r->domains, list)
+ cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
+
+ /*
+ * Check on each package which limbo RMIDs are still dirty.
+ */
+ on_each_cpu_mask(cpu_mask, intel_cqm_stable, NULL, true);
+
+ list_for_each_entry_safe(entry, tmp, &rmid_limbo_lru, list) {
+ /*
+ * Keep the RMIDs that are marked dirty on the limbo list
+ * and reset their state so they are checked again later.
+ */
+ if (entry->state == RMID_DIRTY) {
+ entry->state = RMID_CHECK;
+ ret = false;
+ continue;
+ }
+ list_del(&entry->list);
+ list_add_tail(&entry->list, &rmid_free_lru);
+ }
+
+ free_cpumask_var(cpu_mask);
+ return ret;
+}
+
+int alloc_rmid(void)
+{
+ struct rmid_entry *entry;
+ bool ret;
+
+ lockdep_assert_held(&rdtgroup_mutex);
+
+ if (list_empty(&rmid_free_lru)) {
+ ret = try_freeing_limbo_rmid();
+ if (list_empty(&rmid_free_lru))
+ return ret ? -ENOSPC : -EBUSY;
+ }
+
+ entry = list_first_entry(&rmid_free_lru,
+ struct rmid_entry, list);
+ list_del(&entry->list);
+
+ return entry->rmid;
+}
+
+void free_rmid(u32 rmid)
+{
+ struct rmid_entry *entry;
+
+ lockdep_assert_held(&rdtgroup_mutex);
+
+ WARN_ON(!rmid);
+ entry = __rmid_entry(rmid);
+
+ entry->state = RMID_CHECK;
+
+ if (rdt_mon_features & (1 << QOS_L3_OCCUP_EVENT_ID))
+ list_add_tail(&entry->list, &rmid_limbo_lru);
+ else
+ list_add_tail(&entry->list, &rmid_free_lru);
+}
+
static int dom_data_init(struct rdt_resource *r)
{
struct rmid_entry *entry = NULL;
--
1.9.1