[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <1477787923-61185-14-git-send-email-davidcc@google.com>
Date: Sat, 29 Oct 2016 17:38:10 -0700
From: David Carrillo-Cisneros <davidcc@...gle.com>
To: linux-kernel@...r.kernel.org
Cc: "x86@...nel.org" <x86@...nel.org>, Ingo Molnar <mingo@...hat.com>,
Thomas Gleixner <tglx@...utronix.de>,
Andi Kleen <ak@...ux.intel.com>,
Kan Liang <kan.liang@...el.com>,
Peter Zijlstra <peterz@...radead.org>,
Vegard Nossum <vegard.nossum@...il.com>,
Marcelo Tosatti <mtosatti@...hat.com>,
Nilay Vaish <nilayvaish@...il.com>,
Borislav Petkov <bp@...e.de>,
Vikas Shivappa <vikas.shivappa@...ux.intel.com>,
Ravi V Shankar <ravi.v.shankar@...el.com>,
Fenghua Yu <fenghua.yu@...el.com>,
Paul Turner <pjt@...gle.com>,
Stephane Eranian <eranian@...gle.com>,
David Carrillo-Cisneros <davidcc@...gle.com>
Subject: [PATCH v3 13/46] perf/x86/intel/cmt: add pmonr's Off and Unused states
A pmonr uses a state machine to keep track of its rmids and their
hierarchical dependency with other pmonrs in the same package.
This patch introduces the first two states of this state machine.
It also adds pmonr_rmids: a word-size container to atomically access
a pmonr's sched and read rmids.
More details in code's comments.
Signed-off-by: David Carrillo-Cisneros <davidcc@...gle.com>
---
arch/x86/events/intel/cmt.c | 57 +++++++++++++++++++++++++++++++++++++++++++--
arch/x86/events/intel/cmt.h | 53 +++++++++++++++++++++++++++++++++++++++++
2 files changed, 108 insertions(+), 2 deletions(-)
diff --git a/arch/x86/events/intel/cmt.c b/arch/x86/events/intel/cmt.c
index 5799816..fb6877f 100644
--- a/arch/x86/events/intel/cmt.c
+++ b/arch/x86/events/intel/cmt.c
@@ -10,6 +10,8 @@
#define QOS_L3_OCCUP_EVENT_ID BIT_ULL(0)
#define QOS_EVENT_MASK QOS_L3_OCCUP_EVENT_ID
+#define INVALID_RMID -1
+
/* Increase as needed as Intel CPUs grow. */
#define CMT_MAX_NR_PKGS 8
@@ -121,6 +123,16 @@ static inline struct pmonr *pkgd_pmonr(struct pkg_data *pkgd, struct monr *monr)
return rcu_dereference_check(monr->pmonrs[pkgd->pkgid], safe);
}
+static inline void pmonr_set_rmids(struct pmonr *pmonr,
+ u32 sched_rmid, u32 read_rmid)
+{
+ union pmonr_rmids rmids;
+
+ rmids.sched_rmid = sched_rmid;
+ rmids.read_rmid = read_rmid;
+ atomic64_set(&pmonr->atomic_rmids, rmids.value);
+}
+
static struct pmonr *pmonr_alloc(struct pkg_data *pkgd)
{
struct pmonr *pmonr;
@@ -130,6 +142,7 @@ static struct pmonr *pmonr_alloc(struct pkg_data *pkgd)
if (!pmonr)
return ERR_PTR(-ENOMEM);
+ pmonr_set_rmids(pmonr, INVALID_RMID, INVALID_RMID);
pmonr->pkgd = pkgd;
return pmonr;
@@ -140,6 +153,29 @@ static inline bool monr_is_root(struct monr *monr)
return monr_hrchy_root == monr;
}
+/* pkg_data lock is not required for transition from Off state. */
+static void pmonr_to_unused(struct pmonr *pmonr)
+{
+ /*
+ * Do not warn on re-entering Unused state to simplify cleanup
+ * of initialized pmonrs that were not scheduled.
+ */
+ if (pmonr->state == PMONR_UNUSED)
+ return;
+
+ if (pmonr->state == PMONR_OFF) {
+ pmonr->state = PMONR_UNUSED;
+ pmonr_set_rmids(pmonr, INVALID_RMID, 0);
+ return;
+ }
+}
+
+static void pmonr_unused_to_off(struct pmonr *pmonr)
+{
+ pmonr->state = PMONR_OFF;
+ pmonr_set_rmids(pmonr, INVALID_RMID, 0);
+}
+
static void monr_dealloc(struct monr *monr)
{
u16 p, nr_pkgs = topology_max_packages();
@@ -152,6 +188,8 @@ static void monr_dealloc(struct monr *monr)
/* out of monr_hrchy, so no need for rcu or lock protection. */
if (!monr->pmonrs[p])
continue;
+ if (WARN_ON_ONCE(monr->pmonrs[p]->state != PMONR_OFF))
+ continue;
kfree(monr->pmonrs[p]);
}
kfree(monr);
@@ -210,9 +248,20 @@ static enum cmt_user_flags pmonr_uflags(struct pmonr *pmonr)
static int __pmonr_apply_uflags(struct pmonr *pmonr,
enum cmt_user_flags pmonr_uflags)
{
+ if (!(pmonr_uflags & CMT_UF_HAS_USER)) {
+ if (pmonr->state != PMONR_OFF) {
+ pmonr_to_unused(pmonr);
+ pmonr_unused_to_off(pmonr);
+ }
+ return 0;
+ }
+
if (monr_is_root(pmonr->monr) && (~pmonr_uflags & root_monr_uflags))
return -EINVAL;
+ if (pmonr->state == PMONR_OFF)
+ pmonr_to_unused(pmonr);
+
return 0;
}
@@ -750,15 +799,19 @@ static struct pkg_data *alloc_pkg_data(int cpu)
static void __terminate_pkg_data(struct pkg_data *pkgd)
{
struct monr *pos = NULL;
+ struct pmonr *pmonr;
unsigned long flags;
lockdep_assert_held(&cmt_mutex);
raw_spin_lock_irqsave(&pkgd->lock, flags);
/* post-order traversal guarantees pos to be leaf of monr hierarchy. */
- while ((pos = monr_next_descendant_post(pos, monr_hrchy_root)))
+ while ((pos = monr_next_descendant_post(pos, monr_hrchy_root))) {
+ pmonr = pkgd_pmonr(pkgd, pos);
+ pmonr_to_unused(pmonr);
+ pmonr_unused_to_off(pmonr);
RCU_INIT_POINTER(pos->pmonrs[pkgd->pkgid], NULL);
-
+ }
raw_spin_unlock_irqrestore(&pkgd->lock, flags);
synchronize_rcu();
diff --git a/arch/x86/events/intel/cmt.h b/arch/x86/events/intel/cmt.h
index 6211392..05325c8 100644
--- a/arch/x86/events/intel/cmt.h
+++ b/arch/x86/events/intel/cmt.h
@@ -52,13 +52,66 @@
*/
/**
+ * enum pmonr_state - pmonrs can be in one of the following states:
+ * - Off: pmonr is unavailable for monitoring. It's the starting state.
+ *   - Unused:	pmonr is available for monitoring but no thread associated with
+ *		this pmonr's monr has been scheduled in this pmonr's package.
+ *
+ * The valid state transitions are:
+ *
+ * From: | To: Cause:
+ *=============================================================================
+ * Off | Unused monitoring is enabled for a pmonr.
+ *-----------------------------------------------------------------------------
+ * Unused | Off monitoring is disabled for a pmonr.
+ *-----------------------------------------------------------------------------
+ */
+enum pmonr_state {
+ PMONR_OFF = 0,
+ PMONR_UNUSED,
+};
+
+/**
+ * union pmonr_rmids - Machine-size summary of a pmonr's rmid state.
+ * @value:		One-word accessor.
+ * @sched_rmid: The rmid to write in the PQR MSR in sched in/out.
+ * @read_rmid: The rmid to read occupancy from.
+ *
+ * An atomically readable/writable summary of the rmids used by a pmonr.
+ * Its values can also be used to atomically read the state (preventing
+ * unnecessary locks of pkgd->lock) in the following way:
+ * pmonr state
+ * | Off Unused
+ * ============================================================================
+ * sched_rmid | INVALID_RMID INVALID_RMID
+ * ----------------------------------------------------------------------------
+ * read_rmid | INVALID_RMID 0
+ *
+ */
+union pmonr_rmids {
+ long value;
+ struct {
+ u32 sched_rmid;
+ u32 read_rmid;
+ };
+};
+
+/**
* struct pmonr - per-package componet of MONitored Resources (monr).
* @monr: The monr that contains this pmonr.
* @pkgd: The package data associated with this pmonr.
+ * @atomic_rmids:	Atomic accessor for this pmonr's rmids.
+ * @state: The state for this pmonr, note that this can also
+ * be inferred from the combination of sched_rmid and
+ * read_rmid in @atomic_rmids.
*/
struct pmonr {
struct monr *monr;
struct pkg_data *pkgd;
+
+ /* all writers are sync'ed by package's lock. */
+ atomic64_t atomic_rmids;
+ enum pmonr_state state;
};
/*
--
2.8.0.rc3.226.g39d4020
Powered by blists - more mailing lists