Message-Id: <1477787923-61185-12-git-send-email-davidcc@google.com>
Date: Sat, 29 Oct 2016 17:38:08 -0700
From: David Carrillo-Cisneros <davidcc@...gle.com>
To: linux-kernel@...r.kernel.org
Cc: "x86@...nel.org" <x86@...nel.org>, Ingo Molnar <mingo@...hat.com>,
Thomas Gleixner <tglx@...utronix.de>,
Andi Kleen <ak@...ux.intel.com>,
Kan Liang <kan.liang@...el.com>,
Peter Zijlstra <peterz@...radead.org>,
Vegard Nossum <vegard.nossum@...il.com>,
Marcelo Tosatti <mtosatti@...hat.com>,
Nilay Vaish <nilayvaish@...il.com>,
Borislav Petkov <bp@...e.de>,
Vikas Shivappa <vikas.shivappa@...ux.intel.com>,
Ravi V Shankar <ravi.v.shankar@...el.com>,
Fenghua Yu <fenghua.yu@...el.com>,
Paul Turner <pjt@...gle.com>,
Stephane Eranian <eranian@...gle.com>,
David Carrillo-Cisneros <davidcc@...gle.com>
Subject: [PATCH v3 11/46] perf/x86/intel/cmt: add cmt_user_flags (uflags) to monr
uflags allow users to signal special behavior for a pmonr. This patch
series introduces two uflags that add new behavior and are relevant to
users:
1) CMT_UF_NOLAZY_RMID: rmids must be reserved immediately.
2) CMT_UF_NOSTEAL_RMID: rmids cannot be stolen.
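For illustration only, later patches in the series presumably extend
enum cmt_user_flags roughly as sketched below. This patch defines only
CMT_UF_HAS_USER, so the names of the two new flags come from the list
above while their bit positions are an assumption:

    /* Hypothetical layout; only CMT_UF_HAS_USER exists after this patch. */
    enum cmt_user_flags {
            CMT_UF_HAS_USER     = BIT(0), /* has cgroup or event users */
            CMT_UF_NOLAZY_RMID  = BIT(1), /* assumed: reserve rmids immediately */
            CMT_UF_NOSTEAL_RMID = BIT(2), /* assumed: rmids cannot be stolen */
            CMT_UF_MAX          = BIT(3) - 1,
            CMT_UF_ERROR        = CMT_UF_MAX + 1,
    };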
A monr maintains one field of cmt_user_flags at "monr level" and a set
of "package level" ones, one per possible hardware package.
The effective uflags of a pmonr are the OR of its monr level uflags and
the package level uflags of the pmonr's pkgd.
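As a minimal sketch of that rule (it mirrors the pmonr_uflags() helper
added below):

    /* Effective uflags of the pmonr of @monr on package @pkgid. */
    static enum cmt_user_flags effective_uflags(struct monr *monr, u16 pkgid)
    {
            return monr->uflags | monr->pkg_uflags[pkgid];
    }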
A user passes uflags for all pmonrs in an event's monr by setting them
in the perf_event_attr::config1 field. In future patches in this series,
users will be able to specify per-package uflags through attributes in
the perf cgroup fs.
This patch only introduces the infrastructure to maintain uflags and the
first uflag, CMT_UF_HAS_USER, which marks monrs and pmonrs as in use by
a cgroup or event. This flag is special because it is always taken as set
for a perf event, regardless of the value in event->attr.config1.
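For completeness, a hedged userspace sketch of how an event's uflags
would be passed. The PMU type and event encoding are placeholders (in
practice they would be read from sysfs), and the CMT_UF_NOLAZY_RMID bit
value is an assumption since only CMT_UF_HAS_USER exists after this
patch:

    #include <linux/perf_event.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /*
     * Open a system-wide CMT event on @cpu, asking for non-lazy rmid
     * reservation via config1.  CMT_UF_HAS_USER does not need to be set
     * here: the kernel OR's it in for every perf event.
     */
    static int open_cmt_event(int cpu, __u32 pmu_type, __u64 event_config)
    {
            struct perf_event_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = pmu_type;           /* intel_cmt PMU type (placeholder) */
            attr.config = event_config;     /* event encoding (placeholder) */
            attr.config1 = 0x2;             /* assumed CMT_UF_NOLAZY_RMID bit */

            return syscall(SYS_perf_event_open, &attr, -1, cpu, -1, 0);
    }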
Signed-off-by: David Carrillo-Cisneros <davidcc@...gle.com>
---
arch/x86/events/intel/cmt.c | 166 ++++++++++++++++++++++++++++++++++++++++++--
arch/x86/events/intel/cmt.h | 18 +++++
2 files changed, 180 insertions(+), 4 deletions(-)
diff --git a/arch/x86/events/intel/cmt.c b/arch/x86/events/intel/cmt.c
index 06e6325..07560e5 100644
--- a/arch/x86/events/intel/cmt.c
+++ b/arch/x86/events/intel/cmt.c
@@ -29,6 +29,13 @@ static unsigned int __min_max_rmid; /* minimum max_rmid across all pkgs. */
/* Root for system-wide hierarchy of MONitored Resources (monr). */
static struct monr *monr_hrchy_root;
+/* Flags for root monr and all its pmonrs while being monitored. */
+static enum cmt_user_flags root_monr_uflags = CMT_UF_HAS_USER;
+
+/* Auxiliary flags */
+static enum cmt_user_flags *pkg_uflags_zeroes;
+static size_t pkg_uflags_size;
+
/* Array of packages (array of pkgds). It's protected by RCU or cmt_mutex. */
static struct pkg_data **cmt_pkgs_data;
@@ -128,10 +135,19 @@ static struct pmonr *pmonr_alloc(struct pkg_data *pkgd)
return pmonr;
}
+static inline bool monr_is_root(struct monr *monr)
+{
+ return monr_hrchy_root == monr;
+}
+
static void monr_dealloc(struct monr *monr)
{
u16 p, nr_pkgs = topology_max_packages();
+ if (WARN_ON_ONCE(monr->nr_has_user) ||
+ WARN_ON_ONCE(monr->mon_events))
+ return;
+
for (p = 0; p < nr_pkgs; p++) {
/* out of monr_hrchy, so no need for rcu or lock protection. */
if (!monr->pmonrs[p])
@@ -150,7 +166,8 @@ static struct monr *monr_alloc(void)
lockdep_assert_held(&cmt_mutex);
- monr = kzalloc(sizeof(*monr), GFP_KERNEL);
+ /* Extra space for pkg_uflags. */
+ monr = kzalloc(sizeof(*monr) + pkg_uflags_size, GFP_KERNEL);
if (!monr)
return ERR_PTR(-ENOMEM);
@@ -183,14 +200,118 @@ static struct monr *monr_alloc(void)
return monr;
}
+static enum cmt_user_flags pmonr_uflags(struct pmonr *pmonr)
+{
+ struct monr *monr = pmonr->monr;
+
+ return monr->uflags | monr->pkg_uflags[pmonr->pkgd->pkgid];
+}
+
+static int __pmonr_apply_uflags(struct pmonr *pmonr,
+ enum cmt_user_flags pmonr_uflags)
+{
+ if (monr_is_root(pmonr->monr) && (~pmonr_uflags & root_monr_uflags))
+ return -EINVAL;
+
+ return 0;
+}
+
+static bool pkg_uflags_has_user(enum cmt_user_flags *uflags)
+{
+ int p, nr_pkgs = topology_max_packages();
+
+ for (p = 0; p < nr_pkgs; p++)
+ if (uflags[p] & CMT_UF_HAS_USER)
+ return true;
+ return false;
+}
+
+static bool monr_has_user(struct monr *monr)
+{
+ return monr->uflags & CMT_UF_HAS_USER ||
+ pkg_uflags_has_user(monr->pkg_uflags);
+}
+
+static int __monr_apply_uflags(struct monr *monr, enum cmt_user_flags *puflags)
+{
+ enum cmt_user_flags pmonr_uflags;
+ struct pkg_data *pkgd = NULL;
+ struct pmonr *pmonr;
+ int p, err;
+
+ while ((pkgd = cmt_pkgs_data_next_rcu(pkgd))) {
+ p = pkgd->pkgid;
+ pmonr_uflags = monr->uflags |
+ (puflags ? puflags[p] : monr->pkg_uflags[p]);
+ pmonr = pkgd_pmonr(pkgd, monr);
+ err = __pmonr_apply_uflags(pmonr, pmonr_uflags);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/* Apply puflags for all packages or rollback and fail. */
+static int monr_apply_uflags(struct monr *monr, enum cmt_user_flags *puflags)
+{
+ int p, err;
+ unsigned long flags;
+
+ monr_hrchy_assert_held_mutexes();
+ monr_hrchy_acquire_locks(&flags);
+
+ err = __monr_apply_uflags(monr, puflags);
+ if (err)
+ goto exit;
+
+ /* Nothing to store into pkg_uflags if no new puflags were passed. */
+ if (!puflags)
+ goto exit;
+
+ /*
+ * Now that we've succeeded in applying puflags to the online packages,
+ * we store the new puflags for all packages, even those not online. It
+ * is up to CPU hotplug to apply the pkg_uflags when a package comes online.
+ */
+ for (p = 0; p < topology_max_packages(); p++)
+ monr->pkg_uflags[p] = puflags[p];
+
+exit:
+ monr_hrchy_release_locks(&flags);
+
+ return err;
+}
+
static inline struct monr *monr_from_event(struct perf_event *event)
{
return (struct monr *) READ_ONCE(event->hw.cmt_monr);
}
+static enum cmt_user_flags uflags_from_event(struct perf_event *event)
+{
+ return event->attr.config1 | CMT_UF_HAS_USER;
+}
+
+/* return true if monr uflags will change, false otherwise. */
+static bool monr_account_uflags(struct monr *monr,
+ enum cmt_user_flags uflags, bool account)
+{
+ enum cmt_user_flags old_flags = monr->uflags;
+
+ if (uflags & CMT_UF_HAS_USER)
+ monr->nr_has_user += account ? 1 : -1;
+
+ monr->uflags = (monr->nr_has_user ? CMT_UF_HAS_USER : 0);
+
+ return old_flags != monr->uflags;
+}
+
static struct monr *monr_remove_event(struct perf_event *event)
{
struct monr *monr = monr_from_event(event);
+ enum cmt_user_flags uflags = uflags_from_event(event);
+ int err;
lockdep_assert_held(&cmt_mutex);
monr_hrchy_assert_held_mutexes();
@@ -207,11 +328,23 @@ static struct monr *monr_remove_event(struct perf_event *event)
WRITE_ONCE(event->hw.cmt_monr, NULL);
+ if (monr_account_uflags(monr, uflags, false)) {
+ /*
+ * Undoing the flags cannot fail, since flags require rmids
+ * and fewer flags mean fewer rmids are required.
+ */
+ err = monr_apply_uflags(monr, NULL);
+ WARN_ON_ONCE(err);
+ }
+
return monr;
}
static int monr_append_event(struct monr *monr, struct perf_event *event)
{
+ enum cmt_user_flags uflags = uflags_from_event(event);
+ int err;
+
lockdep_assert_held(&cmt_mutex);
monr_hrchy_assert_held_mutexes();
@@ -225,7 +358,14 @@ static int monr_append_event(struct monr *monr, struct perf_event *event)
WRITE_ONCE(event->hw.cmt_monr, monr);
- return 0;
+ if (!monr_account_uflags(monr, uflags, true))
+ return 0;
+
+ err = monr_apply_uflags(monr, NULL);
+ if (err)
+ monr_remove_event(event);
+
+ return err;
}
static void monr_hrchy_insert_leaf(struct monr *monr, struct monr *parent)
@@ -465,7 +605,8 @@ static void intel_cmt_event_destroy(struct perf_event *event)
/* monr is detached from event. */
monr = monr_remove_event(event);
- monr_destroy(monr);
+ if (!monr_has_user(monr))
+ monr_destroy(monr);
monr_hrchy_release_mutexes();
mutex_unlock(&cmt_mutex);
@@ -625,6 +766,7 @@ static int init_pkg_data(int cpu)
struct monr *pos = NULL;
struct pkg_data *pkgd;
struct pmonr *pmonr;
+ unsigned long flags;
int err = 0;
u16 pkgid = topology_logical_package_id(cpu);
@@ -650,6 +792,10 @@ static int init_pkg_data(int cpu)
* not set in cmt_pkgs_data yet.
*/
RCU_INIT_POINTER(pos->pmonrs[pkgid], pmonr);
+
+ raw_spin_lock_irqsave(&pkgd->lock, flags);
+ err = __pmonr_apply_uflags(pmonr, pmonr_uflags(pmonr));
+ raw_spin_unlock_irqrestore(&pkgd->lock, flags);
}
if (err) {
@@ -739,6 +885,9 @@ static void cmt_dealloc(void)
kfree(cmt_pkgs_data);
cmt_pkgs_data = NULL;
+
+ kfree(pkg_uflags_zeroes);
+ pkg_uflags_zeroes = NULL;
}
static void cmt_stop(void)
@@ -749,6 +898,11 @@ static void cmt_stop(void)
static int __init cmt_alloc(void)
{
+ pkg_uflags_size = sizeof(*pkg_uflags_zeroes) * topology_max_packages();
+ pkg_uflags_zeroes = kzalloc(pkg_uflags_size, GFP_KERNEL);
+ if (!pkg_uflags_zeroes)
+ return -ENOMEM;
+
cmt_l3_scale = boot_cpu_data.x86_cache_occ_scale;
if (cmt_l3_scale == 0)
cmt_l3_scale = 1;
@@ -771,7 +925,11 @@ static int __init cmt_alloc(void)
static int __init cmt_start(void)
{
char *str, scale[20];
- int err;
+ int err, p;
+
+ monr_account_uflags(monr_hrchy_root, root_monr_uflags, true);
+ for (p = 0; p < topology_max_packages(); p++)
+ monr_hrchy_root->pkg_uflags[p] = root_monr_uflags;
/* will be modified by init_pkg_data() in intel_cmt_prep_up(). */
__min_max_rmid = UINT_MAX;
diff --git a/arch/x86/events/intel/cmt.h b/arch/x86/events/intel/cmt.h
index 7f3a7b8..66b078a 100644
--- a/arch/x86/events/intel/cmt.h
+++ b/arch/x86/events/intel/cmt.h
@@ -76,6 +76,16 @@ struct pkg_data {
};
/**
+ * enum cmt_user_flags - user-set flags for monrs and pmonrs.
+ */
+enum cmt_user_flags {
+ /* If CMT_UF_HAS_USER is not set, the other flags are meaningless. */
+ CMT_UF_HAS_USER = BIT(0), /* has cgroup or event users */
+ CMT_UF_MAX = BIT(1) - 1,
+ CMT_UF_ERROR = CMT_UF_MAX + 1,
+};
+
+/**
* struct monr - MONitored Resource.
* @mon_events: The head of event's group that use this monr, if any.
* @entry: List entry into cmt_event_monrs.
@@ -83,6 +93,10 @@ struct pkg_data {
* @parent: Parent in monr hierarchy.
* @children: List of children in monr hierarchy.
* @parent_entry: Entry in parent's children list.
+ * @nr_has_user: number of events in mon_events with CMT_UF_HAS_USER set.
+ * @uflags: monr level cmt_user_flags, OR'ed with pkg_uflags.
+ * @pkg_uflags: package level cmt_user_flags, each entry is used as
+ * pmonr uflags if that package is online.
*
* A monr is assigned to every CMT event and/or monitored cgroup when
* monitoring is activated, and that instance's address does not change during
@@ -98,4 +112,8 @@ struct monr {
struct monr *parent;
struct list_head children;
struct list_head parent_entry;
+
+ int nr_has_user;
+ enum cmt_user_flags uflags;
+ enum cmt_user_flags pkg_uflags[];
};
--
2.8.0.rc3.226.g39d4020