Date:   Sat, 29 Oct 2016 17:38:14 -0700
From:   David Carrillo-Cisneros <davidcc@...gle.com>
To:     linux-kernel@...r.kernel.org
Cc:     "x86@...nel.org" <x86@...nel.org>, Ingo Molnar <mingo@...hat.com>,
        Thomas Gleixner <tglx@...utronix.de>,
        Andi Kleen <ak@...ux.intel.com>,
        Kan Liang <kan.liang@...el.com>,
        Peter Zijlstra <peterz@...radead.org>,
        Vegard Nossum <vegard.nossum@...il.com>,
        Marcelo Tosatti <mtosatti@...hat.com>,
        Nilay Vaish <nilayvaish@...il.com>,
        Borislav Petkov <bp@...e.de>,
        Vikas Shivappa <vikas.shivappa@...ux.intel.com>,
        Ravi V Shankar <ravi.v.shankar@...el.com>,
        Fenghua Yu <fenghua.yu@...el.com>,
        Paul Turner <pjt@...gle.com>,
        Stephane Eranian <eranian@...gle.com>,
        David Carrillo-Cisneros <davidcc@...gle.com>
Subject: [PATCH v3 17/46] perf/x86/intel/cmt: add uflag CMT_UF_NOLAZY_RMID

This uflag allows the user to specify that an rmid must be allocated at
monr initialization, failing otherwise.

To support this, __pmonr_apply_uflags is split into reserve and apply
modes. The reserve mode tries to reserve a free rmid and, if successful,
the apply mode proceeds using the previously reserved rmid.
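
Illustrative sketch only (not part of the patch): a minimal, self-contained
C example of this reserve/apply pattern, using hypothetical simplified names
(apply_one, apply_all, free_ids). The real code operates on struct pmonr per
package under the monr hierarchy locks.

	/*
	 * Hypothetical sketch, not the kernel code. Pass 1 (reserve) may
	 * fail and is rolled back; pass 2 (apply) consumes the reservations
	 * and must not fail.
	 */
	#include <errno.h>
	#include <stdbool.h>

	#define NR_PKGS    4
	#define NR_IDS     8
	#define INVALID_ID ((unsigned int)-1)

	static unsigned long free_ids[NR_PKGS] = { 0xff, 0xff, 0xff, 0xff };

	/* reserve == true: grab a free id, may fail. reserve == false: use *res_id. */
	static int apply_one(unsigned int pkg, bool reserve, unsigned int *res_id)
	{
		unsigned int id;

		if (!reserve)
			return 0;	/* apply mode: *res_id already reserved, cannot fail */

		for (id = 0; id < NR_IDS; id++) {
			if (free_ids[pkg] & (1UL << id)) {
				free_ids[pkg] &= ~(1UL << id);
				*res_id = id;
				return 0;
			}
		}
		return -ENOSPC;
	}

	static int apply_all(void)
	{
		unsigned int res_ids[NR_PKGS], p;
		int err;

		for (p = 0; p < NR_PKGS; p++)
			res_ids[p] = INVALID_ID;

		/* Pass 1: reserve an id on every package, or roll back and fail. */
		for (p = 0; p < NR_PKGS; p++) {
			err = apply_one(p, true, &res_ids[p]);
			if (err)
				goto rollback;
		}

		/* Pass 2: apply using the reserved ids; this pass cannot fail. */
		for (p = 0; p < NR_PKGS; p++)
			apply_one(p, false, &res_ids[p]);
		return 0;

	rollback:
		/* Return any reservations made before the failure. */
		for (p = 0; p < NR_PKGS; p++)
			if (res_ids[p] != INVALID_ID)
				free_ids[p] |= 1UL << res_ids[p];
		return err;
	}

In the patch below, monr_apply_uflags plays the role of apply_all: res_rmids[]
holds the per-package reservations and the error path sets the reserved bits
back in each package's free_rmids.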

Signed-off-by: David Carrillo-Cisneros <davidcc@...gle.com>
---
 arch/x86/events/intel/cmt.c | 116 +++++++++++++++++++++++++++++++++++++++-----
 arch/x86/events/intel/cmt.h |   5 +-
 2 files changed, 109 insertions(+), 12 deletions(-)

diff --git a/arch/x86/events/intel/cmt.c b/arch/x86/events/intel/cmt.c
index 9421a3e..3883cb4 100644
--- a/arch/x86/events/intel/cmt.c
+++ b/arch/x86/events/intel/cmt.c
@@ -33,7 +33,8 @@ static unsigned int __min_max_rmid;	/* minimum max_rmid across all pkgs. */
 static struct monr *monr_hrchy_root;
 
 /* Flags for root monr and all its pmonrs while being monitored. */
-static enum cmt_user_flags root_monr_uflags = CMT_UF_HAS_USER;
+static enum cmt_user_flags root_monr_uflags =
+		CMT_UF_HAS_USER | CMT_UF_NOLAZY_RMID;
 
 /* Auxiliar flags */
 static enum cmt_user_flags *pkg_uflags_zeroes;
@@ -414,6 +415,7 @@ static void monr_dealloc(struct monr *monr)
 	u16 p, nr_pkgs = topology_max_packages();
 
 	if (WARN_ON_ONCE(monr->nr_has_user) ||
+	    WARN_ON_ONCE(monr->nr_nolazy_rmid) ||
 	    WARN_ON_ONCE(monr->mon_events))
 		return;
 
@@ -478,11 +480,28 @@ static enum cmt_user_flags pmonr_uflags(struct pmonr *pmonr)
 	return monr->uflags | monr->pkg_uflags[pmonr->pkgd->pkgid];
 }
 
+/*
+ * Callable in two modes:
+ *   1) @reserve == true: checks whether the uflags are applicable and
+ *   stores the "reserved" rmid in @res_rmid.
+ *   2) @reserve == false: applies pmonr_uflags using the rmid stored in
+ *   @res_rmid (if any). Cannot fail.
+ */
 static int __pmonr_apply_uflags(struct pmonr *pmonr,
-		enum cmt_user_flags pmonr_uflags)
+		enum cmt_user_flags pmonr_uflags, bool reserve, u32 *res_rmid)
 {
+	struct pkg_data *pkgd = pmonr->pkgd;
+	u32 free_rmid;
+
+	if (WARN_ON_ONCE(!res_rmid))
+		return -EINVAL;
+	if (WARN_ON_ONCE(reserve && *res_rmid != INVALID_RMID))
+		return -EINVAL;
+
 	if (!(pmonr_uflags & CMT_UF_HAS_USER)) {
 		if (pmonr->state != PMONR_OFF) {
+			if (reserve)
+				return 0;
 			pmonr_to_unused(pmonr);
 			pmonr_unused_to_off(pmonr);
 		}
@@ -492,8 +511,40 @@ static int __pmonr_apply_uflags(struct pmonr *pmonr,
 	if (monr_is_root(pmonr->monr) && (~pmonr_uflags & root_monr_uflags))
 		return -EINVAL;
 
-	if (pmonr->state == PMONR_OFF)
-		pmonr_to_unused(pmonr);
+	if (pmonr->state == PMONR_OFF) {
+		if (!reserve)
+			pmonr_to_unused(pmonr);
+	}
+	if (pmonr->state == PMONR_ACTIVE)
+		return 0;
+	if (!(pmonr_uflags & CMT_UF_NOLAZY_RMID))
+		return 0;
+	if (pmonr->state == PMONR_DEP_DIRTY) {
+		if (!reserve)
+			pmonr_dep_dirty_to_active(pmonr);
+		return 0;
+	}
+
+	/*
+	 * At this point pmonr is in either Unused or Dep_Idle state and
+	 * needs a rmid to transition to Active.
+	 */
+	if (reserve) {
+		free_rmid = find_first_bit(pkgd->free_rmids, CMT_MAX_NR_RMIDS);
+		if (free_rmid == CMT_MAX_NR_RMIDS)
+			return -ENOSPC;
+		*res_rmid = free_rmid;
+		__clear_bit(*res_rmid, pkgd->free_rmids);
+		return 0;
+	}
+
+	/* both cases use the reserved rmid. */
+	if (pmonr->state == PMONR_UNUSED) {
+		pmonr_unused_to_active(pmonr, *res_rmid);
+	} else {
+		WARN_ON_ONCE(pmonr->state != PMONR_DEP_IDLE);
+		pmonr_dep_idle_to_active(pmonr, *res_rmid);
+	}
 
 	return 0;
 }
@@ -514,7 +565,10 @@ static bool monr_has_user(struct monr *monr)
 	       pkg_uflags_has_user(monr->pkg_uflags);
 }
 
-static int __monr_apply_uflags(struct monr *monr, enum cmt_user_flags *puflags)
+static int __monr_apply_uflags(struct monr *monr,
+			       enum cmt_user_flags *puflags,
+			       bool reserve,
+			       u32 *res_rmids)
 {
 	enum cmt_user_flags pmonr_uflags;
 	struct pkg_data *pkgd = NULL;
@@ -526,7 +580,10 @@ static int __monr_apply_uflags(struct monr *monr, enum cmt_user_flags *puflags)
 		pmonr_uflags = monr->uflags |
 				(puflags ? puflags[p] : monr->pkg_uflags[p]);
 		pmonr = pkgd_pmonr(pkgd, monr);
-		err = __pmonr_apply_uflags(pmonr, pmonr_uflags);
+		err = __pmonr_apply_uflags(pmonr, pmonr_uflags,
+					   reserve, &res_rmids[p]);
+		/* The apply pass (reserve == false) must not fail. */
+		WARN_ON_ONCE(!reserve && err);
 		if (err)
 			return err;
 	}
@@ -537,17 +594,26 @@ static int __monr_apply_uflags(struct monr *monr, enum cmt_user_flags *puflags)
 /* Apply puflags for all packages or rollback and fail. */
 static int monr_apply_uflags(struct monr *monr, enum cmt_user_flags *puflags)
 {
+	struct pkg_data *pkgd = NULL;
+	u32 res_rmids[CMT_MAX_NR_PKGS];
 	int p, err;
 	unsigned long flags;
 
 	monr_hrchy_assert_held_mutexes();
 	monr_hrchy_acquire_locks(&flags);
 
-	err = __monr_apply_uflags(monr, puflags);
+	for (p = 0; p < CMT_MAX_NR_PKGS; p++)
+		res_rmids[p] = INVALID_RMID;
+
+	/* The first call to __monr_apply_uflags only "reserves" rmids. */
+	err = __monr_apply_uflags(monr, puflags, true, res_rmids);
 	if (err)
-		goto exit;
+		goto error;
+
+	/* The second call actually applies the flags. */
+	err = __monr_apply_uflags(monr, puflags, false, res_rmids);
+	WARN_ON_ONCE(err);
 
-	/* Proceed to exit if no uflags to update to pkg_uflags. */
 	if (!puflags)
 		goto exit;
 
@@ -563,6 +629,14 @@ static int monr_apply_uflags(struct monr *monr, enum cmt_user_flags *puflags)
 	monr_hrchy_release_locks(&flags);
 
 	return err;
+
+error:
+	while ((pkgd = cmt_pkgs_data_next_rcu(pkgd))) {
+		p = pkgd->pkgid;
+		if (res_rmids[p] != INVALID_RMID)
+			__set_bit(res_rmids[p], pkgd->free_rmids);
+	}
+	goto exit;
 }
 
 static inline struct monr *monr_from_event(struct perf_event *event)
@@ -583,8 +657,11 @@ static bool monr_account_uflags(struct monr *monr,
 
 	if (uflags & CMT_UF_HAS_USER)
 		monr->nr_has_user += account ? 1 : -1;
+	if (uflags & CMT_UF_NOLAZY_RMID)
+		monr->nr_nolazy_rmid += account ? 1 : -1;
 
-	monr->uflags =  (monr->nr_has_user ? CMT_UF_HAS_USER : 0);
+	monr->uflags =  (monr->nr_has_user ? CMT_UF_HAS_USER : 0) |
+			(monr->nr_nolazy_rmid ? CMT_UF_NOLAZY_RMID : 0);
 
 	return old_flags != monr->uflags;
 }
@@ -1165,6 +1242,7 @@ static int init_pkg_data(int cpu)
 	struct pmonr *pmonr;
 	unsigned long flags;
 	int err = 0;
+	u32 res_rmid;
 	u16 pkgid = topology_logical_package_id(cpu);
 
 	lockdep_assert_held(&cmt_mutex);
@@ -1190,9 +1268,25 @@ static int init_pkg_data(int cpu)
 		 */
 		RCU_INIT_POINTER(pos->pmonrs[pkgid], pmonr);
 
+		res_rmid = INVALID_RMID;
 		raw_spin_lock_irqsave(&pkgd->lock, flags);
-		err = __pmonr_apply_uflags(pmonr, pmonr_uflags(pmonr));
+		err = __pmonr_apply_uflags(pmonr, pmonr_uflags(pmonr),
+					   true, &res_rmid);
+		if (!err)
+			__pmonr_apply_uflags(pmonr, pmonr_uflags(pmonr),
+					     false, &res_rmid);
 		raw_spin_unlock_irqrestore(&pkgd->lock, flags);
+
+		/*
+		 * Do not fail the whole package initialization because a pmonr
+		 * failed to apply its uflags, just report the error.
+		 */
+		if (err) {
+			pr_err("Not enough free RMIDs in package %d for Intel CMT.\n",
+				pkgid);
+			pos->pkg_uflags[pkgid] |= CMT_UF_ERROR;
+			err = 0;
+		}
 	}
 
 	if (err) {
diff --git a/arch/x86/events/intel/cmt.h b/arch/x86/events/intel/cmt.h
index bf90c26..754a9c8 100644
--- a/arch/x86/events/intel/cmt.h
+++ b/arch/x86/events/intel/cmt.h
@@ -245,7 +245,8 @@ struct pkg_data {
 enum cmt_user_flags {
 	/* if no has_user other flags are meaningless. */
 	CMT_UF_HAS_USER		= BIT(0), /* has cgroup or event users */
-	CMT_UF_MAX		= BIT(1) - 1,
+	CMT_UF_NOLAZY_RMID	= BIT(1), /* try to obtain rmid on creation */
+	CMT_UF_MAX		= BIT(2) - 1,
 	CMT_UF_ERROR		= CMT_UF_MAX + 1,
 };
 
@@ -258,6 +259,7 @@ enum cmt_user_flags {
  * @children:		List of children in monr hierarchy.
  * @parent_entry:	Entry in parent's children list.
  * @nr_has_user:	nr of CMT_UF_HAS_USER set in events in mon_events.
+ * @nr_nolazy_rmid:	nr of CMT_UF_NOLAZY_RMID set in events in mon_events.
  * @uflags:		monr level cmt_user_flags, or'ed with pkg_uflags.
  * @pkg_uflags:		package level cmt_user_flags, each entry is used as
  *			pmonr uflags if that package is online.
@@ -278,6 +280,7 @@ struct monr {
 	struct list_head		parent_entry;
 
 	int				nr_has_user;
+	int				nr_nolazy_rmid;
 	enum cmt_user_flags		uflags;
 	enum cmt_user_flags		pkg_uflags[];
 };
-- 
2.8.0.rc3.226.g39d4020
