Date:   Sat, 29 Oct 2016 17:38:27 -0700
From:   David Carrillo-Cisneros <davidcc@...gle.com>
To:     linux-kernel@...r.kernel.org
Cc:     "x86@...nel.org" <x86@...nel.org>, Ingo Molnar <mingo@...hat.com>,
        Thomas Gleixner <tglx@...utronix.de>,
        Andi Kleen <ak@...ux.intel.com>,
        Kan Liang <kan.liang@...el.com>,
        Peter Zijlstra <peterz@...radead.org>,
        Vegard Nossum <vegard.nossum@...il.com>,
        Marcelo Tosatti <mtosatti@...hat.com>,
        Nilay Vaish <nilayvaish@...il.com>,
        Borislav Petkov <bp@...e.de>,
        Vikas Shivappa <vikas.shivappa@...ux.intel.com>,
        Ravi V Shankar <ravi.v.shankar@...el.com>,
        Fenghua Yu <fenghua.yu@...el.com>,
        Paul Turner <pjt@...gle.com>,
        Stephane Eranian <eranian@...gle.com>,
        David Carrillo-Cisneros <davidcc@...gle.com>
Subject: [PATCH v3 30/46] perf/x86/intel/cmt: add asynchronous read for task events

Reading CMT/MBM task events in intel_cmt poses a challenge since it
requires reading from multiple sockets (usually accomplished with
an IPI) while being called with interrupts disabled.

The current upstream driver avoids the problematic read with
interrupts disabled by making perf_event_read() of llc_occupancy a
dummy (no-op) for task events. The actual read is performed in
perf_event_count() whenever perf_event_count() is called with
interrupts enabled. This works, but it changes the expected behavior of
perf_event_read() and perf_event_count().

This patch follows a different approach: it performs asynchronous reads
of all remote packages and waits until either the reads complete or a
deadline expires. It returns an error if an IPI does not complete
in time.

This asynchronous approach has several advantages:
  1) It does not alter perf_event_count().
  2) perf_event_read() performs the read for all types of events.
  3) Reads in all packages are executed in parallel (see the sketch
  below). Parallel reads are especially advantageous because reading
  CMT/MBM events is slow (it requires a sequential read and write of
  two MSRs); I measured an llc_occupancy read on my HSW system to take
  ~1250 cycles. Parallel reads of all caches will become an even bigger
  advantage with upcoming larger systems (up to 8 packages) and once
  CMT support for L2 is rolled out, since task events will then require
  a read of all L2 cache units.
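
For reference, the issue-then-wait flow used here can be condensed as in
the sketch below. This is a simplified illustration, not the driver code
itself: the struct and helper names are made up, error handling is
trimmed, and it assumes the remote csd handler stores the value and then
clears the pending flag (the real code issues all remote reads first, so
the per-package waits overlap):

/*
 * Simplified sketch of an asynchronous remote read with a deadline
 * (illustrative names only; not the intel_cmt implementation).
 */
#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/smp.h>
#include <linux/types.h>

struct async_read {
	struct call_single_data csd;	/* csd.func/csd.info set up at init */
	atomic_t pending;		/* set by issuer, cleared by handler */
	u64 value;			/* filled in by the remote handler */
};

static int read_remote_async(int cpu, struct async_read *ar,
			     unsigned int wait_ms, u64 *count)
{
	u64 deadline;
	int err;

	atomic_set(&ar->pending, 1);
	err = smp_call_function_single_async(cpu, &ar->csd);
	if (err)
		return err;

	/* Take the deadline after issuing so the IPI gets the full budget. */
	deadline = get_jiffies_64() + msecs_to_jiffies(wait_ms);

	/* Spin until the handler clears ->pending or the deadline expires. */
	while (atomic_read(&ar->pending) &&
	       time_before64(get_jiffies_64(), deadline))
		cpu_relax();

	/* Make sure ->value is read only after ->pending has been observed. */
	smp_rmb();

	/* The IPI did not complete in time. */
	if (atomic_read(&ar->pending))
		return -EBUSY;

	*count += ar->value;
	return 0;
}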

This patch also introduces struct cmt_csd and a per-package array of
cmt_csd's (one per rmid). This array is used to serialize potentially
concurrent reads of each rmid's event.
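
The single-issuer rule is enforced with an increment-and-test on the
per-rmid entry; roughly as in the sketch below (the helper name is made
up, the gate itself comes from issue_read_remote_pkgs()):

/*
 * Sketch of the single-issuer gate on a per-rmid cmt_csd (illustrative
 * helper name). Only the caller that takes on_read from 0 to 1 may
 * issue the IPI; later callers back off with -EBUSY. The IPI handler
 * resets on_read to 0 when it finishes, so a stale claim self-heals
 * once the pending read completes.
 */
static int try_claim_rmid_read(struct cmt_csd *ccsd)
{
	if (atomic_inc_return(&ccsd->on_read) > 1)
		return -EBUSY;
	return 0;
}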

Signed-off-by: David Carrillo-Cisneros <davidcc@...gle.com>
---
 arch/x86/events/intel/cmt.c | 206 +++++++++++++++++++++++++++++++++++++++++++-
 arch/x86/events/intel/cmt.h |  14 +++
 2 files changed, 217 insertions(+), 3 deletions(-)

diff --git a/arch/x86/events/intel/cmt.c b/arch/x86/events/intel/cmt.c
index f5ab48e..f9195ec 100644
--- a/arch/x86/events/intel/cmt.c
+++ b/arch/x86/events/intel/cmt.c
@@ -8,6 +8,12 @@
 #include "cmt.h"
 #include "../perf_event.h"
 
+#define RMID_VAL_UNAVAIL	BIT_ULL(62)
+#define RMID_VAL_ERROR		BIT_ULL(63)
+
+#define MSR_IA32_QM_CTR		0x0c8e
+#define MSR_IA32_QM_EVTSEL	0x0c8d
+
 #define QOS_L3_OCCUP_EVENT_ID	BIT_ULL(0)
 #define QOS_EVENT_MASK		QOS_L3_OCCUP_EVENT_ID
 
@@ -1229,6 +1235,41 @@ static bool __match_event(struct perf_event *a, struct perf_event *b)
 	return false;
 }
 
+/* Must be called on a CPU in the rmid's package. */
+static int cmt_rmid_read(u32 rmid, u64 *val)
+{
+	wrmsr(MSR_IA32_QM_EVTSEL, QOS_L3_OCCUP_EVENT_ID, rmid);
+	rdmsrl(MSR_IA32_QM_CTR, *val);
+
+	/* Ignore this reading on error states and do not update the value. */
+	if (WARN_ON_ONCE(*val & RMID_VAL_ERROR))
+		return -EINVAL;
+	if (WARN_ON_ONCE(*val & RMID_VAL_UNAVAIL))
+		return -ENODATA;
+
+	return 0;
+}
+
+/* Time to wait before timing out an rmid read IPI. */
+#define CMT_IPI_WAIT_TIME	100	/* ms */
+
+static void smp_call_rmid_read(void *data)
+{
+	struct cmt_csd *ccsd = (struct cmt_csd *)data;
+
+	ccsd->ret = cmt_rmid_read(ccsd->rmid, &ccsd->value);
+
+	/*
+	 * smp_call_function_single_async must have cleared csd.flags
+	 * before invoking func.
+	 */
+	WARN_ON_ONCE(ccsd->csd.flags);
+
+	/* ensure values are stored before clearing on_read. */
+	barrier();
+	atomic_set(&ccsd->on_read, 0);
+}
+
 static struct pmu intel_cmt_pmu;
 
 /* Try to find a monr with same target, otherwise create new one. */
@@ -1318,9 +1359,145 @@ static struct monr *monr_next_descendant_post(struct monr *pos,
 	return pos->parent;
 }
 
+/* Issue reads to CPUs in remote packages. */
+static int issue_read_remote_pkgs(struct monr *monr,
+				  struct cmt_csd **issued_ccsds,
+				  u32 *local_rmid)
+{
+	struct cmt_csd *ccsd;
+	struct pmonr *pmonr;
+	struct pkg_data *pkgd = NULL;
+	union pmonr_rmids rmids;
+	int err = 0, read_cpu;
+	u16 p, local_pkgid = topology_logical_package_id(smp_processor_id());
+
+	/* Issue reads to remote packages. */
+	rcu_read_lock();
+	while ((pkgd = cmt_pkgs_data_next_rcu(pkgd))) {
+
+		pmonr = pkgd_pmonr(pkgd, monr);
+		/* Retrieve rmid and check state without acquiring pkg locks. */
+		rmids.value = atomic64_read(&pmonr->atomic_rmids);
+		/* Skip Off and Unused states. */
+		if (rmids.sched_rmid == INVALID_RMID)
+			continue;
+		/*
+		 * pmonrs in Dep_{Idle,Dirty} states have run without their
+		 * own rmid and would report wrong occupancy.
+		 */
+		if (rmids.read_rmid == INVALID_RMID) {
+			err = -EBUSY;
+			goto exit;
+		}
+		p = pkgd->pkgid;
+		if (p == local_pkgid) {
+			*local_rmid = rmids.read_rmid;
+			continue;
+		}
+		ccsd = &pkgd->ccsds[rmids.read_rmid];
+		/*
+		 * Reads of remote packages are only required for task events.
+		 * pmu->read in task events is serialized by task_ctx->lock in
+		 * perf generic code. Events with the same task target share
+		 * the rmid and task_ctx->lock, so there is no need to support
+		 * concurrent remote reads of the same RMID.
+		 *
+		 * ccsd->on_read could be non-zero if a previous read timed
+		 * out; in that rare case, fail now and hope that the ongoing
+		 * IPI will have completed by the next read.
+		 */
+		if (atomic_inc_return(&ccsd->on_read) > 1) {
+			err = -EBUSY;
+			goto exit;
+		}
+		issued_ccsds[p] = ccsd;
+		read_cpu = cpumask_any(topology_core_cpumask(pkgd->work_cpu));
+		err = smp_call_function_single_async(read_cpu, &ccsd->csd);
+		if (WARN_ON_ONCE(err))
+			goto exit;
+	}
+exit:
+	rcu_read_unlock();
+
+	return err;
+}
+
+/*
+ * Fail if an IPI hasn't finished by @deadline when @count != NULL.
+ * @count == NULL signals no update and therefore no reason to wait.
+ */
+static int read_issued_pkgs(struct cmt_csd **issued_ccsds,
+			    u64 deadline, u64 *count)
+{
+	struct cmt_csd *ccsd;
+	int p;
+
+	for (p = 0; p < CMT_MAX_NR_PKGS; p++) {
+		ccsd = issued_ccsds[p];
+		if (!ccsd)
+			continue;
+
+		/* An smp_cond_acquire on ccsd->on_read, bounded by the deadline. */
+		while (atomic_read(&ccsd->on_read) &&
+				 time_before64(get_jiffies_64(), deadline))
+			cpu_relax();
+
+		/*
+		 * Guarantee that ccsd->ret and ccsd->value are read after the
+		 * remote read completed or the deadline expired.
+		 */
+		smp_rmb();
+
+		/* last IPI took unusually long. */
+		if (WARN_ON_ONCE(atomic_read(&ccsd->on_read)))
+			return -EBUSY;
+		/* ccsd->on_read is always cleared after csd.flags. */
+		if (WARN_ON_ONCE(ccsd->csd.flags))
+			return -EBUSY;
+		if (ccsd->ret)
+			return ccsd->ret;
+
+		*count += ccsd->value;
+	}
+
+	return 0;
+}
+
+static int read_all_pkgs(struct monr *monr, int wait_time_ms, u64 *count)
+{
+	struct cmt_csd *issued_ccsds[CMT_MAX_NR_PKGS];
+	int err = 0;
+	u32 local_rmid = INVALID_RMID;
+	u64 deadline, val;
+
+	*count = 0;
+	memset(issued_ccsds, 0, CMT_MAX_NR_PKGS * sizeof(*issued_ccsds));
+	err = issue_read_remote_pkgs(monr, issued_ccsds, &local_rmid);
+	if (err)
+		return err;
+	/*
+	 * Save deadline after issuing reads so that all packages have at
+	 * least wait_time_ms to complete.
+	 */
+	deadline = get_jiffies_64() + msecs_to_jiffies(wait_time_ms);
+
+	/* Read local package. */
+	if (local_rmid != INVALID_RMID) {
+		err = cmt_rmid_read(local_rmid, &val);
+		if (WARN_ON_ONCE(err))
+			return err;
+		*count += val;
+	}
+
+	return read_issued_pkgs(issued_ccsds, deadline, count);
+}
+
 static int intel_cmt_event_read(struct perf_event *event)
 {
 	struct monr *monr = monr_from_event(event);
+	u64 count;
+	u16 pkgid = topology_logical_package_id(smp_processor_id());
+	int err;
 
 	/*
 	 * preemption disabled since called holding
@@ -1342,11 +1519,17 @@ static int intel_cmt_event_read(struct perf_event *event)
 	}
 
 	if (event->attach_state & PERF_ATTACH_TASK) {
+		/* It's a task event. */
+		err = read_all_pkgs(monr, CMT_IPI_WAIT_TIME, &count);
+	} else {
 		/* To add support in next patches in series */
 		return -ENOTSUPP;
 	}
-	/* To add support in next patches in series */
-	return -ENOTSUPP;
+	if (err)
+		return err;
+	local64_set(&event->count, count);
+
+	return 0;
 }
 
 static inline void __intel_cmt_event_start(struct perf_event *event,
@@ -1566,15 +1749,17 @@ void perf_cgroup_arch_css_offline(struct cgroup_subsys_state *css)
 
 static void free_pkg_data(struct pkg_data *pkg_data)
 {
+	kfree(pkg_data->ccsds);
 	kfree(pkg_data);
 }
 
 /* Init pkg_data for @cpu 's package. */
 static struct pkg_data *alloc_pkg_data(int cpu)
 {
+	struct cmt_csd *ccsd;
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
 	struct pkg_data *pkgd;
-	int numa_node = cpu_to_node(cpu);
+	int r, ccsds_nr_bytes, numa_node = cpu_to_node(cpu);
 	u16 pkgid = topology_logical_package_id(cpu);
 
 	if (pkgid >= CMT_MAX_NR_PKGS) {
@@ -1618,6 +1803,21 @@ static struct pkg_data *alloc_pkg_data(int cpu)
 	lockdep_set_class(&pkgd->lock, &lock_keys[pkgid]);
 #endif
 
+	ccsds_nr_bytes = (pkgd->max_rmid + 1) * sizeof(*(pkgd->ccsds));
+	pkgd->ccsds = kzalloc_node(ccsds_nr_bytes, GFP_KERNEL, numa_node);
+	if (!pkgd->ccsds) {
+		free_pkg_data(pkgd);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	for (r = 0; r <= pkgd->max_rmid; r++) {
+		ccsd = &pkgd->ccsds[r];
+		ccsd->rmid = r;
+		ccsd->csd.func = smp_call_rmid_read;
+		ccsd->csd.info = ccsd;
+		__set_bit(r, pkgd->free_rmids);
+	}
+
 	__min_max_rmid = min(__min_max_rmid, pkgd->max_rmid);
 
 	return pkgd;
diff --git a/arch/x86/events/intel/cmt.h b/arch/x86/events/intel/cmt.h
index 1e40e6b..8bb43bd 100644
--- a/arch/x86/events/intel/cmt.h
+++ b/arch/x86/events/intel/cmt.h
@@ -191,6 +191,19 @@ struct pmonr {
 	enum pmonr_state			state;
 };
 
+/**
+ * struct cmt_csd - data for async IPI calls that read rmids on remote packages.
+ *
+ * One per rmid per package. One issuer at a time. Readers wait on @on_read.
+ */
+struct cmt_csd {
+	struct call_single_data csd;
+	atomic_t		on_read;
+	u64			value;
+	int			ret;
+	u32			rmid;
+};
+
 /*
  * Compile constant required for bitmap macros.
  * Broadwell EP has 2 rmids per logical core, use twice as many as upper bound.
@@ -237,6 +250,7 @@ struct pkg_data {
 	unsigned int		work_cpu;
 	u32			max_rmid;
 	u16			pkgid;
+	struct cmt_csd		*ccsds;
 };
 
 /**
-- 
2.8.0.rc3.226.g39d4020
