Message-Id: <1477787923-61185-7-git-send-email-davidcc@google.com>
Date:   Sat, 29 Oct 2016 17:38:03 -0700
From:   David Carrillo-Cisneros <davidcc@...gle.com>
To:     linux-kernel@...r.kernel.org
Cc:     "x86@...nel.org" <x86@...nel.org>, Ingo Molnar <mingo@...hat.com>,
        Thomas Gleixner <tglx@...utronix.de>,
        Andi Kleen <ak@...ux.intel.com>,
        Kan Liang <kan.liang@...el.com>,
        Peter Zijlstra <peterz@...radead.org>,
        Vegard Nossum <vegard.nossum@...il.com>,
        Marcelo Tosatti <mtosatti@...hat.com>,
        Nilay Vaish <nilayvaish@...il.com>,
        Borislav Petkov <bp@...e.de>,
        Vikas Shivappa <vikas.shivappa@...ux.intel.com>,
        Ravi V Shankar <ravi.v.shankar@...el.com>,
        Fenghua Yu <fenghua.yu@...el.com>,
        Paul Turner <pjt@...gle.com>,
        Stephane Eranian <eranian@...gle.com>,
        David Carrillo-Cisneros <davidcc@...gle.com>
Subject: [PATCH v3 06/46] perf/x86/intel/cmt: add intel_cmt pmu

Add the intel_cmt pmu, llc_occupancy event attributes, and functions for
event initialization.

The empty pmu callbacks will be filled in by future patches in this series.

Signed-off-by: David Carrillo-Cisneros <davidcc@...gle.com>
---
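Not part of the patch: a minimal user-space sketch of how the llc_occupancy
event could be consumed once the pmu is registered. The sysfs type lookup and
the perf_event_open() wrapper follow the usual conventions for dynamic pmus;
config=0x01 matches QOS_L3_OCCUP_EVENT_ID and the event attribute string added
below. The read path is still empty at this point in the series, so this is
only illustrative.

/* Illustrative only; not part of this patch. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <asm/unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

/* Thin wrapper; glibc does not export perf_event_open() directly. */
static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	uint64_t occupancy;
	int type, fd;
	FILE *f;

	/* Dynamic pmus expose their type id under sysfs. */
	f = fopen("/sys/bus/event_source/devices/intel_cmt/type", "r");
	if (!f || fscanf(f, "%d", &type) != 1)
		return 1;
	fclose(f);

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = type;
	attr.config = 0x01;	/* llc_occupancy (event=0x01) */
	/* No sample_period: intel_cmt_event_init() rejects sampling. */

	/* Monitor the calling task on any cpu. */
	fd = perf_event_open(&attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	if (read(fd, &occupancy, sizeof(occupancy)) == sizeof(occupancy))
		printf("llc_occupancy: %llu bytes\n",
		       (unsigned long long)occupancy);

	close(fd);
	return 0;
}
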
 arch/x86/events/intel/cmt.c | 106 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 106 insertions(+)

diff --git a/arch/x86/events/intel/cmt.c b/arch/x86/events/intel/cmt.c
index f12a06b..0a24896 100644
--- a/arch/x86/events/intel/cmt.c
+++ b/arch/x86/events/intel/cmt.c
@@ -7,6 +7,9 @@
 #include "cmt.h"
 #include "../perf_event.h"
 
+#define QOS_L3_OCCUP_EVENT_ID	BIT_ULL(0)
+#define QOS_EVENT_MASK		QOS_L3_OCCUP_EVENT_ID
+
 /* Increase as needed as Intel CPUs grow. */
 #define CMT_MAX_NR_PKGS		8
 
@@ -46,6 +49,96 @@ static struct pkg_data *cmt_pkgs_data_next_rcu(struct pkg_data *pkgd)
 	return pkgd;
 }
 
+static struct pmu intel_cmt_pmu;
+
+static void intel_cmt_event_read(struct perf_event *event)
+{
+}
+
+static void intel_cmt_event_start(struct perf_event *event, int mode)
+{
+}
+
+static void intel_cmt_event_stop(struct perf_event *event, int mode)
+{
+}
+
+static int intel_cmt_event_add(struct perf_event *event, int mode)
+{
+	return 0;
+}
+
+static int intel_cmt_event_init(struct perf_event *event)
+{
+	int err = 0;
+
+	if (event->attr.type != intel_cmt_pmu.type)
+		return -ENOENT;
+	if (event->attr.config & ~QOS_EVENT_MASK)
+		return -EINVAL;
+
+	/* unsupported modes and filters */
+	if (event->attr.exclude_user   ||
+	    event->attr.exclude_kernel ||
+	    event->attr.exclude_hv     ||
+	    event->attr.exclude_idle   ||
+	    event->attr.exclude_host   ||
+	    event->attr.exclude_guest  ||
+	    event->attr.inherit_stat   || /* cmt groups share rmid */
+	    event->attr.sample_period) /* no sampling */
+		return -EINVAL;
+
+	return err;
+}
+
+EVENT_ATTR_STR(llc_occupancy, intel_cmt_llc, "event=0x01");
+EVENT_ATTR_STR(llc_occupancy.per-pkg, intel_cmt_llc_pkg, "1");
+EVENT_ATTR_STR(llc_occupancy.unit, intel_cmt_llc_unit, "Bytes");
+EVENT_ATTR_STR(llc_occupancy.scale, intel_cmt_llc_scale, NULL);
+EVENT_ATTR_STR(llc_occupancy.snapshot, intel_cmt_llc_snapshot, "1");
+
+static struct attribute *intel_cmt_events_attr[] = {
+	EVENT_PTR(intel_cmt_llc),
+	EVENT_PTR(intel_cmt_llc_pkg),
+	EVENT_PTR(intel_cmt_llc_unit),
+	EVENT_PTR(intel_cmt_llc_scale),
+	EVENT_PTR(intel_cmt_llc_snapshot),
+	NULL,
+};
+
+static struct attribute_group intel_cmt_events_group = {
+	.name = "events",
+	.attrs = intel_cmt_events_attr,
+};
+
+PMU_FORMAT_ATTR(event, "config:0-7");
+static struct attribute *intel_cmt_formats_attr[] = {
+	&format_attr_event.attr,
+	NULL,
+};
+
+static struct attribute_group intel_cmt_format_group = {
+	.name = "format",
+	.attrs = intel_cmt_formats_attr,
+};
+
+static const struct attribute_group *intel_cmt_attr_groups[] = {
+	&intel_cmt_events_group,
+	&intel_cmt_format_group,
+	NULL,
+};
+
+static struct pmu intel_cmt_pmu = {
+	.attr_groups	     = intel_cmt_attr_groups,
+	.task_ctx_nr	     = perf_sw_context,
+	.event_init	     = intel_cmt_event_init,
+	.add		     = intel_cmt_event_add,
+	.del		     = intel_cmt_event_stop,
+	.start		     = intel_cmt_event_start,
+	.stop		     = intel_cmt_event_stop,
+	.read		     = intel_cmt_event_read,
+};
+
 static void free_pkg_data(struct pkg_data *pkg_data)
 {
 	kfree(pkg_data);
@@ -199,6 +292,12 @@ static void cmt_dealloc(void)
 	cmt_pkgs_data = NULL;
 }
 
+static void cmt_stop(void)
+{
+	cpuhp_remove_state(CPUHP_AP_PERF_X86_CMT_ONLINE);
+	cpuhp_remove_state(CPUHP_PERF_X86_CMT_PREP);
+}
+
 static int __init cmt_alloc(void)
 {
 	cmt_l3_scale = boot_cpu_data.x86_cache_occ_scale;
@@ -240,6 +339,7 @@ static int __init cmt_start(void)
 		err = -ENOMEM;
 		goto rm_online;
 	}
+	event_attr_intel_cmt_llc_scale.event_str = str;
 
 	return 0;
 
@@ -269,6 +369,10 @@ static int __init intel_cmt_init(void)
 	if (err)
 		goto err_dealloc;
 
+	err = perf_pmu_register(&intel_cmt_pmu, "intel_cmt", -1);
+	if (err)
+		goto err_stop;
+
 	pr_info("Intel CMT enabled with ");
 	rcu_read_lock();
 	while ((pkgd = cmt_pkgs_data_next_rcu(pkgd))) {
@@ -280,6 +384,8 @@ static int __init intel_cmt_init(void)
 
 	return err;
 
+err_stop:
+	cmt_stop();
 err_dealloc:
 	cmt_dealloc();
 err_exit:
-- 
2.8.0.rc3.226.g39d4020
