Message-ID: <tip-25df39f2cfd06a4b49ad592c5b7cba0cbf24e27f@git.kernel.org>
Date:   Thu, 30 Mar 2017 01:39:06 -0700
From:   tip-bot for Suravee Suthikulpanit <tipbot@...or.com>
To:     linux-tip-commits@...r.kernel.org
Cc:     joro@...tes.org, torvalds@...ux-foundation.org,
        peterz@...radead.org, jolsa@...hat.com, hpa@...or.com,
        eranian@...gle.com, alexander.shishkin@...ux.intel.com,
        mingo@...nel.org, tglx@...utronix.de, linux-kernel@...r.kernel.org,
        suravee.suthikulpanit@....com, bp@...e.de,
        vincent.weaver@...ne.edu, acme@...hat.com
Subject: [tip:perf/core] x86/events/amd/iommu: Enable support for multiple
 IOMMUs

Commit-ID:  25df39f2cfd06a4b49ad592c5b7cba0cbf24e27f
Gitweb:     http://git.kernel.org/tip/25df39f2cfd06a4b49ad592c5b7cba0cbf24e27f
Author:     Suravee Suthikulpanit <suravee.suthikulpanit@....com>
AuthorDate: Wed, 22 Mar 2017 02:02:42 -0500
Committer:  Ingo Molnar <mingo@...nel.org>
CommitDate: Thu, 30 Mar 2017 09:55:36 +0200

x86/events/amd/iommu: Enable support for multiple IOMMUs

Add support for multiple IOMMUs to perf by exposing an AMD IOMMU PMU for
each IOMMU found in the system via:

  /sys/bus/event_source/devices/amd_iommu_x

where x is the IOMMU index. This allows users to specify different
events to be programmed into the performance counters of each IOMMU.
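
For example, one perf invocation could then count different events on two
different IOMMUs. An illustrative command, assuming the mem_trans_total and
cmd_processed events exposed by this driver's events attribute group:

  # Count total memory transactions on IOMMU 0 and processed
  # commands on IOMMU 1, system-wide, for one second:
  perf stat -e amd_iommu_0/mem_trans_total/,amd_iommu_1/cmd_processed/ -a -- sleep 1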

Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@....com>
[ Improve readability, shorten names. ]
Signed-off-by: Borislav Petkov <bp@...e.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
Cc: Alexander Shishkin <alexander.shishkin@...ux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@...hat.com>
Cc: Jiri Olsa <jolsa@...hat.com>
Cc: Jörg Rödel <joro@...tes.org>
Cc: Linus Torvalds <torvalds@...ux-foundation.org>
Cc: Peter Zijlstra <peterz@...radead.org>
Cc: Stephane Eranian <eranian@...gle.com>
Cc: Thomas Gleixner <tglx@...utronix.de>
Cc: Vince Weaver <vincent.weaver@...ne.edu>
Cc: iommu@...ts.linux-foundation.org
Link: http://lkml.kernel.org/r/1490166162-10002-11-git-send-email-Suravee.Suthikulpanit@amd.com
Signed-off-by: Ingo Molnar <mingo@...nel.org>
---
 arch/x86/events/amd/iommu.c | 112 ++++++++++++++++++++++++++++----------------
 1 file changed, 71 insertions(+), 41 deletions(-)

diff --git a/arch/x86/events/amd/iommu.c b/arch/x86/events/amd/iommu.c
index f0d94c8..3641e24 100644
--- a/arch/x86/events/amd/iommu.c
+++ b/arch/x86/events/amd/iommu.c
@@ -34,16 +34,21 @@
 #define GET_DOMID_MASK(x)  (((x)->conf1 >> 16) & 0xFFFFULL)
 #define GET_PASID_MASK(x)  (((x)->conf1 >> 32) & 0xFFFFFULL)
 
-static struct perf_amd_iommu __perf_iommu;
+#define IOMMU_NAME_SIZE 16
 
 struct perf_amd_iommu {
+	struct list_head list;
 	struct pmu pmu;
+	struct amd_iommu *iommu;
+	char name[IOMMU_NAME_SIZE];
 	u8 max_banks;
 	u8 max_counters;
 	u64 cntr_assign_mask;
 	raw_spinlock_t lock;
 };
 
+static LIST_HEAD(perf_amd_iommu_list);
+
 /*---------------------------------------------
  * sysfs format attributes
  *---------------------------------------------*/
@@ -233,9 +238,14 @@ static int perf_iommu_event_init(struct perf_event *event)
 	return 0;
 }
 
+static inline struct amd_iommu *perf_event_2_iommu(struct perf_event *ev)
+{
+	return (container_of(ev->pmu, struct perf_amd_iommu, pmu))->iommu;
+}
+
 static void perf_iommu_enable_event(struct perf_event *ev)
 {
-	struct amd_iommu *iommu = get_amd_iommu(0);
+	struct amd_iommu *iommu = perf_event_2_iommu(ev);
 	struct hw_perf_event *hwc = &ev->hw;
 	u8 bank = hwc->iommu_bank;
 	u8 cntr = hwc->iommu_cntr;
@@ -265,7 +275,7 @@ static void perf_iommu_enable_event(struct perf_event *ev)
 
 static void perf_iommu_disable_event(struct perf_event *event)
 {
-	struct amd_iommu *iommu = get_amd_iommu(0);
+	struct amd_iommu *iommu = perf_event_2_iommu(event);
 	struct hw_perf_event *hwc = &event->hw;
 	u64 reg = 0ULL;
 
@@ -285,7 +295,7 @@ static void perf_iommu_start(struct perf_event *event, int flags)
 
 	if (flags & PERF_EF_RELOAD) {
 		u64 prev_raw_count = local64_read(&hwc->prev_count);
-		struct amd_iommu *iommu = get_amd_iommu(0);
+		struct amd_iommu *iommu = perf_event_2_iommu(event);
 
 		amd_iommu_pc_set_reg(iommu, hwc->iommu_bank, hwc->iommu_cntr,
 				     IOMMU_PC_COUNTER_REG, &prev_raw_count);
@@ -300,7 +310,7 @@ static void perf_iommu_read(struct perf_event *event)
 {
 	u64 count, prev, delta;
 	struct hw_perf_event *hwc = &event->hw;
-	struct amd_iommu *iommu = get_amd_iommu(0);
+	struct amd_iommu *iommu = perf_event_2_iommu(event);
 
 	if (amd_iommu_pc_get_reg(iommu, hwc->iommu_bank, hwc->iommu_cntr,
 				 IOMMU_PC_COUNTER_REG, &count))
@@ -388,11 +398,6 @@ static __init int _init_events_attrs(void)
 	return 0;
 }
 
-static __init void amd_iommu_pc_exit(void)
-{
-	kfree(amd_iommu_events_group.attrs);
-}
-
 const struct attribute_group *amd_iommu_attr_groups[] = {
 	&amd_iommu_format_group,
 	&amd_iommu_cpumask_group,
@@ -400,46 +405,57 @@ const struct attribute_group *amd_iommu_attr_groups[] = {
 	NULL,
 };
 
-static __init int
-_init_perf_amd_iommu(struct perf_amd_iommu *perf_iommu, char *name)
+static struct pmu iommu_pmu = {
+	.event_init	= perf_iommu_event_init,
+	.add		= perf_iommu_add,
+	.del		= perf_iommu_del,
+	.start		= perf_iommu_start,
+	.stop		= perf_iommu_stop,
+	.read		= perf_iommu_read,
+	.task_ctx_nr	= perf_invalid_context,
+	.attr_groups	= amd_iommu_attr_groups,
+};
+
+static __init int init_one_iommu(unsigned int idx)
 {
+	struct perf_amd_iommu *perf_iommu;
 	int ret;
 
+	perf_iommu = kzalloc(sizeof(struct perf_amd_iommu), GFP_KERNEL);
+	if (!perf_iommu)
+		return -ENOMEM;
+
 	raw_spin_lock_init(&perf_iommu->lock);
 
-	/* Init cpumask attributes to only core 0 */
-	cpumask_set_cpu(0, &iommu_cpumask);
+	perf_iommu->pmu          = iommu_pmu;
+	perf_iommu->iommu        = get_amd_iommu(idx);
+	perf_iommu->max_banks    = amd_iommu_pc_get_max_banks(idx);
+	perf_iommu->max_counters = amd_iommu_pc_get_max_counters(idx);
 
-	perf_iommu->max_banks    = amd_iommu_pc_get_max_banks(0);
-	perf_iommu->max_counters = amd_iommu_pc_get_max_counters(0);
-	if (!perf_iommu->max_banks || !perf_iommu->max_counters)
+	if (!perf_iommu->iommu ||
+	    !perf_iommu->max_banks ||
+	    !perf_iommu->max_counters) {
+		kfree(perf_iommu);
 		return -EINVAL;
+	}
 
-	perf_iommu->pmu.attr_groups = amd_iommu_attr_groups;
-	ret = perf_pmu_register(&perf_iommu->pmu, name, -1);
-	if (ret)
-		pr_err("Error initializing AMD IOMMU perf counters.\n");
-	else
-		pr_info("Detected AMD IOMMU (%d banks, %d counters/bank).\n",
-			amd_iommu_pc_get_max_banks(0),
-			amd_iommu_pc_get_max_counters(0));
+	snprintf(perf_iommu->name, IOMMU_NAME_SIZE, "amd_iommu_%u", idx);
+
+	ret = perf_pmu_register(&perf_iommu->pmu, perf_iommu->name, -1);
+	if (!ret) {
+		pr_info("Detected AMD IOMMU #%d (%d banks, %d counters/bank).\n",
+			idx, perf_iommu->max_banks, perf_iommu->max_counters);
+		list_add_tail(&perf_iommu->list, &perf_amd_iommu_list);
+	} else {
+		pr_warn("Error initializing IOMMU %d.\n", idx);
+		kfree(perf_iommu);
+	}
 	return ret;
 }
 
-static struct perf_amd_iommu __perf_iommu = {
-	.pmu = {
-		.task_ctx_nr    = perf_invalid_context,
-		.event_init	= perf_iommu_event_init,
-		.add		= perf_iommu_add,
-		.del		= perf_iommu_del,
-		.start		= perf_iommu_start,
-		.stop		= perf_iommu_stop,
-		.read		= perf_iommu_read,
-	},
-};
-
 static __init int amd_iommu_pc_init(void)
 {
+	unsigned int i, cnt = 0;
 	int ret;
 
 	/* Make sure the IOMMU PC resource is available */
@@ -450,11 +466,25 @@ static __init int amd_iommu_pc_init(void)
 	if (ret)
 		return ret;
 
-	ret = _init_perf_amd_iommu(&__perf_iommu, "amd_iommu");
-	if (ret)
-		amd_iommu_pc_exit();
+	/*
+	 * An IOMMU PMU is specific to an IOMMU, and can function independently.
+	 * So we go through all IOMMUs and ignore the ones that fail to
+	 * initialize, unless all IOMMUs fail.
+	 */
+	for (i = 0; i < amd_iommu_get_num_iommus(); i++) {
+		ret = init_one_iommu(i);
+		if (!ret)
+			cnt++;
+	}
 
-	return ret;
+	if (!cnt) {
+		kfree(amd_iommu_events_group.attrs);
+		return -ENODEV;
+	}
+
+	/* Init cpumask attributes to only core 0 */
+	cpumask_set_cpu(0, &iommu_cpumask);
+	return 0;
 }
 
 device_initcall(amd_iommu_pc_init);
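
With this patch, each IOMMU shows up as its own PMU under sysfs, and the
driver prints one detection line per successfully initialized instance. A
hypothetical session on a two-IOMMU system (the bank and counter counts
shown are illustrative):

  $ ls /sys/bus/event_source/devices/ | grep amd_iommu
  amd_iommu_0
  amd_iommu_1
  $ dmesg | grep 'Detected AMD IOMMU'
  Detected AMD IOMMU #0 (2 banks, 4 counters/bank).
  Detected AMD IOMMU #1 (2 banks, 4 counters/bank).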
