Message-Id: <1441479742-15402-4-git-send-email-matt@codeblueprint.co.uk>
Date: Sat, 5 Sep 2015 20:02:22 +0100
From: Matt Fleming <matt@...eblueprint.co.uk>
To: Arnaldo Carvalho de Melo <acme@...hat.com>,
Jiri Olsa <jolsa@...hat.com>, Ingo Molnar <mingo@...nel.org>
Cc: linux-kernel@...r.kernel.org, Andi Kleen <andi@...stfloor.org>,
Vince Weaver <vince@...ter.net>,
Peter Zijlstra <peterz@...radead.org>,
Kanaka Juvva <kanaka.d.juvva@...el.com>,
Vikas Shivappa <vikas.shivappa@...el.com>,
Matt Fleming <matt.fleming@...el.com>
Subject: [PATCH 3/3] perf tests: Add Intel CQM test
From: Matt Fleming <matt.fleming@...el.com>
Peter reported that it's possible to trigger a WARN_ON_ONCE() in the
Intel CQM code by combining a hardware event and an Intel CQM
(software) event into a group; the issue was fixed in commit 2c534c0da0a6
("perf/x86/intel/cqm: Return cached counter value from IRQ context").
Unfortunately, the perf tools are not able to create this grouping on
their own, so we had to construct a test case manually.

For posterity, record Peter's proof-of-concept test case in tools/perf
so that it provides a model for how we can perform architecture-specific
tests, or "arch tests", in perf in the future.

The particular issue triggered by the test case is that, when the
counter for the hardware event overflows and raises a PMI, we read both
the hardware event and the software event counters from the PMI handler.
Unfortunately, for CQM that involves sending an IPI to read the CQM
event counters on all sockets, which in NMI context triggers the
WARN_ON_ONCE().
Cc: Peter Zijlstra <peterz@...radead.org>
Cc: Jiri Olsa <jolsa@...hat.com>
Cc: Arnaldo Carvalho de Melo <acme@...hat.com>
Link: http://lkml.kernel.org/r/1437490509-15373-1-git-send-email-matt@codeblueprint.co.uk
Signed-off-by: Matt Fleming <matt.fleming@...el.com>
---
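For reference, the rough shape of the proof of concept is the following
standalone program (a minimal sketch reconstructed from the description
above, not the original PoC; the intel_cqm sysfs path and the
llc_occupancy config value of 0x01 are assumptions, not taken from this
patch):

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

static int perf_open(struct perf_event_attr *attr, pid_t pid, int group_fd)
{
	/* perf_event_open(attr, pid, cpu, group_fd, flags) */
	return syscall(__NR_perf_event_open, attr, pid, -1, group_fd, 0);
}

int main(void)
{
	struct perf_event_attr hw, cqm;
	int leader, member;
	FILE *f;

	/* Group leader: sampled hardware event that overflows and raises a PMI. */
	memset(&hw, 0, sizeof(hw));
	hw.size = sizeof(hw);
	hw.type = PERF_TYPE_HARDWARE;
	hw.config = PERF_COUNT_HW_CPU_CYCLES;
	hw.read_format = PERF_FORMAT_GROUP;
	hw.sample_period = 128;
	hw.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_READ;

	/* Group member: the Intel CQM software event (dynamic PMU type from sysfs). */
	memset(&cqm, 0, sizeof(cqm));
	cqm.size = sizeof(cqm);
	f = fopen("/sys/bus/event_source/devices/intel_cqm/type", "r");
	if (!f || fscanf(f, "%u", &cqm.type) != 1)
		return 1;
	fclose(f);
	cqm.config = 0x01;	/* assumed: llc_occupancy is event=0x01 */

	leader = perf_open(&hw, 0, -1);
	member = perf_open(&cqm, 0, leader);
	if (leader < 0 || member < 0)
		return 1;

	/*
	 * Burn cycles; every leader overflow triggers a PMI, and the PMI
	 * handler reads the whole group, CQM event included.
	 */
	for (volatile unsigned long i = 0; i < (1UL << 30); i++)
		;

	return 0;
}

The hardware event is the group leader, so every PMI reads the whole
group, CQM member included; the test below does the same thing through
the evlist/evsel helpers and sys_perf_event_open().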
tools/perf/arch/x86/tests/Build | 1 +
tools/perf/arch/x86/tests/arch-tests.c | 4 ++
tools/perf/arch/x86/tests/intel-cqm.c | 124 +++++++++++++++++++++++++++++++++
3 files changed, 129 insertions(+)
create mode 100644 tools/perf/arch/x86/tests/intel-cqm.c
diff --git a/tools/perf/arch/x86/tests/Build b/tools/perf/arch/x86/tests/Build
index 1381525dae06..32996f54e123 100644
--- a/tools/perf/arch/x86/tests/Build
+++ b/tools/perf/arch/x86/tests/Build
@@ -4,3 +4,4 @@ libperf-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o
 libperf-y += arch-tests.o
 libperf-y += rdpmc.o
 libperf-y += perf-time-to-tsc.o
+libperf-y += intel-cqm.o
diff --git a/tools/perf/arch/x86/tests/arch-tests.c b/tools/perf/arch/x86/tests/arch-tests.c
index 0004fe48dc7e..3ab31adc283a 100644
--- a/tools/perf/arch/x86/tests/arch-tests.c
+++ b/tools/perf/arch/x86/tests/arch-tests.c
@@ -18,6 +18,10 @@ struct test arch_tests[] = {
 	},
 #endif
 	{
+		.desc = "Test intel cqm nmi context read",
+		.func = test__intel_cqm_count_nmi_context,
+	},
+	{
 		.func = NULL,
 	},
 };
diff --git a/tools/perf/arch/x86/tests/intel-cqm.c b/tools/perf/arch/x86/tests/intel-cqm.c
new file mode 100644
index 000000000000..d28c1b6a3b54
--- /dev/null
+++ b/tools/perf/arch/x86/tests/intel-cqm.c
@@ -0,0 +1,124 @@
+#include "tests/tests.h"
+#include "perf.h"
+#include "cloexec.h"
+#include "debug.h"
+#include "evlist.h"
+#include "evsel.h"
+#include "arch-tests.h"
+
+#include <sys/mman.h>
+#include <string.h>
+
+static pid_t spawn(void)
+{
+ pid_t pid;
+
+ pid = fork();
+ if (pid)
+ return pid;
+
+ while(1);
+ sleep(5);
+ return 0;
+}
+
+/*
+ * Create an event group that contains both a sampled hardware
+ * (cpu-cycles) and software (intel_cqm/llc_occupancy/) event. We then
+ * wait for the hardware perf counter to overflow and generate a PMI,
+ * which triggers an event read for both of the events in the group.
+ *
+ * Since reading Intel CQM event counters requires sending SMP IPIs, the
+ * CQM pmu needs to handle the above situation gracefully, and return
+ * the last read counter value to avoid triggering a WARN_ON_ONCE() in
+ * smp_call_function_many() caused by sending IPIs from NMI context.
+ */
+int test__intel_cqm_count_nmi_context(void)
+{
+ struct perf_evlist *evlist = NULL;
+ struct perf_evsel *evsel = NULL;
+ struct perf_event_attr pe;
+ int i, fd[2], flag, ret;
+ size_t mmap_len;
+ void *event;
+ pid_t pid;
+ int err = TEST_FAIL;
+
+ flag = perf_event_open_cloexec_flag();
+
+ evlist = perf_evlist__new();
+ if (!evlist) {
+ pr_debug("perf_evlist__new failed\n");
+ return TEST_FAIL;
+ }
+
+ ret = parse_events(evlist, "intel_cqm/llc_occupancy/", NULL);
+ if (ret) {
+ pr_debug("parse_events failed\n");
+ err = TEST_SKIP;
+ goto out;
+ }
+
+ evsel = perf_evlist__first(evlist);
+ if (!evsel) {
+ pr_debug("perf_evlist__first failed\n");
+ goto out;
+ }
+
+ memset(&pe, 0, sizeof(pe));
+ pe.size = sizeof(pe);
+
+ pe.type = PERF_TYPE_HARDWARE;
+ pe.config = PERF_COUNT_HW_CPU_CYCLES;
+ pe.read_format = PERF_FORMAT_GROUP;
+
+ pe.sample_period = 128;
+ pe.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_READ;
+
+ pid = spawn();
+
+ fd[0] = sys_perf_event_open(&pe, pid, -1, -1, flag);
+ if (fd[0] < 0) {
+ pr_debug("failed to open event\n");
+ goto out;
+ }
+
+ memset(&pe, 0, sizeof(pe));
+ pe.size = sizeof(pe);
+
+ pe.type = evsel->attr.type;
+ pe.config = evsel->attr.config;
+
+ fd[1] = sys_perf_event_open(&pe, pid, -1, fd[0], flag);
+ if (fd[1] < 0) {
+ pr_debug("failed to open event\n");
+ goto out;
+ }
+
+ /*
+ * Pick a power-of-two number of pages + 1 for the meta-data
+ * page (struct perf_event_mmap_page). See tools/perf/design.txt.
+ */
+ mmap_len = page_size * 65;
+
+ event = mmap(NULL, mmap_len, PROT_READ, MAP_SHARED, fd[0], 0);
+ if (event == (void *)(-1)) {
+ pr_debug("failed to mmap %d\n", errno);
+ goto out;
+ }
+
+ sleep(1);
+
+ err = TEST_OK;
+
+ munmap(event, mmap_len);
+
+ for (i = 0; i < 2; i++)
+ close(fd[i]);
+
+ kill(pid, SIGKILL);
+ wait(NULL);
+out:
+ perf_evlist__delete(evlist);
+ return err;
+}
--
2.1.0