Message-Id: <20221105072311.8214-4-leo.yan@linaro.org>
Date: Sat, 5 Nov 2022 07:23:11 +0000
From: Leo Yan <leo.yan@...aro.org>
To: Marc Zyngier <maz@...nel.org>, James Morse <james.morse@....com>,
Alexandru Elisei <alexandru.elisei@....com>,
Suzuki K Poulose <suzuki.poulose@....com>,
Oliver Upton <oliver.upton@...ux.dev>,
Catalin Marinas <catalin.marinas@....com>,
Will Deacon <will@...nel.org>,
Arnaldo Carvalho de Melo <acme@...nel.org>,
John Garry <john.garry@...wei.com>,
James Clark <james.clark@....com>,
Mike Leach <mike.leach@...aro.org>,
Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...hat.com>,
Mark Rutland <mark.rutland@....com>,
Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
Jiri Olsa <jolsa@...nel.org>,
Namhyung Kim <namhyung@...nel.org>,
linux-arm-kernel@...ts.infradead.org, kvmarm@...ts.linux.dev,
kvmarm@...ts.cs.columbia.edu, linux-kernel@...r.kernel.org,
linux-perf-users@...r.kernel.org
Cc: Leo Yan <leo.yan@...aro.org>
Subject: [PATCH v1 3/3] perf arm64: Support virtual CPU ID for kvm-stat

Now that the two trace events kvm_entry_v2/kvm_exit_v2 have been added,
we can use the "vcpu_id" field in these events to retrieve the virtual
CPU ID.  To keep backward compatibility, we still need to rely on the
trace events kvm_entry/kvm_exit for old kernels.
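
For illustration only (this helper is not part of the patch and the
function name is made up), reading the new field from a sample boils
down to a call to perf's evsel__intval() helper:

  /* Sketch: pull the vCPU number out of a kvm_entry_v2/kvm_exit_v2 sample. */
  #include "util/evsel.h"   /* evsel__intval() */
  #include "util/event.h"   /* struct perf_sample */

  static u64 example_read_vcpu_id(struct evsel *evsel,
                                  struct perf_sample *sample)
  {
          /* "vcpu_id" is the field carried by the v2 trace events */
          return evsel__intval(evsel, sample, "vcpu_id");
  }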

This patch adds the Arm64 functions setup_kvm_events_tp() and
arm64__setup_kvm_tp().  By probing the tracepoint nodes under sysfs,
they dynamically register the trace events kvm_entry_v2/kvm_exit_v2
when the kernel provides them; otherwise they fall back to the events
kvm_entry/kvm_exit for backward compatibility.
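
As a standalone sketch of the detection idea (this is not perf code; it
simply checks for the event directory under the usual tracefs mount
point, roughly what perf's is_valid_tracepoint() helper does):

  #include <stdbool.h>
  #include <stdio.h>
  #include <sys/stat.h>

  /* Sketch: does the tracepoint <sys>:<name> exist on this kernel? */
  static bool tracepoint_exists(const char *sys, const char *name)
  {
          char path[256];
          struct stat st;

          /* Assumes tracefs is mounted at /sys/kernel/tracing. */
          snprintf(path, sizeof(path), "/sys/kernel/tracing/events/%s/%s",
                   sys, name);
          return !stat(path, &st) && S_ISDIR(st.st_mode);
  }

  int main(void)
  {
          /* Prefer the v2 events, fall back to v1 on old kernels. */
          const char *entry = tracepoint_exists("kvm", "kvm_entry_v2") ?
                              "kvm:kvm_entry_v2" : "kvm:kvm_entry";

          printf("using %s\n", entry);
          return 0;
  }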

Let cpu_isa_init() also invoke arm64__setup_kvm_tp(), so that the
command "perf kvm stat report" likewise sets up the trace events
dynamically.

Before:

# perf kvm stat report --vcpu 27

Analyze events for all VMs, VCPU 27:

        VM-EXIT    Samples  Samples%     Time%    Min Time    Max Time      Avg time

Total Samples:0, Total events handled time:0.00us.

After:

# perf kvm stat report --vcpu 27

Analyze events for all VMs, VCPU 27:

        VM-EXIT    Samples  Samples%     Time%    Min Time    Max Time      Avg time

          SYS64        808    98.54%    91.24%      0.00us    303.76us        3.46us ( +- 13.54% )
            WFx         10     1.22%     7.79%      0.00us     69.48us       23.91us ( +- 25.91% )
            IRQ          2     0.24%     0.97%      0.00us     22.64us       14.82us ( +- 52.77% )

Total Samples:820, Total events handled time:3068.28us.

Signed-off-by: Leo Yan <leo.yan@...aro.org>
---
 tools/perf/arch/arm64/util/kvm-stat.c | 54 ++++++++++++++++++++++++---
 1 file changed, 49 insertions(+), 5 deletions(-)

diff --git a/tools/perf/arch/arm64/util/kvm-stat.c b/tools/perf/arch/arm64/util/kvm-stat.c
index 73d18e0ed6f6..1ba54ce3d7d8 100644
--- a/tools/perf/arch/arm64/util/kvm-stat.c
+++ b/tools/perf/arch/arm64/util/kvm-stat.c
@@ -3,6 +3,7 @@
 #include <memory.h>
 #include "../../../util/evsel.h"
 #include "../../../util/kvm-stat.h"
+#include "../../../util/tracepoint.h"
 #include "arm64_exception_types.h"
 #include "debug.h"
 
@@ -10,18 +11,28 @@ define_exit_reasons_table(arm64_exit_reasons, kvm_arm_exception_type);
 define_exit_reasons_table(arm64_trap_exit_reasons, kvm_arm_exception_class);
 
 const char *kvm_trap_exit_reason = "esr_ec";
-const char *vcpu_id_str = "id";
+const char *vcpu_id_str = "vcpu_id";
 const int decode_str_len = 20;
 const char *kvm_exit_reason = "ret";
-const char *kvm_entry_trace = "kvm:kvm_entry";
-const char *kvm_exit_trace = "kvm:kvm_exit";
+const char *kvm_entry_trace;
+const char *kvm_exit_trace;
 
-const char *kvm_events_tp[] = {
+#define NR_TPS 2
+
+static const char *kvm_events_tp_v1[NR_TPS + 1] = {
         "kvm:kvm_entry",
         "kvm:kvm_exit",
         NULL,
 };
 
+static const char *kvm_events_tp_v2[NR_TPS + 1] = {
+        "kvm:kvm_entry_v2",
+        "kvm:kvm_exit_v2",
+        NULL,
+};
+
+const char *kvm_events_tp[NR_TPS + 1];
+
 static void event_get_key(struct evsel *evsel,
                           struct perf_sample *sample,
                           struct event_key *key)
@@ -78,8 +89,41 @@ const char * const kvm_skip_events[] = {
         NULL,
 };
 
-int cpu_isa_init(struct perf_kvm_stat *kvm, const char *cpuid __maybe_unused)
+static int arm64__setup_kvm_tp(struct perf_kvm_stat *kvm)
 {
+        const char **kvm_events, **events_ptr;
+        int i, nr_tp = 0;
+
+        if (is_valid_tracepoint("kvm:kvm_entry_v2")) {
+                kvm_events = kvm_events_tp_v2;
+                kvm_entry_trace = "kvm:kvm_entry_v2";
+                kvm_exit_trace = "kvm:kvm_exit_v2";
+        } else {
+                kvm_events = kvm_events_tp_v1;
+                kvm_entry_trace = "kvm:kvm_entry";
+                kvm_exit_trace = "kvm:kvm_exit";
+        }
+
+        for (events_ptr = kvm_events; *events_ptr; events_ptr++) {
+                if (!is_valid_tracepoint(*events_ptr))
+                        return -1;
+                nr_tp++;
+        }
+
+        for (i = 0; i < nr_tp; i++)
+                kvm_events_tp[i] = kvm_events[i];
+        kvm_events_tp[i] = NULL;
+
         kvm->exit_reasons_isa = "arm64";
         return 0;
 }
+
+int setup_kvm_events_tp(struct perf_kvm_stat *kvm)
+{
+        return arm64__setup_kvm_tp(kvm);
+}
+
+int cpu_isa_init(struct perf_kvm_stat *kvm, const char *cpuid __maybe_unused)
+{
+        return arm64__setup_kvm_tp(kvm);
+}
--
2.34.1