Message-Id: <20221105072311.8214-3-leo.yan@linaro.org>
Date:   Sat,  5 Nov 2022 07:23:10 +0000
From:   Leo Yan <leo.yan@...aro.org>
To:     Marc Zyngier <maz@...nel.org>, James Morse <james.morse@....com>,
        Alexandru Elisei <alexandru.elisei@....com>,
        Suzuki K Poulose <suzuki.poulose@....com>,
        Oliver Upton <oliver.upton@...ux.dev>,
        Catalin Marinas <catalin.marinas@....com>,
        Will Deacon <will@...nel.org>,
        Arnaldo Carvalho de Melo <acme@...nel.org>,
        John Garry <john.garry@...wei.com>,
        James Clark <james.clark@....com>,
        Mike Leach <mike.leach@...aro.org>,
        Peter Zijlstra <peterz@...radead.org>,
        Ingo Molnar <mingo@...hat.com>,
        Mark Rutland <mark.rutland@....com>,
        Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
        Jiri Olsa <jolsa@...nel.org>,
        Namhyung Kim <namhyung@...nel.org>,
        linux-arm-kernel@...ts.infradead.org, kvmarm@...ts.linux.dev,
        kvmarm@...ts.cs.columbia.edu, linux-kernel@...r.kernel.org,
        linux-perf-users@...r.kernel.org
Cc:     Leo Yan <leo.yan@...aro.org>
Subject: [PATCH v1 2/3] KVM: arm64: Add trace events with field 'vcpu_id'

The existing trace events kvm_entry and kvm_exit don't contain the
virtual CPU id, so the perf tool has no way to produce statistics on a
per-vCPU basis; and since these trace events are ABI, we cannot change
them without breaking that ABI.

For the above reasons, this patch adds two new trace events,
kvm_entry_v2 and kvm_exit_v2, which carry an additional field
'vcpu_id'.  To support both the old and new events, the tracepoint
callback checks whether each event is enabled and, if so, invokes the
corresponding trace event.
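
As a rough usage sketch (not part of the patch; it assumes the new
events are exposed as kvm:kvm_entry_v2 and kvm:kvm_exit_v2 once this
series is applied), per-vCPU statistics could then be gathered along
these lines:

  # Record the new events system-wide for a short interval
  perf record -a -e kvm:kvm_entry_v2 -e kvm:kvm_exit_v2 -- sleep 10

  # Dump the samples; each one now carries the vcpu_id field, so the
  # output can be grouped or filtered per virtual CPU
  perf script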

Signed-off-by: Leo Yan <leo.yan@...aro.org>
---
 arch/arm64/kvm/trace.c     |  6 +++++
 arch/arm64/kvm/trace_arm.h | 45 ++++++++++++++++++++++++++++++++++++++
 2 files changed, 51 insertions(+)

diff --git a/arch/arm64/kvm/trace.c b/arch/arm64/kvm/trace.c
index d25a3db994e2..d9b2587c77c3 100644
--- a/arch/arm64/kvm/trace.c
+++ b/arch/arm64/kvm/trace.c
@@ -10,6 +10,9 @@ static void kvm_entry_tp(void *data, struct kvm_vcpu *vcpu)
 {
 	if (trace_kvm_entry_enabled())
 		trace_kvm_entry(*vcpu_pc(vcpu));
+
+	if (trace_kvm_entry_v2_enabled())
+		trace_kvm_entry_v2(vcpu);
 }
 
 static void kvm_exit_tp(void *data, int ret, struct kvm_vcpu *vcpu)
@@ -17,6 +20,9 @@ static void kvm_exit_tp(void *data, int ret, struct kvm_vcpu *vcpu)
 	if (trace_kvm_exit_enabled())
 		trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu),
 			       *vcpu_pc(vcpu));
+
+	if (trace_kvm_exit_v2_enabled())
+		trace_kvm_exit_v2(ret, vcpu);
 }
 
 static int __init kvm_tp_init(void)
diff --git a/arch/arm64/kvm/trace_arm.h b/arch/arm64/kvm/trace_arm.h
index ef02ae93b28b..932c9d0c36f3 100644
--- a/arch/arm64/kvm/trace_arm.h
+++ b/arch/arm64/kvm/trace_arm.h
@@ -4,6 +4,7 @@
 
 #include <kvm/arm_arch_timer.h>
 #include <linux/tracepoint.h>
+#include <asm/kvm_emulate.h>
 
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM kvm
@@ -30,6 +31,23 @@ TRACE_EVENT(kvm_entry,
 	TP_printk("PC: 0x%016lx", __entry->vcpu_pc)
 );
 
+TRACE_EVENT(kvm_entry_v2,
+	TP_PROTO(struct kvm_vcpu *vcpu),
+	TP_ARGS(vcpu),
+
+	TP_STRUCT__entry(
+		__field(	unsigned int,	vcpu_id		)
+		__field(	unsigned long,	vcpu_pc		)
+	),
+
+	TP_fast_assign(
+		__entry->vcpu_id		= vcpu->vcpu_id;
+		__entry->vcpu_pc		= *vcpu_pc(vcpu);
+	),
+
+	TP_printk("vcpu: %u PC: 0x%016lx", __entry->vcpu_id, __entry->vcpu_pc)
+);
+
 DECLARE_TRACE(kvm_exit_tp,
 	TP_PROTO(int ret, struct kvm_vcpu *vcpu),
 	TP_ARGS(ret, vcpu));
@@ -57,6 +75,33 @@ TRACE_EVENT(kvm_exit,
 		  __entry->vcpu_pc)
 );
 
+TRACE_EVENT(kvm_exit_v2,
+	TP_PROTO(int ret, struct kvm_vcpu *vcpu),
+	TP_ARGS(ret, vcpu),
+
+	TP_STRUCT__entry(
+		__field(	unsigned int,	vcpu_id		)
+		__field(	int,		ret		)
+		__field(	unsigned int,	esr_ec		)
+		__field(	unsigned long,	vcpu_pc		)
+	),
+
+	TP_fast_assign(
+		__entry->vcpu_id		= vcpu->vcpu_id;
+		__entry->ret			= ARM_EXCEPTION_CODE(ret);
+		__entry->esr_ec			= ARM_EXCEPTION_IS_TRAP(ret) ?
+						  kvm_vcpu_trap_get_class(vcpu): 0;
+		__entry->vcpu_pc		= *vcpu_pc(vcpu);
+	),
+
+	TP_printk("%s: vcpu: %u HSR_EC: 0x%04x (%s), PC: 0x%016lx",
+		  __print_symbolic(__entry->ret, kvm_arm_exception_type),
+		  __entry->vcpu_id,
+		  __entry->esr_ec,
+		  __print_symbolic(__entry->esr_ec, kvm_arm_exception_class),
+		  __entry->vcpu_pc)
+);
+
 TRACE_EVENT(kvm_guest_fault,
 	TP_PROTO(unsigned long vcpu_pc, unsigned long hsr,
 		 unsigned long hxfar,
-- 
2.34.1
