Message-ID: <20201027150052.GC2479604@kernel.org>
Date: Tue, 27 Oct 2020 12:00:52 -0300
From: Arnaldo Carvalho de Melo <acme@...nel.org>
To: Sergey Senozhatsky <sergey.senozhatsky@...il.com>
Cc: Leo Yan <leo.yan@...aro.org>, Mark Rutland <mark.rutland@....com>,
Peter Zijlstra <peterz@...radead.org>,
Will Deacon <will@...nel.org>,
John Garry <john.garry@...wei.com>,
Mathieu Poirier <mathieu.poirier@...aro.org>,
Namhyung Kim <namhyung@...nel.org>,
Suleiman Souhlal <suleiman@...gle.com>,
linux-kernel@...r.kernel.org, linux-arm-kernel@...ts.infradead.org
Subject: Re: [PATCHv5] perf kvm: add kvm-stat for arm64
On Tue, Oct 27, 2020 at 03:24:21PM +0900, Sergey Senozhatsky wrote:
> Add support for 'perf kvm stat' on the arm64 platform.
>
> Example:
> # perf kvm stat report
>
> Analyze events for all VMs, all VCPUs:
>
>              VM-EXIT    Samples  Samples%     Time%    Min Time    Max Time      Avg time
> 
>             DABT_LOW     661867    98.91%    40.45%      2.19us   3364.65us        6.24us ( +-   0.34% )
>                  IRQ       4598     0.69%    57.44%      2.89us   3397.59us     1276.27us ( +-   1.61% )
>                  WFx       1475     0.22%     1.71%      2.22us   3388.63us      118.31us ( +-   8.69% )
>             IABT_LOW       1018     0.15%     0.38%      2.22us   2742.07us       38.29us ( +-  12.55% )
>                SYS64        180     0.03%     0.01%      2.07us    112.91us        6.57us ( +-  14.95% )
>                HVC64         17     0.00%     0.01%      2.19us    322.35us       42.95us ( +-  58.98% )
>
> Total Samples:669155, Total events handled time:10216387.86us.
>
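> Such a report can be produced with a flow along these lines (a sketch;
> the record step and its options depend on the setup, "sleep 10" is just
> an arbitrary measurement window, and vmexit is the only event class
> wired up here):
>
>   # perf kvm stat record -a sleep 10
>   # perf kvm stat report --event=vmexit
>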
> Signed-off-by: Sergey Senozhatsky <sergey.senozhatsky@...il.com>
> Reviewed-by: Leo Yan <leo.yan@...aro.org>
> Tested-by: Leo Yan <leo.yan@...aro.org>
Thanks, applied.
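
Just as a reference for people reading along: the tables built with
define_exit_reasons_table() below are plain exit-code/name pairs, and
the generic kvm-stat code walks whichever table event_get_key() picked
to turn the raw exit code into the names shown in the report above.
Roughly along these lines (a simplified sketch, not the actual helper
in tools/perf/util/kvm-stat.*; lookup_exit_reason() is a made-up name
used only for illustration):

	struct exit_reasons_table {
		unsigned long exit_code;
		const char *reason;
	};

	static const char *lookup_exit_reason(struct exit_reasons_table *tbl,
					      int n, unsigned long exit_code)
	{
		int i;

		/* linear scan; these tables are tiny */
		for (i = 0; i < n; i++)
			if (tbl[i].exit_code == exit_code)
				return tbl[i].reason;

		return "UNKNOWN";
	}
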
- Arnaldo
> ---
>
> v5: rebased against perf/core (Arnaldo)
> v4: rebased against perf/core (Leo)
> v3: report ARM_EXCEPTION_IL exceptions (Leo)
> v2: reworked the patch after offline discussion with Suleiman
>
>  tools/perf/arch/arm64/Makefile                |  1 +
>  tools/perf/arch/arm64/util/Build              |  1 +
>  .../arch/arm64/util/arm64_exception_types.h   | 92 +++++++++++++++++++
>  tools/perf/arch/arm64/util/kvm-stat.c         | 85 +++++++++++++++++
> 4 files changed, 179 insertions(+)
> create mode 100644 tools/perf/arch/arm64/util/arm64_exception_types.h
> create mode 100644 tools/perf/arch/arm64/util/kvm-stat.c
>
> diff --git a/tools/perf/arch/arm64/Makefile b/tools/perf/arch/arm64/Makefile
> index dbef716a1913..fab3095fb5d0 100644
> --- a/tools/perf/arch/arm64/Makefile
> +++ b/tools/perf/arch/arm64/Makefile
> @@ -4,6 +4,7 @@ PERF_HAVE_DWARF_REGS := 1
> endif
> PERF_HAVE_JITDUMP := 1
> PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET := 1
> +HAVE_KVM_STAT_SUPPORT := 1
>
> #
> # Syscall table generation for perf
> diff --git a/tools/perf/arch/arm64/util/Build b/tools/perf/arch/arm64/util/Build
> index b53294d74b01..8d2b9bcfffca 100644
> --- a/tools/perf/arch/arm64/util/Build
> +++ b/tools/perf/arch/arm64/util/Build
> @@ -2,6 +2,7 @@ perf-y += header.o
> perf-y += machine.o
> perf-y += perf_regs.o
> perf-y += tsc.o
> +perf-y += kvm-stat.o
> perf-$(CONFIG_DWARF) += dwarf-regs.o
> perf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o
> perf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
> diff --git a/tools/perf/arch/arm64/util/arm64_exception_types.h b/tools/perf/arch/arm64/util/arm64_exception_types.h
> new file mode 100644
> index 000000000000..27c981ebe401
> --- /dev/null
> +++ b/tools/perf/arch/arm64/util/arm64_exception_types.h
> @@ -0,0 +1,92 @@
> +// SPDX-License-Identifier: GPL-2.0
> +#ifndef ARCH_PERF_ARM64_EXCEPTION_TYPES_H
> +#define ARCH_PERF_ARM64_EXCEPTION_TYPES_H
> +
> +/* Per asm/virt.h */
> +#define HVC_STUB_ERR 0xbadca11
> +
> +/* Per asm/kvm_asm.h */
> +#define ARM_EXCEPTION_IRQ 0
> +#define ARM_EXCEPTION_EL1_SERROR 1
> +#define ARM_EXCEPTION_TRAP 2
> +#define ARM_EXCEPTION_IL 3
> +/* The hyp-stub will return this for any kvm_call_hyp() call */
> +#define ARM_EXCEPTION_HYP_GONE HVC_STUB_ERR
> +
> +#define kvm_arm_exception_type \
> +	{ARM_EXCEPTION_IRQ,        "IRQ"      }, \
> +	{ARM_EXCEPTION_EL1_SERROR, "SERROR"   }, \
> +	{ARM_EXCEPTION_TRAP,       "TRAP"     }, \
> +	{ARM_EXCEPTION_IL,         "ILLEGAL"  }, \
> +	{ARM_EXCEPTION_HYP_GONE,   "HYP_GONE" }
> +
> +/* Per asm/esr.h */
> +#define ESR_ELx_EC_UNKNOWN (0x00)
> +#define ESR_ELx_EC_WFx (0x01)
> +/* Unallocated EC: 0x02 */
> +#define ESR_ELx_EC_CP15_32 (0x03)
> +#define ESR_ELx_EC_CP15_64 (0x04)
> +#define ESR_ELx_EC_CP14_MR (0x05)
> +#define ESR_ELx_EC_CP14_LS (0x06)
> +#define ESR_ELx_EC_FP_ASIMD (0x07)
> +#define ESR_ELx_EC_CP10_ID (0x08) /* EL2 only */
> +#define ESR_ELx_EC_PAC (0x09) /* EL2 and above */
> +/* Unallocated EC: 0x0A - 0x0B */
> +#define ESR_ELx_EC_CP14_64 (0x0C)
> +/* Unallocated EC: 0x0d */
> +#define ESR_ELx_EC_ILL (0x0E)
> +/* Unallocated EC: 0x0F - 0x10 */
> +#define ESR_ELx_EC_SVC32 (0x11)
> +#define ESR_ELx_EC_HVC32 (0x12) /* EL2 only */
> +#define ESR_ELx_EC_SMC32 (0x13) /* EL2 and above */
> +/* Unallocated EC: 0x14 */
> +#define ESR_ELx_EC_SVC64 (0x15)
> +#define ESR_ELx_EC_HVC64 (0x16) /* EL2 and above */
> +#define ESR_ELx_EC_SMC64 (0x17) /* EL2 and above */
> +#define ESR_ELx_EC_SYS64 (0x18)
> +#define ESR_ELx_EC_SVE (0x19)
> +#define ESR_ELx_EC_ERET (0x1a) /* EL2 only */
> +/* Unallocated EC: 0x1b - 0x1E */
> +#define ESR_ELx_EC_IMP_DEF (0x1f) /* EL3 only */
> +#define ESR_ELx_EC_IABT_LOW (0x20)
> +#define ESR_ELx_EC_IABT_CUR (0x21)
> +#define ESR_ELx_EC_PC_ALIGN (0x22)
> +/* Unallocated EC: 0x23 */
> +#define ESR_ELx_EC_DABT_LOW (0x24)
> +#define ESR_ELx_EC_DABT_CUR (0x25)
> +#define ESR_ELx_EC_SP_ALIGN (0x26)
> +/* Unallocated EC: 0x27 */
> +#define ESR_ELx_EC_FP_EXC32 (0x28)
> +/* Unallocated EC: 0x29 - 0x2B */
> +#define ESR_ELx_EC_FP_EXC64 (0x2C)
> +/* Unallocated EC: 0x2D - 0x2E */
> +#define ESR_ELx_EC_SERROR (0x2F)
> +#define ESR_ELx_EC_BREAKPT_LOW (0x30)
> +#define ESR_ELx_EC_BREAKPT_CUR (0x31)
> +#define ESR_ELx_EC_SOFTSTP_LOW (0x32)
> +#define ESR_ELx_EC_SOFTSTP_CUR (0x33)
> +#define ESR_ELx_EC_WATCHPT_LOW (0x34)
> +#define ESR_ELx_EC_WATCHPT_CUR (0x35)
> +/* Unallocated EC: 0x36 - 0x37 */
> +#define ESR_ELx_EC_BKPT32 (0x38)
> +/* Unallocated EC: 0x39 */
> +#define ESR_ELx_EC_VECTOR32 (0x3A) /* EL2 only */
> +/* Unallocated EC: 0x3B */
> +#define ESR_ELx_EC_BRK64 (0x3C)
> +/* Unallocated EC: 0x3D - 0x3F */
> +#define ESR_ELx_EC_MAX (0x3F)
> +
> +#define ECN(x) { ESR_ELx_EC_##x, #x }
> +
> +#define kvm_arm_exception_class \
> +	ECN(UNKNOWN), ECN(WFx), ECN(CP15_32), ECN(CP15_64), ECN(CP14_MR), \
> +	ECN(CP14_LS), ECN(FP_ASIMD), ECN(CP10_ID), ECN(PAC), ECN(CP14_64), \
> +	ECN(SVC64), ECN(HVC64), ECN(SMC64), ECN(SYS64), ECN(SVE), \
> +	ECN(IMP_DEF), ECN(IABT_LOW), ECN(IABT_CUR), \
> +	ECN(PC_ALIGN), ECN(DABT_LOW), ECN(DABT_CUR), \
> +	ECN(SP_ALIGN), ECN(FP_EXC32), ECN(FP_EXC64), ECN(SERROR), \
> +	ECN(BREAKPT_LOW), ECN(BREAKPT_CUR), ECN(SOFTSTP_LOW), \
> +	ECN(SOFTSTP_CUR), ECN(WATCHPT_LOW), ECN(WATCHPT_CUR), \
> +	ECN(BKPT32), ECN(VECTOR32), ECN(BRK64)
> +
> +#endif /* ARCH_PERF_ARM64_EXCEPTION_TYPES_H */
> diff --git a/tools/perf/arch/arm64/util/kvm-stat.c b/tools/perf/arch/arm64/util/kvm-stat.c
> new file mode 100644
> index 000000000000..50376b9062c1
> --- /dev/null
> +++ b/tools/perf/arch/arm64/util/kvm-stat.c
> @@ -0,0 +1,85 @@
> +// SPDX-License-Identifier: GPL-2.0
> +#include <errno.h>
> +#include <memory.h>
> +#include "../../util/evsel.h"
> +#include "../../util/kvm-stat.h"
> +#include "arm64_exception_types.h"
> +#include "debug.h"
> +
> +define_exit_reasons_table(arm64_exit_reasons, kvm_arm_exception_type);
> +define_exit_reasons_table(arm64_trap_exit_reasons, kvm_arm_exception_class);
> +
> +const char *kvm_trap_exit_reason = "esr_ec";
> +const char *vcpu_id_str = "id";
> +const int decode_str_len = 20;
> +const char *kvm_exit_reason = "ret";
> +const char *kvm_entry_trace = "kvm:kvm_entry";
> +const char *kvm_exit_trace = "kvm:kvm_exit";
> +
> +const char *kvm_events_tp[] = {
> +	"kvm:kvm_entry",
> +	"kvm:kvm_exit",
> +	NULL,
> +};
> +
> +static void event_get_key(struct evsel *evsel,
> +			  struct perf_sample *sample,
> +			  struct event_key *key)
> +{
> +	key->info = 0;
> +	key->key = evsel__intval(evsel, sample, kvm_exit_reason);
> +	key->exit_reasons = arm64_exit_reasons;
> +
> +	/*
> +	 * TRAP exceptions carry exception class info in the esr_ec field
> +	 * and, hence, we need to use a different exit_reasons table to
> +	 * properly decode the event's esr_ec.
> +	 */
> +	if (key->key == ARM_EXCEPTION_TRAP) {
> +		key->key = evsel__intval(evsel, sample, kvm_trap_exit_reason);
> +		key->exit_reasons = arm64_trap_exit_reasons;
> +	}
> +}
> +
> +static bool event_begin(struct evsel *evsel,
> +			struct perf_sample *sample __maybe_unused,
> +			struct event_key *key __maybe_unused)
> +{
> +	return !strcmp(evsel->name, kvm_entry_trace);
> +}
> +
> +static bool event_end(struct evsel *evsel,
> +		      struct perf_sample *sample,
> +		      struct event_key *key)
> +{
> +	if (!strcmp(evsel->name, kvm_exit_trace)) {
> +		event_get_key(evsel, sample, key);
> +		return true;
> +	}
> +	return false;
> +}
> +
> +static struct kvm_events_ops exit_events = {
> +	.is_begin_event = event_begin,
> +	.is_end_event = event_end,
> +	.decode_key = exit_event_decode_key,
> +	.name = "VM-EXIT"
> +};
> +
> +struct kvm_reg_events_ops kvm_reg_events_ops[] = {
> +	{
> +		.name = "vmexit",
> +		.ops = &exit_events,
> +	},
> +	{ NULL },
> +};
> +
> +const char * const kvm_skip_events[] = {
> +	NULL,
> +};
> +
> +int cpu_isa_init(struct perf_kvm_stat *kvm, const char *cpuid __maybe_unused)
> +{
> +	kvm->exit_reasons_isa = "arm64";
> +	return 0;
> +}
> --
> 2.29.0
>
--
- Arnaldo