Date: Thu, 20 Jun 2024 15:26:35 -0700
From: Raghavendra Rao Ananta <rananta@...gle.com>
To: Shaoqin Huang <shahuang@...hat.com>
Cc: Oliver Upton <oliver.upton@...ux.dev>, Marc Zyngier <maz@...nel.org>, kvmarm@...ts.linux.dev, 
	Paolo Bonzini <pbonzini@...hat.com>, Shuah Khan <shuah@...nel.org>, 
	James Morse <james.morse@....com>, Suzuki K Poulose <suzuki.poulose@....com>, 
	Zenghui Yu <yuzenghui@...wei.com>, linux-kernel@...r.kernel.org, kvm@...r.kernel.org, 
	linux-kselftest@...r.kernel.org, linux-arm-kernel@...ts.infradead.org
Subject: Re: [PATCH v10 2/3] KVM: selftests: aarch64: Introduce pmu_event_filter_test

Hi Shaoqin,

On Wed, Jun 19, 2024 at 1:33 AM Shaoqin Huang <shahuang@...hat.com> wrote:
>
> Introduce pmu_event_filter_test for arm64 platforms. The test configures
> PMUv3 for a vCPU, sets different PMU event filters for the vCPU, and
> checks that the guest can see and use the events userspace allows and
> cannot use the events userspace denies.
>
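To make the "can see" part concrete: event visibility is advertised through the PMCEID0_EL0/PMCEID1_EL0 registers, which is what the guest reads in the test below. A small illustrative helper, not part of the patch (event_visible() is hypothetical; it mirrors the set_pmce() logic in the test and assumes the same headers for uint64_t/BIT()), showing how a common event ID maps onto those registers:

static bool event_visible(uint64_t pmceid0, uint64_t pmceid1, unsigned int ev)
{
	unsigned int shift = 0;

	/* Extended common events 0x4000..0x403F are reported in bits [63:32]. */
	if (ev >= 0x4000) {
		ev -= 0x4000;
		shift = 32;
	}

	if (ev <= 0x1f)			/* events 0x00..0x1F -> PMCEID0_EL0 */
		return pmceid0 & BIT(ev + shift);
	if (ev <= 0x3f)			/* events 0x20..0x3F -> PMCEID1_EL0 */
		return pmceid1 & BIT(ev - 0x20 + shift);

	return false;			/* not a common event ID */
}
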
> This test refactors create_vpmu_vm() and makes it a wrapper around
> __create_vpmu_vm(), which allows some extra init code to run before
> KVM_ARM_VCPU_PMU_V3_INIT.
>
> This test uses the KVM_ARM_VCPU_PMU_V3_FILTER attribute to set the PMU
> event filter in KVM. It filters two common events, branches_retired and
> instructions_retired, and lets the guest check that it sees the expected
> PMCEID register values.
>
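For readers outside the selftest framework: setting the filter boils down to a per-vCPU KVM_SET_DEVICE_ATTR call, which is roughly what the test's kvm_device_attr_set() helper issues. A minimal sketch (set_pmu_filter() and vcpu_fd are hypothetical; the struct and attribute names are the arm64 UAPI ones), and it has to be applied before KVM_ARM_VCPU_PMU_V3_INIT:

#include <sys/ioctl.h>
#include <linux/kvm.h>

static int set_pmu_filter(int vcpu_fd, __u16 base_event, __u16 nevents, __u8 action)
{
	struct kvm_pmu_event_filter filter = {
		.base_event	= base_event,
		.nevents	= nevents,
		.action		= action,	/* KVM_PMU_EVENT_ALLOW or KVM_PMU_EVENT_DENY */
	};
	struct kvm_device_attr attr = {
		.group	= KVM_ARM_VCPU_PMU_V3_CTRL,
		.attr	= KVM_ARM_VCPU_PMU_V3_FILTER,
		.addr	= (__u64)(unsigned long)&filter,
	};

	/* The filter attribute is only accepted before KVM_ARM_VCPU_PMU_V3_INIT. */
	return ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
}

For example, set_pmu_filter(vcpu_fd, ARMV8_PMUV3_PERFCTR_INST_RETIRED, 1, KVM_PMU_EVENT_ALLOW) corresponds to the EVENT_ALLOW() entries used in the test below.
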
> Signed-off-by: Shaoqin Huang <shahuang@...hat.com>
> ---
>  tools/testing/selftests/kvm/Makefile          |   1 +
>  .../kvm/aarch64/pmu_event_filter_test.c       | 314 ++++++++++++++++++
>  2 files changed, 315 insertions(+)
>  create mode 100644 tools/testing/selftests/kvm/aarch64/pmu_event_filter_test.c
>
> diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
> index ac280dcba996..2110b49e7a84 100644
> --- a/tools/testing/selftests/kvm/Makefile
> +++ b/tools/testing/selftests/kvm/Makefile
> @@ -153,6 +153,7 @@ TEST_GEN_PROGS_aarch64 += aarch64/aarch32_id_regs
>  TEST_GEN_PROGS_aarch64 += aarch64/debug-exceptions
>  TEST_GEN_PROGS_aarch64 += aarch64/hypercalls
>  TEST_GEN_PROGS_aarch64 += aarch64/page_fault_test
> +TEST_GEN_PROGS_aarch64 += aarch64/pmu_event_filter_test
>  TEST_GEN_PROGS_aarch64 += aarch64/psci_test
>  TEST_GEN_PROGS_aarch64 += aarch64/set_id_regs
>  TEST_GEN_PROGS_aarch64 += aarch64/smccc_filter
> diff --git a/tools/testing/selftests/kvm/aarch64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/aarch64/pmu_event_filter_test.c
> new file mode 100644
> index 000000000000..308b8677e08e
> --- /dev/null
> +++ b/tools/testing/selftests/kvm/aarch64/pmu_event_filter_test.c
> @@ -0,0 +1,314 @@
> +
> +// SPDX-License-Identifier: GPL-2.0
> +/*
> + * pmu_event_filter_test - Test userspace-limited PMU events for a guest.
> + *
> + * Copyright (c) 2023 Red Hat, Inc.
> + *
> + * This test checks that the guest only sees the PMU events that userspace
> + * permits: it can use the events that userspace allows and cannot use the
> + * events that userspace denies.
> + * This test runs only when KVM_CAP_ARM_PMU_V3 and KVM_ARM_VCPU_PMU_V3_FILTER
> + * are supported on the host.
> + */
> +#include <kvm_util.h>
> +#include <processor.h>
> +#include <vgic.h>
> +#include <vpmu.h>
> +#include <test_util.h>
> +#include <perf/arm_pmuv3.h>
> +
> +struct pmu_common_event_ids {
> +       uint64_t pmceid0;
> +       uint64_t pmceid1;
> +} max_pmce, expected_pmce;
> +
> +struct vpmu_vm {
> +       struct kvm_vm *vm;
> +       struct kvm_vcpu *vcpu;
> +       int gic_fd;
> +};
> +
> +static struct vpmu_vm vpmu_vm;
> +
> +#define FILTER_NR 10
> +
> +struct test_desc {
> +       const char *name;
> +       struct kvm_pmu_event_filter filter[FILTER_NR];
> +};
> +
> +#define __DEFINE_FILTER(base, num, act)                \
> +       ((struct kvm_pmu_event_filter) {        \
> +               .base_event     = base,         \
> +               .nevents        = num,          \
> +               .action         = act,          \
> +       })
> +
> +#define DEFINE_FILTER(base, act) __DEFINE_FILTER(base, 1, act)
> +
> +#define EVENT_ALLOW(event)     DEFINE_FILTER(event, KVM_PMU_EVENT_ALLOW)
> +#define EVENT_DENY(event)      DEFINE_FILTER(event, KVM_PMU_EVENT_DENY)
> +
> +static void guest_code(void)
> +{
> +       uint64_t pmceid0 = read_sysreg(pmceid0_el0);
> +       uint64_t pmceid1 = read_sysreg(pmceid1_el0);
> +
> +       GUEST_ASSERT_EQ(expected_pmce.pmceid0, pmceid0);
> +       GUEST_ASSERT_EQ(expected_pmce.pmceid1, pmceid1);
> +
> +       GUEST_DONE();
> +}
> +
> +static void guest_get_pmceid(void)
> +{
> +       max_pmce.pmceid0 = read_sysreg(pmceid0_el0);
> +       max_pmce.pmceid1 = read_sysreg(pmceid1_el0);
> +
> +       GUEST_DONE();
> +}
> +
> +static void run_vcpu(struct kvm_vcpu *vcpu)
> +{
> +       struct ucall uc;
> +
> +       while (1) {
> +               vcpu_run(vcpu);
> +               switch (get_ucall(vcpu, &uc)) {
> +               case UCALL_DONE:
> +                       return;
> +               case UCALL_ABORT:
> +                       REPORT_GUEST_ASSERT(uc);
> +                       break;
> +               default:
> +                       TEST_FAIL("Unknown ucall %lu", uc.cmd);
> +               }
> +       }
> +}
> +
> +static void set_pmce(struct pmu_common_event_ids *pmce, int action, int event)
> +{
> +       int base = 0;
> +       uint64_t *pmceid = NULL;
> +
> +       if (event >= 0x4000) {
> +               event -= 0x4000;
> +               base = 32;
> +       }
> +
> +       if (event >= 0 && event <= 0x1F) {
> +               pmceid = &pmce->pmceid0;
> +       } else if (event >= 0x20 && event <= 0x3F) {
> +               event -= 0x20;
> +               pmceid = &pmce->pmceid1;
> +       } else {
> +               return;
> +       }
> +
> +       event += base;
> +       if (action == KVM_PMU_EVENT_ALLOW)
> +               *pmceid |= BIT(event);
> +       else
> +               *pmceid &= ~BIT(event);
> +}
> +
> +static inline bool is_valid_filter(struct kvm_pmu_event_filter *filter)
> +{
> +       return filter && filter->nevents != 0;
> +}
> +
> +static void prepare_expected_pmce(struct kvm_pmu_event_filter *filter)
> +{
> +       struct pmu_common_event_ids pmce_mask = { ~0, ~0 };
> +       int i;
> +
> +       if (is_valid_filter(filter) && filter->action == KVM_PMU_EVENT_ALLOW)
> +               memset(&pmce_mask, 0, sizeof(pmce_mask));
> +
> +       while (is_valid_filter(filter)) {
> +               for (i = 0; i < filter->nevents; i++)
> +                       set_pmce(&pmce_mask, filter->action,
> +                                filter->base_event + i);
> +               filter++;
> +       }
> +
> +       expected_pmce.pmceid0 = max_pmce.pmceid0 & pmce_mask.pmceid0;
> +       expected_pmce.pmceid1 = max_pmce.pmceid1 & pmce_mask.pmceid1;
> +}
> +
> +static void pmu_event_filter_init(struct kvm_pmu_event_filter *filter)
> +{
> +       while (is_valid_filter(filter)) {
> +               kvm_device_attr_set(vpmu_vm.vcpu->fd,
> +                                   KVM_ARM_VCPU_PMU_V3_CTRL,
> +                                   KVM_ARM_VCPU_PMU_V3_FILTER,
> +                                   filter);
> +               filter++;
> +       }
> +}
> +
> +/* Create a VM that has one vCPU with PMUv3 configured. */
> +static void create_vpmu_vm_with_filter(void *guest_code,
> +                                      struct kvm_pmu_event_filter *filter)
> +{
> +       uint64_t irq = 23;
> +
> +       /* The test creates the vpmu_vm multiple times. Ensure a clean state */
> +       memset(&vpmu_vm, 0, sizeof(vpmu_vm));
> +
> +       vpmu_vm.vm = vm_create(1);
> +       vpmu_vm.vcpu = vm_vcpu_add_with_vpmu(vpmu_vm.vm, 0, guest_code);
> +       vpmu_vm.gic_fd = vgic_v3_setup(vpmu_vm.vm, 1, 64);
> +       __TEST_REQUIRE(vpmu_vm.gic_fd >= 0,
> +                      "Failed to create vgic-v3, skipping");
> +
> +       pmu_event_filter_init(filter);
> +
> +       /* Initialize vPMU */
> +       vpmu_set_irq(vpmu_vm.vcpu, irq);
> +       vpmu_init(vpmu_vm.vcpu);
> +}
> +
> +static void create_vpmu_vm(void *guest_code)
> +{
> +       create_vpmu_vm_with_filter(guest_code, NULL);
> +}
> +
> +static void destroy_vpmu_vm(void)
> +{
> +       close(vpmu_vm.gic_fd);
> +       kvm_vm_free(vpmu_vm.vm);
> +}
> +
> +static void run_test(struct test_desc *t)
> +{
> +       pr_info("Test: %s\n", t->name);
> +
> +       create_vpmu_vm_with_filter(guest_code, t->filter);
> +       prepare_expected_pmce(t->filter);
> +       sync_global_to_guest(vpmu_vm.vm, expected_pmce);
> +
> +       run_vcpu(vpmu_vm.vcpu);
> +
> +       destroy_vpmu_vm();
> +}
> +
> +static struct test_desc tests[] = {
> +       {
> +               .name = "without_filter",
> +               .filter = {
> +                       { 0 }
> +               },
> +       },
> +       {
> +               .name = "member_allow_filter",
> +               .filter = {
> +                       EVENT_ALLOW(ARMV8_PMUV3_PERFCTR_SW_INCR),
> +                       EVENT_ALLOW(ARMV8_PMUV3_PERFCTR_INST_RETIRED),
> +                       EVENT_ALLOW(ARMV8_PMUV3_PERFCTR_BR_RETIRED),
> +                       { 0 },
> +               },
> +       },
> +       {
> +               .name = "member_deny_filter",
> +               .filter = {
> +                       EVENT_DENY(ARMV8_PMUV3_PERFCTR_SW_INCR),
> +                       EVENT_DENY(ARMV8_PMUV3_PERFCTR_INST_RETIRED),
> +                       EVENT_DENY(ARMV8_PMUV3_PERFCTR_BR_RETIRED),
> +                       { 0 },
> +               },
> +       },
> +       {
> +               .name = "not_member_deny_filter",
> +               .filter = {
> +                       EVENT_DENY(ARMV8_PMUV3_PERFCTR_SW_INCR),
> +                       { 0 },
> +               },
> +       },
> +       {
> +               .name = "not_member_allow_filter",
> +               .filter = {
> +                       EVENT_ALLOW(ARMV8_PMUV3_PERFCTR_SW_INCR),
> +                       { 0 },
> +               },
> +       },
> +       {
> +               .name = "deny_chain_filter",
> +               .filter = {
> +                       EVENT_DENY(ARMV8_PMUV3_PERFCTR_CHAIN),
> +                       { 0 },
> +               },
> +       },
> +       {
> +               .name = "deny_cpu_cycles_filter",
> +               .filter = {
> +                       EVENT_DENY(ARMV8_PMUV3_PERFCTR_CPU_CYCLES),
> +                       { 0 },
> +               },
> +       },
> +       {
> +               .name = "cancel_allow_filter",
> +               .filter = {
> +                       EVENT_ALLOW(ARMV8_PMUV3_PERFCTR_CPU_CYCLES),
> +                       EVENT_DENY(ARMV8_PMUV3_PERFCTR_CPU_CYCLES),
> +               },
> +       },
> +       {
> +               .name = "cancel_deny_filter",
> +               .filter = {
> +                       EVENT_DENY(ARMV8_PMUV3_PERFCTR_CPU_CYCLES),
> +                       EVENT_ALLOW(ARMV8_PMUV3_PERFCTR_CPU_CYCLES),
> +               },
> +       },
> +       {
> +               .name = "multiple_filter",
> +               .filter = {
> +                       __DEFINE_FILTER(0x0, 0x10, KVM_PMU_EVENT_ALLOW),
> +                       __DEFINE_FILTER(0x6, 0x3, KVM_PMU_EVENT_DENY),
> +               },
> +       },
> +       { 0 }
> +};
> +
> +static void run_tests(void)
> +{
> +       struct test_desc *t;
> +
> +       for (t = &tests[0]; t->name; t++)
> +               run_test(t);
> +}
> +
> +static int used_pmu_events[] = {
> +       ARMV8_PMUV3_PERFCTR_BR_RETIRED,
> +       ARMV8_PMUV3_PERFCTR_INST_RETIRED,
> +       ARMV8_PMUV3_PERFCTR_CHAIN,
> +       ARMV8_PMUV3_PERFCTR_CPU_CYCLES,
> +};
> +
> +static bool kvm_pmu_support_events(void)
> +{
> +       struct pmu_common_event_ids used_pmce = { 0, 0 };
> +
> +       create_vpmu_vm(guest_get_pmceid);
> +
> +       memset(&max_pmce, 0, sizeof(max_pmce));
> +       sync_global_to_guest(vpmu_vm.vm, max_pmce);
> +       run_vcpu(vpmu_vm.vcpu);
> +       sync_global_from_guest(vpmu_vm.vm, max_pmce);
> +       destroy_vpmu_vm();
> +
> +       for (int i = 0; i < ARRAY_SIZE(used_pmu_events); i++)
> +               set_pmce(&used_pmce, KVM_PMU_EVENT_ALLOW, used_pmu_events[i]);
> +
> +       return ((max_pmce.pmceid0 & used_pmce.pmceid0) == used_pmce.pmceid0) &&
> +              ((max_pmce.pmceid1 & used_pmce.pmceid1) == used_pmce.pmceid1);
> +}
> +
> +int main(void)
> +{
> +       TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_PMU_V3));
> +       TEST_REQUIRE(kvm_pmu_support_events());
> +
> +       run_tests();
> +}
> --
> 2.40.1
>
>
Reviewed-by: Raghavendra Rao Ananta <rananta@...gle.com>

- Raghavendra
