Message-ID: <CALMp9eQm2fW=jbcBSZK9hO9p_Lec67B4gFsio2BBcsJfP1jtRg@mail.gmail.com>
Date: Wed, 28 Jan 2026 15:47:59 -0800
From: Jim Mattson <jmattson@...gle.com>
To: Sean Christopherson <seanjc@...gle.com>
Cc: Paolo Bonzini <pbonzini@...hat.com>, Thomas Gleixner <tglx@...utronix.de>, 
	Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>, 
	Dave Hansen <dave.hansen@...ux.intel.com>, x86@...nel.org, 
	"H. Peter Anvin" <hpa@...or.com>, Peter Zijlstra <peterz@...radead.org>, 
	Arnaldo Carvalho de Melo <acme@...nel.org>, Namhyung Kim <namhyung@...nel.org>, 
	Mark Rutland <mark.rutland@....com>, 
	Alexander Shishkin <alexander.shishkin@...ux.intel.com>, Jiri Olsa <jolsa@...nel.org>, 
	Ian Rogers <irogers@...gle.com>, Adrian Hunter <adrian.hunter@...el.com>, 
	James Clark <james.clark@...aro.org>, Shuah Khan <shuah@...nel.org>, kvm@...r.kernel.org, 
	linux-kernel@...r.kernel.org, linux-perf-users@...r.kernel.org, 
	linux-kselftest@...r.kernel.org
Subject: Re: [PATCH 6/6] KVM: selftests: x86: Add svm_pmu_hg_test for HG_ONLY bits

On Thu, Jan 22, 2026 at 9:12 AM Sean Christopherson <seanjc@...gle.com> wrote:
>
> On Wed, Jan 21, 2026, Jim Mattson wrote:
> > Add a selftest to verify KVM correctly virtualizes the AMD PMU Host-Only
> > (bit 41) and Guest-Only (bit 40) event selector bits across all relevant
> > SVM state transitions.
> >
> > For both Guest-Only and Host-Only counters, verify that:
> >   1. SVME=0: counter counts (HG_ONLY bits ignored)
> >   2. Set SVME=1: counter behavior changes based on HG_ONLY bit
> >   3. VMRUN to L2: counter behavior switches (guest vs host mode)
> >   4. VMEXIT to L1: counter behavior switches back
> >   5. Clear SVME=0: counter counts (HG_ONLY bits ignored again)
> >
> > Also confirm that setting both bits is the same as setting neither bit.
> >
> > Signed-off-by: Jim Mattson <jmattson@...gle.com>
> > ---
> >  tools/testing/selftests/kvm/Makefile.kvm      |   1 +
> >  .../selftests/kvm/x86/svm_pmu_hg_test.c       | 297 ++++++++++++++++++
> >  2 files changed, 298 insertions(+)
> >  create mode 100644 tools/testing/selftests/kvm/x86/svm_pmu_hg_test.c
> >
> > diff --git a/tools/testing/selftests/kvm/Makefile.kvm b/tools/testing/selftests/kvm/Makefile.kvm
> > index e88699e227dd..06ba85d97618 100644
> > --- a/tools/testing/selftests/kvm/Makefile.kvm
> > +++ b/tools/testing/selftests/kvm/Makefile.kvm
> > @@ -112,6 +112,7 @@ TEST_GEN_PROGS_x86 += x86/svm_vmcall_test
> >  TEST_GEN_PROGS_x86 += x86/svm_int_ctl_test
> >  TEST_GEN_PROGS_x86 += x86/svm_nested_shutdown_test
> >  TEST_GEN_PROGS_x86 += x86/svm_nested_soft_inject_test
> > +TEST_GEN_PROGS_x86 += x86/svm_pmu_hg_test
>
> Maybe svm_nested_pmu_test?  Hmm, that makes it sound like "nested PMU" though.
>
> svm_pmu_host_guest_test?

Sounds good.

> > +#define MSR_F15H_PERF_CTL0          0xc0010200
> > +#define MSR_F15H_PERF_CTR0          0xc0010201
> > +
> > +#define AMD64_EVENTSEL_GUESTONLY    BIT_ULL(40)
> > +#define AMD64_EVENTSEL_HOSTONLY     BIT_ULL(41)
>
> Please put architectural definitions in pmu.h (or whatever library header we
> have).

These should be redundant. I will confirm.
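
If they turn out not to be redundant, the move would just be these
architectural definitions into the shared pmu.h (a sketch; exact header
and naming to be confirmed against what the selftests library already
provides):

#define MSR_F15H_PERF_CTL0          0xc0010200
#define MSR_F15H_PERF_CTR0          0xc0010201

#define AMD64_EVENTSEL_GUESTONLY    BIT_ULL(40)
#define AMD64_EVENTSEL_HOSTONLY     BIT_ULL(41)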

> > +struct hg_test_data {
>
> Please drop "hg" (I keep reading it as "mercury").
>
> > +     uint64_t l2_delta;
> > +     bool l2_done;
> > +};
> > +
> > +static struct hg_test_data *hg_data;
> > +
> > +static void l2_guest_code(void)
> > +{
> > +     hg_data->l2_delta = run_and_measure();
> > +     hg_data->l2_done = true;
> > +     vmmcall();
> > +}
> > +
> > +/*
> > + * Test Guest-Only counter across all relevant state transitions.
> > + */
> > +static void l1_guest_code_guestonly(struct svm_test_data *svm,
> > +                                 struct hg_test_data *data)
> > +{
> > +     unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
> > +     struct vmcb *vmcb = svm->vmcb;
> > +     uint64_t eventsel, delta;
> > +
> > +     hg_data = data;
> > +
> > +     eventsel = EVENTSEL_RETIRED_INSNS | AMD64_EVENTSEL_GUESTONLY;
> > +     wrmsr(MSR_F15H_PERF_CTL0, eventsel);
> > +     wrmsr(MSR_F15H_PERF_CTR0, 0);
> > +
> > +     /* Step 1: SVME=0; HG_ONLY ignored */
> > +     wrmsr(MSR_EFER, rdmsr(MSR_EFER) & ~EFER_SVME);
> > +     delta = run_and_measure();
> > +     GUEST_ASSERT_NE(delta, 0);
> > +
> > +     /* Step 2: Set SVME=1; Guest-Only counter stops */
> > +     wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_SVME);
> > +     delta = run_and_measure();
> > +     GUEST_ASSERT_EQ(delta, 0);
> > +
> > +     /* Step 3: VMRUN to L2; Guest-Only counter counts */
> > +     generic_svm_setup(svm, l2_guest_code,
> > +                       &l2_guest_stack[L2_GUEST_STACK_SIZE]);
> > +     vmcb->control.intercept &= ~(1ULL << INTERCEPT_MSR_PROT);
> > +
> > +     run_guest(vmcb, svm->vmcb_gpa);
> > +
> > +     GUEST_ASSERT_EQ(vmcb->control.exit_code, SVM_EXIT_VMMCALL);
> > +     GUEST_ASSERT(data->l2_done);
> > +     GUEST_ASSERT_NE(data->l2_delta, 0);
> > +
> > +     /* Step 4: After VMEXIT to L1; Guest-Only counter stops */
> > +     delta = run_and_measure();
> > +     GUEST_ASSERT_EQ(delta, 0);
> > +
> > +     /* Step 5: Clear SVME; HG_ONLY ignored */
> > +     wrmsr(MSR_EFER, rdmsr(MSR_EFER) & ~EFER_SVME);
> > +     delta = run_and_measure();
> > +     GUEST_ASSERT_NE(delta, 0);
> > +
> > +     GUEST_DONE();
> > +}
> > +
> > +/*
> > + * Test Host-Only counter across all relevant state transitions.
> > + */
> > +static void l1_guest_code_hostonly(struct svm_test_data *svm,
> > +                                struct hg_test_data *data)
> > +{
> > +     unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
> > +     struct vmcb *vmcb = svm->vmcb;
> > +     uint64_t eventsel, delta;
> > +
> > +     hg_data = data;
> > +
> > +     eventsel = EVENTSEL_RETIRED_INSNS | AMD64_EVENTSEL_HOSTONLY;
> > +     wrmsr(MSR_F15H_PERF_CTL0, eventsel);
> > +     wrmsr(MSR_F15H_PERF_CTR0, 0);
> > +
> > +     /* Step 1: SVME=0; HG_ONLY ignored */
> > +     wrmsr(MSR_EFER, rdmsr(MSR_EFER) & ~EFER_SVME);
> > +     delta = run_and_measure();
> > +     GUEST_ASSERT_NE(delta, 0);
> > +
> > +     /* Step 2: Set SVME=1; Host-Only counter still counts */
> > +     wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_SVME);
> > +     delta = run_and_measure();
> > +     GUEST_ASSERT_NE(delta, 0);
> > +
> > +     /* Step 3: VMRUN to L2; Host-Only counter stops */
> > +     generic_svm_setup(svm, l2_guest_code,
> > +                       &l2_guest_stack[L2_GUEST_STACK_SIZE]);
> > +     vmcb->control.intercept &= ~(1ULL << INTERCEPT_MSR_PROT);
> > +
> > +     run_guest(vmcb, svm->vmcb_gpa);
> > +
> > +     GUEST_ASSERT_EQ(vmcb->control.exit_code, SVM_EXIT_VMMCALL);
> > +     GUEST_ASSERT(data->l2_done);
> > +     GUEST_ASSERT_EQ(data->l2_delta, 0);
> > +
> > +     /* Step 4: After VMEXIT to L1; Host-Only counter counts */
> > +     delta = run_and_measure();
> > +     GUEST_ASSERT_NE(delta, 0);
> > +
> > +     /* Step 5: Clear SVME; HG_ONLY ignored */
> > +     wrmsr(MSR_EFER, rdmsr(MSR_EFER) & ~EFER_SVME);
> > +     delta = run_and_measure();
> > +     GUEST_ASSERT_NE(delta, 0);
> > +
> > +     GUEST_DONE();
> > +}
> > +
> > +/*
> > + * Test that both bits set is the same as neither bit set (always counts).
> > + */
> > +static void l1_guest_code_both_bits(struct svm_test_data *svm,
>
> l1_guest_code gets somewhat redundant.  What about something like these, to be
> more descriptive about the salient points without creating monstrous names?
>
>         l1_test_no_filtering // very open to suggestions for a better name
>         l1_test_guestonly
>         l1_test_hostonly
>         l1_test_host_and_guest
>
> Actually, why are there even separate helpers?  Very off the cuff, but this seems
> trivial to dedup:
>
> static void l1_guest_code(struct svm_test_data *svm, u64 host_guest_mask)
> {
>         const bool count_in_host = !host_guest_mask ||
>                                    (host_guest_mask & AMD64_EVENTSEL_HOSTONLY);
>         const bool count_in_guest = !host_guest_mask ||
>                                     (host_guest_mask & AMD64_EVENTSEL_GUESTONLY);
>         unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
>         struct vmcb *vmcb = svm->vmcb;
>         uint64_t delta;
>
>         wrmsr(MSR_F15H_PERF_CTL0, EVENTSEL_RETIRED_INSNS | host_guest_mask);
>         wrmsr(MSR_F15H_PERF_CTR0, 0);
>
>         /* Step 1: SVME=0; host always counts */
>         wrmsr(MSR_EFER, rdmsr(MSR_EFER) & ~EFER_SVME);
>         delta = run_and_measure();
>         GUEST_ASSERT_NE(delta, 0);
>
>         /* Step 2: Set SVME=1; counts iff Host-Only (or no filtering) */
>         wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_SVME);
>         delta = run_and_measure();
>         GUEST_ASSERT(!!delta == count_in_host);
>
>         /* Step 3: VMRUN to L2; counts iff Guest-Only (or no filtering) */
>         generic_svm_setup(svm, l2_guest_code,
>                           &l2_guest_stack[L2_GUEST_STACK_SIZE]);
>         vmcb->control.intercept &= ~(1ULL << INTERCEPT_MSR_PROT);
>
>         run_guest(vmcb, svm->vmcb_gpa);
>
>         GUEST_ASSERT_EQ(vmcb->control.exit_code, SVM_EXIT_VMMCALL);
>         GUEST_ASSERT(hg_data->l2_done);
>         GUEST_ASSERT(!!hg_data->l2_delta == count_in_guest);
>
>         /* Step 4: After VMEXIT to L1; counts iff Host-Only (or no filtering) */
>         delta = run_and_measure();
>         GUEST_ASSERT(!!delta == count_in_host);
>
>         /* Step 5: Clear SVME; HG_ONLY ignored */
>         wrmsr(MSR_EFER, rdmsr(MSR_EFER) & ~EFER_SVME);
>         delta = run_and_measure();
>         GUEST_ASSERT_NE(delta, 0);
>
>         GUEST_DONE();
> }

Even better, I will fold this all into one test flow with 4 PMCs
covering the bit permutations.
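
Very roughly, something like this (an untested sketch: read_pmc_delta()
stands in for whatever per-counter measurement helper this ends up
using, and it assumes the Fam15h CTLn/CTRn MSRs stay interleaved at
CTL0 + 2n / CTR0 + 2n):

static const uint64_t hg_masks[] = {
	0,
	AMD64_EVENTSEL_GUESTONLY,
	AMD64_EVENTSEL_HOSTONLY,
	AMD64_EVENTSEL_HOSTONLY | AMD64_EVENTSEL_GUESTONLY,
};

/* Program one PMC per Host/Guest bit permutation. */
static void program_counters(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(hg_masks); i++) {
		wrmsr(MSR_F15H_PERF_CTL0 + 2 * i,
		      EVENTSEL_RETIRED_INSNS | hg_masks[i]);
		wrmsr(MSR_F15H_PERF_CTR0 + 2 * i, 0);
	}
}

/* Assert that each PMC counted iff its mask allows the current context. */
static void check_counters(bool svme, bool in_guest)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(hg_masks); i++) {
		uint64_t mask = hg_masks[i];
		bool should_count = !svme || !mask ||
			(mask & (in_guest ? AMD64_EVENTSEL_GUESTONLY
					  : AMD64_EVENTSEL_HOSTONLY));

		GUEST_ASSERT(!!read_pmc_delta(i) == should_count);
	}
}

Each of the five steps then boils down to a single check_counters() call
with the appropriate (svme, in_guest) pair, and the "no bits set" case
falls out for free.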

> > +                                 struct hg_test_data *data)
> > +{
> > +     unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
> > +     struct vmcb *vmcb = svm->vmcb;
> > +     uint64_t eventsel, delta;
> > +
> > +     hg_data = data;
> > +
> > +     eventsel = EVENTSEL_RETIRED_INSNS |
> > +             AMD64_EVENTSEL_HOSTONLY | AMD64_EVENTSEL_GUESTONLY;
> > +     wrmsr(MSR_F15H_PERF_CTL0, eventsel);
> > +     wrmsr(MSR_F15H_PERF_CTR0, 0);
> > +
> > +     /* Step 1: SVME=0 */
> > +     wrmsr(MSR_EFER, rdmsr(MSR_EFER) & ~EFER_SVME);
> > +     delta = run_and_measure();
> > +     GUEST_ASSERT_NE(delta, 0);
> > +
> > +     /* Step 2: Set SVME=1 */
> > +     wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_SVME);
> > +     delta = run_and_measure();
> > +     GUEST_ASSERT_NE(delta, 0);
> > +
> > +     /* Step 3: VMRUN to L2 */
> > +     generic_svm_setup(svm, l2_guest_code,
> > +                       &l2_guest_stack[L2_GUEST_STACK_SIZE]);
> > +     vmcb->control.intercept &= ~(1ULL << INTERCEPT_MSR_PROT);
> > +
> > +     run_guest(vmcb, svm->vmcb_gpa);
> > +
> > +     GUEST_ASSERT_EQ(vmcb->control.exit_code, SVM_EXIT_VMMCALL);
> > +     GUEST_ASSERT(data->l2_done);
> > +     GUEST_ASSERT_NE(data->l2_delta, 0);
> > +
> > +     /* Step 4: After VMEXIT to L1 */
> > +     delta = run_and_measure();
> > +     GUEST_ASSERT_NE(delta, 0);
> > +
> > +     /* Step 5: Clear SVME */
> > +     wrmsr(MSR_EFER, rdmsr(MSR_EFER) & ~EFER_SVME);
> > +     delta = run_and_measure();
> > +     GUEST_ASSERT_NE(delta, 0);
> > +
> > +     GUEST_DONE();
> > +}
> > +
> > +static void l1_guest_code(struct svm_test_data *svm, struct hg_test_data *data,
> > +                       int test_num)
> > +{
> > +     switch (test_num) {
> > +     case 0:
>
> As above, I would much rather pass in the mask of GUEST_HOST bits to set, and
> then react accordingly, as opposed to passing in a magic/arbitrary @test_num.
> Then I'm pretty sure we don't need a dispatch function, just run the testcase
> using the passed in mask.
>
> > +             l1_guest_code_guestonly(svm, data);
> > +             break;
> > +     case 1:
> > +             l1_guest_code_hostonly(svm, data);
> > +             break;
> > +     case 2:
> > +             l1_guest_code_both_bits(svm, data);
> > +             break;
> > +     }
> > +}
>
> ...
>
> > +int main(int argc, char *argv[])
> > +{
> > +     TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM));
> > +     TEST_REQUIRE(kvm_is_pmu_enabled());
> > +     TEST_REQUIRE(get_kvm_amd_param_bool("enable_mediated_pmu"));
> > +
> > +     run_test(0, "Guest-Only counter across all transitions");
> > +     run_test(1, "Host-Only counter across all transitions");
> > +     run_test(2, "Both HG_ONLY bits set (always count)");
>
> As alluded to above, shouldn't we also test "no bits set"?
>
> > +
> > +     return 0;
> > +}
> > --
> > 2.52.0.457.g6b5491de43-goog
> >
