Date:   Wed, 22 Jun 2022 23:30:21 +0000
From:   Kechen Lu <kechenl@...dia.com>
To:     "Huang, Shaoqin" <shaoqin.huang@...el.com>,
        "kvm@...r.kernel.org" <kvm@...r.kernel.org>,
        "pbonzini@...hat.com" <pbonzini@...hat.com>
CC:     "seanjc@...gle.com" <seanjc@...gle.com>,
        "chao.gao@...el.com" <chao.gao@...el.com>,
        "vkuznets@...hat.com" <vkuznets@...hat.com>,
        Somdutta Roy <somduttar@...dia.com>,
        "linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>
Subject: RE: [RFC PATCH v4 7/7] KVM: selftests: Add tests for VM and vCPU cap
 KVM_CAP_X86_DISABLE_EXITS



> -----Original Message-----
> From: Huang, Shaoqin <shaoqin.huang@...el.com>
> Sent: Tuesday, June 21, 2022 11:44 PM
> To: Kechen Lu <kechenl@...dia.com>; kvm@...r.kernel.org;
> pbonzini@...hat.com
> Cc: seanjc@...gle.com; chao.gao@...el.com; vkuznets@...hat.com;
> Somdutta Roy <somduttar@...dia.com>; linux-kernel@...r.kernel.org
> Subject: Re: [RFC PATCH v4 7/7] KVM: selftests: Add tests for VM and vCPU
> cap KVM_CAP_X86_DISABLE_EXITS
> 
> On 6/22/2022 8:49 AM, Kechen Lu wrote:
> > Add tests verifying that overriding flags via the KVM cap
> > KVM_CAP_X86_DISABLE_EXITS works as expected in both VM and vCPU scope.
> >
> > Suggested-by: Chao Gao <chao.gao@...el.com>
> > Signed-off-by: Kechen Lu <kechenl@...dia.com>
> > ---
> >   tools/testing/selftests/kvm/.gitignore        |   1 +
> >   tools/testing/selftests/kvm/Makefile          |   1 +
> >   .../selftests/kvm/include/x86_64/svm_util.h   |   1 +
> >   .../selftests/kvm/x86_64/disable_exits_test.c | 145 ++++++++++++++++++
> >   4 files changed, 148 insertions(+)
> >   create mode 100644 tools/testing/selftests/kvm/x86_64/disable_exits_test.c
> >
> > diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore
> > index 4509a3a7eeae..2b50170db9b2 100644
> > --- a/tools/testing/selftests/kvm/.gitignore
> > +++ b/tools/testing/selftests/kvm/.gitignore
> > @@ -15,6 +15,7 @@
> >   /x86_64/cpuid_test
> >   /x86_64/cr4_cpuid_sync_test
> >   /x86_64/debug_regs
> > +/x86_64/disable_exits_test
> >   /x86_64/evmcs_test
> >   /x86_64/emulator_error_test
> >   /x86_64/fix_hypercall_test
> > diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
> > index 22423c871ed6..de11d1f95700 100644
> > --- a/tools/testing/selftests/kvm/Makefile
> > +++ b/tools/testing/selftests/kvm/Makefile
> > @@ -115,6 +115,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/xen_shinfo_test
> >   TEST_GEN_PROGS_x86_64 += x86_64/xen_vmcall_test
> >   TEST_GEN_PROGS_x86_64 += x86_64/sev_migrate_tests
> >   TEST_GEN_PROGS_x86_64 += x86_64/amx_test
> > +TEST_GEN_PROGS_x86_64 += x86_64/disable_exits_test
> >   TEST_GEN_PROGS_x86_64 += access_tracking_perf_test
> >   TEST_GEN_PROGS_x86_64 += demand_paging_test
> >   TEST_GEN_PROGS_x86_64 += dirty_log_test
> > diff --git a/tools/testing/selftests/kvm/include/x86_64/svm_util.h b/tools/testing/selftests/kvm/include/x86_64/svm_util.h
> > index a25aabd8f5e7..d8cad1cff578 100644
> > --- a/tools/testing/selftests/kvm/include/x86_64/svm_util.h
> > +++ b/tools/testing/selftests/kvm/include/x86_64/svm_util.h
> > @@ -17,6 +17,7 @@
> >   #define CPUID_SVM           BIT_ULL(CPUID_SVM_BIT)
> >
> >   #define SVM_EXIT_MSR                0x07c
> > +#define SVM_EXIT_HLT         0x078
> 
> Someone else has already added SVM_EXIT_HLT in the kvm/queue, so you
> may not need to add it here.
> 

Ack. Thanks!

> >   #define SVM_EXIT_VMMCALL    0x081
> >
> >   struct svm_test_data {
> > diff --git a/tools/testing/selftests/kvm/x86_64/disable_exits_test.c b/tools/testing/selftests/kvm/x86_64/disable_exits_test.c
> > new file mode 100644
> > index 000000000000..2811b07e8885
> > --- /dev/null
> > +++ b/tools/testing/selftests/kvm/x86_64/disable_exits_test.c
> > @@ -0,0 +1,145 @@
> > +// SPDX-License-Identifier: GPL-2.0-only
> > +/*
> > + * Test per-VM and per-vCPU disable exits cap
> > + *
> > + */
> > +
> > +#define _GNU_SOURCE /* for program_invocation_short_name */
> > +#include <sys/ioctl.h>
> > +
> > +#include "test_util.h"
> > +#include "kvm_util.h"
> > +#include "svm_util.h"
> > +#include "vmx.h"
> > +#include "processor.h"
> > +
> > +#define VCPU_ID_1 0
> > +#define VCPU_ID_2 1
> > +
> > +static void guest_code_exits(void) {
> > +     asm volatile("sti; hlt; cli");
> > +}
> > +
> > +/* Set debug control for trapped instruction exiting to userspace */
> > +static void vcpu_set_debug_exit_userspace(struct kvm_vm *vm, int vcpu_id) {
> 
> nit: please keep the code style consistent and use the format:
> function()
> {
> 
> }
> 

Noted.
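Will fix it up in the next revision, roughly like this (same body, just the
requested brace placement):

static void vcpu_set_debug_exit_userspace(struct kvm_vm *vm, int vcpu_id)
{
	struct kvm_guest_debug debug;

	memset(&debug, 0, sizeof(debug));
	debug.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_EXIT_USERSPACE;
	vcpu_set_guest_debug(vm, vcpu_id, &debug);
}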

> > +     struct kvm_guest_debug debug;
> > +     memset(&debug, 0, sizeof(debug));
> > +     debug.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_EXIT_USERSPACE;
> > +     vcpu_set_guest_debug(vm, vcpu_id, &debug); }
> > +
> > +static void test_vm_cap_disable_exits(void) {
> > +     struct kvm_enable_cap cap = {
> > +             .cap = KVM_CAP_X86_DISABLE_EXITS,
> > +             .args[0] = KVM_X86_DISABLE_EXITS_HLT|KVM_X86_DISABLE_EXITS_OVERRIDE,
>
> nit: a space around the '|' would be clearer here
> 

Noted.
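Will add the space, i.e.:

		.args[0] = KVM_X86_DISABLE_EXITS_HLT | KVM_X86_DISABLE_EXITS_OVERRIDE,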

> > +     };
> > +     struct kvm_vm *vm;
> > +     struct kvm_run *run;
> > +
> > +     /* Create VM */
> > +     vm = vm_create_without_vcpus(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES);
> > +
> > +     /* Test Case #1
> > +      * Default without disabling HLT exits in VM scope
> > +      */
> > +     vm_vcpu_add_default(vm, VCPU_ID_1, (void *)guest_code_exits);
> > +     vcpu_set_debug_exit_userspace(vm, VCPU_ID_1);
> > +     run = vcpu_state(vm, VCPU_ID_1);
> > +     vcpu_run(vm, VCPU_ID_1);
> > +     /* Exit reason should be HLT */
> > +     if (is_amd_cpu())
> > +             TEST_ASSERT(run->hw.hardware_exit_reason == SVM_EXIT_HLT,
> > +                     "Got exit_reason other than HLT: 0x%llx\n",
> > +                     run->hw.hardware_exit_reason);
> > +     else
> > +             TEST_ASSERT(run->hw.hardware_exit_reason == EXIT_REASON_HLT,
> > +                     "Got exit_reason other than HLT: 0x%llx\n",
> > +                     run->hw.hardware_exit_reason);
> > +
> > +     /* Test Case #2
> > +      * Disabling HLT exits in VM scope
> > +      */
> > +     vm_vcpu_add_default(vm, VCPU_ID_2, (void *)guest_code_exits);
> > +     vcpu_set_debug_exit_userspace(vm, VCPU_ID_2);
> > +     run = vcpu_state(vm, VCPU_ID_2);
> 
> I think you can add more vCPUs here to make sure that, after disabling HLT
> exits in VM scope, none of the vCPUs exits due to HLT.
> 

Makes sense. Will refine the case design. Thanks.
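Roughly what I have in mind for the refined case (an untested sketch on top
of this test, reusing its existing helpers; NR_EXTRA_VCPUS and the extra
vCPU IDs below are placeholders):

	/* After the VM-scoped cap disabled HLT exits, add a few more
	 * vCPUs and assert that none of them exits on HLT.
	 */
	for (int i = 0; i < NR_EXTRA_VCPUS; i++) {
		int id = VCPU_ID_2 + 1 + i;

		vm_vcpu_add_default(vm, id, (void *)guest_code_exits);
		vcpu_set_debug_exit_userspace(vm, id);
		run = vcpu_state(vm, id);
		vcpu_run(vm, id);
		/* HLT exits are disabled VM-wide, so the exit reason
		 * must not be HLT on either SVM or VMX.
		 */
		TEST_ASSERT(run->hw.hardware_exit_reason !=
			    (is_amd_cpu() ? SVM_EXIT_HLT : EXIT_REASON_HLT),
			    "Got exit_reason as HLT: 0x%llx\n",
			    run->hw.hardware_exit_reason);
	}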

BR,
Kechen

> > +     /* Set VM scoped cap arg
> > +      * KVM_X86_DISABLE_EXITS_HLT|KVM_X86_DISABLE_EXITS_OVERRIDE
> > +      * after vCPU creation, hence the override flag is required
> > +      */
> > +     TEST_ASSERT(!vm_enable_cap(vm, &cap), "Failed to set KVM_CAP_X86_DISABLE_EXITS");
> > +     vcpu_run(vm, VCPU_ID_2);
> > +     /* Exit reason should not be HLT; the guest would finish
> > +      * running and exit (e.g. SVM_EXIT_SHUTDOWN)
> > +      */
> > +     if (is_amd_cpu())
> > +             TEST_ASSERT(run->hw.hardware_exit_reason != SVM_EXIT_HLT,
> > +                     "Got exit_reason as HLT: 0x%llx\n",
> > +                     run->hw.hardware_exit_reason);
> > +     else
> > +             TEST_ASSERT(run->hw.hardware_exit_reason != EXIT_REASON_HLT,
> > +                     "Got exit_reason as HLT: 0x%llx\n",
> > +                     run->hw.hardware_exit_reason);
> > +
> > +     kvm_vm_free(vm);
> > +}
> > +
> > +static void test_vcpu_cap_disable_exits(void) {
> > +     struct kvm_enable_cap cap = {
> > +             .cap = KVM_CAP_X86_DISABLE_EXITS,
> > +             .args[0] = KVM_X86_DISABLE_EXITS_HLT|KVM_X86_DISABLE_EXITS_OVERRIDE,
> > +     };
> > +     struct kvm_vm *vm;
> > +     struct kvm_run *run;
> > +
> > +     /* Create VM */
> > +     vm = vm_create_without_vcpus(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES);
> > +     vm_vcpu_add_default(vm, VCPU_ID_1, (void *)guest_code_exits);
> > +     vcpu_set_debug_exit_userspace(vm, VCPU_ID_1);
> > +     vm_vcpu_add_default(vm, VCPU_ID_2, (void *)guest_code_exits);
> > +     vcpu_set_debug_exit_userspace(vm, VCPU_ID_2);
> > +     /* Set vCPU 2 scoped cap arg
> > +      * KVM_X86_DISABLE_EXITS_HLT|KVM_X86_DISABLE_EXITS_OVERRIDE
> > +      */
> > +     TEST_ASSERT(!vcpu_enable_cap(vm, VCPU_ID_2, &cap), "Failed to set KVM_CAP_X86_DISABLE_EXITS");
> > +
> > +     /* Test Case #3
> > +      * Default without disabling HLT exits in this vCPU 1
> > +      */
> > +     run = vcpu_state(vm, VCPU_ID_1);
> > +     vcpu_run(vm, VCPU_ID_1);
> > +     /* Exit reason should be HLT */
> > +     if (is_amd_cpu())
> > +             TEST_ASSERT(run->hw.hardware_exit_reason == SVM_EXIT_HLT,
> > +                     "Got exit_reason other than HLT: 0x%llx\n",
> > +                     run->hw.hardware_exit_reason);
> > +     else
> > +             TEST_ASSERT(run->hw.hardware_exit_reason == EXIT_REASON_HLT,
> > +                     "Got exit_reason other than HLT: 0x%llx\n",
> > +                     run->hw.hardware_exit_reason);
> > +
> > +     /* Test Case #4
> > +      * Disabling HLT exits in vCPU 2
> > +      */
> > +     run = vcpu_state(vm, VCPU_ID_2);
> > +     vcpu_run(vm, VCPU_ID_2);
> > +     /* Exit reason should not be HLT; the guest would finish
> > +      * running and exit (e.g. SVM_EXIT_SHUTDOWN)
> > +      */
> > +     if (is_amd_cpu())
> > +             TEST_ASSERT(run->hw.hardware_exit_reason != SVM_EXIT_HLT,
> > +                     "Got exit_reason as HLT: 0x%llx\n",
> > +                     run->hw.hardware_exit_reason);
> > +     else
> > +             TEST_ASSERT(run->hw.hardware_exit_reason != EXIT_REASON_HLT,
> > +                     "Got exit_reason as HLT: 0x%llx\n",
> > +                     run->hw.hardware_exit_reason);
> > +
> > +     kvm_vm_free(vm);
> > +}
> > +
> > +int main(int argc, char *argv[])
> > +{
> > +     test_vm_cap_disable_exits();
> > +     test_vcpu_cap_disable_exits();
> > +     return 0;
> > +}
