KERNEL SELFTESTS: linux_headers_dir is /usr/src/linux-headers-x86_64-rhel-8.3-kselftests-39be2e28180a2e87af5fbb8d83643812e1a3b371
2021-06-16 06:43:29 ln -sf /usr/bin/clang
2021-06-16 06:43:29 ln -sf /usr/bin/llc
2021-06-16 06:43:30 sed -i s/default_timeout=45/default_timeout=300/ kselftest/runner.sh
2021-06-16 06:43:30 sed -i s/default_timeout=45/default_timeout=300/ /kselftests/kselftest/runner.sh
LKP WARN miss config CONFIG_KVM_AMD= of kvm/config
source /lkp/lkp/src/lib/tests/kernel-selftests-ext.sh
2021-06-16 06:43:30 /kselftests/run_kselftest.sh -c kvm
TAP version 13
1..42
# selftests: kvm: cr4_cpuid_sync_test
ok 1 selftests: kvm: cr4_cpuid_sync_test
# selftests: kvm: get_msr_index_features
ok 2 selftests: kvm: get_msr_index_features
# selftests: kvm: evmcs_test
# Running L1 which uses EVMCS to run L2
# Injecting NMI into L1 before L2 had a chance to run after restore
# Trying extra KVM_GET_NESTED_STATE/KVM_SET_NESTED_STATE cycle
ok 3 selftests: kvm: evmcs_test
# selftests: kvm: get_cpuid_test
ok 4 selftests: kvm: get_cpuid_test
# selftests: kvm: hyperv_clock
# ==== Test Assertion Failure ====
# x86_64/hyperv_clock.c:234: false
# pid=1478 tid=1478 errno=4 - Interrupted system call
# 1 0x00000000004026fb: main at hyperv_clock.c:234
# 2 0x00007f0e7ccd009a: ?? ??:0
# 3 0x0000000000402769: _start at ??:?
# Failed guest assert: (delta_ns * 100 < (t2 - t1) * 100) at x86_64/hyperv_clock.c:74
not ok 5 selftests: kvm: hyperv_clock # exit=254
# selftests: kvm: hyperv_cpuid
ok 6 selftests: kvm: hyperv_cpuid
# selftests: kvm: hyperv_features
# Testing access to Hyper-V specific MSRs
# Testing access to Hyper-V hypercalls
ok 7 selftests: kvm: hyperv_features
# selftests: kvm: kvm_pv_test
# testing msr: MSR_KVM_SYSTEM_TIME (0x12)
# testing msr: MSR_KVM_SYSTEM_TIME_NEW (0x4b564d01)
# testing msr: MSR_KVM_WALL_CLOCK (0x11)
# testing msr: MSR_KVM_WALL_CLOCK_NEW (0x4b564d00)
# testing msr: MSR_KVM_ASYNC_PF_EN (0x4b564d02)
# testing msr: MSR_KVM_STEAL_TIME (0x4b564d03)
# testing msr: MSR_KVM_PV_EOI_EN (0x4b564d04)
# testing msr: MSR_KVM_POLL_CONTROL (0x4b564d05)
# testing msr: MSR_KVM_ASYNC_PF_INT (0x4b564d06)
# testing msr: MSR_KVM_ASYNC_PF_ACK (0x4b564d07)
# testing hcall: KVM_HC_KICK_CPU (5)
# testing hcall: KVM_HC_SEND_IPI (10)
# testing hcall: KVM_HC_SCHED_YIELD (11)
ok 8 selftests: kvm: kvm_pv_test
# selftests: kvm: mmio_warning_test
# ret1=0 exit_reason=17 suberror=1
# ret1=0 exit_reason=8 suberror=65530
# ret1=0 exit_reason=17 suberror=1
# ret1=0 exit_reason=8 suberror=65530
# ret1=0 exit_reason=17 suberror=1
# ret1=0 exit_reason=8 suberror=65530
# ret1=0 exit_reason=17 suberror=1
# ret1=0 exit_reason=8 suberror=65530
# ret1=0 exit_reason=17 suberror=1
# ret1=0 exit_reason=8 suberror=65530
# ret1=0 exit_reason=17 suberror=1
# ret1=0 exit_reason=8 suberror=65530
# ret1=0 exit_reason=17 suberror=1
# ret1=0 exit_reason=8 suberror=65530
# ret1=0 exit_reason=17 suberror=1
# ret1=0 exit_reason=8 suberror=65530
# ret1=0 exit_reason=17 suberror=1
# ret1=0 exit_reason=8 suberror=65530
# ret1=0 exit_reason=17 suberror=1
# ret1=0 exit_reason=8 suberror=65530
ok 9 selftests: kvm: mmio_warning_test
# selftests: kvm: platform_info_test
ok 10 selftests: kvm: platform_info_test
# selftests: kvm: set_boot_cpu_id
ok 11 selftests: kvm: set_boot_cpu_id
# selftests: kvm: set_sregs_test
ok 12 selftests: kvm: set_sregs_test
# selftests: kvm: smm_test
ok 13 selftests: kvm: smm_test
# selftests: kvm: state_test
ok 14 selftests: kvm: state_test
# selftests: kvm: vmx_preemption_timer_test
# ==== Test Assertion Failure ====
# x86_64/vmx_preemption_timer_test.c:226: uc.args[2] >= uc.args[3]
# pid=1761 tid=1761 errno=4 - Interrupted system call
# 1 0x00000000004026d8: main at vmx_preemption_timer_test.c:226
# 2 0x00007f6832ef809a: ?? ??:0
# 3 0x0000000000402759: _start at ??:?
# Stage 2: L1 PT expiry TSC (3195390604) < L1 TSC deadline (3204977664)
# Stage 2: L1 PT expiry TSC (3195390604) , L1 TSC deadline (3204977664)
# Stage 2: L2 PT expiry TSC (3195165978) , L2 TSC deadline (3205094464)
not ok 15 selftests: kvm: vmx_preemption_timer_test # exit=254
# selftests: kvm: svm_vmcall_test
# nested SVM not enabled, skipping test
ok 16 selftests: kvm: svm_vmcall_test # SKIP
# selftests: kvm: sync_regs_test
ok 17 selftests: kvm: sync_regs_test
# selftests: kvm: userspace_msr_exit_test
# To run the instruction emulated tests set the module parameter 'kvm.force_emulation_prefix=1'
ok 18 selftests: kvm: userspace_msr_exit_test
# selftests: kvm: vmx_apic_access_test
ok 19 selftests: kvm: vmx_apic_access_test
# selftests: kvm: vmx_close_while_nested_test
ok 20 selftests: kvm: vmx_close_while_nested_test
# selftests: kvm: vmx_dirty_log_test
ok 21 selftests: kvm: vmx_dirty_log_test
# selftests: kvm: vmx_set_nested_state_test
ok 22 selftests: kvm: vmx_set_nested_state_test
# selftests: kvm: vmx_tsc_adjust_test
# IA32_TSC_ADJUST is -4294974178 (-1 * TSC_ADJUST_VALUE + -6882).
# IA32_TSC_ADJUST is -4294974178 (-1 * TSC_ADJUST_VALUE + -6882).
# IA32_TSC_ADJUST is -8589950624 (-2 * TSC_ADJUST_VALUE + -16032).
# IA32_TSC_ADJUST is -8589950624 (-2 * TSC_ADJUST_VALUE + -16032).
ok 23 selftests: kvm: vmx_tsc_adjust_test
# selftests: kvm: vmx_nested_tsc_scaling_test
# TSC scaling not supported by the HW, skipping test
ok 24 selftests: kvm: vmx_nested_tsc_scaling_test # SKIP
# selftests: kvm: xapic_ipi_test
# Halter vCPU thread started
# vCPU thread running vCPU 0
# Halter vCPU thread reported its APIC ID: 0 after 1 seconds.
# IPI sender vCPU thread started. Letting vCPUs run for 3 seconds.
# vCPU thread running vCPU 1
# Test successful after running for 3 seconds.
# Sending vCPU sent 78353 IPIs to halting vCPU
# Halting vCPU halted 78353 times, woke 78352 times, received 78353 IPIs.
# Halter APIC ID=0
# Sender ICR value=0xa5 ICR2 value=0
# Halter TPR=0 PPR=0 LVR=0x50014
# Migrations attempted: 0
# Migrations completed: 0
ok 25 selftests: kvm: xapic_ipi_test
# selftests: kvm: xss_msr_test
ok 26 selftests: kvm: xss_msr_test
# selftests: kvm: debug_regs
ok 27 selftests: kvm: debug_regs
# selftests: kvm: tsc_msrs_test
# ==== Test Assertion Failure ====
# x86_64/tsc_msrs_test.c:95: false
# pid=2071 tid=2071 errno=4 - Interrupted system call
# 1 0x0000000000403038: run_vcpu at tsc_msrs_test.c:95
# 2 0x000000000040297a: main at tsc_msrs_test.c:159
# 3 0x00007f4e71e8e09a: ?? ??:0
# 4 0x0000000000402a89: _start at ??:?
# Failed guest assert: rounded_rdmsr(MSR_IA32_TSC) == val at x86_64/tsc_msrs_test.c:72
# values: 0x1200000000, 0x400000000
not ok 28 selftests: kvm: tsc_msrs_test # exit=254
# selftests: kvm: vmx_pmu_msrs_test
ok 29 selftests: kvm: vmx_pmu_msrs_test
# selftests: kvm: xen_shinfo_test
# KVM_XEN_HVM_CONFIG_SHARED_INFO not available, skipping test
ok 30 selftests: kvm: xen_shinfo_test # SKIP
# selftests: kvm: xen_vmcall_test
# KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL not available, skipping test
ok 31 selftests: kvm: xen_vmcall_test # SKIP
# selftests: kvm: demand_paging_test
# Testing guest mode: PA-bits:ANY, VA-bits:48, 4K pages
# guest physical test memory offset: 0x7fbffff000
# Finished creating vCPUs and starting uffd threads
# Started all vCPUs
# All vCPU threads joined
# Total guest execution time: 4.713104705s
# Overall demand paging rate: 23550.704513 pgs/sec
ok 32 selftests: kvm: demand_paging_test
# selftests: kvm: dirty_log_test
# Test iterations: 32, interval: 10 (ms)
# Testing Log Mode 'dirty-log'
# Testing guest mode: PA-bits:ANY, VA-bits:48, 4K pages
# guest physical test memory offset: 0x7fbfffc000
# Dirtied 1100800 pages
# Total bits checked: dirty (1155064), clear (6971493), track_next (374587)
# Testing Log Mode 'clear-log'
# Testing guest mode: PA-bits:ANY, VA-bits:48, 4K pages
# guest physical test memory offset: 0x7fbfffc000
# Dirtied 1229824 pages
# Total bits checked: dirty (1507403), clear (6619154), track_next (248698)
# Testing Log Mode 'dirty-ring'
# Testing guest mode: PA-bits:ANY, VA-bits:48, 4K pages
# guest physical test memory offset: 0x7fbfffc000
# vcpu stops because vcpu is kicked out...
# Notifying vcpu to continue
# vcpu continues now.
# Iteration 1 collected 915 pages
# vcpu stops because dirty ring is full...
# vcpu continues now.
# vcpu stops because dirty ring is full...
# Notifying vcpu to continue
# Iteration 2 collected 64960 pages
# vcpu continues now.
# vcpu stops because vcpu is kicked out...
# vcpu continues now.
# vcpu stops because vcpu is kicked out...
# Notifying vcpu to continue
# vcpu continues now.
# Iteration 3 collected 563 pages
# vcpu stops because dirty ring is full...
# vcpu continues now.
# vcpu stops because dirty ring is full...
# Notifying vcpu to continue
# Iteration 4 collected 64960 pages
# vcpu continues now.
# vcpu stops because vcpu is kicked out...
# vcpu continues now.
# vcpu stops because vcpu is kicked out...
# Notifying vcpu to continue
# vcpu continues now.
# Iteration 5 collected 513 pages
# vcpu stops because dirty ring is full...
# vcpu continues now.
# vcpu stops because dirty ring is full...
# Notifying vcpu to continue
# Iteration 6 collected 64960 pages
# vcpu continues now.
# vcpu stops because vcpu is kicked out...
# vcpu continues now.
# vcpu stops because vcpu is kicked out...
# Notifying vcpu to continue
# vcpu continues now.
# Iteration 7 collected 523 pages
# vcpu stops because dirty ring is full...
# vcpu continues now.
# vcpu stops because dirty ring is full...
# Notifying vcpu to continue
# Iteration 8 collected 65346 pages
# vcpu continues now.
# vcpu stops because vcpu is kicked out...
# vcpu continues now.
# vcpu stops because vcpu is kicked out...
# Notifying vcpu to continue
# vcpu continues now.
# Iteration 9 collected 19910 pages
# vcpu stops because dirty ring is full...
# vcpu continues now.
# vcpu stops because dirty ring is full...
# Notifying vcpu to continue
# Iteration 10 collected 65094 pages
# vcpu continues now.
# vcpu stops because vcpu is kicked out...
# vcpu continues now.
# vcpu stops because vcpu is kicked out...
# Notifying vcpu to continue
# vcpu continues now.
# Iteration 11 collected 22085 pages
# vcpu stops because dirty ring is full...
# vcpu continues now.
# vcpu stops because dirty ring is full...
# Notifying vcpu to continue
# Iteration 12 collected 65271 pages
# vcpu continues now.
# vcpu stops because vcpu is kicked out...
# vcpu continues now.
# vcpu stops because vcpu is kicked out...
# Notifying vcpu to continue
# vcpu continues now.
# Iteration 13 collected 21901 pages
# vcpu stops because dirty ring is full...
# vcpu continues now.
# vcpu stops because dirty ring is full...
# Notifying vcpu to continue
# Iteration 14 collected 64976 pages
# vcpu continues now.
# vcpu stops because vcpu is kicked out...
# vcpu continues now.
# vcpu stops because vcpu is kicked out...
# Notifying vcpu to continue
# vcpu continues now.
# Iteration 15 collected 21552 pages
# vcpu stops because dirty ring is full...
# vcpu continues now.
# vcpu stops because dirty ring is full...
# Notifying vcpu to continue
# Iteration 16 collected 64973 pages
# vcpu continues now.
# vcpu stops because vcpu is kicked out...
# vcpu continues now.
# vcpu stops because vcpu is kicked out...
# Notifying vcpu to continue
# vcpu continues now.
# Iteration 17 collected 21451 pages
# vcpu stops because dirty ring is full...
# vcpu continues now.
# vcpu stops because dirty ring is full...
# Notifying vcpu to continue
# Iteration 18 collected 64982 pages
# vcpu continues now.
# vcpu stops because vcpu is kicked out...
# vcpu continues now.
# vcpu stops because vcpu is kicked out...
# Notifying vcpu to continue
# vcpu continues now.
# Iteration 19 collected 21541 pages
# vcpu stops because dirty ring is full...
# vcpu continues now.
# vcpu stops because dirty ring is full...
# Notifying vcpu to continue
# Iteration 20 collected 65118 pages
# vcpu continues now.
# vcpu stops because vcpu is kicked out...
# vcpu continues now.
# vcpu stops because vcpu is kicked out...
# Notifying vcpu to continue
# vcpu continues now.
# Iteration 21 collected 14938 pages
# vcpu stops because dirty ring is full...
# vcpu continues now.
# vcpu stops because dirty ring is full...
# Notifying vcpu to continue
# Iteration 22 collected 65201 pages
# vcpu continues now.
# vcpu stops because vcpu is kicked out...
# vcpu continues now.
# vcpu stops because vcpu is kicked out...
# Notifying vcpu to continue
# vcpu continues now.
# Iteration 23 collected 13709 pages
# vcpu stops because dirty ring is full...
# vcpu continues now.
# vcpu stops because dirty ring is full...
# Notifying vcpu to continue
# Iteration 24 collected 65195 pages
# vcpu continues now.
# vcpu stops because vcpu is kicked out...
# vcpu continues now.
# vcpu stops because vcpu is kicked out...
# Notifying vcpu to continue
# vcpu continues now.
# Iteration 25 collected 12413 pages
# vcpu stops because dirty ring is full...
# vcpu continues now.
# vcpu stops because dirty ring is full...
# Notifying vcpu to continue
# Iteration 26 collected 65239 pages
# vcpu continues now.
# vcpu stops because vcpu is kicked out...
# vcpu continues now.
# vcpu stops because vcpu is kicked out...
# Notifying vcpu to continue
# vcpu continues now.
# Iteration 27 collected 12981 pages
# vcpu stops because dirty ring is full...
# vcpu continues now.
# vcpu stops because dirty ring is full...
# Notifying vcpu to continue
# Iteration 28 collected 65086 pages
# vcpu continues now.
# vcpu stops because vcpu is kicked out...
# vcpu continues now.
# vcpu stops because vcpu is kicked out...
# Notifying vcpu to continue
# vcpu continues now.
# Iteration 29 collected 14514 pages
# vcpu stops because dirty ring is full...
# vcpu continues now.
# vcpu stops because dirty ring is full...
# Notifying vcpu to continue
# Iteration 30 collected 65000 pages
# vcpu continues now.
# vcpu stops because vcpu is kicked out...
# vcpu continues now.
# vcpu stops because vcpu is kicked out...
# Notifying vcpu to continue
# vcpu continues now.
# Iteration 31 collected 14280 pages
# vcpu stops because dirty ring is full...
# vcpu continues now.
# Dirtied 1189888 pages
# Total bits checked: dirty (1190150), clear (6936407), track_next (933332)
ok 33 selftests: kvm: dirty_log_test
# selftests: kvm: dirty_log_perf_test
# Test iterations: 2
# Testing guest mode: PA-bits:ANY, VA-bits:48, 4K pages
# guest physical test memory offset: 0x7fbffff000
# Populate memory time: 5.186181588s
# Enabling dirty logging time: 0.004796996s
#
# Iteration 1 dirty memory time: 0.005262213s
# Iteration 1 get dirty log time: 0.000043141s
# Iteration 1 clear dirty log time: 0.019176292s
# Iteration 2 dirty memory time: 0.116988952s
# Iteration 2 get dirty log time: 0.000009464s
# Iteration 2 clear dirty log time: 0.018754741s
# Disabling dirty logging time: 0.012961895s
# Get dirty log over 2 iterations took 0.000052605s. (Avg 0.000026302s/iteration)
# Clear dirty log over 2 iterations took 0.037931033s. (Avg 0.018965516s/iteration)
ok 34 selftests: kvm: dirty_log_perf_test
# selftests: kvm: hardware_disable_test
ok 35 selftests: kvm: hardware_disable_test
# selftests: kvm: kvm_create_max_vcpus
# KVM_CAP_MAX_VCPU_ID: 1023
# KVM_CAP_MAX_VCPUS: 288
# Testing creating 288 vCPUs, with IDs 0...287.
# Testing creating 288 vCPUs, with IDs 735...1022.
ok 36 selftests: kvm: kvm_create_max_vcpus
# selftests: kvm: kvm_page_table_test
# Testing guest mode: PA-bits:ANY, VA-bits:48, 4K pages
# Testing memory backing src type: anonymous
# Testing memory backing src granularity: 0x1000
# Testing memory size(aligned): 0x40000000
# Guest physical test memory offset: 0x7fbffff000
# Host virtual test memory offset: 0x7fcc81ac4000
# Number of testing vCPUs: 1
# Started all vCPUs successfully
# KVM_CREATE_MAPPINGS: total execution time: 6.534772678s
#
# KVM_UPDATE_MAPPINGS: total execution time: 1.654584963s
#
# KVM_ADJUST_MAPPINGS: total execution time: 1.525596929s
#
ok 37 selftests: kvm: kvm_page_table_test
# selftests: kvm: memslot_modification_stress_test
# Testing guest mode: PA-bits:ANY, VA-bits:48, 4K pages
# guest physical test memory offset: 0x7fbffff000
# Finished creating vCPUs
# Started all vCPUs
# All vCPU threads joined
ok 38 selftests: kvm: memslot_modification_stress_test
# selftests: kvm: memslot_perf_test
# Testing map performance with 1 runs, 5 seconds each
# Memslot count too high for this test, decrease the cap (max is 8194)
#
# Testing unmap performance with 1 runs, 5 seconds each
#
not ok 39 selftests: kvm: memslot_perf_test # TIMEOUT 120 seconds
# selftests: kvm: set_memory_region_test
# Testing KVM_RUN with zero added memory regions
# Allowed number of memory slots: 32764
# Adding slots 0..32763, each memory region with 2048K size
# Testing MOVE of in-use region, 10 loops
# Testing DELETE of in-use region, 10 loops
ok 40 selftests: kvm: set_memory_region_test
# selftests: kvm: steal_time
ok 41 selftests: kvm: steal_time
# selftests: kvm: kvm_binary_stats_test
ok 42 selftests: kvm: kvm_binary_stats_test
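Summary of this run: hyperv_clock, vmx_preemption_timer_test, and tsc_msrs_test failed on guest assertions, memslot_perf_test was killed by the 120-second per-test timeout, four tests were skipped (svm_vmcall_test, vmx_nested_tsc_scaling_test, xen_shinfo_test, xen_vmcall_test), and the remaining tests passed. A minimal re-run sketch for just the failing tests, assuming the installed tree at /kselftests used above and assuming this run_kselftest.sh version accepts -t COLLECTION:TEST in addition to the -c COLLECTION option shown in the log (if -t is unavailable, -c kvm re-runs the whole collection):

  cd /kselftests
  ./run_kselftest.sh -t kvm:hyperv_clock
  ./run_kselftest.sh -t kvm:vmx_preemption_timer_test
  ./run_kselftest.sh -t kvm:tsc_msrs_test
  ./run_kselftest.sh -t kvm:memslot_perf_test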