[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20220401063636.2414200-7-mizhang@google.com>
Date: Fri, 1 Apr 2022 06:36:36 +0000
From: Mingwei Zhang <mizhang@...gle.com>
To: Paolo Bonzini <pbonzini@...hat.com>
Cc: Sean Christopherson <seanjc@...gle.com>,
Vitaly Kuznetsov <vkuznets@...hat.com>,
Wanpeng Li <wanpengli@...cent.com>,
Jim Mattson <jmattson@...gle.com>,
Joerg Roedel <joro@...tes.org>, kvm@...r.kernel.org,
linux-kernel@...r.kernel.org, Mingwei Zhang <mizhang@...gle.com>,
Yosry Ahmed <yosryahmed@...gle.com>,
Ben Gardon <bgardon@...gle.com>,
David Matlack <dmatlack@...gle.com>,
Jing Zhang <jingzhangos@...gle.com>,
Peter Xu <peterx@...hat.com>
Subject: [PATCH v3 6/6] selftests: KVM: use page stats to check if dirty
logging works properly
When dirty logging is enabled, KVM will remap all accessed pages in
NPT/EPT at 4K. This property could be used to check if
the page stats metrics work properly in KVM mmu. At the same time, this
logic might be used the other way around: using page stats to verify if
dirty logging really splits all huge pages. Moreover, when dirty logging is
disabled, KVM zaps corresponding SPTEs and we could check whether the large
pages come back when guest touches the pages again.
So add page stats checking in the dirty logging performance selftest. In
particular, add checks in four locations:
- just after the vm is created;
- after populating memory into the vm but before enabling dirty logging;
- after dirty logging finishes but before disabling it;
- after the final iteration that follows disabling dirty logging.
Tested using commands:
- ./dirty_log_perf_test -s anonymous_hugetlb_1gb
- ./dirty_log_perf_test -s anonymous_hugetlb_2mb
- ./dirty_log_perf_test -s anonymous_thp
Cc: Sean Christopherson <seanjc@...gle.com>
Cc: David Matlack <dmatlack@...gle.com>
Cc: Jing Zhang <jingzhangos@...gle.com>
Cc: Peter Xu <peterx@...hat.com>
Suggested-by: Ben Gardon <bgardon@...gle.com>
Reviewed-by: Ben Gardon <bgardon@...gle.com>
Signed-off-by: Mingwei Zhang <mizhang@...gle.com>
---
.../selftests/kvm/dirty_log_perf_test.c | 53 +++++++++++++++++++
1 file changed, 53 insertions(+)
diff --git a/tools/testing/selftests/kvm/dirty_log_perf_test.c b/tools/testing/selftests/kvm/dirty_log_perf_test.c
index c9d9e513ca04..dd48aabfff5c 100644
--- a/tools/testing/selftests/kvm/dirty_log_perf_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_perf_test.c
@@ -25,6 +25,10 @@
#define GICR_BASE_GPA 0x80A0000ULL
#endif
+#ifdef __x86_64__
+#include "processor.h"
+#endif
+
/* How many host loops to run by default (one KVM_GET_DIRTY_LOG for each loop)*/
#define TEST_HOST_LOOP_N 2UL
@@ -191,6 +195,14 @@ static void run_test(enum vm_guest_mode mode, void *arg)
p->slots, p->backing_src,
p->partition_vcpu_memory_access);
+#ifdef __x86_64__
+ TEST_ASSERT(vm_get_single_stat(vm, "pages_4k") == 0,
+ "4K page is non zero");
+ TEST_ASSERT(vm_get_single_stat(vm, "pages_2m") == 0,
+ "2M page is non zero");
+ TEST_ASSERT(vm_get_single_stat(vm, "pages_1g") == 0,
+ "1G page is non zero");
+#endif
perf_test_set_wr_fract(vm, p->wr_fract);
guest_num_pages = (nr_vcpus * guest_percpu_mem_size) >> vm_get_page_shift(vm);
@@ -232,6 +244,17 @@ static void run_test(enum vm_guest_mode mode, void *arg)
pr_info("Populate memory time: %ld.%.9lds\n",
ts_diff.tv_sec, ts_diff.tv_nsec);
+#ifdef __x86_64__
+ TEST_ASSERT(vm_get_single_stat(vm, "pages_4k") != 0,
+ "4K page is zero");
+ if (p->backing_src == VM_MEM_SRC_ANONYMOUS_THP ||
+ p->backing_src == VM_MEM_SRC_ANONYMOUS_HUGETLB_2MB)
+ TEST_ASSERT(vm_get_single_stat(vm, "pages_2m") != 0,
+ "2M page is zero");
+ if (p->backing_src == VM_MEM_SRC_ANONYMOUS_HUGETLB_1GB)
+ TEST_ASSERT(vm_get_single_stat(vm, "pages_1g") != 0,
+ "1G page is zero");
+#endif
/* Enable dirty logging */
clock_gettime(CLOCK_MONOTONIC, &start);
enable_dirty_logging(vm, p->slots);
@@ -277,6 +300,14 @@ static void run_test(enum vm_guest_mode mode, void *arg)
iteration, ts_diff.tv_sec, ts_diff.tv_nsec);
}
}
+#ifdef __x86_64__
+ TEST_ASSERT(vm_get_single_stat(vm, "pages_4k") != 0,
+ "4K page is zero after dirty logging");
+ TEST_ASSERT(vm_get_single_stat(vm, "pages_2m") == 0,
+ "2M page is non-zero after dirty logging");
+ TEST_ASSERT(vm_get_single_stat(vm, "pages_1g") == 0,
+ "1G page is non-zero after dirty logging");
+#endif
/* Disable dirty logging */
clock_gettime(CLOCK_MONOTONIC, &start);
@@ -285,6 +316,28 @@ static void run_test(enum vm_guest_mode mode, void *arg)
pr_info("Disabling dirty logging time: %ld.%.9lds\n",
ts_diff.tv_sec, ts_diff.tv_nsec);
+ /*
+ * Increment iteration to run the vcpus again to ensure all pages come
+ * back.
+ */
+ iteration++;
+ pr_info("Starting the final iteration to get all pages back.\n");
+ for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
+ while (READ_ONCE(vcpu_last_completed_iteration[vcpu_id])
+ != iteration)
+ ;
+ }
+
+#ifdef __x86_64__
+ if (p->backing_src == VM_MEM_SRC_ANONYMOUS_THP ||
+ p->backing_src == VM_MEM_SRC_ANONYMOUS_HUGETLB_2MB)
+ TEST_ASSERT(vm_get_single_stat(vm, "pages_2m") != 0,
+ "2M page is zero");
+ if (p->backing_src == VM_MEM_SRC_ANONYMOUS_HUGETLB_1GB)
+ TEST_ASSERT(vm_get_single_stat(vm, "pages_1g") != 0,
+ "1G page is zero");
+#endif
+
/* Tell the vcpu thread to quit */
host_quit = true;
perf_test_join_vcpu_threads(nr_vcpus);
--
2.35.1.1094.g7c7d902a7c-goog
Powered by blists - more mailing lists