Message-Id: <20220323184915.1335049-6-mizhang@google.com>
Date: Wed, 23 Mar 2022 18:49:15 +0000
From: Mingwei Zhang <mizhang@...gle.com>
To: Paolo Bonzini <pbonzini@...hat.com>
Cc: Sean Christopherson <seanjc@...gle.com>,
Vitaly Kuznetsov <vkuznets@...hat.com>,
Wanpeng Li <wanpengli@...cent.com>,
Jim Mattson <jmattson@...gle.com>,
Joerg Roedel <joro@...tes.org>, kvm@...r.kernel.org,
linux-kernel@...r.kernel.org, Ben Gardon <bgardon@...gle.com>,
Mingwei Zhang <mizhang@...gle.com>,
David Matlack <dmatlack@...gle.com>,
Jing Zhang <jingzhangos@...gle.com>,
Peter Xu <peterx@...hat.com>
Subject: [PATCH v2 4/4] selftests: KVM: use dirty logging to check if page
stats work correctly
When dirty logging is enabled, KVM remaps all accessed pages in the
NPT/EPT at 4K granularity. This property can be used to check whether the
page stats metrics in the KVM MMU work properly. The same logic can also be
used the other way around: page stats can verify that dirty logging really
splits all huge pages. Moreover, when dirty logging is disabled, KVM zaps
the corresponding SPTEs, so we can check whether the huge pages come back
once the guest touches the pages again.
So add page stats checking to the dirty logging performance selftest. In
particular, add checks at four locations:
- just after the VM is created;
- after populating memory into the VM but before enabling dirty logging;
- after dirty logging has run but before disabling it;
- after the final iteration, once dirty logging has been disabled.
Tested using the following commands:
- ./dirty_log_perf_test -s anonymous_hugetlb_1gb
- ./dirty_log_perf_test -s anonymous_hugetlb_2mb
- ./dirty_log_perf_test -s anonymous_thp
Cc: Sean Christopherson <seanjc@...gle.com>
Cc: David Matlack <dmatlack@...gle.com>
Cc: Jing Zhang <jingzhangos@...gle.com>
Cc: Peter Xu <peterx@...hat.com>
Suggested-by: Ben Gardon <bgardon@...gle.com>
Signed-off-by: Mingwei Zhang <mizhang@...gle.com>
---
.../selftests/kvm/dirty_log_perf_test.c | 53 +++++++++++++++++++
1 file changed, 53 insertions(+)
diff --git a/tools/testing/selftests/kvm/dirty_log_perf_test.c b/tools/testing/selftests/kvm/dirty_log_perf_test.c
index 1954b964d1cf..21431b0f5547 100644
--- a/tools/testing/selftests/kvm/dirty_log_perf_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_perf_test.c
@@ -19,6 +19,10 @@
#include "perf_test_util.h"
#include "guest_modes.h"
+#ifdef __x86_64__
+#include "processor.h"
+#endif
+
/* How many host loops to run by default (one KVM_GET_DIRTY_LOG for each loop)*/
#define TEST_HOST_LOOP_N 2UL
@@ -185,6 +189,14 @@ static void run_test(enum vm_guest_mode mode, void *arg)
p->slots, p->backing_src,
p->partition_vcpu_memory_access);
+#ifdef __x86_64__
+ TEST_ASSERT(vm_get_single_stat(vm, "pages_4k") == 0,
+ "4K page is non zero");
+ TEST_ASSERT(vm_get_single_stat(vm, "pages_2m") == 0,
+ "2M page is non zero");
+ TEST_ASSERT(vm_get_single_stat(vm, "pages_1g") == 0,
+ "1G page is non zero");
+#endif
perf_test_set_wr_fract(vm, p->wr_fract);
guest_num_pages = (nr_vcpus * guest_percpu_mem_size) >> vm_get_page_shift(vm);
@@ -222,6 +234,17 @@ static void run_test(enum vm_guest_mode mode, void *arg)
pr_info("Populate memory time: %ld.%.9lds\n",
ts_diff.tv_sec, ts_diff.tv_nsec);
+#ifdef __x86_64__
+ TEST_ASSERT(vm_get_single_stat(vm, "pages_4k") != 0,
+ "4K page is zero");
+ if (p->backing_src == VM_MEM_SRC_ANONYMOUS_THP ||
+ p->backing_src == VM_MEM_SRC_ANONYMOUS_HUGETLB_2MB)
+ TEST_ASSERT(vm_get_single_stat(vm, "pages_2m") != 0,
+ "2M page is zero");
+ if (p->backing_src == VM_MEM_SRC_ANONYMOUS_HUGETLB_1GB)
+ TEST_ASSERT(vm_get_single_stat(vm, "pages_1g") != 0,
+ "1G page is zero");
+#endif
/* Enable dirty logging */
clock_gettime(CLOCK_MONOTONIC, &start);
enable_dirty_logging(vm, p->slots);
@@ -267,6 +290,14 @@ static void run_test(enum vm_guest_mode mode, void *arg)
iteration, ts_diff.tv_sec, ts_diff.tv_nsec);
}
}
+#ifdef __x86_64__
+ TEST_ASSERT(vm_get_single_stat(vm, "pages_4k") != 0,
+ "4K page is zero after dirty logging");
+ TEST_ASSERT(vm_get_single_stat(vm, "pages_2m") == 0,
+ "2M page is non-zero after dirty logging");
+ TEST_ASSERT(vm_get_single_stat(vm, "pages_1g") == 0,
+ "1G page is non-zero after dirty logging");
+#endif
/* Disable dirty logging */
clock_gettime(CLOCK_MONOTONIC, &start);
@@ -275,6 +306,28 @@ static void run_test(enum vm_guest_mode mode, void *arg)
pr_info("Disabling dirty logging time: %ld.%.9lds\n",
ts_diff.tv_sec, ts_diff.tv_nsec);
+ /*
+ * Increment iteration to run the vcpus again to ensure all pages come
+ * back.
+ */
+ iteration++;
+ pr_info("Starting the final iteration to get all pages back.\n");
+ for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
+ while (READ_ONCE(vcpu_last_completed_iteration[vcpu_id])
+ != iteration)
+ ;
+ }
+
+#ifdef __x86_64__
+ if (p->backing_src == VM_MEM_SRC_ANONYMOUS_THP ||
+ p->backing_src == VM_MEM_SRC_ANONYMOUS_HUGETLB_2MB)
+ TEST_ASSERT(vm_get_single_stat(vm, "pages_2m") != 0,
+ "2M page is zero");
+ if (p->backing_src == VM_MEM_SRC_ANONYMOUS_HUGETLB_1GB)
+ TEST_ASSERT(vm_get_single_stat(vm, "pages_1g") != 0,
+ "1G page is zero");
+#endif
+
/* Tell the vcpu thread to quit */
host_quit = true;
perf_test_join_vcpu_threads(nr_vcpus);
--
2.35.1.1021.g381101b075-goog