lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20220819005601.198436-4-gshan@redhat.com>
Date:   Fri, 19 Aug 2022 08:55:59 +0800
From:   Gavin Shan <gshan@...hat.com>
To:     kvmarm@...ts.cs.columbia.edu
Cc:     linux-arm-kernel@...ts.infradead.org, kvm@...r.kernel.org,
        linux-kernel@...r.kernel.org, linux-doc@...r.kernel.org,
        linux-kselftest@...r.kernel.org, peterx@...hat.com,
        pbonzini@...hat.com, corbet@....net, maz@...nel.org,
        james.morse@....com, alexandru.elisei@....com,
        suzuki.poulose@....com, oliver.upton@...ux.dev,
        catalin.marinas@....com, will@...nel.org, shuah@...nel.org,
        seanjc@...gle.com, drjones@...hat.com, dmatlack@...gle.com,
        bgardon@...gle.com, ricarkol@...gle.com, zhenyzha@...hat.com,
        shan.gavin@...il.com
Subject: [PATCH v1 3/5] KVM: selftests: Dirty host pages in dirty_log_test

guest_code() assumes that 1024 (TEST_PAGES_PER_LOOP) host pages, rather
than guest pages, are dirtied in each iteration. The current
implementation misses the case of mismatched page sizes in host and
guest. For example, ARM64 could have a 64KB page size in the guest, but
a 4KB page size in the host. In that case, only (TEST_PAGES_PER_LOOP / 16)
host pages, instead of TEST_PAGES_PER_LOOP, are dirtied in every
iteration.

Fix the issue by touching all sub-pages when the host and guest page
sizes are mismatched.

Signed-off-by: Gavin Shan <gshan@...hat.com>
---
 tools/testing/selftests/kvm/dirty_log_test.c | 50 +++++++++++++++-----
 1 file changed, 39 insertions(+), 11 deletions(-)

diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c
index 9c883c94d478..50b02186ce12 100644
--- a/tools/testing/selftests/kvm/dirty_log_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_test.c
@@ -70,6 +70,7 @@
  * that may change.
  */
 static uint64_t host_page_size;
+static uint64_t host_num_pages;
 static uint64_t guest_page_size;
 static uint64_t guest_num_pages;
 static uint64_t random_array[TEST_PAGES_PER_LOOP];
@@ -94,8 +95,23 @@ static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM;
  */
 static void guest_code(void)
 {
+	uint64_t num_pages, page_size, sub_page_size;
 	uint64_t addr;
-	int i;
+	int pages_per_loop, i, j;
+
+	/*
+	 * The page sizes on host and VM could be different. We need
+	 * to perform writing on all sub-pages.
+	 */
+	if (host_page_size >= guest_page_size) {
+		num_pages = host_num_pages;
+		page_size = host_page_size;
+		sub_page_size = host_page_size;
+	} else {
+		num_pages = guest_num_pages;
+		page_size = guest_page_size;
+		sub_page_size = host_page_size;
+	}
 
 	/*
 	 * On s390x, all pages of a 1M segment are initially marked as dirty
@@ -103,18 +119,29 @@ static void guest_code(void)
 	 * To compensate this specialty in this test, we need to touch all
 	 * pages during the first iteration.
 	 */
-	for (i = 0; i < guest_num_pages; i++) {
-		addr = guest_test_virt_mem + i * guest_page_size;
-		*(uint64_t *)addr = READ_ONCE(iteration);
+	for (i = 0; i < num_pages; i++) {
+		addr = guest_test_virt_mem + i * page_size;
+		addr = align_down(addr, page_size);
+
+		for (j = 0; j < page_size / sub_page_size; j++) {
+			*(uint64_t *)(addr + j * sub_page_size) =
+				READ_ONCE(iteration);
+		}
 	}
 
+	pages_per_loop = (TEST_PAGES_PER_LOOP * sub_page_size) / page_size;
+
 	while (true) {
-		for (i = 0; i < TEST_PAGES_PER_LOOP; i++) {
+		for (i = 0; i < pages_per_loop; i++) {
 			addr = guest_test_virt_mem;
-			addr += (READ_ONCE(random_array[i]) % guest_num_pages)
-				* guest_page_size;
-			addr = align_down(addr, host_page_size);
-			*(uint64_t *)addr = READ_ONCE(iteration);
+			addr += (READ_ONCE(random_array[i]) % num_pages)
+				* page_size;
+			addr = align_down(addr, page_size);
+
+			for (j = 0; j < page_size / sub_page_size; j++) {
+				*(uint64_t *)(addr + j * sub_page_size) =
+					READ_ONCE(iteration);
+			}
 		}
 
 		/* Tell the host that we need more random numbers */
@@ -713,14 +740,14 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 		       2ul << (DIRTY_MEM_BITS - PAGE_SHIFT_4K), guest_code);
 
 	guest_page_size = vm->page_size;
+	host_page_size = getpagesize();
+
 	/*
 	 * A little more than 1G of guest page sized pages.  Cover the
 	 * case where the size is not aligned to 64 pages.
 	 */
 	guest_num_pages = (1ul << (DIRTY_MEM_BITS - vm->page_shift)) + 3;
 	guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);
-
-	host_page_size = getpagesize();
 	host_num_pages = vm_num_host_pages(mode, guest_num_pages);
 
 	if (!p->phys_offset) {
@@ -760,6 +787,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 	sync_global_to_guest(vm, host_page_size);
 	sync_global_to_guest(vm, guest_page_size);
 	sync_global_to_guest(vm, guest_test_virt_mem);
+	sync_global_to_guest(vm, host_num_pages);
 	sync_global_to_guest(vm, guest_num_pages);
 
 	/* Start the iterations */
-- 
2.23.0

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ