Message-Id: <20220407155645.940890-24-vkuznets@redhat.com>
Date: Thu, 7 Apr 2022 17:56:37 +0200
From: Vitaly Kuznetsov <vkuznets@...hat.com>
To: kvm@...r.kernel.org, Paolo Bonzini <pbonzini@...hat.com>
Cc: Sean Christopherson <seanjc@...gle.com>,
Wanpeng Li <wanpengli@...cent.com>,
Jim Mattson <jmattson@...gle.com>,
Michael Kelley <mikelley@...rosoft.com>,
Siddharth Chandrasekaran <sidcha@...zon.de>,
linux-kernel@...r.kernel.org
Subject: [PATCH v2 23/31] KVM: selftests: Make it possible to replace PTEs with __virt_pg_map()
__virt_pg_map() assumes that the leaf PTE is not present. This is not
suitable when a test wants to replace an already-present PTE. The
Hyper-V PV TLB flush test is going to need that.
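For illustration only (not part of the diff), a caller replacing an
existing 4k mapping would look roughly like this; TEST_GVA, GPA_A and
GPA_B are hypothetical addresses picked by the test:

	/* Initial mapping: the leaf PTE must not be present yet. */
	__virt_pg_map(vm, TEST_GVA, GPA_A, X86_PAGE_SIZE_4K, false);

	/*
	 * Later, point the same GVA at a different GPA without tearing
	 * the mapping down first; replace == true skips the !present
	 * assertion.
	 */
	__virt_pg_map(vm, TEST_GVA, GPA_B, X86_PAGE_SIZE_4K, true);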
Signed-off-by: Vitaly Kuznetsov <vkuznets@...hat.com>
---
tools/testing/selftests/kvm/include/x86_64/processor.h | 2 +-
tools/testing/selftests/kvm/lib/x86_64/processor.c | 6 +++---
tools/testing/selftests/kvm/max_guest_memory_test.c | 2 +-
tools/testing/selftests/kvm/x86_64/mmu_role_test.c | 2 +-
4 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h
index 9ad7602a257b..c20b18d05119 100644
--- a/tools/testing/selftests/kvm/include/x86_64/processor.h
+++ b/tools/testing/selftests/kvm/include/x86_64/processor.h
@@ -473,7 +473,7 @@ enum x86_page_size {
X86_PAGE_SIZE_1G,
};
void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
- enum x86_page_size page_size);
+ enum x86_page_size page_size, bool replace);
/*
* Basic CPU control in CR0
diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c
index 9f000dfb5594..20df3e84d777 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/processor.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c
@@ -229,7 +229,7 @@ static struct pageUpperEntry *virt_create_upper_pte(struct kvm_vm *vm,
}
void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
- enum x86_page_size page_size)
+ enum x86_page_size page_size, bool replace)
{
const uint64_t pg_size = 1ull << ((page_size * 9) + 12);
struct pageUpperEntry *pml4e, *pdpe, *pde;
@@ -270,7 +270,7 @@ void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
/* Fill in page table entry. */
pte = virt_get_pte(vm, pde->pfn, vaddr, 0);
- TEST_ASSERT(!pte->present,
+ TEST_ASSERT(replace || !pte->present,
"PTE already present for 4k page at vaddr: 0x%lx\n", vaddr);
pte->pfn = paddr >> vm->page_shift;
pte->writable = true;
@@ -279,7 +279,7 @@ void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
{
- __virt_pg_map(vm, vaddr, paddr, X86_PAGE_SIZE_4K);
+ __virt_pg_map(vm, vaddr, paddr, X86_PAGE_SIZE_4K, false);
}
static struct pageTableEntry *_vm_get_page_table_entry(struct kvm_vm *vm, int vcpuid,
diff --git a/tools/testing/selftests/kvm/max_guest_memory_test.c b/tools/testing/selftests/kvm/max_guest_memory_test.c
index 3875c4b23a04..437f77633b0e 100644
--- a/tools/testing/selftests/kvm/max_guest_memory_test.c
+++ b/tools/testing/selftests/kvm/max_guest_memory_test.c
@@ -244,7 +244,7 @@ int main(int argc, char *argv[])
#ifdef __x86_64__
/* Identity map memory in the guest using 1gb pages. */
for (i = 0; i < slot_size; i += size_1gb)
- __virt_pg_map(vm, gpa + i, gpa + i, X86_PAGE_SIZE_1G);
+ __virt_pg_map(vm, gpa + i, gpa + i, X86_PAGE_SIZE_1G, false);
#else
for (i = 0; i < slot_size; i += vm_get_page_size(vm))
virt_pg_map(vm, gpa + i, gpa + i);
diff --git a/tools/testing/selftests/kvm/x86_64/mmu_role_test.c b/tools/testing/selftests/kvm/x86_64/mmu_role_test.c
index da2325fcad87..e3fdf320b9f4 100644
--- a/tools/testing/selftests/kvm/x86_64/mmu_role_test.c
+++ b/tools/testing/selftests/kvm/x86_64/mmu_role_test.c
@@ -35,7 +35,7 @@ static void mmu_role_test(u32 *cpuid_reg, u32 evil_cpuid_val)
run = vcpu_state(vm, VCPU_ID);
/* Map 1gb page without a backing memlot. */
- __virt_pg_map(vm, MMIO_GPA, MMIO_GPA, X86_PAGE_SIZE_1G);
+ __virt_pg_map(vm, MMIO_GPA, MMIO_GPA, X86_PAGE_SIZE_1G, false);
r = _vcpu_run(vm, VCPU_ID);
--
2.35.1