Message-ID: <98b54b29-6f1d-4a3b-97d1-1e08b0ed0465@intel.com>
Date: Fri, 15 Mar 2024 23:24:41 -0700
From: "Chen, Zide" <zide.chen@...el.com>
To: linux-kselftest@...r.kernel.org, Ackerley Tng <ackerleytng@...gle.com>,
"Afranji, Ryan" <afranji@...gle.com>, "Aktas, Erdem"
<erdemaktas@...gle.com>, Sagi Shahar <sagis@...gle.com>,
"Yamahata, Isaku" <isaku.yamahata@...el.com>
Cc: Sean Christopherson <seanjc@...gle.com>,
Paolo Bonzini <pbonzini@...hat.com>, Shuah Khan <shuah@...nel.org>,
Peter Gonda <pgonda@...gle.com>, "Xu, Haibo1" <haibo1.xu@...el.com>,
Chao Peng <chao.p.peng@...ux.intel.com>,
"Annapurve, Vishal" <vannapurve@...gle.com>,
Roger Wang <runanwang@...gle.com>, Vipin Sharma <vipinsh@...gle.com>,
jmattson@...gle.com, dmatlack@...gle.com, linux-kernel@...r.kernel.org,
kvm@...r.kernel.org, linux-mm@...ck.org
Subject: Re: [RFC PATCH v5 22/29] KVM: selftests: Add functions to allow
mapping as shared
On 12/12/2023 12:47 PM, Sagi Shahar wrote:
Since protected_phy_pages is introduced to keep track of the guest
memory's private/shared property, it's better to keep it consistent with
the guest mappings.
Instead of adding a set of new APIs that force guest pages to be mapped
as shared, how about updating the protected_phy_pages sparsebit right
before the mapping and then calling the existing virt_pg_map() to do the
mapping?
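For instance, a minimal sketch of that flow (vm_set_gpa_shared() is a
hypothetical name, and this assumes protected_phy_pages is a sparsebit
hanging off struct kvm_vm and indexed by gpa >> page_shift):

        /*
         * Hypothetical helper: mark a GPA range as shared by clearing its
         * bits in the protected_phy_pages sparsebit, so the existing
         * mapping helpers pick the right encryption bit on their own.
         */
        static void vm_set_gpa_shared(struct kvm_vm *vm, vm_paddr_t paddr,
                                      unsigned int npages)
        {
                sparsebit_clear_num(vm->protected_phy_pages,
                                    paddr >> vm->page_shift, npages);
        }

Callers would then flip the tracking first and map as usual:

        vm_set_gpa_shared(vm, paddr, npages);
        virt_map(vm, vaddr, paddr, npages);  /* or virt_pg_map() per page */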
> From: Ackerley Tng <ackerleytng@...gle.com>
>
> Signed-off-by: Ackerley Tng <ackerleytng@...gle.com>
> Signed-off-by: Ryan Afranji <afranji@...gle.com>
> Signed-off-by: Sagi Shahar <sagis@...gle.com>
> ---
> .../selftests/kvm/include/kvm_util_base.h | 24 ++++++++++++++
> tools/testing/selftests/kvm/lib/kvm_util.c | 32 +++++++++++++++++++
> .../selftests/kvm/lib/x86_64/processor.c | 15 +++++++--
> 3 files changed, 69 insertions(+), 2 deletions(-)
>
> diff --git a/tools/testing/selftests/kvm/include/kvm_util_base.h b/tools/testing/selftests/kvm/include/kvm_util_base.h
> index b353617fcdd1..efd7ae8abb20 100644
> --- a/tools/testing/selftests/kvm/include/kvm_util_base.h
> +++ b/tools/testing/selftests/kvm/include/kvm_util_base.h
> @@ -574,6 +574,8 @@ vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm);
>
> void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
> unsigned int npages);
> +void virt_map_shared(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
> + unsigned int npages);
> void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa);
> void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva);
> vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva);
> @@ -1034,6 +1036,28 @@ static inline void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
> virt_arch_pg_map(vm, vaddr, paddr);
> }
>
> +/*
> + * VM Virtual Page Map as Shared
> + *
> + * Input Args:
> + * vm - Virtual Machine
> + * vaddr - VM Virtual Address
> + * paddr - VM Physical Address
> + * memslot - Memory region slot for new virtual translation tables
> + *
> + * Output Args: None
> + *
> + * Return: None
> + *
> + * Within @vm, creates a virtual translation for the page starting
> + * at @vaddr to the page starting at @paddr.
> + */
> +void virt_arch_pg_map_shared(struct kvm_vm *vm, uint64_t vaddr,
> +                             uint64_t paddr);
> +
> +static inline void virt_pg_map_shared(struct kvm_vm *vm, uint64_t vaddr,
> +                                      uint64_t paddr)
> +{
> +        virt_arch_pg_map_shared(vm, vaddr, paddr);
> +}
>
> /*
> * Address Guest Virtual to Guest Physical
> diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
> index 4f1ae0f1eef0..28780fa1f0f2 100644
> --- a/tools/testing/selftests/kvm/lib/kvm_util.c
> +++ b/tools/testing/selftests/kvm/lib/kvm_util.c
> @@ -1573,6 +1573,38 @@ void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
> }
> }
>
> +/*
> + * Map a range of VM virtual addresses to the VM's physical addresses as shared
> + *
> + * Input Args:
> + * vm - Virtual Machine
> + * vaddr - Virtual address to map
> + * paddr - VM Physical Address
> + * npages - The number of pages to map
> + *
> + * Output Args: None
> + *
> + * Return: None
> + *
> + * Within the VM given by @vm, creates a virtual translation for
> + * @npages starting at @vaddr to the page range starting at @paddr.
> + */
> +void virt_map_shared(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
> + unsigned int npages)
> +{
> + size_t page_size = vm->page_size;
> + size_t size = npages * page_size;
> +
> + TEST_ASSERT(vaddr + size > vaddr, "Vaddr overflow");
> + TEST_ASSERT(paddr + size > paddr, "Paddr overflow");
> +
> + while (npages--) {
> + virt_pg_map_shared(vm, vaddr, paddr);
> + vaddr += page_size;
> + paddr += page_size;
> + }
> +}
> +
> /*
> * Address VM Physical to Host Virtual
> *
> diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c
> index 566d82829da4..aa2a57ddb8d3 100644
> --- a/tools/testing/selftests/kvm/lib/x86_64/processor.c
> +++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c
> @@ -190,7 +190,8 @@ static uint64_t *virt_create_upper_pte(struct kvm_vm *vm,
> return pte;
> }
>
> -void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, int level)
> +static void ___virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
> +                           int level, bool protected)
> {
> const uint64_t pg_size = PG_LEVEL_SIZE(level);
> uint64_t *pml4e, *pdpe, *pde;
> @@ -235,17 +236,27 @@ void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, int level)
> "PTE already present for 4k page at vaddr: 0x%lx\n", vaddr);
> *pte = PTE_PRESENT_MASK | PTE_WRITABLE_MASK | (paddr & PHYSICAL_PAGE_MASK);
>
> -        if (vm_is_gpa_protected(vm, paddr))
> +        if (protected)
>                  *pte |= vm->arch.c_bit;
>          else
>                  *pte |= vm->arch.s_bit;
> }
>
> +void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
> +                   int level)
> +{
> +        ___virt_pg_map(vm, vaddr, paddr, level, vm_is_gpa_protected(vm, paddr));
> +}
> +
> void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
> {
>         __virt_pg_map(vm, vaddr, paddr, PG_LEVEL_4K);
> }
>
> +void virt_arch_pg_map_shared(struct kvm_vm *vm, uint64_t vaddr,
> +                             uint64_t paddr)
> +{
> +        ___virt_pg_map(vm, vaddr, paddr, PG_LEVEL_4K, false);
> +}
Here, the shared mapping is created regardless of the value of
protected_phy_pages, which could create confusion: the page tables and
the protected_phy_pages tracking can end up disagreeing about whether a
page is private or shared.
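If forcing a shared mapping really is the intent, an assertion along
these lines (just a sketch, reusing the vm_is_gpa_protected() helper
already called in this file) would at least make a mismatch fail loudly
instead of silently diverging:

        /*
         * Catch callers mapping a page as shared while it is still
         * tracked as private in protected_phy_pages.
         */
        TEST_ASSERT(!vm_is_gpa_protected(vm, paddr),
                    "GPA 0x%lx mapped shared but tracked as private", paddr);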
> +
> void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
> uint64_t nr_bytes, int level)
> {
> --
> 2.43.0.472.g3155946c3a-goog
>