Message-ID: <86wmfyr1j4.wl-maz@kernel.org>
Date: Tue, 17 Dec 2024 11:29:03 +0000
From: Marc Zyngier <maz@...nel.org>
To: Quentin Perret <qperret@...gle.com>
Cc: Oliver Upton <oliver.upton@...ux.dev>,
Joey Gouly <joey.gouly@....com>,
Suzuki K Poulose <suzuki.poulose@....com>,
Zenghui Yu <yuzenghui@...wei.com>,
Catalin Marinas <catalin.marinas@....com>,
Will Deacon <will@...nel.org>,
Fuad Tabba <tabba@...gle.com>,
Vincent Donnefort <vdonnefort@...gle.com>,
Sebastian Ene <sebastianene@...gle.com>,
linux-arm-kernel@...ts.infradead.org,
kvmarm@...ts.linux.dev,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH v3 11/18] KVM: arm64: Introduce __pkvm_host_unshare_guest()
On Mon, 16 Dec 2024 17:57:56 +0000,
Quentin Perret <qperret@...gle.com> wrote:
>
> In preparation for letting the host unmap pages from non-protected
> guests, introduce a new hypercall implementing the host-unshare-guest
> transition.
>
> Signed-off-by: Quentin Perret <qperret@...gle.com>
> ---
> arch/arm64/include/asm/kvm_asm.h | 1 +
> arch/arm64/kvm/hyp/include/nvhe/mem_protect.h | 1 +
> arch/arm64/kvm/hyp/include/nvhe/pkvm.h | 6 ++
> arch/arm64/kvm/hyp/nvhe/hyp-main.c | 21 ++++++
> arch/arm64/kvm/hyp/nvhe/mem_protect.c | 67 +++++++++++++++++++
> arch/arm64/kvm/hyp/nvhe/pkvm.c | 12 ++++
> 6 files changed, 108 insertions(+)
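
The host-facing side of this hypercall only lands later in the series, but
the calling convention is already visible from the handler below: VM handle
in SMCCC argument register 1, gfn in register 2, error code back in
register 1. For illustration only, a host-side caller would presumably be a
thin wrapper around kvm_call_hyp_nvhe() — the wrapper name here is made up,
not part of the patch:

	/*
	 * Hypothetical host-side helper: tear down the stage-2 mapping
	 * for @gfn in the non-protected guest identified by @handle.
	 * Returns 0 on success or a negative error code from EL2.
	 */
	static int pkvm_unmap_guest_page(pkvm_handle_t handle, u64 gfn)
	{
		return kvm_call_hyp_nvhe(__pkvm_host_unshare_guest,
					 handle, gfn);
	}
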
>
> diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
> index 449337f5b2a3..0b6c4d325134 100644
> --- a/arch/arm64/include/asm/kvm_asm.h
> +++ b/arch/arm64/include/asm/kvm_asm.h
> @@ -66,6 +66,7 @@ enum __kvm_host_smccc_func {
> __KVM_HOST_SMCCC_FUNC___pkvm_host_share_hyp,
> __KVM_HOST_SMCCC_FUNC___pkvm_host_unshare_hyp,
> __KVM_HOST_SMCCC_FUNC___pkvm_host_share_guest,
> + __KVM_HOST_SMCCC_FUNC___pkvm_host_unshare_guest,
> __KVM_HOST_SMCCC_FUNC___kvm_adjust_pc,
> __KVM_HOST_SMCCC_FUNC___kvm_vcpu_run,
> __KVM_HOST_SMCCC_FUNC___kvm_flush_vm_context,
> diff --git a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
> index a7976e50f556..e528a42ed60e 100644
> --- a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
> +++ b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
> @@ -40,6 +40,7 @@ int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages);
> int __pkvm_host_share_ffa(u64 pfn, u64 nr_pages);
> int __pkvm_host_unshare_ffa(u64 pfn, u64 nr_pages);
> int __pkvm_host_share_guest(u64 pfn, u64 gfn, struct pkvm_hyp_vcpu *vcpu, enum kvm_pgtable_prot prot);
> +int __pkvm_host_unshare_guest(u64 gfn, struct pkvm_hyp_vm *hyp_vm);
>
> bool addr_is_memory(phys_addr_t phys);
> int host_stage2_idmap_locked(phys_addr_t addr, u64 size, enum kvm_pgtable_prot prot);
> diff --git a/arch/arm64/kvm/hyp/include/nvhe/pkvm.h b/arch/arm64/kvm/hyp/include/nvhe/pkvm.h
> index be52c5b15e21..0cc2a429f1fb 100644
> --- a/arch/arm64/kvm/hyp/include/nvhe/pkvm.h
> +++ b/arch/arm64/kvm/hyp/include/nvhe/pkvm.h
> @@ -64,6 +64,11 @@ static inline bool pkvm_hyp_vcpu_is_protected(struct pkvm_hyp_vcpu *hyp_vcpu)
> return vcpu_is_protected(&hyp_vcpu->vcpu);
> }
>
> +static inline bool pkvm_hyp_vm_is_protected(struct pkvm_hyp_vm *hyp_vm)
> +{
> + return kvm_vm_is_protected(&hyp_vm->kvm);
> +}
> +
> void pkvm_hyp_vm_table_init(void *tbl);
>
> int __pkvm_init_vm(struct kvm *host_kvm, unsigned long vm_hva,
> @@ -78,6 +83,7 @@ void pkvm_put_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu);
> struct pkvm_hyp_vcpu *pkvm_get_loaded_hyp_vcpu(void);
>
> struct pkvm_hyp_vm *get_pkvm_hyp_vm(pkvm_handle_t handle);
> +struct pkvm_hyp_vm *get_np_pkvm_hyp_vm(pkvm_handle_t handle);
> void put_pkvm_hyp_vm(struct pkvm_hyp_vm *hyp_vm);
>
> #endif /* __ARM64_KVM_NVHE_PKVM_H__ */
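
The pkvm.c hunk implementing get_np_pkvm_hyp_vm() has been trimmed from the
quote above, but from the declaration and the "np" (non-protected) naming,
a plausible shape — a guess, not quoted from the patch — is to take a
reference on the VM and refuse protected ones:

	struct pkvm_hyp_vm *get_np_pkvm_hyp_vm(pkvm_handle_t handle)
	{
		struct pkvm_hyp_vm *hyp_vm = get_pkvm_hyp_vm(handle);

		/* The host may only meddle with non-protected guests */
		if (hyp_vm && pkvm_hyp_vm_is_protected(hyp_vm)) {
			put_pkvm_hyp_vm(hyp_vm);
			hyp_vm = NULL;
		}

		return hyp_vm;
	}
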
> diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
> index d659462fbf5d..3c3a27c985a2 100644
> --- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
> +++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
> @@ -244,6 +244,26 @@ static void handle___pkvm_host_share_guest(struct kvm_cpu_context *host_ctxt)
> cpu_reg(host_ctxt, 1) = ret;
> }
>
> +static void handle___pkvm_host_unshare_guest(struct kvm_cpu_context *host_ctxt)
> +{
> + DECLARE_REG(pkvm_handle_t, handle, host_ctxt, 1);
> + DECLARE_REG(u64, gfn, host_ctxt, 2);
> + struct pkvm_hyp_vm *hyp_vm;
> + int ret = -EINVAL;
> +
> + if (!is_protected_kvm_enabled())
> + goto out;
> +
> + hyp_vm = get_np_pkvm_hyp_vm(handle);
> + if (!hyp_vm)
> + goto out;
> +
> + ret = __pkvm_host_unshare_guest(gfn, hyp_vm);
> + put_pkvm_hyp_vm(hyp_vm);
> +out:
> + cpu_reg(host_ctxt, 1) = ret;
> +}
> +
> static void handle___kvm_adjust_pc(struct kvm_cpu_context *host_ctxt)
> {
> DECLARE_REG(struct kvm_vcpu *, vcpu, host_ctxt, 1);
> @@ -454,6 +474,7 @@ static const hcall_t host_hcall[] = {
> HANDLE_FUNC(__pkvm_host_share_hyp),
> HANDLE_FUNC(__pkvm_host_unshare_hyp),
> HANDLE_FUNC(__pkvm_host_share_guest),
> + HANDLE_FUNC(__pkvm_host_unshare_guest),
> HANDLE_FUNC(__kvm_adjust_pc),
> HANDLE_FUNC(__kvm_vcpu_run),
> HANDLE_FUNC(__kvm_flush_vm_context),
> diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
> index fb9592e721cf..30243b7922f1 100644
> --- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
> +++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
> @@ -1421,3 +1421,70 @@ int __pkvm_host_share_guest(u64 pfn, u64 gfn, struct pkvm_hyp_vcpu *vcpu,
>
> return ret;
> }
> +
> +static int __check_host_shared_guest(struct pkvm_hyp_vm *vm, u64 *__phys, u64 ipa)
> +{
> + enum pkvm_page_state state;
> + struct hyp_page *page;
> + kvm_pte_t pte;
> + u64 phys;
> + s8 level;
> + int ret;
> +
> + ret = kvm_pgtable_get_leaf(&vm->pgt, ipa, &pte, &level);
> + if (ret)
> + return ret;
> + if (level != KVM_PGTABLE_LAST_LEVEL)

So there is still a very strong assumption that a guest is only ever
provided with page mappings, and never with blocks?
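
For reference, kvm_pgtable_get_leaf() will happily return a valid block
entry at a higher level, and the check above then rejects it outright. If
blocks were ever meant to be handled here, the physical address would also
have to account for the offset of the IPA within the block — roughly along
these lines, using the existing kvm_pte_to_phys()/kvm_granule_size()
helpers (an illustration of the point, not a suggested fix):

	/* Accept any valid leaf, and add @ipa's offset within it */
	phys = kvm_pte_to_phys(pte);
	phys += ipa & (kvm_granule_size(level) - 1);
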
Thanks,
M.
--
Without deviation from the norm, progress is not possible.