Message-ID: <9737d0db-0cce-41c5-94fa-c3d9550d7300@intel.com>
Date: Fri, 19 Apr 2024 15:35:09 +0800
From: Xiaoyao Li <xiaoyao.li@...el.com>
To: Paolo Bonzini <pbonzini@...hat.com>, linux-kernel@...r.kernel.org,
kvm@...r.kernel.org
Cc: isaku.yamahata@...el.com, binbin.wu@...ux.intel.com, chao.gao@...el.com
Subject: Re: [PATCH v2 10/10] KVM: x86/mmu: check for invalid async page
faults involving private memory
On 4/17/2024 4:19 AM, Paolo Bonzini wrote:
> Right now the error code is not used when an async page fault is completed.
> This is not a problem in the current code, but it is untidy. For protected
> VMs, we will also need to check that the page attributes match the current
> state of the page, because asynchronous page faults can only occur on
> shared pages (private pages go through kvm_faultin_pfn_private() instead of
> __gfn_to_pfn_memslot()).
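
For anyone reading along, the dispatch that enforces this is roughly the
following (a sketch of the existing fault-in path, not part of this patch;
the exact argument list of kvm_faultin_pfn_private() is assumed here):

        /*
         * Sketch: private GPAs take kvm_faultin_pfn_private() and never
         * reach the __gfn_to_pfn_memslot()/async-#PF path, so only faults
         * on shared memory can end up in kvm_arch_setup_async_pf().
         */
        if (fault->is_private)
                return kvm_faultin_pfn_private(vcpu, fault);

        /* shared memory: resolved via __gfn_to_pfn_memslot(), possibly async */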
>
> Start by piping the error code from kvm_arch_setup_async_pf() to
> kvm_arch_async_page_ready() via the architecture-specific async page
> fault data.
The assignment that actually pipes the error code is missing from this patch;
it needs to be added, see the line I have inserted in the hunk below ...
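
To spell out why it matters: arch is a local struct, and the generic code
copies it wholesale into the async-#PF work item, roughly (trimmed and
paraphrased from kvm_setup_async_pf() in virt/kvm/async_pf.c):

        work->cr2_or_gpa = cr2_or_gpa;
        work->addr = hva;
        work->arch = *arch;     /* error_code must already be valid here */

so without the assignment, kvm_arch_async_page_ready() later reads an
uninitialized error_code from work->arch.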
> For now, it can be used to assert that there are no
> async page faults on private memory.
>
> Extracted from a patch by Isaku Yamahata.
>
> Signed-off-by: Paolo Bonzini <pbonzini@...hat.com>
> ---
>  arch/x86/include/asm/kvm_host.h |  1 +
>  arch/x86/kvm/mmu/mmu.c          | 17 ++++++++++-------
>  2 files changed, 11 insertions(+), 7 deletions(-)
>
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 7c73952b6f4e..57ec96bd4221 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -1850,6 +1850,7 @@ struct kvm_arch_async_pf {
>         gfn_t gfn;
>         unsigned long cr3;
>         bool direct_map;
> +       u64 error_code;
> };
>
> extern u32 __read_mostly kvm_nr_uret_msrs;
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index 33aea47dce8b..402d04aa5423 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -4207,24 +4207,27 @@ static u32 alloc_apf_token(struct kvm_vcpu *vcpu)
>         return (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
> }
>
> -static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
> -                                    gfn_t gfn)
> +static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu,
> +                                    struct kvm_page_fault *fault)
> {
>         struct kvm_arch_async_pf arch;
>
>         arch.token = alloc_apf_token(vcpu);
> -       arch.gfn = gfn;
> +       arch.gfn = fault->gfn;
>         arch.direct_map = vcpu->arch.mmu->root_role.direct;
>         arch.cr3 = kvm_mmu_get_guest_pgd(vcpu, vcpu->arch.mmu);
+       arch.error_code = fault->error_code;
>
> -       return kvm_setup_async_pf(vcpu, cr2_or_gpa,
> -                                 kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
> +       return kvm_setup_async_pf(vcpu, fault->addr,
> +                                 kvm_vcpu_gfn_to_hva(vcpu, fault->gfn), &arch);
> }
>
> void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
> {
>         int r;
>
> +       if (WARN_ON_ONCE(work->arch.error_code & PFERR_GUEST_ENC_MASK))
> +               return;
> +
>         if ((vcpu->arch.mmu->root_role.direct != work->arch.direct_map) ||
>               work->wakeup_all)
>                 return;
> @@ -4237,7 +4240,7 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
>               work->arch.cr3 != kvm_mmu_get_guest_pgd(vcpu, vcpu->arch.mmu))
>                 return;
>
> -       kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, 0, true, NULL);
> +       kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, work->arch.error_code, true, NULL);
> }
>
> static inline u8 kvm_max_level_for_order(int order)
> @@ -4342,7 +4345,7 @@ static int __kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
>                         trace_kvm_async_pf_repeated_fault(fault->addr, fault->gfn);
>                         kvm_make_request(KVM_REQ_APF_HALT, vcpu);
>                         return RET_PF_RETRY;
> -               } else if (kvm_arch_setup_async_pf(vcpu, fault->addr, fault->gfn)) {
> +               } else if (kvm_arch_setup_async_pf(vcpu, fault)) {
>                         return RET_PF_RETRY;
>                 }
>         }