Message-ID: <41fa83df-c450-b3ff-24cd-9993f8df9b27@redhat.com>
Date: Tue, 3 Aug 2021 11:00:37 +0200
From: Paolo Bonzini <pbonzini@...hat.com>
To: Maxim Levitsky <mlevitsk@...hat.com>, kvm@...r.kernel.org
Cc: Wanpeng Li <wanpengli@...cent.com>,
Thomas Gleixner <tglx@...utronix.de>,
Joerg Roedel <joro@...tes.org>, Borislav Petkov <bp@...en8.de>,
Sean Christopherson <seanjc@...gle.com>,
Jim Mattson <jmattson@...gle.com>,
"maintainer:X86 ARCHITECTURE (32-BIT AND 64-BIT)" <x86@...nel.org>,
"open list:X86 ARCHITECTURE (32-BIT AND 64-BIT)"
<linux-kernel@...r.kernel.org>,
Suravee Suthikulpanit <suravee.suthikulpanit@....com>,
Vitaly Kuznetsov <vkuznets@...hat.com>,
Ingo Molnar <mingo@...hat.com>,
"H. Peter Anvin" <hpa@...or.com>
Subject: Re: [PATCH v3 04/12] KVM: x86/mmu: allow kvm_faultin_pfn to return
page fault handling code
On 02/08/21 20:33, Maxim Levitsky wrote:
> This will allow kvm_faultin_pfn() to return RET_PF_EMULATE for
> APIC MMIO emulation.
>
> This code is based on a patch from Sean Christopherson:
> https://lkml.org/lkml/2021/7/19/2970
>
> Suggested-by: Sean Christopherson <seanjc@...gle.com>
> Signed-off-by: Maxim Levitsky <mlevitsk@...hat.com>
> ---
> arch/x86/kvm/mmu/mmu.c | 16 ++++++++++------
> arch/x86/kvm/mmu/paging_tmpl.h | 4 ++--
> 2 files changed, 12 insertions(+), 8 deletions(-)
>
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index c5e0ecf5f758..6f77f6efd43c 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -3844,7 +3844,7 @@ static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
>
> static bool kvm_faultin_pfn(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
> gpa_t cr2_or_gpa, kvm_pfn_t *pfn, hva_t *hva,
> - bool write, bool *writable)
> + bool write, bool *writable, int *r)
> {
> struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
> bool async;
> @@ -3855,7 +3855,7 @@ static bool kvm_faultin_pfn(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
> * be zapped before KVM inserts a new MMIO SPTE for the gfn.
> */
> if (slot && (slot->flags & KVM_MEMSLOT_INVALID))
> - return true;
> + goto out_retry;
>
> /* Don't expose private memslots to L2. */
> if (is_guest_mode(vcpu) && !kvm_is_visible_memslot(slot)) {
> @@ -3875,14 +3875,18 @@ static bool kvm_faultin_pfn(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
> if (kvm_find_async_pf_gfn(vcpu, gfn)) {
> trace_kvm_async_pf_doublefault(cr2_or_gpa, gfn);
> kvm_make_request(KVM_REQ_APF_HALT, vcpu);
> - return true;
> + goto out_retry;
> } else if (kvm_arch_setup_async_pf(vcpu, cr2_or_gpa, gfn))
> - return true;
> + goto out_retry;
> }
>
> *pfn = __gfn_to_pfn_memslot(slot, gfn, false, NULL,
> write, writable, hva);
> 	return false;
> +
> +out_retry:
> + *r = RET_PF_RETRY;
> + return true;
> }
>
> static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
> @@ -3913,8 +3917,8 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
> smp_rmb();
>
> if (kvm_faultin_pfn(vcpu, prefault, gfn, gpa, &pfn, &hva,
> - write, &map_writable))
> - return RET_PF_RETRY;
> + write, &map_writable, &r))
> + return r;
>
> if (handle_abnormal_pfn(vcpu, is_tdp ? 0 : gpa, gfn, pfn, ACC_ALL, &r))
> return r;
> diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
> index f349eae69bf3..7d03e9b7ccfa 100644
> --- a/arch/x86/kvm/mmu/paging_tmpl.h
> +++ b/arch/x86/kvm/mmu/paging_tmpl.h
> @@ -882,8 +882,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code,
> smp_rmb();
>
> if (kvm_faultin_pfn(vcpu, prefault, walker.gfn, addr, &pfn, &hva,
> - write_fault, &map_writable))
> - return RET_PF_RETRY;
> + write_fault, &map_writable, &r))
> + return r;
>
> if (handle_abnormal_pfn(vcpu, addr, walker.gfn, pfn, walker.pte_access, &r))
> return r;
>
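For reference, the new contract is simple: kvm_faultin_pfn() still returns
true when the fault cannot be completed on the spot, but the reason now
comes back through *r instead of the callers assuming RET_PF_RETRY, which
is what makes it possible for a later patch to return RET_PF_EMULATE for
APIC MMIO.  A minimal, self-contained sketch of the pattern (the helper
names and enum values below are illustrative stand-ins, not the actual KVM
code):

	#include <stdbool.h>
	#include <stdio.h>

	/* Stand-ins for KVM's page fault handling codes; values are made up. */
	enum { RET_PF_RETRY, RET_PF_EMULATE, RET_PF_FIXED };

	/*
	 * Returns true when the fault cannot be completed here and reports
	 * the reason through *r, so the caller forwards the code instead of
	 * hard-coding RET_PF_RETRY.
	 */
	static bool faultin_pfn(bool slot_invalid, bool apic_mmio, int *r)
	{
		if (slot_invalid) {
			*r = RET_PF_RETRY;	/* memslot going away: retry the fault */
			return true;
		}
		if (apic_mmio) {
			*r = RET_PF_EMULATE;	/* hand the access to the emulator */
			return true;
		}
		return false;			/* pfn resolved, keep handling the fault */
	}

	static int page_fault(bool slot_invalid, bool apic_mmio)
	{
		int r;

		if (faultin_pfn(slot_invalid, apic_mmio, &r))
			return r;		/* was: return RET_PF_RETRY; */

		return RET_PF_FIXED;
	}

	int main(void)
	{
		printf("%d %d %d\n",
		       page_fault(true, false),		/* RET_PF_RETRY   */
		       page_fault(false, true),		/* RET_PF_EMULATE */
		       page_fault(false, false));	/* RET_PF_FIXED   */
		return 0;
	}

On the KVM side the only change at the call sites is the extra &r argument
and the "return r;", as in the hunks above.
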
Reviewed-by: Paolo Bonzini <pbonzini@...hat.com>