Message-ID: <20200615160224.GG351167@redhat.com>
Date: Mon, 15 Jun 2020 12:02:24 -0400
From: Vivek Goyal <vgoyal@...hat.com>
To: Vitaly Kuznetsov <vkuznets@...hat.com>
Cc: kvm@...r.kernel.org, Paolo Bonzini <pbonzini@...hat.com>,
Sean Christopherson <sean.j.christopherson@...el.com>,
Wanpeng Li <wanpengli@...cent.com>,
Jim Mattson <jmattson@...gle.com>, linux-kernel@...r.kernel.org
Subject: Re: [PATCH] KVM: async_pf: change
kvm_setup_async_pf()/kvm_arch_setup_async_pf() return type to bool
On Mon, Jun 15, 2020 at 02:13:34PM +0200, Vitaly Kuznetsov wrote:
> Unlike normal 'int' functions returning '0' on success, kvm_setup_async_pf()/
> kvm_arch_setup_async_pf() return '1' when a job to handle page fault
> asynchronously was scheduled and '0' otherwise. To avoid confusion,
> change the return type to 'bool'.
>
> No functional change intended.
>
> Suggested-by: Sean Christopherson <sean.j.christopherson@...el.com>
> Signed-off-by: Vitaly Kuznetsov <vkuznets@...hat.com>
Looks good to me. Nobody checks the error from kvm_arch_setup_async_pf().
Callers only seem to care about success or failure at this point, and they
fall back to a synchronous page fault upon failure. This can be changed
back once somebody needs a specific error code.
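
For reference, a minimal sketch of the caller-side fallback on x86 (modeled
loosely on try_async_pf() in arch/x86/kvm/mmu/mmu.c; simplified, not the
exact upstream code):

	/*
	 * Hedged sketch, not the real implementation: a 'false' return
	 * from kvm_arch_setup_async_pf() means no async job was
	 * scheduled, so the fault is resolved synchronously below.
	 */
	if (kvm_can_do_async_pf(vcpu) &&
	    kvm_arch_setup_async_pf(vcpu, cr2_or_gpa, gfn))
		return true;	/* async job scheduled; resume the guest */

	/* Fall back: a NULL 'async' pointer makes this lookup block. */
	*pfn = __gfn_to_pfn_memslot(slot, gfn, false, NULL, write, writable);
	return false;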
Acked-by: Vivek Goyal <vgoyal@...hat.com>
Vivek
> ---
> arch/s390/kvm/kvm-s390.c | 20 +++++++++-----------
> arch/x86/kvm/mmu/mmu.c | 4 ++--
> include/linux/kvm_host.h | 4 ++--
> virt/kvm/async_pf.c | 16 ++++++++++------
> 4 files changed, 23 insertions(+), 21 deletions(-)
>
> diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
> index d47c19718615..7fd4fdb165fc 100644
> --- a/arch/s390/kvm/kvm-s390.c
> +++ b/arch/s390/kvm/kvm-s390.c
> @@ -3954,33 +3954,31 @@ bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu)
> return true;
> }
>
> -static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
> +static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
> {
> hva_t hva;
> struct kvm_arch_async_pf arch;
> - int rc;
>
> if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
> - return 0;
> + return false;
> if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
> vcpu->arch.pfault_compare)
> - return 0;
> + return false;
> if (psw_extint_disabled(vcpu))
> - return 0;
> + return false;
> if (kvm_s390_vcpu_has_irq(vcpu, 0))
> - return 0;
> + return false;
> if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
> - return 0;
> + return false;
> if (!vcpu->arch.gmap->pfault_enabled)
> - return 0;
> + return false;
>
> hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
> hva += current->thread.gmap_addr & ~PAGE_MASK;
> if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
> - return 0;
> + return false;
>
> - rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
> - return rc;
> + return kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
> }
>
> static int vcpu_pre_run(struct kvm_vcpu *vcpu)
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index 979a7e1c263d..3dd0af7e7515 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -4045,8 +4045,8 @@ static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr)
> walk_shadow_page_lockless_end(vcpu);
> }
>
> -static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
> - gfn_t gfn)
> +static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
> + gfn_t gfn)
> {
> struct kvm_arch_async_pf arch;
>
> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
> index 62ec926c78a0..9edc6fc71a89 100644
> --- a/include/linux/kvm_host.h
> +++ b/include/linux/kvm_host.h
> @@ -211,8 +211,8 @@ struct kvm_async_pf {
>
> void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
> void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
> -int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
> - unsigned long hva, struct kvm_arch_async_pf *arch);
> +bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
> + unsigned long hva, struct kvm_arch_async_pf *arch);
> int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
> #endif
>
> diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
> index 45799606bb3e..390f758d5a27 100644
> --- a/virt/kvm/async_pf.c
> +++ b/virt/kvm/async_pf.c
> @@ -156,17 +156,21 @@ void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
> }
> }
>
> -int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
> - unsigned long hva, struct kvm_arch_async_pf *arch)
> +/*
> + * Try to schedule a job to handle page fault asynchronously. Returns 'true' on
> + * success, 'false' on failure (page fault has to be handled synchronously).
> + */
> +bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
> + unsigned long hva, struct kvm_arch_async_pf *arch)
> {
> struct kvm_async_pf *work;
>
> if (vcpu->async_pf.queued >= ASYNC_PF_PER_VCPU)
> - return 0;
> + return false;
>
> /* Arch specific code should not do async PF in this case */
> if (unlikely(kvm_is_error_hva(hva)))
> - return 0;
> + return false;
>
> /*
> * do alloc nowait since if we are going to sleep anyway we
> @@ -174,7 +178,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
> */
> work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT | __GFP_NOWARN);
> if (!work)
> - return 0;
> + return false;
>
> work->wakeup_all = false;
> work->vcpu = vcpu;
> @@ -193,7 +197,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
>
> schedule_work(&work->work);
>
> - return 1;
> + return true;
> }
>
> int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)
> --
> 2.25.4
>