Message-ID: <20230824080408.2933205-8-stevensd@google.com>
Date: Thu, 24 Aug 2023 17:04:07 +0900
From: David Stevens <stevensd@...omium.org>
To: Sean Christopherson <seanjc@...gle.com>
Cc: Yu Zhang <yu.c.zhang@...ux.intel.com>,
Isaku Yamahata <isaku.yamahata@...il.com>,
Marc Zyngier <maz@...nel.org>,
Michael Ellerman <mpe@...erman.id.au>,
Peter Xu <peterx@...hat.com>,
linux-arm-kernel@...ts.infradead.org, kvmarm@...ts.linux.dev,
linux-kernel@...r.kernel.org, linuxppc-dev@...ts.ozlabs.org,
kvm@...r.kernel.org, David Stevens <stevensd@...omium.org>
Subject: [PATCH v8 7/8] KVM: PPC: Migrate to __kvm_follow_pfn
From: David Stevens <stevensd@...omium.org>
Migrate from __gfn_to_pfn_memslot() to __kvm_follow_pfn(). As part of the
refactoring, remove the now-redundant calls to get_user_page_fast_only():
the check for !async && !atomic that made them necessary was removed from
the KVM generic code in commit b9b33da2aa74, so the generic code performs
the fast-path lookup itself. Also remove the kvm_ro parameter, since the
KVM generic code now handles read-only memslots.
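
For reviewers, the call sites below are all converted the same way. A
minimal sketch of the new pattern (illustrative only, condensed from the
book3s_64_mmu_hv.c hunk in this patch, using the struct kvm_follow_pfn
fields introduced earlier in this series):

        struct kvm_follow_pfn foll = {
                .slot = memslot,
                .gfn = gfn,
                /* hold a ref; request write access on write faults */
                .flags = FOLL_GET | (writing ? FOLL_WRITE : 0),
                /* probe for a writable mapping; this replaces the old
                 * arch-side get_user_page_fast_only() fast path */
                .try_map_writable = true,
        };
        unsigned long pfn;

        pfn = __kvm_follow_pfn(&foll);
        if (is_error_noslot_pfn(pfn))
                return -EFAULT;
        /* outputs: foll.writable says whether a writable mapping was
         * installed, foll.hva is the resolved host virtual address */
        write_ok = foll.writable;
        hva = foll.hva;

The single call subsumes both the old fast path and the
__gfn_to_pfn_memslot() slow path, and the generic code checks the
memslot's read-only flag itself, which is why the kvm_ro plumbing can go
away.
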
Signed-off-by: David Stevens <stevensd@...omium.org>
---
 arch/powerpc/include/asm/kvm_book3s.h  |  2 +-
 arch/powerpc/kvm/book3s_64_mmu_hv.c    | 38 +++++++++-----------
 arch/powerpc/kvm/book3s_64_mmu_radix.c | 50 +++++++++++---------------
 arch/powerpc/kvm/book3s_hv_nested.c    |  4 +--
 4 files changed, 38 insertions(+), 56 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index bbf5e2c5fe09..bf48c511e700 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -202,7 +202,7 @@ extern bool kvmppc_hv_handle_set_rc(struct kvm *kvm, bool nested,
 extern int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
                         unsigned long gpa,
                         struct kvm_memory_slot *memslot,
-                        bool writing, bool kvm_ro,
+                        bool writing,
                         pte_t *inserted_pte, unsigned int *levelp);
 extern int kvmppc_init_vm_radix(struct kvm *kvm);
 extern void kvmppc_free_radix(struct kvm *kvm);
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 7f765d5ad436..4688046626af 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -523,6 +523,9 @@ int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu,
         unsigned long rcbits;
         long mmio_update;
         pte_t pte, *ptep;
+        struct kvm_follow_pfn foll = {
+                .try_map_writable = true,
+        };
 
         if (kvm_is_radix(kvm))
                 return kvmppc_book3s_radix_page_fault(vcpu, ea, dsisr);
@@ -599,29 +602,20 @@ int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu,
         page = NULL;
         writing = (dsisr & DSISR_ISSTORE) != 0;
         /* If writing != 0, then the HPTE must allow writing, if we get here */
-        write_ok = writing;
-        hva = gfn_to_hva_memslot(memslot, gfn);
-
-        /*
-         * Do a fast check first, since __gfn_to_pfn_memslot doesn't
-         * do it with !atomic && !async, which is how we call it.
-         * We always ask for write permission since the common case
-         * is that the page is writable.
-         */
-        if (get_user_page_fast_only(hva, FOLL_WRITE, &page)) {
-                write_ok = true;
-        } else {
-                /* Call KVM generic code to do the slow-path check */
-                pfn = __gfn_to_pfn_memslot(memslot, gfn, false, false, NULL,
-                                           writing, &write_ok, NULL);
-                if (is_error_noslot_pfn(pfn))
-                        return -EFAULT;
-                page = NULL;
-                if (pfn_valid(pfn)) {
-                        page = pfn_to_page(pfn);
-                        if (PageReserved(page))
-                                page = NULL;
-                }
+        foll.slot = memslot;
+        foll.gfn = gfn;
+        foll.flags = FOLL_GET | (writing ? FOLL_WRITE : 0);
+        pfn = __kvm_follow_pfn(&foll);
+        if (is_error_noslot_pfn(pfn))
+                return -EFAULT;
+        page = NULL;
+        write_ok = foll.writable;
+        hva = foll.hva;
+        if (pfn_valid(pfn)) {
+                page = pfn_to_page(pfn);
+                if (PageReserved(page))
+                        page = NULL;
         }
 
         /*
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index 572707858d65..498f89128c3a 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -815,47 +815,39 @@ bool kvmppc_hv_handle_set_rc(struct kvm *kvm, bool nested, bool writing,
 int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
                                    unsigned long gpa,
                                    struct kvm_memory_slot *memslot,
-                                   bool writing, bool kvm_ro,
+                                   bool writing,
                                    pte_t *inserted_pte, unsigned int *levelp)
 {
         struct kvm *kvm = vcpu->kvm;
         struct page *page = NULL;
         unsigned long mmu_seq;
-        unsigned long hva, gfn = gpa >> PAGE_SHIFT;
-        bool upgrade_write = false;
-        bool *upgrade_p = &upgrade_write;
+        unsigned long hva, pfn, gfn = gpa >> PAGE_SHIFT;
+        bool upgrade_write;
         pte_t pte, *ptep;
         unsigned int shift, level;
         int ret;
         bool large_enable;
+        struct kvm_follow_pfn foll = {
+                .slot = memslot,
+                .gfn = gfn,
+                .flags = FOLL_GET | (writing ? FOLL_WRITE : 0),
+                .try_map_writable = true,
+        };
 
         /* used to check for invalidations in progress */
         mmu_seq = kvm->mmu_invalidate_seq;
         smp_rmb();
 
-        /*
-         * Do a fast check first, since __gfn_to_pfn_memslot doesn't
-         * do it with !atomic && !async, which is how we call it.
-         * We always ask for write permission since the common case
-         * is that the page is writable.
-         */
-        hva = gfn_to_hva_memslot(memslot, gfn);
-        if (!kvm_ro && get_user_page_fast_only(hva, FOLL_WRITE, &page)) {
-                upgrade_write = true;
-        } else {
-                unsigned long pfn;
-
-                /* Call KVM generic code to do the slow-path check */
-                pfn = __gfn_to_pfn_memslot(memslot, gfn, false, false, NULL,
-                                           writing, upgrade_p, NULL);
-                if (is_error_noslot_pfn(pfn))
-                        return -EFAULT;
-                page = NULL;
-                if (pfn_valid(pfn)) {
-                        page = pfn_to_page(pfn);
-                        if (PageReserved(page))
-                                page = NULL;
-                }
+        pfn = __kvm_follow_pfn(&foll);
+        if (is_error_noslot_pfn(pfn))
+                return -EFAULT;
+        page = NULL;
+        hva = foll.hva;
+        upgrade_write = foll.writable;
+        if (pfn_valid(pfn)) {
+                page = pfn_to_page(pfn);
+                if (PageReserved(page))
+                        page = NULL;
         }
 
         /*
@@ -944,7 +936,6 @@ int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
         struct kvm_memory_slot *memslot;
         long ret;
         bool writing = !!(dsisr & DSISR_ISSTORE);
-        bool kvm_ro = false;
 
         /* Check for unusual errors */
         if (dsisr & DSISR_UNSUPP_MMU) {
@@ -997,7 +988,6 @@ int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
                                                ea, DSISR_ISSTORE | DSISR_PROTFAULT);
                         return RESUME_GUEST;
                 }
-                kvm_ro = true;
         }
 
         /* Failed to set the reference/change bits */
@@ -1015,7 +1005,7 @@ int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
 
         /* Try to insert a pte */
         ret = kvmppc_book3s_instantiate_page(vcpu, gpa, memslot, writing,
-                                             kvm_ro, NULL, NULL);
+                                             NULL, NULL);
 
         if (ret == 0 || ret == -EAGAIN)
                 ret = RESUME_GUEST;
diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c
index 377d0b4a05ee..6d531051df04 100644
--- a/arch/powerpc/kvm/book3s_hv_nested.c
+++ b/arch/powerpc/kvm/book3s_hv_nested.c
@@ -1497,7 +1497,6 @@ static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu,
         unsigned long n_gpa, gpa, gfn, perm = 0UL;
         unsigned int shift, l1_shift, level;
         bool writing = !!(dsisr & DSISR_ISSTORE);
-        bool kvm_ro = false;
         long int ret;
 
         if (!gp->l1_gr_to_hr) {
@@ -1577,7 +1576,6 @@ static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu,
                                                ea, DSISR_ISSTORE | DSISR_PROTFAULT);
                         return RESUME_GUEST;
                 }
-                kvm_ro = true;
         }
 
         /* 2. Find the host pte for this L1 guest real address */
@@ -1599,7 +1597,7 @@ static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu,
         if (!pte_present(pte) || (writing && !(pte_val(pte) & _PAGE_WRITE))) {
                 /* No suitable pte found -> try to insert a mapping */
                 ret = kvmppc_book3s_instantiate_page(vcpu, gpa, memslot,
-                                        writing, kvm_ro, &pte, &level);
+                                        writing, &pte, &level);
                 if (ret == -EAGAIN)
                         return RESUME_GUEST;
                 else if (ret)
--
2.42.0.rc1.204.g551eb34607-goog