Message-ID: <20230220183847.59159-2-michael.roth@amd.com>
Date: Mon, 20 Feb 2023 12:37:52 -0600
From: Michael Roth <michael.roth@....com>
To: <kvm@...r.kernel.org>
CC: <linux-coco@...ts.linux.dev>, <linux-mm@...ck.org>,
<linux-crypto@...r.kernel.org>, <x86@...nel.org>,
<linux-kernel@...r.kernel.org>, <tglx@...utronix.de>,
<mingo@...hat.com>, <jroedel@...e.de>, <thomas.lendacky@....com>,
<hpa@...or.com>, <ardb@...nel.org>, <pbonzini@...hat.com>,
<seanjc@...gle.com>, <vkuznets@...hat.com>, <jmattson@...gle.com>,
<luto@...nel.org>, <dave.hansen@...ux.intel.com>, <slp@...hat.com>,
<pgonda@...gle.com>, <peterz@...radead.org>,
<srinivas.pandruvada@...ux.intel.com>, <rientjes@...gle.com>,
<dovmurik@...ux.ibm.com>, <tobin@....com>, <bp@...en8.de>,
<vbabka@...e.cz>, <kirill@...temov.name>, <ak@...ux.intel.com>,
<tony.luck@...el.com>, <marcorr@...gle.com>,
<sathyanarayanan.kuppuswamy@...ux.intel.com>,
<alpergun@...gle.com>, <dgilbert@...hat.com>, <jarkko@...nel.org>,
<ashish.kalra@....com>, <nikunj.dadhania@....com>
Subject: [PATCH RFC v8 01/56] KVM: x86: Add 'fault_is_private' x86 op

This callback is used by the KVM MMU to check whether a #NPF was for a
private GPA or not.

In some cases the full 64-bit error code for the #NPF is needed to make
this determination, so also update kvm_mmu_do_page_fault() to accept the
full 64-bit value and plumb it through to the callback.
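
As an illustration (not part of this patch), a vendor implementation
might look roughly like the sketch below, where sev_snp_guest() and
PFERR_GUEST_ENC_MASK stand in for whatever helper and error-code bit
the vendor actually uses to flag a private access:

  static bool snp_fault_is_private(struct kvm *kvm, gpa_t gpa,
                                   u64 error_code, bool *private_fault)
  {
          /* Only make a determination for SNP guests. */
          if (!sev_snp_guest(kvm))
                  return false;

          /* The error code's "encrypted" bit flags private accesses. */
          *private_fault = !!(error_code & PFERR_GUEST_ENC_MASK);

          /* A true return tells the MMU the determination was made. */
          return true;
  }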
Signed-off-by: Michael Roth <michael.roth@....com>
---
arch/x86/include/asm/kvm-x86-ops.h | 1 +
arch/x86/include/asm/kvm_host.h | 1 +
arch/x86/kvm/mmu/mmu.c | 3 +--
arch/x86/kvm/mmu/mmu_internal.h | 37 +++++++++++++++++++++++++++---
4 files changed, 37 insertions(+), 5 deletions(-)
diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
index 8dc345cc6318..72183da010b8 100644
--- a/arch/x86/include/asm/kvm-x86-ops.h
+++ b/arch/x86/include/asm/kvm-x86-ops.h
@@ -131,6 +131,7 @@ KVM_X86_OP(msr_filter_changed)
KVM_X86_OP(complete_emulated_msr)
KVM_X86_OP(vcpu_deliver_sipi_vector)
KVM_X86_OP_OPTIONAL_RET0(vcpu_get_apicv_inhibit_reasons);
+KVM_X86_OP_OPTIONAL_RET0(fault_is_private);
#undef KVM_X86_OP
#undef KVM_X86_OP_OPTIONAL
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index e552374f2357..f856d689dda0 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1643,6 +1643,7 @@ struct kvm_x86_ops {
void (*load_mmu_pgd)(struct kvm_vcpu *vcpu, hpa_t root_hpa,
int root_level);
+ bool (*fault_is_private)(struct kvm *kvm, gpa_t gpa, u64 error_code, bool *private_fault);
bool (*has_wbinvd_exit)(void);
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index eda615f3951c..fb3f34b7391c 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -5724,8 +5724,7 @@ int noinline kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 err
}
if (r == RET_PF_INVALID) {
- r = kvm_mmu_do_page_fault(vcpu, cr2_or_gpa,
- lower_32_bits(error_code), false);
+ r = kvm_mmu_do_page_fault(vcpu, cr2_or_gpa, error_code, false);
if (KVM_BUG_ON(r == RET_PF_INVALID, vcpu->kvm))
return -EIO;
}
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
index e642d431df4b..557a001210df 100644
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -231,6 +231,37 @@ struct kvm_page_fault {
int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
+static bool kvm_mmu_fault_is_private(struct kvm *kvm, gpa_t gpa, u64 err)
+{
+ struct kvm_memory_slot *slot;
+ bool private_fault = false;
+ gfn_t gfn = gpa_to_gfn(gpa);
+
+ slot = gfn_to_memslot(kvm, gfn);
+ if (!slot) {
+ pr_debug("%s: no slot, GFN: 0x%llx\n", __func__, gfn);
+ goto out;
+ }
+
+ if (!kvm_slot_can_be_private(slot)) {
+ pr_debug("%s: slot is not private, GFN: 0x%llx\n", __func__, gfn);
+ goto out;
+ }
+
+ if (static_call(kvm_x86_fault_is_private)(kvm, gpa, err, &private_fault))
+ goto out;
+
+ /*
+ * Handling below is for UPM self-tests and guests that treat userspace
+ * as the authority on whether a fault should be private or not.
+ */
+ private_fault = kvm_mem_is_private(kvm, gpa >> PAGE_SHIFT);
+
+out:
+ pr_debug("%s: GFN: 0x%llx, private: %d\n", __func__, gfn, private_fault);
+ return private_fault;
+}
+
/*
* Return values of handle_mmio_page_fault(), mmu.page_fault(), fast_page_fault(),
* and of course kvm_mmu_do_page_fault().
@@ -262,11 +293,11 @@ enum {
};
static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
- u32 err, bool prefetch)
+ u64 err, bool prefetch)
{
struct kvm_page_fault fault = {
.addr = cr2_or_gpa,
- .error_code = err,
+ .error_code = lower_32_bits(err),
.exec = err & PFERR_FETCH_MASK,
.write = err & PFERR_WRITE_MASK,
.present = err & PFERR_PRESENT_MASK,
@@ -280,7 +311,7 @@ static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
.max_level = KVM_MAX_HUGEPAGE_LEVEL,
.req_level = PG_LEVEL_4K,
.goal_level = PG_LEVEL_4K,
- .is_private = kvm_mem_is_private(vcpu->kvm, cr2_or_gpa >> PAGE_SHIFT),
+ .is_private = kvm_mmu_fault_is_private(vcpu->kvm, cr2_or_gpa, err),
};
int r;
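
Note: since the op is declared KVM_X86_OP_OPTIONAL_RET0, vendor modules
that leave .fault_is_private unset get a return-0 stub, so the static
call in kvm_mmu_fault_is_private() returns false, no determination is
made, and the kvm_mem_is_private() fallback decides. Effectively (a
simplified model, not the actual static-call machinery):

  static bool fault_is_private_ret0_stub(struct kvm *kvm, gpa_t gpa,
                                         u64 err, bool *private_fault)
  {
          /* No determination made; caller falls back to memory attributes. */
          return false;
  }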
--
2.25.1