Message-Id: <253b10a7bfcb7f9ff7911bc5a4b9971aebf0c4b9.1687991811.git.isaku.yamahata@intel.com>
Date: Wed, 28 Jun 2023 15:43:05 -0700
From: isaku.yamahata@...el.com
To: kvm@...r.kernel.org, linux-kernel@...r.kernel.org
Cc: isaku.yamahata@...el.com, isaku.yamahata@...il.com,
Paolo Bonzini <pbonzini@...hat.com>, erdemaktas@...gle.com,
Sean Christopherson <seanjc@...gle.com>,
Sagi Shahar <sagis@...gle.com>,
David Matlack <dmatlack@...gle.com>,
Kai Huang <kai.huang@...el.com>,
Zhi Wang <zhi.wang.linux@...il.com>, chen.bo@...el.com,
linux-coco@...ts.linux.dev,
Chao Peng <chao.p.peng@...ux.intel.com>,
Ackerley Tng <ackerleytng@...gle.com>,
Vishal Annapurve <vannapurve@...gle.com>,
Michael Roth <michael.roth@....com>,
Yuan Yao <yuan.yao@...ux.intel.com>
Subject: [RFC PATCH v3 06/11] KVM: x86: Introduce PFERR_GUEST_ENC_MASK to indicate fault is private
From: Isaku Yamahata <isaku.yamahata@...el.com>
Add a PFERR code, PFERR_GUEST_ENC_MASK, to designate that the page fault is
private and requires looking up memory attributes. The vendor KVM page fault
handler should set the PFERR_GUEST_ENC_MASK bit based on its fault
information; it may use the hardware value directly to set the bit, or parse
the hardware value and derive the bit from it.

For KVM_X86_PROTECTED_VM, determine whether the fault is private by
consulting the memory attributes instead.
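As an illustration only (not part of this patch), a vendor fault handler
could fold its private-fault indication into the PFERR bits before entering
the common MMU code roughly as below; HW_FAULT_PRIVATE is a hypothetical
stand-in for whatever per-vendor hardware flag reports an access to private
memory:

    /*
     * Hypothetical sketch: HW_FAULT_PRIVATE is a made-up vendor flag,
     * not a real define.  The point is only that the vendor code, not
     * the common MMU, decides when PFERR_GUEST_ENC_MASK is set.
     */
    static u64 vendor_fault_to_pferr(u64 hw_fault_flags, u64 error_code)
    {
    	if (hw_fault_flags & HW_FAULT_PRIVATE)
    		error_code |= PFERR_GUEST_ENC_MASK;
    	return error_code;
    }

The resulting error code is what kvm_mmu_do_page_fault() passes to
kvm_is_fault_private(), which tests PFERR_GUEST_ENC_MASK for VM types other
than KVM_X86_PROTECTED_VM.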
Signed-off-by: Isaku Yamahata <isaku.yamahata@...el.com>
---
Changes v2 -> v3:
- Revive PFERR_GUEST_ENC_MASK
- Rename struct kvm_page_fault::is_private => private
- Add a check for KVM_X86_PROTECTED_VM
Changes v1 -> v2:
- Introduced fault type and replaced is_private with fault_type.
- Added kvm_get_fault_type() to encapsulate the difference.
---
arch/x86/include/asm/kvm_host.h | 2 ++
arch/x86/kvm/mmu/mmu.c | 14 +++++++++-----
arch/x86/kvm/mmu/mmu_internal.h | 16 ++++++++++++++--
3 files changed, 25 insertions(+), 7 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 3ca93e75041f..831bfd1e719a 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -255,6 +255,7 @@ enum x86_intercept_stage;
#define PFERR_SGX_BIT 15
#define PFERR_GUEST_FINAL_BIT 32
#define PFERR_GUEST_PAGE_BIT 33
+#define PFERR_GUEST_ENC_BIT 34
#define PFERR_IMPLICIT_ACCESS_BIT 48
#define PFERR_PRESENT_MASK BIT(PFERR_PRESENT_BIT)
@@ -266,6 +267,7 @@ enum x86_intercept_stage;
#define PFERR_SGX_MASK BIT(PFERR_SGX_BIT)
#define PFERR_GUEST_FINAL_MASK BIT_ULL(PFERR_GUEST_FINAL_BIT)
#define PFERR_GUEST_PAGE_MASK BIT_ULL(PFERR_GUEST_PAGE_BIT)
+#define PFERR_GUEST_ENC_MASK BIT_ULL(PFERR_GUEST_ENC_BIT)
#define PFERR_IMPLICIT_ACCESS BIT_ULL(PFERR_IMPLICIT_ACCESS_BIT)
#define PFERR_NESTED_GUEST_PAGE (PFERR_GUEST_PAGE_MASK | \
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index b8ba7f11c3cb..464c70b35383 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3228,7 +3228,7 @@ void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
*/
fault->req_level = __kvm_mmu_max_mapping_level(vcpu->kvm, slot,
fault->gfn, fault->max_level,
- fault->is_private);
+ fault->private);
if (fault->req_level == PG_LEVEL_4K || fault->huge_page_disallowed)
return;
@@ -4328,7 +4328,7 @@ static int kvm_do_memory_fault_exit(struct kvm_vcpu *vcpu,
struct kvm_page_fault *fault)
{
vcpu->run->exit_reason = KVM_EXIT_MEMORY_FAULT;
- if (fault->is_private)
+ if (fault->private)
vcpu->run->memory.flags = KVM_MEMORY_EXIT_FLAG_PRIVATE;
else
vcpu->run->memory.flags = 0;
@@ -4386,10 +4386,14 @@ static int __kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
return RET_PF_EMULATE;
}
- if (fault->is_private != kvm_mem_is_private(vcpu->kvm, fault->gfn))
- return kvm_do_memory_fault_exit(vcpu, fault);
+ if (fault->private != kvm_mem_is_private(vcpu->kvm, fault->gfn)) {
+ if (vcpu->kvm->arch.vm_type == KVM_X86_PROTECTED_VM)
+ return RET_PF_RETRY;
+ else
+ return kvm_do_memory_fault_exit(vcpu, fault);
+ }
- if (fault->is_private)
+ if (fault->private)
return kvm_faultin_pfn_private(vcpu, fault);
async = false;
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
index 7f9ec1e5b136..a6e45b39ca90 100644
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -200,10 +200,10 @@ struct kvm_page_fault {
const bool present;
const bool rsvd;
const bool user;
+ const bool private;
/* Derived from mmu and global state. */
const bool is_tdp;
- const bool is_private;
const bool nx_huge_page_workaround_enabled;
/*
@@ -282,6 +282,18 @@ enum {
RET_PF_SPURIOUS,
};
+static inline bool kvm_is_fault_private(struct kvm *kvm, gpa_t gpa, u64 error_code)
+{
+ /*
+ * This is racy with mmu_seq. If we hit a race, it would result in a
+ * spurious KVM_EXIT_MEMORY_FAULT.
+ */
+ if (kvm->arch.vm_type == KVM_X86_PROTECTED_VM)
+ return kvm_mem_is_private(kvm, gpa_to_gfn(gpa));
+
+ return error_code & PFERR_GUEST_ENC_MASK;
+}
+
static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
u64 err, bool prefetch, int *emulation_type)
{
@@ -293,6 +305,7 @@ static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
.present = err & PFERR_PRESENT_MASK,
.rsvd = err & PFERR_RSVD_MASK,
.user = err & PFERR_USER_MASK,
+ .private = kvm_is_fault_private(vcpu->kvm, cr2_or_gpa, err),
.prefetch = prefetch,
.is_tdp = likely(vcpu->arch.mmu->page_fault == kvm_tdp_page_fault),
.nx_huge_page_workaround_enabled =
@@ -301,7 +314,6 @@ static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
.max_level = KVM_MAX_HUGEPAGE_LEVEL,
.req_level = PG_LEVEL_4K,
.goal_level = PG_LEVEL_4K,
- .is_private = kvm_mem_is_private(vcpu->kvm, cr2_or_gpa >> PAGE_SHIFT),
};
int r;
--
2.25.1