From 763674ca414c8d54f914b21b9d86ba6bb304d294 Mon Sep 17 00:00:00 2001
From: Sean Christopherson
Date: Tue, 6 May 2025 14:40:54 -0700
Subject: [PATCH 1/7] KVM: x86/mmu: Move "struct kvm_page_fault" definition to
 asm/kvm_host.h

Make "struct kvm_page_fault" globally visible via asm/kvm_host.h so that
the structure can be referenced by common KVM.

No functional change intended.

Signed-off-by: Sean Christopherson
---
 arch/x86/include/asm/kvm_host.h | 68 ++++++++++++++++++++++++++++++++-
 arch/x86/kvm/mmu/mmu_internal.h | 67 --------------------------------
 2 files changed, 67 insertions(+), 68 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 4c27f213ea55..ae61a4687d38 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -443,7 +443,73 @@ struct kvm_mmu_root_info {
 #define KVM_HAVE_MMU_RWLOCK
 
 struct kvm_mmu_page;
-struct kvm_page_fault;
+
+struct kvm_page_fault {
+	/* arguments to kvm_mmu_do_page_fault. */
+	const gpa_t addr;
+	const u64 error_code;
+	const bool prefetch;
+
+	/* Derived from error_code. */
+	const bool exec;
+	const bool write;
+	const bool present;
+	const bool rsvd;
+	const bool user;
+
+	/* Derived from mmu and global state. */
+	const bool is_tdp;
+	const bool is_private;
+	const bool nx_huge_page_workaround_enabled;
+
+	/*
+	 * Whether a >4KB mapping can be created or is forbidden due to NX
+	 * hugepages.
+	 */
+	bool huge_page_disallowed;
+
+	/*
+	 * Maximum page size that can be created for this fault; input to
+	 * FNAME(fetch), direct_map() and kvm_tdp_mmu_map().
+	 */
+	u8 max_level;
+
+	/*
+	 * Page size that can be created based on the max_level and the
+	 * page size used by the host mapping.
+	 */
+	u8 req_level;
+
+	/*
+	 * Page size that will be created based on the req_level and
+	 * huge_page_disallowed.
+	 */
+	u8 goal_level;
+
+	/*
+	 * Shifted addr, or result of guest page table walk if addr is a gva. In
+	 * the case of VM where memslot's can be mapped at multiple GPA aliases
+	 * (i.e. TDX), the gfn field does not contain the bit that selects between
+	 * the aliases (i.e. the shared bit for TDX).
+	 */
+	gfn_t gfn;
+
+	/* The memslot containing gfn. May be NULL. */
+	struct kvm_memory_slot *slot;
+
+	/* Outputs of kvm_mmu_faultin_pfn(). */
+	unsigned long mmu_seq;
+	kvm_pfn_t pfn;
+	struct page *refcounted_page;
+	bool map_writable;
+
+	/*
+	 * Indicates the guest is trying to write a gfn that contains one or
+	 * more of the PTEs used to translate the write itself, i.e. the access
+	 * is changing its own translation in the guest page tables.
+	 */
+	bool write_fault_to_shadow_pgtable;
+};
 
 /*
  * x86 supports 4 paging modes (5-level 64-bit, 4-level 64-bit, 3-level 32-bit,
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
index db8f33e4de62..384fc4d0bfec 100644
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -230,73 +230,6 @@ static inline bool is_nx_huge_page_enabled(struct kvm *kvm)
 	return READ_ONCE(nx_huge_pages) && !kvm->arch.disable_nx_huge_pages;
 }
 
-struct kvm_page_fault {
-	/* arguments to kvm_mmu_do_page_fault. */
-	const gpa_t addr;
-	const u64 error_code;
-	const bool prefetch;
-
-	/* Derived from error_code. */
-	const bool exec;
-	const bool write;
-	const bool present;
-	const bool rsvd;
-	const bool user;
-
-	/* Derived from mmu and global state. */
-	const bool is_tdp;
-	const bool is_private;
-	const bool nx_huge_page_workaround_enabled;
-
-	/*
-	 * Whether a >4KB mapping can be created or is forbidden due to NX
-	 * hugepages.
-	 */
-	bool huge_page_disallowed;
-
-	/*
-	 * Maximum page size that can be created for this fault; input to
-	 * FNAME(fetch), direct_map() and kvm_tdp_mmu_map().
-	 */
-	u8 max_level;
-
-	/*
-	 * Page size that can be created based on the max_level and the
-	 * page size used by the host mapping.
-	 */
-	u8 req_level;
-
-	/*
-	 * Page size that will be created based on the req_level and
-	 * huge_page_disallowed.
-	 */
-	u8 goal_level;
-
-	/*
-	 * Shifted addr, or result of guest page table walk if addr is a gva. In
-	 * the case of VM where memslot's can be mapped at multiple GPA aliases
-	 * (i.e. TDX), the gfn field does not contain the bit that selects between
-	 * the aliases (i.e. the shared bit for TDX).
-	 */
-	gfn_t gfn;
-
-	/* The memslot containing gfn. May be NULL. */
-	struct kvm_memory_slot *slot;
-
-	/* Outputs of kvm_mmu_faultin_pfn(). */
-	unsigned long mmu_seq;
-	kvm_pfn_t pfn;
-	struct page *refcounted_page;
-	bool map_writable;
-
-	/*
-	 * Indicates the guest is trying to write a gfn that contains one or
-	 * more of the PTEs used to translate the write itself, i.e. the access
-	 * is changing its own translation in the guest page tables.
-	 */
-	bool write_fault_to_shadow_pgtable;
-};
-
 int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
 
 /*

base-commit: 94da2b969670d100730b5537f20523e49e989920
-- 
2.49.0.967.g6a0df3ecc3-goog
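
For context, a minimal sketch of the kind of arch-neutral access this move enables; the helper below is purely illustrative (its name and placement are hypothetical) and is not part of this patch or of KVM's common code:

/*
 * Illustrative only, not part of this patch: once the definition lives in
 * asm/kvm_host.h, and is therefore pulled in via linux/kvm_host.h, common
 * KVM code could read fault state directly, e.g.:
 */
#include <linux/kvm_host.h>

static inline bool example_fault_is_private_write(const struct kvm_page_fault *fault)
{
	/* Both fields are members of the now globally visible structure. */
	return fault->write && fault->is_private;
}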