Message-ID: <815d893b-63fc-4dec-8c04-6580344c7eef@linux.intel.com>
Date: Mon, 20 Nov 2023 19:24:51 +0800
From: Binbin Wu <binbin.wu@...ux.intel.com>
To: isaku.yamahata@...el.com
Cc: kvm@...r.kernel.org, linux-kernel@...r.kernel.org,
isaku.yamahata@...il.com, Paolo Bonzini <pbonzini@...hat.com>,
erdemaktas@...gle.com, Sean Christopherson <seanjc@...gle.com>,
Sagi Shahar <sagis@...gle.com>,
David Matlack <dmatlack@...gle.com>,
Kai Huang <kai.huang@...el.com>,
Zhi Wang <zhi.wang.linux@...il.com>, chen.bo@...el.com,
hang.yuan@...el.com, tina.zhang@...el.com,
Xiaoyao Li <xiaoyao.li@...el.com>
Subject: Re: [PATCH v6 09/16] KVM: TDX: Pass desired page level in err code
for page fault handler
On 11/7/2023 11:00 PM, isaku.yamahata@...el.com wrote:
> From: Xiaoyao Li <xiaoyao.li@...el.com>
>
> For TDX, an EPT violation can happen when the TD guest issues TDG.MEM.PAGE.ACCEPT,
> and TDG.MEM.PAGE.ACCEPT carries the page level at which the TD guest wants to
> accept the page.
>
> 1. KVM can map it with a 4KB page while the TD guest wants to accept a 2MB page.
>
> The TD guest will get TDX_PAGE_SIZE_MISMATCH and should then retry the accept
> at 4KB size.
>
> 2. KVM can map it with a 2MB page while the TD guest wants to accept a 4KB page.
>
> KVM needs to honor the guest's request because
> a) there is no way to tell the guest that KVM maps it at 2MB size, and
> b) the guest accepts it at 4KB size since it knows some other 4KB page
> in the same 2MB range will be used as a shared page.
>
> For case 2, the desired page level needs to be passed to the MMU's
> page fault handler. Use bits 29:31 of the KVM PF error code for this purpose.
The shortlog is the same as that of patch 7/16..., so I am a bit confused by
the structure of this patch series...
Can this patch be squashed into 7/16?
>
> Signed-off-by: Xiaoyao Li <xiaoyao.li@...el.com>
> ---
> arch/x86/include/asm/kvm_host.h | 2 ++
> arch/x86/kvm/vmx/common.h | 2 +-
> arch/x86/kvm/vmx/tdx.c | 7 ++++++-
> arch/x86/kvm/vmx/tdx.h | 19 -------------------
> arch/x86/kvm/vmx/tdx_arch.h | 19 +++++++++++++++++++
> arch/x86/kvm/vmx/vmx.c | 2 +-
> 6 files changed, 29 insertions(+), 22 deletions(-)
>
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index eed36c1eedb7..c16823f3326e 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -285,6 +285,8 @@ enum x86_intercept_stage;
> PFERR_WRITE_MASK | \
> PFERR_PRESENT_MASK)
>
> +#define PFERR_LEVEL(err_code) (((err_code) & PFERR_LEVEL_MASK) >> PFERR_LEVEL_START_BIT)
It's defined, but never used?
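I would have expected the fault handler to consume it somewhere to honor the
requested level, e.g. something along these lines (just a hypothetical sketch of
what I'd expect; the max_level clamping is my guess, not part of this patch):

	u8 req_level = PFERR_LEVEL(error_code);	/* decode bits 29:31 */

	if (req_level > PG_LEVEL_NONE)
		/* don't map at a larger size than the guest asked to ACCEPT */
		fault->max_level = min(fault->max_level, req_level);

Or is the decoding expected to show up in a later patch?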
> +
> /* apic attention bits */
> #define KVM_APIC_CHECK_VAPIC 0
> /*
> diff --git a/arch/x86/kvm/vmx/common.h b/arch/x86/kvm/vmx/common.h
> index bb00433932ee..787f59c44abc 100644
> --- a/arch/x86/kvm/vmx/common.h
> +++ b/arch/x86/kvm/vmx/common.h
> @@ -91,7 +91,7 @@ static inline int __vmx_handle_ept_violation(struct kvm_vcpu *vcpu, gpa_t gpa,
> if (kvm_is_private_gpa(vcpu->kvm, gpa))
> error_code |= PFERR_GUEST_ENC_MASK;
>
> - if (err_page_level > 0)
> + if (err_page_level > PG_LEVEL_NONE)
> error_code |= (err_page_level << PFERR_LEVEL_START_BIT) & PFERR_LEVEL_MASK;
>
> return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0);
> diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
> index 7b81811eb404..c614ab20c191 100644
> --- a/arch/x86/kvm/vmx/tdx.c
> +++ b/arch/x86/kvm/vmx/tdx.c
> @@ -2713,6 +2713,7 @@ static int tdx_init_mem_region(struct kvm *kvm, struct kvm_tdx_cmd *cmd)
> struct kvm_tdx_init_mem_region region;
> struct kvm_vcpu *vcpu;
> struct page *page;
> + u64 error_code;
> int idx, ret = 0;
> bool added = false;
>
> @@ -2770,7 +2771,11 @@ static int tdx_init_mem_region(struct kvm *kvm, struct kvm_tdx_cmd *cmd)
> kvm_tdx->source_pa = pfn_to_hpa(page_to_pfn(page)) |
> (cmd->flags & KVM_TDX_MEASURE_MEMORY_REGION);
>
> - ret = kvm_mmu_map_tdp_page(vcpu, region.gpa, TDX_SEPT_PFERR,
> + /* TODO: large page support. */
> + error_code = TDX_SEPT_PFERR;
> + error_code |= (PG_LEVEL_4K << PFERR_LEVEL_START_BIT) &
> + PFERR_LEVEL_MASK;
> + ret = kvm_mmu_map_tdp_page(vcpu, region.gpa, error_code,
> PG_LEVEL_4K);
> put_page(page);
> if (ret)
> diff --git a/arch/x86/kvm/vmx/tdx.h b/arch/x86/kvm/vmx/tdx.h
> index 37ee944c36a1..54c3f6b83571 100644
> --- a/arch/x86/kvm/vmx/tdx.h
> +++ b/arch/x86/kvm/vmx/tdx.h
> @@ -72,25 +72,6 @@ union tdx_exit_reason {
> u64 full;
> };
>
> -union tdx_ext_exit_qualification {
> - struct {
> - u64 type : 4;
> - u64 reserved0 : 28;
> - u64 req_sept_level : 3;
> - u64 err_sept_level : 3;
> - u64 err_sept_state : 8;
> - u64 err_sept_is_leaf : 1;
> - u64 reserved1 : 17;
> - };
> - u64 full;
> -};
> -
> -enum tdx_ext_exit_qualification_type {
> - EXT_EXIT_QUAL_NONE,
> - EXT_EXIT_QUAL_ACCEPT,
> - NUM_EXT_EXIT_QUAL,
> -};
> -
> struct vcpu_tdx {
> struct kvm_vcpu vcpu;
>
> diff --git a/arch/x86/kvm/vmx/tdx_arch.h b/arch/x86/kvm/vmx/tdx_arch.h
> index 9f93250d22b9..ba41fefa47ee 100644
> --- a/arch/x86/kvm/vmx/tdx_arch.h
> +++ b/arch/x86/kvm/vmx/tdx_arch.h
> @@ -218,4 +218,23 @@ union tdx_sept_level_state {
> u64 raw;
> };
>
> +union tdx_ext_exit_qualification {
> + struct {
> + u64 type : 4;
> + u64 reserved0 : 28;
> + u64 req_sept_level : 3;
> + u64 err_sept_level : 3;
> + u64 err_sept_state : 8;
> + u64 err_sept_is_leaf : 1;
> + u64 reserved1 : 17;
> + };
> + u64 full;
> +};
> +
> +enum tdx_ext_exit_qualification_type {
> + EXT_EXIT_QUAL_NONE = 0,
> + EXT_EXIT_QUAL_ACCEPT,
Since this value should be fixed to 1, maybe it's better to initialize it to 1
explicitly, to be future-proof? I.e., something like (just a suggestion):
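
	enum tdx_ext_exit_qualification_type {
		EXT_EXIT_QUAL_NONE = 0,
		EXT_EXIT_QUAL_ACCEPT = 1,	/* value is architecturally fixed to 1 */
		NUM_EXT_EXIT_QUAL,
	};
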
> + NUM_EXT_EXIT_QUAL,
> +};
> +
> #endif /* __KVM_X86_TDX_ARCH_H */
> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> index ae9ba0731521..fb3913df6a5d 100644
> --- a/arch/x86/kvm/vmx/vmx.c
> +++ b/arch/x86/kvm/vmx/vmx.c
> @@ -5753,7 +5753,7 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
> if (unlikely(allow_smaller_maxphyaddr && kvm_vcpu_is_illegal_gpa(vcpu, gpa)))
> return kvm_emulate_instruction(vcpu, 0);
>
> - return __vmx_handle_ept_violation(vcpu, gpa, exit_qualification, 0);
> + return __vmx_handle_ept_violation(vcpu, gpa, exit_qualification, PG_LEVEL_NONE);
> }
>
> static int handle_ept_misconfig(struct kvm_vcpu *vcpu)