Message-ID: <87o87s6mb7.ffs@tglx>
Date: Thu, 14 Oct 2021 10:30:04 +0200
From: Thomas Gleixner <tglx@...utronix.de>
To: Kuppuswamy Sathyanarayanan
<sathyanarayanan.kuppuswamy@...ux.intel.com>,
Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
x86@...nel.org, Paolo Bonzini <pbonzini@...hat.com>,
David Hildenbrand <david@...hat.com>,
Andrea Arcangeli <aarcange@...hat.com>,
Josh Poimboeuf <jpoimboe@...hat.com>,
Juergen Gross <jgross@...e.com>, Deep Shah <sdeep@...are.com>,
VMware Inc <pv-drivers@...are.com>,
Vitaly Kuznetsov <vkuznets@...hat.com>,
Wanpeng Li <wanpengli@...cent.com>,
Jim Mattson <jmattson@...gle.com>,
Joerg Roedel <joro@...tes.org>
Cc: Peter H Anvin <hpa@...or.com>, Dave Hansen <dave.hansen@...el.com>,
Tony Luck <tony.luck@...el.com>,
Dan Williams <dan.j.williams@...el.com>,
Andi Kleen <ak@...ux.intel.com>,
Kirill Shutemov <kirill.shutemov@...ux.intel.com>,
Sean Christopherson <seanjc@...gle.com>,
Kuppuswamy Sathyanarayanan <knsathya@...nel.org>,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH v10 06/11] x86/traps: Add #VE support for TDX guest
On Fri, Oct 08 2021 at 22:37, Kuppuswamy Sathyanarayanan wrote:
>
> +/*
> + * Used by #VE exception handler to gather the #VE exception
> + * info from the TDX module. This is software only structure
> + * and not related to TDX module/VMM.
> + */
> +struct ve_info {
> +	u64 exit_reason;
> +	u64 exit_qual;
> +	u64 gla; /* Guest Linear (virtual) Address */
> +	u64 gpa; /* Guest Physical (virtual) Address */
Please do not use tail comments, and with a tab between type and name
this becomes more readable:

	/* Guest Linear (virtual) Address */
	u64	gla;
	/* Guest Physical (virtual) Address */
	u64	gpa;
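
Applied to the whole thing, the struct would then look like this (the
u32 types for instr_len/instr_info are just my guess from the r10
split further down):

struct ve_info {
	u64	exit_reason;
	u64	exit_qual;
	/* Guest Linear (virtual) Address */
	u64	gla;
	/* Guest Physical (virtual) Address */
	u64	gpa;
	u32	instr_len;
	u32	instr_info;
};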
Hmm?
> +bool tdx_get_ve_info(struct ve_info *ve)
> +{
> +	struct tdx_module_output out;
> +	u64 ret;
> +
> +	if (!ve)
> +		return false;
This should be WARN_ON_ONCE() if at all.
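Something along these lines, i.e. make the unexpected NULL pointer
loud instead of silently returning:

	if (WARN_ON_ONCE(!ve))
		return false;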
> +	/*
> +	 * NMIs and machine checks are suppressed. Before this point any
> +	 * #VE is fatal. After this point (TDGETVEINFO call), NMIs and
> +	 * additional #VEs are permitted (but it is expected not to
> +	 * happen unless kernel panics).
> +	 */
> +	ret = __tdx_module_call(TDX_GET_VEINFO, 0, 0, 0, 0, &out);
> +	if (ret)
> +		return false;
	if (__tdx...())
		return false;
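
I.e. drop the local 'ret' and check the return value directly:

	if (__tdx_module_call(TDX_GET_VEINFO, 0, 0, 0, 0, &out))
		return false;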
> +	ve->exit_reason = out.rcx;
> +	ve->exit_qual = out.rdx;
> +	ve->gla = out.r8;
> +	ve->gpa = out.r9;
> +	ve->instr_len = out.r10 & UINT_MAX;
> +	ve->instr_info = out.r10 >> 32;
> +
> +	return true;
> +}
> +
> +bool tdx_handle_virtualization_exception(struct pt_regs *regs,
> +					 struct ve_info *ve)
> +{
> +	pr_warn("Unexpected #VE: %lld\n", ve->exit_reason);
> +	return false;
> +}
> +
> void __init tdx_early_init(void)
> {
> if (!is_tdx_guest())
> diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
> index a58800973aed..70d76c3a548f 100644
> --- a/arch/x86/kernel/traps.c
> +++ b/arch/x86/kernel/traps.c
> @@ -61,6 +61,7 @@
> #include <asm/insn.h>
> #include <asm/insn-eval.h>
> #include <asm/vdso.h>
> +#include <asm/tdx.h>
>
> #ifdef CONFIG_X86_64
> #include <asm/x86_init.h>
> @@ -1140,6 +1141,82 @@ DEFINE_IDTENTRY(exc_device_not_available)
> }
> }
>
> +#ifdef CONFIG_INTEL_TDX_GUEST
> +#define VE_FAULT_STR "VE fault"
> +static void ve_raise_fault(struct pt_regs *regs, long error_code)
Please do not glue the #define and the function definition
together. Newlines exist for a reason.
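
I.e. simply:

#define VE_FAULT_STR "VE fault"

static void ve_raise_fault(struct pt_regs *regs, long error_code)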
> +{
> +	struct task_struct *tsk = current;
> +
> +	if (user_mode(regs)) {
> +		tsk->thread.error_code = error_code;
> +		tsk->thread.trap_nr = X86_TRAP_VE;
> +
> +		/*
> +		 * Not fixing up VDSO exceptions similar to #GP handler
> +		 * because it is expected that VDSO doesn't trigger #VE.
Expected?
> +		 */
> +		show_signal(tsk, SIGSEGV, "", VE_FAULT_STR, regs, error_code);
> +		force_sig(SIGSEGV);
> +		return;
> +	}
> +
> +	/*
> +	 * Attempt to recover from #VE exception failure without
> +	 * triggering OOPS (useful for MSR read/write failures)
> +	 */
> +	if (fixup_exception(regs, X86_TRAP_VE, error_code, 0))
> +		return;
> +
> +	tsk->thread.error_code = error_code;
> +	tsk->thread.trap_nr = X86_TRAP_VE;
> +
> +	/*
> +	 * To be potentially processing a kprobe fault and to trust the result
> +	 * from kprobe_running(), it should be non-preemptible.
> +	 */
> +	if (!preemptible() &&
> +	    kprobe_running() &&
	if (!preemptible() && kprobe_running() &&
> +	    kprobe_fault_handler(regs, X86_TRAP_VE))
perhaps?
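
Put together this would read (assuming the branch just does a return
like the other fault handlers):

	if (!preemptible() && kprobe_running() &&
	    kprobe_fault_handler(regs, X86_TRAP_VE))
		return;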
> +
> +DEFINE_IDTENTRY(exc_virtualization_exception)
> +{
> +	struct ve_info ve;
> +	bool ret;
> +
> +	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
Please remove that. The idtentry code is already taking care of that.
> +	/*
> +	 * NMIs/Machine-checks/Interrupts will be in a disabled state
> +	 * till TDGETVEINFO TDCALL is executed. This prevents #VE
> +	 * nesting issue.
s/This prevents.../This ensures that VE info cannot be overwritten by a
nested #VE/
Or something like that perhaps?
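
The comment would then read something like:

	/*
	 * NMIs/Machine-checks/Interrupts will be in a disabled state
	 * till TDGETVEINFO TDCALL is executed. This ensures that VE
	 * info cannot be overwritten by a nested #VE.
	 */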
Also some comment about #VE in general above the DEFINE_IDTENTRY()
would be appreciated.
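
Rough sketch of what I mean, the wording is entirely up to you:

/*
 * Virtualization Exception (#VE) handler.
 *
 * #VEs are delivered by the TDX module for guest operations which
 * cannot be handled natively, e.g. certain MSR accesses. The handler
 * retrieves the exception information via TDGETVEINFO; NMIs and
 * machine checks stay suppressed until that call has completed, see
 * tdx_get_ve_info().
 */
DEFINE_IDTENTRY(exc_virtualization_exception)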
Thanks,
tglx