Message-ID: <CALCETrUJG4LLk4SkBdufUYAx=ec=NyY2wv=Js=72=am6nFQfiw@mail.gmail.com>
Date: Fri, 16 Oct 2020 18:48:53 -0700
From: Andy Lutomirski <luto@...capital.net>
To: Jarkko Sakkinen <jarkko.sakkinen@...ux.intel.com>
Cc: X86 ML <x86@...nel.org>, linux-sgx@...r.kernel.org,
LKML <linux-kernel@...r.kernel.org>,
Sean Christopherson <sean.j.christopherson@...el.com>,
Jethro Beekman <jethro@...tanix.com>,
Cedric Xing <cedric.xing@...el.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Andy Shevchenko <andriy.shevchenko@...ux.intel.com>,
asapek@...gle.com, Borislav Petkov <bp@...en8.de>,
chenalexchen@...gle.com, Conrad Parker <conradparker@...gle.com>,
cyhanish@...gle.com, Dave Hansen <dave.hansen@...el.com>,
"Huang, Haitao" <haitao.huang@...el.com>,
"Huang, Kai" <kai.huang@...el.com>,
"Svahn, Kai" <kai.svahn@...el.com>, Keith Moyer <kmoy@...gle.com>,
Christian Ludloff <ludloff@...gle.com>,
Andrew Lutomirski <luto@...nel.org>,
Neil Horman <nhorman@...hat.com>,
Nathaniel McCallum <npmccallum@...hat.com>,
Patrick Uiterwijk <puiterwijk@...hat.com>,
David Rientjes <rientjes@...gle.com>,
Thomas Gleixner <tglx@...utronix.de>, yaozhangx@...gle.com,
mikko.ylinen@...el.com
Subject: Re: [PATCH v39 21/24] x86/vdso: Implement a vDSO for Intel SGX
enclave call
On Fri, Oct 2, 2020 at 9:51 PM Jarkko Sakkinen
<jarkko.sakkinen@...ux.intel.com> wrote:
>
> From: Sean Christopherson <sean.j.christopherson@...el.com>
>
> An SGX runtime must be aware of exceptions that happen inside an
> enclave. Introduce a vDSO call that wraps the EENTER/ERESUME cycle and
> returns the CPU exception to the caller exactly when it happens.
>
> The kernel fixes up the exception information into RDI, RSI and RDX. The
> vDSO handler then fills this information into the user-provided buffer
> or, alternatively, triggers a user-provided callback at the time of the
> exception.
>
> The calling convention supports passing the parameters in the standard
> RDI, RSI, RDX, RCX, R8 and R9 registers, i.e. it is possible to declare
> the vDSO as a C prototype, but other than that there is no specific
> support for the System V ABI. Storing the XSAVE state etc. is entirely
> the responsibility of the enclave and the associated run-time.
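
FWIW, my reading of the asm is that this lets the runtime declare the
entry point roughly like this (the typedef name and parameter names are
mine, not from the patch):

	struct sgx_enclave_run;

	typedef int (*vdso_sgx_enter_enclave_t)(unsigned long rdi,
						unsigned long rsi,
						unsigned long rdx,
						unsigned int leaf,
						unsigned long r8,
						unsigned long r9,
						struct sgx_enclave_run *run);

with the leaf in the RCX slot and the run struct as the lone stack
argument.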
>
> Suggested-by: Andy Lutomirski <luto@...capital.net>
> Acked-by: Jethro Beekman <jethro@...tanix.com>
> Tested-by: Jethro Beekman <jethro@...tanix.com>
> Signed-off-by: Sean Christopherson <sean.j.christopherson@...el.com>
> Co-developed-by: Cedric Xing <cedric.xing@...el.com>
> Signed-off-by: Cedric Xing <cedric.xing@...el.com>
> Co-developed-by: Jarkko Sakkinen <jarkko.sakkinen@...ux.intel.com>
> Signed-off-by: Jarkko Sakkinen <jarkko.sakkinen@...ux.intel.com>
> +SYM_FUNC_START(__vdso_sgx_enter_enclave)
> + /* Prolog */
> + .cfi_startproc
> + push %rbp
> + .cfi_adjust_cfa_offset 8
> + .cfi_rel_offset %rbp, 0
> + mov %rsp, %rbp
> + .cfi_def_cfa_register %rbp
> + push %rbx
> + .cfi_rel_offset %rbx, -8
This *looks* right, but I'm not really an expert.
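Walking through the CFA arithmetic (assuming I have the rules right): at
entry CFA = %rsp + 8; after push %rbp, CFA = %rsp + 16 with %rbp saved
at CFA - 16; after the mov, CFA = %rbp + 16; the push %rbx then stores
%rbx at %rbp - 8, which is what the .cfi_rel_offset says. So it checks
out.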
> +
> + mov %ecx, %eax
> +.Lenter_enclave:
> + /* EENTER <= leaf <= ERESUME */
> + cmp $EENTER, %eax
> + jb .Linvalid_input
> + cmp $ERESUME, %eax
> + ja .Linvalid_input
> +
> + mov SGX_ENCLAVE_OFFSET_OF_RUN(%rbp), %rcx
> +
> + /* Validate that the reserved area contains only zeros. */
> + push %rax
> + push %rbx
This could use a .cfi_rel_offset (or some such) annotation for %rbx.
> + mov $SGX_ENCLAVE_RUN_RESERVED_START, %rbx
> +1:
> + mov (%rcx, %rbx), %rax
> + cmpq $0, %rax
> + jne .Linvalid_input
> +
> + add $8, %rbx
> + cmpq $SGX_ENCLAVE_RUN_RESERVED_END, %rbx
> + jne 1b
> + pop %rbx
This should undo it.
> + pop %rax
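Concretely, I'd expect something like this (untested, and assuming
.cfi_rel_offset offsets are relative to the CFA register, %rbp here):

	push	%rax			/* call-clobbered, no annotation needed */
	push	%rbx
	.cfi_rel_offset	%rbx, -24	/* second save of %rbx, at %rbp - 24 */
	...
	pop	%rbx
	.cfi_rel_offset	%rbx, -8	/* back to the prologue's save slot */
	pop	%rax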
> +
> + /* Load TCS and AEP */
> + mov SGX_ENCLAVE_RUN_TCS(%rcx), %rbx
> + lea .Lasync_exit_pointer(%rip), %rcx
> +
> + /* Single ENCLU serving as both EENTER and AEP (ERESUME) */
> +.Lasync_exit_pointer:
> +.Lenclu_eenter_eresume:
> + enclu
> +
> + /* EEXIT jumps here unless the enclave is doing something fancy. */
> + mov SGX_ENCLAVE_OFFSET_OF_RUN(%rbp), %rbx
> +
> + /* Set exit_reason. */
> + movl $EEXIT, SGX_ENCLAVE_RUN_LEAF(%rbx)
> +
> + /* Invoke userspace's exit handler if one was provided. */
> +.Lhandle_exit:
> + cmpq $0, SGX_ENCLAVE_RUN_USER_HANDLER(%rbx)
> + jne .Linvoke_userspace_handler
> +
> + /* Success, in the sense that ENCLU was attempted. */
> + xor %eax, %eax
> +
> +.Lout:
> + pop %rbx
and this should undo the prologue's .cfi_rel_offset.
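e.g. something like:

	pop	%rbx
	.cfi_restore	%rbx

so the unwinder stops looking for %rbx on the stack after this point.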
> + leave
> + .cfi_def_cfa %rsp, 8
> + ret
> +
> + /* The out-of-line code runs with the pre-leave stack frame. */
> + .cfi_def_cfa %rbp, 16
> +
> +.Linvalid_input:
Here %rax and %rbx may still be pushed from the validation loop, and the
pop %rbx plus leave at .Lout happens to fix that up (the pushed %rbx has
the same value as the prologue's save), so okay.
> + mov $(-EINVAL), %eax
> + jmp .Lout
> +
> +.Lhandle_exception:
> + mov SGX_ENCLAVE_OFFSET_OF_RUN(%rbp), %rbx
> +
> + /* Set the exception info. */
> + mov %eax, (SGX_ENCLAVE_RUN_LEAF)(%rbx)
> + mov %di, (SGX_ENCLAVE_RUN_EXCEPTION_VECTOR)(%rbx)
> + mov %si, (SGX_ENCLAVE_RUN_EXCEPTION_ERROR_CODE)(%rbx)
> + mov %rdx, (SGX_ENCLAVE_RUN_EXCEPTION_ADDR)(%rbx)
> + jmp .Lhandle_exit
> +
> +.Linvoke_userspace_handler:
> + /* Pass the untrusted RSP (at exit) to the callback via %rcx. */
> + mov %rsp, %rcx
> +
> +	/* Save the struct sgx_enclave_run pointer; %rbx is about to be clobbered. */
> + mov %rbx, %rax
> +
> + /* Save the untrusted RSP offset in %rbx (non-volatile register). */
> + mov %rsp, %rbx
> + and $0xf, %rbx
> +
> + /*
> + * Align stack per x86_64 ABI. Note, %rsp needs to be 16-byte aligned
> + * _after_ pushing the parameters on the stack, hence the bonus push.
> + */
> + and $-0x10, %rsp
> + push %rax
> +
> + /* Push struct sgx_enclave_exception as a param to the callback. */
> + push %rax
> +
> + /* Clear RFLAGS.DF per x86_64 ABI */
> + cld
> +
> + /*
> + * Load the callback pointer to %rax and lfence for LVI (load value
> + * injection) protection before making the call.
> + */
> + mov SGX_ENCLAVE_RUN_USER_HANDLER(%rax), %rax
> + lfence
> + call *%rax
> +
> + /* Undo the post-exit %rsp adjustment. */
> + lea 0x10(%rsp, %rbx), %rsp
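
From the register and stack setup above, the callback ends up with a
prototype roughly like this (names are mine):

	typedef int (*sgx_enclave_user_handler_t)(long rdi, long rsi,
						  long rdx, long ursp,
						  long r8, long r9,
						  struct sgx_enclave_run *run);

where ursp is the untrusted %rsp at enclave exit (passed in %rcx) and
the run pointer is the lone stack argument.  The first push of %rax is
the bonus push that keeps %rsp 16-byte aligned at the call; the second
is the actual stack argument, and the lea above pops both plus the
alignment fudge saved in %rbx.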
> +
> + /*
> +	 * If the return from the callback is zero or negative, return
> +	 * immediately, else re-execute ENCLU with the positive return value
> + * the requested ENCLU leaf.
> + */
> + cmp $0, %eax
> + jle .Lout
> + jmp .Lenter_enclave
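
FWIW, the retry loop means a handler can request re-entry simply by
returning the desired leaf, e.g. (hypothetical sketch, field names
approximate):

	int handler(long rdi, long rsi, long rdx, long ursp,
		    long r8, long r9, struct sgx_enclave_run *run)
	{
		if (run->leaf == EEXIT)
			return 0;	/* normal exit, we're done */

		/* an exception was reported; fix it up, then go back in */
		return ERESUME;
	}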
> +
> + .cfi_endproc
> +
> +_ASM_VDSO_EXTABLE_HANDLE(.Lenclu_eenter_eresume, .Lhandle_exception)