Message-ID: <bf2fcc93babdbf541fffc6cc5f5756f391773a75.camel@kernel.org>
Date:   Thu, 14 Apr 2022 14:18:12 +0300
From:   Jarkko Sakkinen <jarkko@...nel.org>
To:     Reinette Chatre <reinette.chatre@...el.com>,
        dave.hansen@...ux.intel.com, tglx@...utronix.de, bp@...en8.de,
        luto@...nel.org, mingo@...hat.com, linux-sgx@...r.kernel.org,
        x86@...nel.org, shuah@...nel.org, linux-kselftest@...r.kernel.org
Cc:     seanjc@...gle.com, kai.huang@...el.com, cathy.zhang@...el.com,
        cedric.xing@...el.com, haitao.huang@...el.com,
        mark.shanahan@...el.com, vijay.dhanraj@...el.com, hpa@...or.com,
        linux-kernel@...r.kernel.org
Subject: Re: [PATCH V4 14/31] x86/sgx: Support VA page allocation without
 reclaiming

On Wed, 2022-04-13 at 14:10 -0700, Reinette Chatre wrote:
> Data in struct sgx_encl should be protected by the mutex
> sgx_encl->lock. One exception is sgx_encl->page_cnt, which
> is incremented (in sgx_encl_grow()) when an enclave page
> is added to the enclave. The mutex is not held there so that
> the reclaimer can be called directly if no EPC pages are
> available at the time to back a new VA page.
> 
> Incrementing sgx_encl->page_cnt without sgx_encl->lock held
> is currently (before SGX2) safe from concurrent updates because
> all paths in which sgx_encl_grow() is called occur before
> enclave initialization and are protected with an atomic
> operation on SGX_ENCL_IOCTL.
> 
> SGX2 includes support for dynamically adding pages after
> enclave initialization where the protection of SGX_ENCL_IOCTL
> is not available.
> 
> Make direct reclaim of EPC pages optional when new VA pages
> are added to the enclave. Essentially, the existing "reclaim"
> flag used when regular EPC pages are added to an enclave is
> now also exposed to the caller when allocating VA pages,
> instead of being hard-coded to "true".
> 
> When adding pages without invoking the reclaimer, the caller
> can hold sgx_encl->lock, gaining its protection against
> concurrent updates to sgx_encl->page_cnt after enclave
> initialization.
> 
> No functional change.
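
For context, the locking consequence on the SGX2 side would look roughly
like the sketch below (not part of this patch; the function name and the
elided EAUG step are made up for illustration). Because encl->lock is
held across sgx_encl_grow(), the caller has to pass reclaim=false so the
allocation cannot fall back to direct reclaim while the mutex is held:

	/*
	 * Hypothetical SGX2-style caller adding a page to an
	 * already-initialized enclave.
	 */
	static int sgx_encl_add_page_dynamic(struct sgx_encl *encl)
	{
		struct sgx_va_page *va_page;
		int ret = 0;

		mutex_lock(&encl->lock);

		/* Must not trigger direct reclaim: encl->lock is held. */
		va_page = sgx_encl_grow(encl, false);
		if (IS_ERR(va_page)) {
			ret = PTR_ERR(va_page);
			goto out_unlock;
		}
		if (va_page)
			list_add(&va_page->list, &encl->va_pages);

		/* ... EAUG and map the new page here ... */

	out_unlock:
		mutex_unlock(&encl->lock);
		return ret;
	}
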
> 
> Reported-by: Haitao Huang <haitao.huang@...el.com>
> Tested-by: Haitao Huang <haitao.huang@...el.com>
> Signed-off-by: Reinette Chatre <reinette.chatre@...el.com>

Reviewed-by: Jarkko Sakkinen <jarkko@...nel.org>

Nit: I don't think the Tested-by is in the right patch here. Maybe
Haitao's Tested-by should be moved to the patch that actually adds
support for EAUG? Not something I would NAK this patch over, just
wondering...

> ---
> Changes since V3:
> - New patch prompted by Haitao encountering the
>   WARN_ON_ONCE(encl->page_cnt % SGX_VA_SLOT_COUNT)
>   within sgx_encl_grow() during his SGX2 multi-threaded
>   unit tests.
> 
>  arch/x86/kernel/cpu/sgx/encl.c  | 6 ++++--
>  arch/x86/kernel/cpu/sgx/encl.h  | 4 ++--
>  arch/x86/kernel/cpu/sgx/ioctl.c | 8 ++++----
>  3 files changed, 10 insertions(+), 8 deletions(-)
> 
> diff --git a/arch/x86/kernel/cpu/sgx/encl.c b/arch/x86/kernel/cpu/sgx/encl.c
> index 546423753e4c..92516aeca405 100644
> --- a/arch/x86/kernel/cpu/sgx/encl.c
> +++ b/arch/x86/kernel/cpu/sgx/encl.c
> @@ -869,6 +869,8 @@ void sgx_zap_enclave_ptes(struct sgx_encl *encl, unsigned long addr)
>  
>  /**
>   * sgx_alloc_va_page() - Allocate a Version Array (VA) page
> + * @reclaim: Reclaim EPC pages directly if none available. Enclave
> + *           mutex should not be held if this is set.
>   *
>   * Allocate a free EPC page and convert it to a Version Array (VA) page.
>   *
> @@ -876,12 +878,12 @@ void sgx_zap_enclave_ptes(struct sgx_encl *encl, unsigned long addr)
>   *   a VA page,
>   *   -errno otherwise
>   */
> -struct sgx_epc_page *sgx_alloc_va_page(void)
> +struct sgx_epc_page *sgx_alloc_va_page(bool reclaim)
>  {
>         struct sgx_epc_page *epc_page;
>         int ret;
>  
> -       epc_page = sgx_alloc_epc_page(NULL, true);
> +       epc_page = sgx_alloc_epc_page(NULL, reclaim);
>         if (IS_ERR(epc_page))
>                 return ERR_CAST(epc_page);
>  
> diff --git a/arch/x86/kernel/cpu/sgx/encl.h b/arch/x86/kernel/cpu/sgx/encl.h
> index 253ebdd1c5be..66adb8faec45 100644
> --- a/arch/x86/kernel/cpu/sgx/encl.h
> +++ b/arch/x86/kernel/cpu/sgx/encl.h
> @@ -116,14 +116,14 @@ struct sgx_encl_page *sgx_encl_page_alloc(struct sgx_encl *encl,
>                                           unsigned long offset,
>                                           u64 secinfo_flags);
>  void sgx_zap_enclave_ptes(struct sgx_encl *encl, unsigned long addr);
> -struct sgx_epc_page *sgx_alloc_va_page(void);
> +struct sgx_epc_page *sgx_alloc_va_page(bool reclaim);
>  unsigned int sgx_alloc_va_slot(struct sgx_va_page *va_page);
>  void sgx_free_va_slot(struct sgx_va_page *va_page, unsigned int offset);
>  bool sgx_va_page_full(struct sgx_va_page *va_page);
>  void sgx_encl_free_epc_page(struct sgx_epc_page *page);
>  struct sgx_encl_page *sgx_encl_load_page(struct sgx_encl *encl,
>                                          unsigned long addr);
> -struct sgx_va_page *sgx_encl_grow(struct sgx_encl *encl);
> +struct sgx_va_page *sgx_encl_grow(struct sgx_encl *encl, bool reclaim);
>  void sgx_encl_shrink(struct sgx_encl *encl, struct sgx_va_page *va_page);
>  
>  #endif /* _X86_ENCL_H */
> diff --git a/arch/x86/kernel/cpu/sgx/ioctl.c b/arch/x86/kernel/cpu/sgx/ioctl.c
> index bb8cdb2ad0d1..5d41aa204761 100644
> --- a/arch/x86/kernel/cpu/sgx/ioctl.c
> +++ b/arch/x86/kernel/cpu/sgx/ioctl.c
> @@ -17,7 +17,7 @@
>  #include "encl.h"
>  #include "encls.h"
>  
> -struct sgx_va_page *sgx_encl_grow(struct sgx_encl *encl)
> +struct sgx_va_page *sgx_encl_grow(struct sgx_encl *encl, bool reclaim)
>  {
>         struct sgx_va_page *va_page = NULL;
>         void *err;
> @@ -30,7 +30,7 @@ struct sgx_va_page *sgx_encl_grow(struct sgx_encl *encl)
>                 if (!va_page)
>                         return ERR_PTR(-ENOMEM);
>  
> -               va_page->epc_page = sgx_alloc_va_page();
> +               va_page->epc_page = sgx_alloc_va_page(reclaim);
>                 if (IS_ERR(va_page->epc_page)) {
>                         err = ERR_CAST(va_page->epc_page);
>                         kfree(va_page);
> @@ -64,7 +64,7 @@ static int sgx_encl_create(struct sgx_encl *encl, struct sgx_secs *secs)
>         struct file *backing;
>         long ret;
>  
> -       va_page = sgx_encl_grow(encl);
> +       va_page = sgx_encl_grow(encl, true);
>         if (IS_ERR(va_page))
>                 return PTR_ERR(va_page);
>         else if (va_page)
> @@ -275,7 +275,7 @@ static int sgx_encl_add_page(struct sgx_encl *encl, unsigned long src,
>                 return PTR_ERR(epc_page);
>         }
>  
> -       va_page = sgx_encl_grow(encl);
> +       va_page = sgx_encl_grow(encl, true);
>         if (IS_ERR(va_page)) {
>                 ret = PTR_ERR(va_page);
>                 goto err_out_free;


BR, Jarkko
