Message-ID: <016c34af-399b-480e-a99c-eaf3e397d33a@intel.com>
Date: Thu, 25 Jul 2024 12:52:05 +1200
From: "Huang, Kai" <kai.huang@...el.com>
To: Dmitrii Kuvaiskii <dmitrii.kuvaiskii@...el.com>,
	<dave.hansen@...ux.intel.com>, <jarkko@...nel.org>,
	<haitao.huang@...ux.intel.com>, <reinette.chatre@...el.com>,
	<linux-sgx@...r.kernel.org>, <linux-kernel@...r.kernel.org>
CC: <mona.vij@...el.com>, <kailun.qin@...el.com>, <stable@...r.kernel.org>,
	Marcelina Kościelnicka <mwk@...isiblethingslab.com>
Subject: Re: [PATCH v4 2/3] x86/sgx: Resolve EAUG race where losing thread
 returns SIGBUS



On 5/07/2024 7:45 pm, Dmitrii Kuvaiskii wrote:
> Imagine an mmap()'d file. Two threads touch the same address at the same
> time and fault. Both allocate a physical page and race to install a PTE
> for that page. Only one will win the race. The loser frees its page, but
> still continues handling the fault as a success and returns
> VM_FAULT_NOPAGE from the fault handler.
> 
> The same race can happen with SGX. But there's a bug: the loser in the
> SGX case steers into a failure path. The loser EREMOVE's the winner's
> EPC page, then returns SIGBUS, likely killing the app.
> 
> Fix the SGX loser's behavior. Change the return code to VM_FAULT_NOPAGE
> to avoid SIGBUS and call sgx_free_epc_page() which avoids EREMOVE'ing
> the winner's page and only frees the page that the loser allocated.
> 
> The race can be illustrated as follows:
> 
> /*                             /*
>   * Fault on CPU1                * Fault on CPU2
>   * on enclave page X            * on enclave page X
>   */                             */
> sgx_vma_fault() {              sgx_vma_fault() {
> 
>    xa_load(&encl->page_array)     xa_load(&encl->page_array)
>        == NULL -->                    == NULL -->
> 
>    sgx_encl_eaug_page() {         sgx_encl_eaug_page() {
> 
>      ...                            ...
> 
>      /*                             /*
>       * alloc encl_page              * alloc encl_page
>       */                             */
>                                     mutex_lock(&encl->lock);
>                                     /*
>                                      * alloc EPC page
>                                      */
>                                     epc_page = sgx_alloc_epc_page(...);
>                                     /*
>                                      * add page to enclave's xarray
>                                      */
>                                     xa_insert(&encl->page_array, ...);
>                                     /*
>                                      * add page to enclave via EAUG
>                                      * (page is in pending state)
>                                      */
>                                     /*
>                                      * add PTE entry
>                                      */
>                                     vmf_insert_pfn(...);
> 
>                                     mutex_unlock(&encl->lock);
>                                     return VM_FAULT_NOPAGE;
>                                   }
>                                 }
>                                 /*
>                                  * All good up to here: enclave page
>                                  * successfully added to enclave,
>                                  * ready for EACCEPT from user space
>                                  */
>      mutex_lock(&encl->lock);
>      /*
>       * alloc EPC page
>       */
>      epc_page = sgx_alloc_epc_page(...);
>      /*
>       * add page to enclave's xarray,
>       * this fails with -EBUSY as this
>       * page was already added by CPU2
>       */
>      xa_insert(&encl->page_array, ...);

It seems the root cause of this issue is that we allocate encl_page 
outside of the encl->lock mutex, and the way to detect "whether the 
fault has already been handled by another thread" is to check whether 
xa_insert() returns -EBUSY -- which ...


> 
>    err_out_shrink:
>      sgx_encl_free_epc_page(epc_page) {
>        /*
>         * remove page via EREMOVE
>         *
>         * *BUG*: page added by CPU2 is
>         * yanked from enclave while it
>         * remains accessible from OS
>         * perspective (PTE installed)
>         */
>        /*
>         * free EPC page
>         */
>        sgx_free_epc_page(epc_page);
>      }
>      mutex_unlock(&encl->lock);
>      /*
>       * *BUG*: SIGBUS is returned
>       * for a valid enclave page
>       */
>      return VM_FAULT_SIGBUS;
>    }
> }
> 
> Fixes: 5a90d2c3f5ef ("x86/sgx: Support adding of pages to an initialized enclave")
> Cc: stable@...r.kernel.org
> Reported-by: Marcelina Kościelnicka <mwk@...isiblethingslab.com>
> Suggested-by: Reinette Chatre <reinette.chatre@...el.com>
> Signed-off-by: Dmitrii Kuvaiskii <dmitrii.kuvaiskii@...el.com>
> Reviewed-by: Haitao Huang <haitao.huang@...ux.intel.com>
> Reviewed-by: Jarkko Sakkinen <jarkko@...nel.org>
> Reviewed-by: Reinette Chatre <reinette.chatre@...el.com>
> ---
>   arch/x86/kernel/cpu/sgx/encl.c | 7 +++++--
>   1 file changed, 5 insertions(+), 2 deletions(-)
> 
> diff --git a/arch/x86/kernel/cpu/sgx/encl.c b/arch/x86/kernel/cpu/sgx/encl.c
> index c0a3c00284c8..9f7f9e57cdeb 100644
> --- a/arch/x86/kernel/cpu/sgx/encl.c
> +++ b/arch/x86/kernel/cpu/sgx/encl.c
> @@ -380,8 +380,11 @@ static vm_fault_t sgx_encl_eaug_page(struct vm_area_struct *vma,
>   	 * If ret == -EBUSY then page was created in another flow while
>   	 * running without encl->lock
>   	 */
> -	if (ret)
> +	if (ret) {
> +		if (ret == -EBUSY)
> +			vmret = VM_FAULT_NOPAGE;
>   		goto err_out_shrink;
> +	}


... isn't done in the current code, even though there's a comment about it:

         ret = xa_insert(&encl->page_array, PFN_DOWN(encl_page->desc),
                         encl_page, GFP_KERNEL);
         /*
          * If ret == -EBUSY then page was created in another flow while
          * running without encl->lock
          */
         if (ret)
                 goto err_out_shrink;


And this patch actually does that.
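
For clarity, this is roughly what the xa_insert() error handling looks 
like with the patch applied (context trimmed):

         ret = xa_insert(&encl->page_array, PFN_DOWN(encl_page->desc),
                         encl_page, GFP_KERNEL);
         /*
          * If ret == -EBUSY then the page was created in another flow
          * while running without encl->lock: treat the fault as handled
          * instead of as a fatal error.
          */
         if (ret) {
                 if (ret == -EBUSY)
                         vmret = VM_FAULT_NOPAGE;
                 goto err_out_shrink;
         }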

But instead of using xa_insert() to detect such a case, by which point 
we have already done a bunch of things that all need to be reverted if 
xa_insert() fails, could we just re-check encl_page while holding 
encl->lock and return early if another thread has already done the job?

Something like below (build tested only):

diff --git a/arch/x86/kernel/cpu/sgx/encl.c b/arch/x86/kernel/cpu/sgx/encl.c
index 279148e72459..7bf63d1b047b 100644
--- a/arch/x86/kernel/cpu/sgx/encl.c
+++ b/arch/x86/kernel/cpu/sgx/encl.c
@@ -339,6 +339,18 @@ static vm_fault_t sgx_encl_eaug_page(struct vm_area_struct *vma,
         if (!test_bit(SGX_ENCL_INITIALIZED, &encl->flags))
                 return VM_FAULT_SIGBUS;

+
+       mutex_lock(&encl->lock);
+
+       /*
+        * Multiple threads may try to fault in the same EPC page
+        * concurrently.  Re-check if another thread has already
+        * done that.
+        */
+       encl_page = xa_load(&encl->page_array, PFN_DOWN(addr));
+       if (encl_page)
+               goto done;
+
         /*
          * Ignore internal permission checking for dynamically added pages.
          * They matter only for data added during the pre-initialization
@@ -347,10 +359,10 @@ static vm_fault_t sgx_encl_eaug_page(struct vm_area_struct *vma,
          */
         secinfo_flags = SGX_SECINFO_R | SGX_SECINFO_W | SGX_SECINFO_X;
        encl_page = sgx_encl_page_alloc(encl, addr - encl->base, secinfo_flags);
-       if (IS_ERR(encl_page))
-               return VM_FAULT_OOM;
-
-       mutex_lock(&encl->lock);
+       if (IS_ERR(encl_page)) {
+               vmret = VM_FAULT_OOM;
+               goto err_out;
+       }

         epc_page = sgx_encl_load_secs(encl);
         if (IS_ERR(epc_page)) {
@@ -378,10 +390,6 @@ static vm_fault_t sgx_encl_eaug_page(struct vm_area_struct *vma,

         ret = xa_insert(&encl->page_array, PFN_DOWN(encl_page->desc),
                         encl_page, GFP_KERNEL);
-       /*
-        * If ret == -EBUSY then page was created in another flow while
-        * running without encl->lock
-        */
         if (ret)
                 goto err_out_shrink;

@@ -391,7 +399,7 @@ static vm_fault_t sgx_encl_eaug_page(struct vm_area_struct *vma,

         ret = __eaug(&pginfo, sgx_get_epc_virt_addr(epc_page));
         if (ret)
-               goto err_out;
+               goto err_out_eaug;

         encl_page->encl = encl;
         encl_page->epc_page = epc_page;
@@ -410,10 +418,11 @@ static vm_fault_t sgx_encl_eaug_page(struct vm_area_struct *vma,
                 mutex_unlock(&encl->lock);
                 return VM_FAULT_SIGBUS;
         }
+done:
         mutex_unlock(&encl->lock);
         return VM_FAULT_NOPAGE;

-err_out:
+err_out_eaug:
         xa_erase(&encl->page_array, PFN_DOWN(encl_page->desc));

  err_out_shrink:
@@ -421,9 +430,9 @@ static vm_fault_t sgx_encl_eaug_page(struct vm_area_struct *vma,
  err_out_epc:
         sgx_encl_free_epc_page(epc_page);
  err_out_unlock:
-       mutex_unlock(&encl->lock);
         kfree(encl_page);
-
+err_out:
+       mutex_unlock(&encl->lock);
         return vmret;
  }
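
In other words, the intent is the pattern below, a heavily simplified 
sketch of the resulting fault path that omits the allocation/EAUG steps 
and most of the error handling:

         mutex_lock(&encl->lock);

         /*
          * Re-check under encl->lock: if another thread has already
          * EAUG'ed and inserted this page, there is nothing left to do.
          */
         encl_page = xa_load(&encl->page_array, PFN_DOWN(addr));
         if (encl_page)
                 goto done;

         /*
          * ... allocate encl_page and the EPC page, EAUG, xa_insert(),
          * vmf_insert_pfn() ...
          */

 done:
         mutex_unlock(&encl->lock);
         return VM_FAULT_NOPAGE;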



