Message-Id: <CVHWAYNF4W0Y.12Y1RIQ3M41CL@suppilovahvero>
Date: Wed, 13 Sep 2023 18:30:11 +0300
From: "Jarkko Sakkinen" <jarkko@...nel.org>
To: "Haitao Huang" <haitao.huang@...ux.intel.com>,
<dave.hansen@...ux.intel.com>, <tj@...nel.org>,
<linux-kernel@...r.kernel.org>, <linux-sgx@...r.kernel.org>,
<x86@...nel.org>, <cgroups@...r.kernel.org>, <tglx@...utronix.de>,
<mingo@...hat.com>, <bp@...en8.de>, <hpa@...or.com>,
<sohil.mehta@...el.com>
Cc: <zhiquan1.li@...el.com>, <kristen@...ux.intel.com>,
<seanjc@...gle.com>, <zhanb@...rosoft.com>,
<anakrish@...rosoft.com>, <mikko.ylinen@...ux.intel.com>,
<yangjie@...rosoft.com>
Subject: Re: [PATCH v4 08/18] x86/sgx: Use a list to track to-be-reclaimed
pages
On Wed Sep 13, 2023 at 7:06 AM EEST, Haitao Huang wrote:
> From: Kristen Carlson Accardi <kristen@...ux.intel.com>
>
> Change sgx_reclaim_pages() to use a list rather than an array for
> storing the epc_pages that will be reclaimed. This change is needed
> to transition to the LRU implementation for EPC cgroup support.
>
> When the EPC cgroup is implemented, the reclaiming process will do a
> pre-order tree walk for the subtree starting from the limit-violating
> cgroup. When each node is visited, candidate pages are selected from
> its "reclaimable" LRU list and moved into this temporary list. Passing a
> list from node to node for temporary storage in this walk is more
> straightforward than using an array.
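
For illustration, the subtree walk described above could look roughly
like the sketch below. All of the names here (sgx_epc_cgroup,
sgx_epc_cgroup_from_css(), sgx_isolate_epc_pages(), sgx_do_reclaim())
are placeholders made up for the example, not identifiers from this
series:

static void sgx_epc_cgroup_reclaim(struct sgx_epc_cgroup *root)
{
	struct cgroup_subsys_state *css;
	LIST_HEAD(iso);	/* one temporary list shared across the walk */

	/* Pre-order walk over the subtree of the limit-violating cgroup. */
	rcu_read_lock();
	css_for_each_descendant_pre(css, &root->css) {
		struct sgx_epc_cgroup *epc_cg = sgx_epc_cgroup_from_css(css);

		/* Move candidates from this node's reclaimable LRU onto iso. */
		sgx_isolate_epc_pages(&epc_cg->lru, &iso);
	}
	rcu_read_unlock();

	/* Reclaim everything gathered on the shared list. */
	sgx_do_reclaim(&iso);
}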
>
> Signed-off-by: Sean Christopherson <sean.j.christopherson@...el.com>
> Signed-off-by: Kristen Carlson Accardi <kristen@...ux.intel.com>
> Signed-off-by: Haitao Huang <haitao.huang@...ux.intel.com>
> Cc: Sean Christopherson <seanjc@...gle.com>
> ---
> V4:
> - Changes needed for patch reordering
> - Revised commit message
>
> V3:
> - Removed list wrappers
> ---
> arch/x86/kernel/cpu/sgx/main.c | 40 +++++++++++++++-------------------
> 1 file changed, 18 insertions(+), 22 deletions(-)
>
> diff --git a/arch/x86/kernel/cpu/sgx/main.c b/arch/x86/kernel/cpu/sgx/main.c
> index c1ae19a154d0..fba06dc5abfe 100644
> --- a/arch/x86/kernel/cpu/sgx/main.c
> +++ b/arch/x86/kernel/cpu/sgx/main.c
> @@ -293,12 +293,11 @@ static void sgx_reclaimer_write(struct sgx_epc_page *epc_page,
> */
> static void sgx_reclaim_pages(void)
> {
> - struct sgx_epc_page *chunk[SGX_NR_TO_SCAN];
> struct sgx_backing backing[SGX_NR_TO_SCAN];
> + struct sgx_epc_page *epc_page, *tmp;
> struct sgx_encl_page *encl_page;
> - struct sgx_epc_page *epc_page;
> pgoff_t page_index;
> - int cnt = 0;
> + LIST_HEAD(iso);
> int ret;
> int i;
>
> @@ -314,18 +313,22 @@ static void sgx_reclaim_pages(void)
>
> if (kref_get_unless_zero(&encl_page->encl->refcount) != 0) {
> sgx_epc_page_set_state(epc_page, SGX_EPC_PAGE_RECLAIM_IN_PROGRESS);
> - chunk[cnt++] = epc_page;
> + list_move_tail(&epc_page->list, &iso);
> } else {
> - /* The owner is freeing the page. No need to add the
> - * page back to the list of reclaimable pages.
> +		/* The owner is freeing the page; remove it from the
> +		 * LRU list.
> */
> sgx_epc_page_reset_state(epc_page);
> + list_del_init(&epc_page->list);
> }
> }
> spin_unlock(&sgx_global_lru.lock);
>
> - for (i = 0; i < cnt; i++) {
> - epc_page = chunk[i];
> + if (list_empty(&iso))
> + return;
> +
> + i = 0;
> + list_for_each_entry_safe(epc_page, tmp, &iso, list) {
> encl_page = epc_page->owner;
>
> if (!sgx_reclaimer_age(epc_page))
> @@ -340,6 +343,7 @@ static void sgx_reclaim_pages(void)
> goto skip;
> }
>
> + i++;
> encl_page->desc |= SGX_ENCL_PAGE_BEING_RECLAIMED;
> mutex_unlock(&encl_page->encl->lock);
> continue;
> @@ -347,27 +351,19 @@ static void sgx_reclaim_pages(void)
> skip:
> spin_lock(&sgx_global_lru.lock);
> sgx_epc_page_set_state(epc_page, SGX_EPC_PAGE_RECLAIMABLE);
> - list_add_tail(&epc_page->list, &sgx_global_lru.reclaimable);
> + list_move_tail(&epc_page->list, &sgx_global_lru.reclaimable);
> spin_unlock(&sgx_global_lru.lock);
>
> kref_put(&encl_page->encl->refcount, sgx_encl_release);
> -
> - chunk[i] = NULL;
> - }
> -
> - for (i = 0; i < cnt; i++) {
> - epc_page = chunk[i];
> - if (epc_page)
> - sgx_reclaimer_block(epc_page);
> }
>
> - for (i = 0; i < cnt; i++) {
> - epc_page = chunk[i];
> - if (!epc_page)
> - continue;
> + list_for_each_entry(epc_page, &iso, list)
> + sgx_reclaimer_block(epc_page);
>
> + i = 0;
> + list_for_each_entry_safe(epc_page, tmp, &iso, list) {
> encl_page = epc_page->owner;
> - sgx_reclaimer_write(epc_page, &backing[i]);
> + sgx_reclaimer_write(epc_page, &backing[i++]);
>
> kref_put(&encl_page->encl->refcount, sgx_encl_release);
> sgx_epc_page_reset_state(epc_page);
> --
> 2.25.1
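
One subtlety worth noting for anyone reworking this later: the skip
path moves pages off the iso list while the walk is in flight, which
is why the _safe iterator is needed there. A minimal sketch of that
pattern, using only <linux/list.h> (the struct and field names are
made up for the example):

#include <linux/list.h>

struct demo_page {
	struct list_head list;
	bool keep;
};

/* Walk @iso and move rejected entries back to @lru mid-iteration. */
static void demo_filter(struct list_head *iso, struct list_head *lru)
{
	struct demo_page *page, *tmp;

	/*
	 * list_for_each_entry_safe() caches the next node in @tmp before
	 * the loop body runs, so moving @page to another list cannot
	 * derail the walk; plain list_for_each_entry() would follow
	 * @page's links onto @lru instead.
	 */
	list_for_each_entry_safe(page, tmp, iso, list) {
		if (!page->keep)
			list_move_tail(&page->list, lru);
	}
}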
LGTM
BR, Jarkko