Message-Id: <20231030182013.40086-7-haitao.huang@linux.intel.com>
Date: Mon, 30 Oct 2023 11:20:07 -0700
From: Haitao Huang <haitao.huang@...ux.intel.com>
To: jarkko@...nel.org, dave.hansen@...ux.intel.com, tj@...nel.org,
mkoutny@...e.com, linux-kernel@...r.kernel.org,
linux-sgx@...r.kernel.org, x86@...nel.org, cgroups@...r.kernel.org,
tglx@...utronix.de, mingo@...hat.com, bp@...en8.de, hpa@...or.com,
sohil.mehta@...el.com
Cc: zhiquan1.li@...el.com, kristen@...ux.intel.com, seanjc@...gle.com,
zhanb@...rosoft.com, anakrish@...rosoft.com,
mikko.ylinen@...ux.intel.com, yangjie@...rosoft.com,
Sean Christopherson <sean.j.christopherson@...el.com>,
Haitao Huang <haitao.huang@...ux.intel.com>
Subject: [PATCH v6 06/12] x86/sgx: Use sgx_epc_lru_list for existing active page list

From: Sean Christopherson <sean.j.christopherson@...el.com>

In the future, each cgroup will need an LRU list to track reclaimable
pages. For now, just replace the existing sgx_active_page_list and its
spinlock in the reclaimer with a single global sgx_epc_lru_list struct.
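
For reference, sgx_epc_lru_list is introduced by an earlier patch in this
series (presumably in sgx.h). A minimal sketch of the struct and its init
helper, with the layout inferred from how sgx_global_lru is used below,
not copied from the actual header:

	/*
	 * Sketch only: fields inferred from usage in this patch
	 * (sgx_global_lru.lock, sgx_global_lru.reclaimable, sgx_lru_init()).
	 */
	struct sgx_epc_lru_list {
		spinlock_t lock;
		struct list_head reclaimable;
	};

	static inline void sgx_lru_init(struct sgx_epc_lru_list *lru)
	{
		spin_lock_init(&lru->lock);
		INIT_LIST_HEAD(&lru->reclaimable);
	}
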
Signed-off-by: Sean Christopherson <sean.j.christopherson@...el.com>
Co-developed-by: Kristen Carlson Accardi <kristen@...ux.intel.com>
Signed-off-by: Kristen Carlson Accardi <kristen@...ux.intel.com>
Co-developed-by: Haitao Huang <haitao.huang@...ux.intel.com>
Signed-off-by: Haitao Huang <haitao.huang@...ux.intel.com>
Cc: Sean Christopherson <seanjc@...gle.com>
---
V5:
- Spelled out SECS, VA (Jarkko)
V4:
- No change, only reordered the patch.
V3:
- Remove usage of list wrapper
---
 arch/x86/kernel/cpu/sgx/main.c | 39 +++++++++++++++++-----------------
 1 file changed, 20 insertions(+), 19 deletions(-)

diff --git a/arch/x86/kernel/cpu/sgx/main.c b/arch/x86/kernel/cpu/sgx/main.c
index 07606f391540..d347acd717fd 100644
--- a/arch/x86/kernel/cpu/sgx/main.c
+++ b/arch/x86/kernel/cpu/sgx/main.c
@@ -28,10 +28,9 @@ static DEFINE_XARRAY(sgx_epc_address_space);
 
 /*
  * These variables are part of the state of the reclaimer, and must be accessed
- * with sgx_reclaimer_lock acquired.
+ * with sgx_global_lru.lock acquired.
  */
-static LIST_HEAD(sgx_active_page_list);
-static DEFINE_SPINLOCK(sgx_reclaimer_lock);
+static struct sgx_epc_lru_list sgx_global_lru;
 
 static atomic_long_t sgx_nr_free_pages = ATOMIC_LONG_INIT(0);
 
@@ -306,13 +305,13 @@ static void sgx_reclaim_pages(void)
 	int ret;
 	int i;
 
-	spin_lock(&sgx_reclaimer_lock);
+	spin_lock(&sgx_global_lru.lock);
 	for (i = 0; i < SGX_NR_TO_SCAN; i++) {
-		if (list_empty(&sgx_active_page_list))
+		epc_page = list_first_entry_or_null(&sgx_global_lru.reclaimable,
+						    struct sgx_epc_page, list);
+		if (!epc_page)
 			break;
 
-		epc_page = list_first_entry(&sgx_active_page_list,
-					    struct sgx_epc_page, list);
 		list_del_init(&epc_page->list);
 		encl_page = epc_page->owner;
 
@@ -324,7 +323,7 @@ static void sgx_reclaim_pages(void)
 			 */
 			epc_page->flags &= ~SGX_EPC_PAGE_RECLAIMER_TRACKED;
 	}
-	spin_unlock(&sgx_reclaimer_lock);
+	spin_unlock(&sgx_global_lru.lock);
 
 	for (i = 0; i < cnt; i++) {
 		epc_page = chunk[i];
@@ -347,9 +346,9 @@ static void sgx_reclaim_pages(void)
 		continue;
 
 skip:
-		spin_lock(&sgx_reclaimer_lock);
-		list_add_tail(&epc_page->list, &sgx_active_page_list);
-		spin_unlock(&sgx_reclaimer_lock);
+		spin_lock(&sgx_global_lru.lock);
+		list_add_tail(&epc_page->list, &sgx_global_lru.reclaimable);
+		spin_unlock(&sgx_global_lru.lock);
 
 		kref_put(&encl_page->encl->refcount, sgx_encl_release);
 
@@ -380,7 +379,7 @@ static void sgx_reclaim_pages(void)
 static bool sgx_should_reclaim(unsigned long watermark)
 {
 	return atomic_long_read(&sgx_nr_free_pages) < watermark &&
-	       !list_empty(&sgx_active_page_list);
+	       !list_empty(&sgx_global_lru.reclaimable);
 }
 
 /*
@@ -432,6 +431,8 @@ static bool __init sgx_page_reclaimer_init(void)
 
 	ksgxd_tsk = tsk;
 
+	sgx_lru_init(&sgx_global_lru);
+
 	return true;
 }
 
@@ -507,10 +508,10 @@ struct sgx_epc_page *__sgx_alloc_epc_page(void)
  */
 void sgx_mark_page_reclaimable(struct sgx_epc_page *page)
 {
-	spin_lock(&sgx_reclaimer_lock);
+	spin_lock(&sgx_global_lru.lock);
 	page->flags |= SGX_EPC_PAGE_RECLAIMER_TRACKED;
-	list_add_tail(&page->list, &sgx_active_page_list);
-	spin_unlock(&sgx_reclaimer_lock);
+	list_add_tail(&page->list, &sgx_global_lru.reclaimable);
+	spin_unlock(&sgx_global_lru.lock);
 }
 
 /**
@@ -525,18 +526,18 @@ void sgx_mark_page_reclaimable(struct sgx_epc_page *page)
  */
 int sgx_unmark_page_reclaimable(struct sgx_epc_page *page)
 {
-	spin_lock(&sgx_reclaimer_lock);
+	spin_lock(&sgx_global_lru.lock);
 	if (page->flags & SGX_EPC_PAGE_RECLAIMER_TRACKED) {
 		/* The page is being reclaimed. */
 		if (list_empty(&page->list)) {
-			spin_unlock(&sgx_reclaimer_lock);
+			spin_unlock(&sgx_global_lru.lock);
 			return -EBUSY;
 		}
 
 		list_del(&page->list);
 		page->flags &= ~SGX_EPC_PAGE_RECLAIMER_TRACKED;
 	}
-	spin_unlock(&sgx_reclaimer_lock);
+	spin_unlock(&sgx_global_lru.lock);
 
 	return 0;
 }
@@ -574,7 +575,7 @@ struct sgx_epc_page *sgx_alloc_epc_page(void *owner, bool reclaim)
 			break;
 	}
 
-	if (list_empty(&sgx_active_page_list))
+	if (list_empty(&sgx_global_lru.reclaimable))
 		return ERR_PTR(-ENOMEM);
 
 	if (!reclaim) {
--
2.25.1