Message-Id: <20220922171057.1236139-5-kristen@linux.intel.com>
Date: Thu, 22 Sep 2022 10:10:41 -0700
From: Kristen Carlson Accardi <kristen@...ux.intel.com>
To: linux-kernel@...r.kernel.org, linux-sgx@...r.kernel.org,
cgroups@...r.kernel.org, Jarkko Sakkinen <jarkko@...nel.org>,
Dave Hansen <dave.hansen@...ux.intel.com>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
x86@...nel.org, "H. Peter Anvin" <hpa@...or.com>
Cc: Kristen Carlson Accardi <kristen@...ux.intel.com>,
Sean Christopherson <seanjc@...gle.com>
Subject: [RFC PATCH 04/20] x86/sgx: Add 'struct sgx_epc_lru' to encapsulate lru list(s)
From: Sean Christopherson <sean.j.christopherson@...el.com>
Wrap the existing reclaimable list and its spinlock in a struct to
minimize the code changes needed to handle multiple LRUs as well as
reclaimable and non-reclaimable lists, both of which will be introduced
and used by SGX EPC cgroups.
Signed-off-by: Sean Christopherson <sean.j.christopherson@...el.com>
Signed-off-by: Kristen Carlson Accardi <kristen@...ux.intel.com>
Cc: Sean Christopherson <seanjc@...gle.com>
---
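A rough sketch of how the new wrapper is expected to be used once more
than one LRU exists (illustrative only; the helper below and its name
are hypothetical and not part of this patch, which only converts the
existing global list):

	/*
	 * Because the lock and the reclaimable list now travel together,
	 * reclaim code can be written against any struct sgx_epc_lru
	 * instance instead of hard-coding sgx_global_lru.
	 */
	static struct sgx_epc_page *sgx_lru_pop_reclaimable(struct sgx_epc_lru *lru)
	{
		struct sgx_epc_page *epc_page = NULL;

		spin_lock(&lru->lock);
		if (!list_empty(&lru->reclaimable)) {
			epc_page = list_first_entry(&lru->reclaimable,
						    struct sgx_epc_page, list);
			list_del_init(&epc_page->list);
		}
		spin_unlock(&lru->lock);

		return epc_page;
	}

A caller such as sgx_reclaim_pages() could then pull its scan batch from
whichever LRU (the global one here, or a per-cgroup one later) it is
asked to reclaim from.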
arch/x86/kernel/cpu/sgx/main.c | 37 +++++++++++++++++-----------------
arch/x86/kernel/cpu/sgx/sgx.h | 11 ++++++++++
2 files changed, 30 insertions(+), 18 deletions(-)
diff --git a/arch/x86/kernel/cpu/sgx/main.c b/arch/x86/kernel/cpu/sgx/main.c
index 4cdeb915dc86..af68dc1c677b 100644
--- a/arch/x86/kernel/cpu/sgx/main.c
+++ b/arch/x86/kernel/cpu/sgx/main.c
@@ -26,10 +26,9 @@ static DEFINE_XARRAY(sgx_epc_address_space);
/*
* These variables are part of the state of the reclaimer, and must be accessed
- * with sgx_reclaimer_lock acquired.
+ * with sgx_global_lru.lock acquired.
*/
-static LIST_HEAD(sgx_active_page_list);
-static DEFINE_SPINLOCK(sgx_reclaimer_lock);
+static struct sgx_epc_lru sgx_global_lru;
static atomic_long_t sgx_nr_free_pages = ATOMIC_LONG_INIT(0);
@@ -298,12 +297,12 @@ static void sgx_reclaim_pages(void)
int ret;
int i;
- spin_lock(&sgx_reclaimer_lock);
+ spin_lock(&sgx_global_lru.lock);
for (i = 0; i < SGX_NR_TO_SCAN; i++) {
- if (list_empty(&sgx_active_page_list))
+ if (list_empty(&sgx_global_lru.reclaimable))
break;
- epc_page = list_first_entry(&sgx_active_page_list,
+ epc_page = list_first_entry(&sgx_global_lru.reclaimable,
struct sgx_epc_page, list);
list_del_init(&epc_page->list);
encl_page = epc_page->owner;
@@ -316,7 +315,7 @@ static void sgx_reclaim_pages(void)
*/
epc_page->flags &= ~SGX_EPC_PAGE_RECLAIMER_TRACKED;
}
- spin_unlock(&sgx_reclaimer_lock);
+ spin_unlock(&sgx_global_lru.lock);
for (i = 0; i < cnt; i++) {
epc_page = chunk[i];
@@ -339,9 +338,9 @@ static void sgx_reclaim_pages(void)
continue;
skip:
- spin_lock(&sgx_reclaimer_lock);
- list_add_tail(&epc_page->list, &sgx_active_page_list);
- spin_unlock(&sgx_reclaimer_lock);
+ spin_lock(&sgx_global_lru.lock);
+ list_add_tail(&epc_page->list, &sgx_global_lru.reclaimable);
+ spin_unlock(&sgx_global_lru.lock);
kref_put(&encl_page->encl->refcount, sgx_encl_release);
@@ -374,7 +373,7 @@ static void sgx_reclaim_pages(void)
static bool sgx_should_reclaim(unsigned long watermark)
{
return atomic_long_read(&sgx_nr_free_pages) < watermark &&
- !list_empty(&sgx_active_page_list);
+ !list_empty(&sgx_global_lru.reclaimable);
}
/*
@@ -427,6 +426,8 @@ static bool __init sgx_page_reclaimer_init(void)
ksgxd_tsk = tsk;
+ sgx_lru_init(&sgx_global_lru);
+
return true;
}
@@ -502,10 +503,10 @@ struct sgx_epc_page *__sgx_alloc_epc_page(void)
*/
void sgx_mark_page_reclaimable(struct sgx_epc_page *page)
{
- spin_lock(&sgx_reclaimer_lock);
+ spin_lock(&sgx_global_lru.lock);
page->flags |= SGX_EPC_PAGE_RECLAIMER_TRACKED;
- list_add_tail(&page->list, &sgx_active_page_list);
- spin_unlock(&sgx_reclaimer_lock);
+ list_add_tail(&page->list, &sgx_global_lru.reclaimable);
+ spin_unlock(&sgx_global_lru.lock);
}
/**
@@ -520,18 +521,18 @@ void sgx_mark_page_reclaimable(struct sgx_epc_page *page)
*/
int sgx_unmark_page_reclaimable(struct sgx_epc_page *page)
{
- spin_lock(&sgx_reclaimer_lock);
+ spin_lock(&sgx_global_lru.lock);
if (page->flags & SGX_EPC_PAGE_RECLAIMER_TRACKED) {
/* The page is being reclaimed. */
if (list_empty(&page->list)) {
- spin_unlock(&sgx_reclaimer_lock);
+ spin_unlock(&sgx_global_lru.lock);
return -EBUSY;
}
list_del(&page->list);
page->flags &= ~SGX_EPC_PAGE_RECLAIMER_TRACKED;
}
- spin_unlock(&sgx_reclaimer_lock);
+ spin_unlock(&sgx_global_lru.lock);
return 0;
}
@@ -564,7 +565,7 @@ struct sgx_epc_page *sgx_alloc_epc_page(void *owner, bool reclaim)
break;
}
- if (list_empty(&sgx_active_page_list))
+ if (list_empty(&sgx_global_lru.reclaimable))
return ERR_PTR(-ENOMEM);
if (!reclaim) {
diff --git a/arch/x86/kernel/cpu/sgx/sgx.h b/arch/x86/kernel/cpu/sgx/sgx.h
index 5a7e858a8f98..7b208ee8eb45 100644
--- a/arch/x86/kernel/cpu/sgx/sgx.h
+++ b/arch/x86/kernel/cpu/sgx/sgx.h
@@ -83,6 +83,17 @@ static inline void *sgx_get_epc_virt_addr(struct sgx_epc_page *page)
return section->virt_addr + index * PAGE_SIZE;
}
+struct sgx_epc_lru {
+ spinlock_t lock;
+ struct list_head reclaimable;
+};
+
+static inline void sgx_lru_init(struct sgx_epc_lru *lru)
+{
+ spin_lock_init(&lru->lock);
+ INIT_LIST_HEAD(&lru->reclaimable);
+}
+
struct sgx_epc_page *__sgx_alloc_epc_page(void);
void sgx_free_epc_page(struct sgx_epc_page *page);
--
2.37.3