Message-Id: <20221202183655.3767674-4-kristen@linux.intel.com>
Date: Fri, 2 Dec 2022 10:36:39 -0800
From: Kristen Carlson Accardi <kristen@...ux.intel.com>
To: jarkko@...nel.org, dave.hansen@...ux.intel.com, tj@...nel.org,
linux-kernel@...r.kernel.org, linux-sgx@...r.kernel.org,
cgroups@...r.kernel.org, Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
x86@...nel.org, "H. Peter Anvin" <hpa@...or.com>
Cc: zhiquan1.li@...el.com,
Kristen Carlson Accardi <kristen@...ux.intel.com>,
Sean Christopherson <seanjc@...gle.com>
Subject: [PATCH v2 03/18] x86/sgx: Add 'struct sgx_epc_lru_lists' to encapsulate lru list(s)

Introduce a data structure that wraps the existing reclaimable list
and its spinlock in a single struct. This minimizes the code changes
needed to handle multiple LRUs as well as reclaimable and
non-reclaimable lists, both of which will be introduced and used by
SGX EPC cgroups.

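For illustration only, and not part of the patch itself: the new
wrapper is intended to let the existing global list and lock in
arch/x86/kernel/cpu/sgx/main.c, which today look roughly like:

	static LIST_HEAD(sgx_active_page_list);
	static DEFINE_SPINLOCK(sgx_reclaimer_lock);

be replaced by a single instance of the struct added below (the
instance name here is hypothetical):

	static struct sgx_epc_lru_lists sgx_global_lru;
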
Signed-off-by: Sean Christopherson <sean.j.christopherson@...el.com>
Signed-off-by: Kristen Carlson Accardi <kristen@...ux.intel.com>
Cc: Sean Christopherson <seanjc@...gle.com>
---
arch/x86/kernel/cpu/sgx/sgx.h | 65 +++++++++++++++++++++++++++++++++++
1 file changed, 65 insertions(+)
diff --git a/arch/x86/kernel/cpu/sgx/sgx.h b/arch/x86/kernel/cpu/sgx/sgx.h
index 39cb15a8abcb..5e6d88438fae 100644
--- a/arch/x86/kernel/cpu/sgx/sgx.h
+++ b/arch/x86/kernel/cpu/sgx/sgx.h
@@ -90,6 +90,71 @@ static inline void *sgx_get_epc_virt_addr(struct sgx_epc_page *page)
return section->virt_addr + index * PAGE_SIZE;
}
+/*
+ * This data structure wraps a list of reclaimable EPC pages and a list of
+ * non-reclaimable EPC pages. It is used to implement an LRU policy during
+ * reclamation.
+ */
+struct sgx_epc_lru_lists {
+ spinlock_t lock;
+ struct list_head reclaimable;
+ struct list_head unreclaimable;
+};
+
+static inline void sgx_lru_init(struct sgx_epc_lru_lists *lrus)
+{
+ spin_lock_init(&lrus->lock);
+ INIT_LIST_HEAD(&lrus->reclaimable);
+ INIT_LIST_HEAD(&lrus->unreclaimable);
+}
+
+/*
+ * Must be called with the owning LRU lists' lock held.
+ */
+static inline void __sgx_epc_page_list_push(struct list_head *list, struct sgx_epc_page *page)
+{
+ list_add_tail(&page->list, list);
+}
+
+/*
+ * Must be called with the owning LRU lists' lock held.
+ */
+static inline struct sgx_epc_page *__sgx_epc_page_list_pop(struct list_head *list)
+{
+ struct sgx_epc_page *epc_page;
+
+ if (list_empty(list))
+ return NULL;
+
+ epc_page = list_first_entry(list, struct sgx_epc_page, list);
+ list_del_init(&epc_page->list);
+ return epc_page;
+}
+
+static inline struct sgx_epc_page *
+sgx_epc_pop_reclaimable(struct sgx_epc_lru_lists *lrus)
+{
+	return __sgx_epc_page_list_pop(&lrus->reclaimable);
+}
+
+static inline void sgx_epc_push_reclaimable(struct sgx_epc_lru_lists *lrus,
+ struct sgx_epc_page *page)
+{
+	__sgx_epc_page_list_push(&lrus->reclaimable, page);
+}
+
+static inline struct sgx_epc_page *
+sgx_epc_pop_unreclaimable(struct sgx_epc_lru_lists *lrus)
+{
+	return __sgx_epc_page_list_pop(&lrus->unreclaimable);
+}
+
+static inline void sgx_epc_push_unreclaimable(struct sgx_epc_lru_lists *lrus,
+ struct sgx_epc_page *page)
+{
+	__sgx_epc_page_list_push(&lrus->unreclaimable, page);
+}
+
struct sgx_epc_page *__sgx_alloc_epc_page(void);
void sgx_free_epc_page(struct sgx_epc_page *page);
--
2.38.1
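
A usage sketch, not part of the patch; the example function names are
made up for illustration. The helpers themselves assume lrus->lock is
already held, so callers take it around each operation:

	/* Add a newly tracked page to the reclaimable LRU. */
	static void example_track_page(struct sgx_epc_lru_lists *lrus,
				       struct sgx_epc_page *page)
	{
		spin_lock(&lrus->lock);
		sgx_epc_push_reclaimable(lrus, page);
		spin_unlock(&lrus->lock);
	}

	/* Pick the oldest reclaimable page, or NULL if none is queued. */
	static struct sgx_epc_page *
	example_pick_victim(struct sgx_epc_lru_lists *lrus)
	{
		struct sgx_epc_page *page;

		spin_lock(&lrus->lock);
		page = sgx_epc_pop_reclaimable(lrus);
		spin_unlock(&lrus->lock);

		return page;
	}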