Message-Id: <20220906031230.107108-1-jarkko@kernel.org>
Date: Tue, 6 Sep 2022 06:12:30 +0300
From: Jarkko Sakkinen <jarkko@...nel.org>
To: linux-sgx@...r.kernel.org
Cc: Haitao Huang <haitao.huang@...ux.intel.com>,
Vijay Dhanraj <vijay.dhanraj@...el.com>,
Reinette Chatre <reinette.chatre@...el.com>,
Dave Hansen <dave.hansen@...ux.intel.com>,
Kai Huang <kai.huang@...el.com>,
Jarkko Sakkinen <jarkko@...nel.org>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
x86@...nel.org (maintainer:X86 ARCHITECTURE (32-BIT AND 64-BIT)),
"H. Peter Anvin" <hpa@...or.com>,
linux-kernel@...r.kernel.org (open list:X86 ARCHITECTURE (32-BIT AND
64-BIT))
Subject: [PATCH RFC] x86/sgx: Use a heap allocated list head for unsanitized pages
Allocate the list head for the unsanitized pages from the heap, and
transfer its ownership to ksgxd, which takes care of destroying it.
Remove sgx_dirty_page_list, as a global list is no longer required.
Signed-off-by: Jarkko Sakkinen <jarkko@...nel.org>
---
Depends on https://lore.kernel.org/linux-sgx/20220906000221.34286-1-jarkko@kernel.org/T/#t
Would this be plausible?
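
For reference, below is a minimal userspace sketch of the same
ownership-transfer idiom, using pthreads in place of kthread_run(). It is
not kernel code, and the names (work_list, node, worker) are made up for
illustration only: the creator heap-allocates the list, hands the pointer
to the worker thread, and from then on only the worker may touch or free
it. If thread creation fails, ownership never transfers and the creator
frees everything itself, mirroring the error path in sgx_init().

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	int val;
	struct node *next;
};

struct work_list {
	struct node *head;
};

/* The worker owns *arg from this point on and must free it. */
static void *worker(void *arg)
{
	struct work_list *list = arg;
	struct node *n = list->head;

	while (n) {
		struct node *next = n->next;

		printf("processing entry %d\n", n->val);
		free(n);
		n = next;
	}

	free(list);		/* ownership ends here */
	return NULL;
}

int main(void)
{
	struct work_list *list = calloc(1, sizeof(*list));
	pthread_t tid;
	int i;

	if (!list)
		return 1;

	for (i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		if (!n)
			break;
		n->val = i;
		n->next = list->head;
		list->head = n;
	}

	if (pthread_create(&tid, NULL, worker, list)) {
		/* Thread never started: ownership stays here. */
		while (list->head) {
			struct node *n = list->head;

			list->head = n->next;
			free(n);
		}
		free(list);
		return 1;
	}

	/* After pthread_create() the creator must not touch 'list' again. */
	pthread_join(tid, NULL);
	return 0;
}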
---
arch/x86/kernel/cpu/sgx/main.c | 44 ++++++++++++++++++++++++----------
1 file changed, 31 insertions(+), 13 deletions(-)
diff --git a/arch/x86/kernel/cpu/sgx/main.c b/arch/x86/kernel/cpu/sgx/main.c
index 0aad028f04d4..6d0e38078d28 100644
--- a/arch/x86/kernel/cpu/sgx/main.c
+++ b/arch/x86/kernel/cpu/sgx/main.c
@@ -43,8 +43,6 @@ static nodemask_t sgx_numa_mask;
*/
static struct sgx_numa_node *sgx_numa_nodes;
-static LIST_HEAD(sgx_dirty_page_list);
-
/*
* Reset post-kexec EPC pages to the uninitialized state. The pages are removed
* from the input list, and made available for the page allocator. SECS pages
@@ -392,16 +390,23 @@ void sgx_reclaim_direct(void)
sgx_reclaim_pages();
}
-static int ksgxd(void *p)
+/*
+ * The page list head must be allocated from the heap, and its ownership is
+ * transferred to ksgxd, which takes care of destroying it.
+ */
+static int ksgxd(void *page_list_ptr)
{
+ struct list_head *page_list = page_list_ptr;
+
set_freezable();
/*
* Sanitize pages in order to recover from kexec(). The 2nd pass is
* required for SECS pages, whose child pages blocked EREMOVE.
*/
- __sgx_sanitize_pages(&sgx_dirty_page_list);
- WARN_ON(__sgx_sanitize_pages(&sgx_dirty_page_list));
+ __sgx_sanitize_pages(page_list);
+ WARN_ON(__sgx_sanitize_pages(page_list));
+ kfree(page_list);
while (!kthread_should_stop()) {
if (try_to_freeze())
@@ -420,11 +425,11 @@ static int ksgxd(void *p)
return 0;
}
-static bool __init sgx_page_reclaimer_init(void)
+static bool __init sgx_page_reclaimer_init(struct list_head *page_list)
{
struct task_struct *tsk;
- tsk = kthread_run(ksgxd, NULL, "ksgxd");
+ tsk = kthread_run(ksgxd, page_list, "ksgxd");
if (IS_ERR(tsk))
return false;
@@ -619,7 +624,8 @@ void sgx_free_epc_page(struct sgx_epc_page *page)
static bool __init sgx_setup_epc_section(u64 phys_addr, u64 size,
unsigned long index,
- struct sgx_epc_section *section)
+ struct sgx_epc_section *section,
+ struct list_head *page_list)
{
unsigned long nr_pages = size >> PAGE_SHIFT;
unsigned long i;
@@ -643,7 +649,7 @@ static bool __init sgx_setup_epc_section(u64 phys_addr, u64 size,
section->pages[i].flags = 0;
section->pages[i].owner = NULL;
section->pages[i].poison = 0;
- list_add_tail(&section->pages[i].list, &sgx_dirty_page_list);
+ list_add_tail(&section->pages[i].list, page_list);
}
return true;
@@ -784,7 +790,7 @@ static void __init arch_update_sysfs_visibility(int nid)
static void __init arch_update_sysfs_visibility(int nid) {}
#endif
-static bool __init sgx_page_cache_init(void)
+static bool __init sgx_page_cache_init(struct list_head *page_list)
{
u32 eax, ebx, ecx, edx, type;
u64 pa, size;
@@ -812,7 +818,7 @@ static bool __init sgx_page_cache_init(void)
pr_info("EPC section 0x%llx-0x%llx\n", pa, pa + size - 1);
- if (!sgx_setup_epc_section(pa, size, i, &sgx_epc_sections[i])) {
+ if (!sgx_setup_epc_section(pa, size, i, &sgx_epc_sections[i], page_list)) {
pr_err("No free memory for an EPC section\n");
break;
}
@@ -912,20 +918,32 @@ EXPORT_SYMBOL_GPL(sgx_set_attribute);
static int __init sgx_init(void)
{
+ struct list_head *page_list;
int ret;
int i;
if (!cpu_feature_enabled(X86_FEATURE_SGX))
return -ENODEV;
- if (!sgx_page_cache_init())
+ page_list = kzalloc(sizeof(*page_list), GFP_KERNEL);
+ if (!page_list)
return -ENOMEM;
- if (!sgx_page_reclaimer_init()) {
+ INIT_LIST_HEAD(page_list);
+
+ if (!sgx_page_cache_init(page_list)) {
+ kfree(page_list);
+ return -ENOMEM;
+ }
+
+ if (!sgx_page_reclaimer_init(page_list)) {
+ kfree(page_list);
ret = -ENOMEM;
goto err_page_cache;
}
+ /* page_list is now owned by ksgxd. */
+
ret = misc_register(&sgx_dev_provision);
if (ret)
goto err_kthread;
--
2.37.2