Message-Id: <20210221020631.171404-1-jarkko@kernel.org>
Date:   Sun, 21 Feb 2021 04:06:30 +0200
From:   Jarkko Sakkinen <jarkko@...nel.org>
To:     linux-sgx@...r.kernel.org
Cc:     haitao.huang@...el.com, dan.j.williams@...el.com,
        Jarkko Sakkinen <jarkko@...nel.org>,
        Thomas Gleixner <tglx@...utronix.de>,
        Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
        x86@...nel.org, "H. Peter Anvin" <hpa@...or.com>,
        Dave Hansen <dave.hansen@...ux.intel.com>,
        linux-kernel@...r.kernel.org
Subject: [PATCH] x86/sgx: Add a basic NUMA allocation scheme to sgx_alloc_epc_page()

Background
==========

Each EPC section is covered by one or more SRAT entries, and each of those
entries is associated with one and only one PXM (NUMA node). The motivation
behind this patch is to provide the basic elements for building an
allocation scheme based on this premise.

It does not try to fully address NUMA. For instance, it does not provide
integration with the mempolicy API, but neither does it introduce any
bottlenecks that would make addressing this harder later on. Memory
allocation is a complex topic, and thus it's better to start with baby
steps.

Solution
========

Use phys_to_target_node() to associate each NUMA node with the EPC sections
contained within its range.
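
As a rough sketch (condensed from the sgx_page_cache_init() hunk below,
with "section" standing in for &sgx_epc_sections[i]), each section's base
physical address is resolved to an online node once at boot, falling back
to node 0 when the firmware tables do not provide a usable mapping:

	nid = numa_map_to_online_node(phys_to_target_node(pa));
	if (nid == NUMA_NO_NODE)
		nid = 0;	/* no usable SRAT/PXM info for this range */

	/* Lazily initialize the per-node section list on first use. */
	if (!node_isset(nid, sgx_numa_mask)) {
		INIT_LIST_HEAD(&sgx_numa_nodes[nid]);
		node_set(nid, sgx_numa_mask);
	}

	list_add_tail(&section->section_list, &sgx_numa_nodes[nid]);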

In sgx_alloc_epc_page(), first try to allocate from the NUMA node where the
CPU is currently executing. If that fails, fall back to the legacy
allocation scheme, which scans all EPC sections in order.
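
Condensed, the resulting allocation order in __sgx_alloc_epc_page() looks
like this (the out-of-memory tail of the function is unchanged and omitted
here):

	/* Fast path: prefer EPC pages from the node the CPU runs on. */
	page = __sgx_alloc_epc_page_from_node(numa_node_id());
	if (page)
		return page;

	/* Legacy fallback: scan every EPC section in order. */
	for (i = 0; i < sgx_nr_epc_sections; i++) {
		page = __sgx_alloc_epc_page_from_section(&sgx_epc_sections[i]);
		if (page)
			return page;
	}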

Link: https://lore.kernel.org/lkml/158188326978.894464.217282995221175417.stgit@dwillia2-desk3.amr.corp.intel.com/
Signed-off-by: Jarkko Sakkinen <jarkko@...nel.org>
---
 arch/x86/Kconfig               |  1 +
 arch/x86/kernel/cpu/sgx/main.c | 54 +++++++++++++++++++++++++++++++++++-
 arch/x86/kernel/cpu/sgx/sgx.h  |  1 +
 3 files changed, 55 insertions(+), 1 deletion(-)

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 21f851179ff0..dcb73a5edf63 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1941,6 +1941,7 @@ config X86_SGX
 	depends on CRYPTO_SHA256=y
 	select SRCU
 	select MMU_NOTIFIER
+	select NUMA_KEEP_MEMINFO if NUMA
 	help
 	  Intel(R) Software Guard eXtensions (SGX) is a set of CPU instructions
 	  that can be used by applications to set aside private regions of code
diff --git a/arch/x86/kernel/cpu/sgx/main.c b/arch/x86/kernel/cpu/sgx/main.c
index 8df81a3ed945..21addedc5240 100644
--- a/arch/x86/kernel/cpu/sgx/main.c
+++ b/arch/x86/kernel/cpu/sgx/main.c
@@ -18,6 +18,12 @@ static int sgx_nr_epc_sections;
 static struct task_struct *ksgxd_tsk;
 static DECLARE_WAIT_QUEUE_HEAD(ksgxd_waitq);
 
+/* Nodes with one or more EPC sections. */
+static nodemask_t sgx_numa_mask;
+
+/* Array of lists of EPC sections for each NUMA node. */
+static struct list_head *sgx_numa_nodes;
+
 /*
  * These variables are part of the state of the reclaimer, and must be accessed
  * with sgx_reclaimer_lock acquired.
@@ -473,6 +479,26 @@ static struct sgx_epc_page *__sgx_alloc_epc_page_from_section(struct sgx_epc_sec
 	return page;
 }
 
+static struct sgx_epc_page *__sgx_alloc_epc_page_from_node(int nid)
+{
+	struct sgx_epc_section *section;
+	struct sgx_epc_page *page;
+
+	if (WARN_ON_ONCE(nid < 0 || nid >= MAX_NUMNODES))
+		return NULL;
+
+	if (!node_isset(nid, sgx_numa_mask))
+		return NULL;
+
+	list_for_each_entry(section, &sgx_numa_nodes[nid], section_list) {
+		page = __sgx_alloc_epc_page_from_section(section);
+		if (page)
+			return page;
+	}
+
+	return NULL;
+}
+
 /**
  * __sgx_alloc_epc_page() - Allocate an EPC page
  *
@@ -485,13 +511,17 @@ static struct sgx_epc_page *__sgx_alloc_epc_page_from_section(struct sgx_epc_sec
  */
 struct sgx_epc_page *__sgx_alloc_epc_page(void)
 {
+	int current_nid = numa_node_id();
 	struct sgx_epc_section *section;
 	struct sgx_epc_page *page;
 	int i;
 
+	page = __sgx_alloc_epc_page_from_node(current_nid);
+	if (page)
+		return page;
+
 	for (i = 0; i < sgx_nr_epc_sections; i++) {
 		section = &sgx_epc_sections[i];
-
 		page = __sgx_alloc_epc_page_from_section(section);
 		if (page)
 			return page;
@@ -665,8 +695,14 @@ static bool __init sgx_page_cache_init(void)
 {
 	u32 eax, ebx, ecx, edx, type;
 	u64 pa, size;
+	int nid;
 	int i;
 
+	nodes_clear(sgx_numa_mask);
+	sgx_numa_nodes = kmalloc_array(MAX_NUMNODES, sizeof(*sgx_numa_nodes), GFP_KERNEL);
+	if (!sgx_numa_nodes)
+		return false;
+
 	for (i = 0; i < ARRAY_SIZE(sgx_epc_sections); i++) {
 		cpuid_count(SGX_CPUID, i + SGX_CPUID_EPC, &eax, &ebx, &ecx, &edx);
 
@@ -690,6 +724,22 @@ static bool __init sgx_page_cache_init(void)
 		}
 
 		sgx_nr_epc_sections++;
+
+		nid = numa_map_to_online_node(phys_to_target_node(pa));
+
+		if (nid == NUMA_NO_NODE) {
+			pr_err(FW_BUG "unable to map EPC section %d to online node.\n", i);
+			nid = 0;
+		} else if (WARN_ON_ONCE(nid < 0 || nid >= MAX_NUMNODES)) {
+			nid = 0;
+		}
+
+		if (!node_isset(nid, sgx_numa_mask)) {
+			INIT_LIST_HEAD(&sgx_numa_nodes[nid]);
+			node_set(nid, sgx_numa_mask);
+		}
+
+		list_add_tail(&sgx_epc_sections[i].section_list, &sgx_numa_nodes[nid]);
 	}
 
 	if (!sgx_nr_epc_sections) {
diff --git a/arch/x86/kernel/cpu/sgx/sgx.h b/arch/x86/kernel/cpu/sgx/sgx.h
index 5fa42d143feb..4bc31bc4bacf 100644
--- a/arch/x86/kernel/cpu/sgx/sgx.h
+++ b/arch/x86/kernel/cpu/sgx/sgx.h
@@ -45,6 +45,7 @@ struct sgx_epc_section {
 	spinlock_t lock;
 	struct list_head page_list;
 	unsigned long free_cnt;
+	struct list_head section_list;
 
 	/*
 	 * Pages which need EREMOVE run on them before they can be
-- 
2.30.1
