lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <f3d83dbf77500433986cdfb649bf603ea9031961.1748594840.git.libo.gcs85@bytedance.com>
Date: Fri, 30 May 2025 17:27:31 +0800
From: Bo Li <libo.gcs85@...edance.com>
To: tglx@...utronix.de,
	mingo@...hat.com,
	bp@...en8.de,
	dave.hansen@...ux.intel.com,
	x86@...nel.org,
	luto@...nel.org,
	kees@...nel.org,
	akpm@...ux-foundation.org,
	david@...hat.com,
	juri.lelli@...hat.com,
	vincent.guittot@...aro.org,
	peterz@...radead.org
Cc: dietmar.eggemann@....com,
	hpa@...or.com,
	acme@...nel.org,
	namhyung@...nel.org,
	mark.rutland@....com,
	alexander.shishkin@...ux.intel.com,
	jolsa@...nel.org,
	irogers@...gle.com,
	adrian.hunter@...el.com,
	kan.liang@...ux.intel.com,
	viro@...iv.linux.org.uk,
	brauner@...nel.org,
	jack@...e.cz,
	lorenzo.stoakes@...cle.com,
	Liam.Howlett@...cle.com,
	vbabka@...e.cz,
	rppt@...nel.org,
	surenb@...gle.com,
	mhocko@...e.com,
	rostedt@...dmis.org,
	bsegall@...gle.com,
	mgorman@...e.de,
	vschneid@...hat.com,
	jannh@...gle.com,
	pfalcato@...e.de,
	riel@...riel.com,
	harry.yoo@...cle.com,
	linux-kernel@...r.kernel.org,
	linux-perf-users@...r.kernel.org,
	linux-fsdevel@...r.kernel.org,
	linux-mm@...ck.org,
	duanxiongchun@...edance.com,
	yinhongbo@...edance.com,
	dengliang.1214@...edance.com,
	xieyongji@...edance.com,
	chaiwen.cc@...edance.com,
	songmuchun@...edance.com,
	yuanzhu@...edance.com,
	chengguozhu@...edance.com,
	sunjiadong.lff@...edance.com,
	Bo Li <libo.gcs85@...edance.com>
Subject: [RFC v2 03/35] RPAL: add service registration interface

Every rpal service should be registered and managed. Each RPAL service has
a 64-bit key as its unique identifier, the key should never repeat before
kernel reboot. Each RPAL service has an ID to indicate which 512GB virtual
address space it can use. Every live RPAL service has a unique ID, which
will not be reused until the service dies.

This patch adds a registration interface for RPAL services. Newly
registered rpal_service instances are assigned a key that increments
starting from 1. The 64-bit length of the key ensures that keys are almost
impossible to exhaust before system reboot. Meanwhile, a bitmap is used to
allocate IDs, ensuring no duplicate IDs are assigned. RPAL services are
managed via a hash list, which facilitates quick lookup of the
corresponding rpal_service by key.

Signed-off-by: Bo Li <libo.gcs85@...edance.com>
---
 arch/x86/rpal/service.c | 130 ++++++++++++++++++++++++++++++++++++++++
 include/linux/rpal.h    |  31 ++++++++++
 2 files changed, 161 insertions(+)

diff --git a/arch/x86/rpal/service.c b/arch/x86/rpal/service.c
index c8e609798d4f..609c9550540d 100644
--- a/arch/x86/rpal/service.c
+++ b/arch/x86/rpal/service.c
@@ -13,13 +13,56 @@
 
 #include "internal.h"
 
/* One bit per address-space ID; set = ID in use by a live service. */
static DECLARE_BITMAP(rpal_id_bitmap, RPAL_NR_ID);
/* Monotonic source of unique 64-bit service keys; never wraps in practice. */
static atomic64_t service_key_counter;
/* key -> rpal_service lookup table; writers serialize on hash_table_lock. */
static DEFINE_HASHTABLE(service_hash_table, ilog2(RPAL_NR_ID));
/* NOTE(review): not static — pollutes the global namespace; confirm no
 * other translation unit needs it, otherwise make it static. */
DEFINE_SPINLOCK(hash_table_lock);
/* Slab cache backing struct rpal_service allocations. */
static struct kmem_cache *service_cache;
 
/*
 * Return a previously allocated address-space ID to the bitmap so a
 * newly registered service may reuse it. @id must be a valid ID
 * obtained from rpal_alloc_service_id() — clear_bit() on an invalid
 * (e.g. negative) index is out of bounds.
 */
static inline void rpal_free_service_id(int id)
{
	clear_bit(id, rpal_id_bitmap);
}
+
/* Final teardown once the last reference is dropped: return the object
 * to its slab cache. */
static void __rpal_put_service(struct rpal_service *rs)
{
	kmem_cache_free(service_cache, rs);
}
 
/*
 * Allocate a free address-space ID from the bitmap.
 *
 * Lock-free: find_first_zero_bit() is only a hint, so the atomic
 * test_and_set_bit() may find the bit already taken by a racing
 * allocator; in that case we rescan from the start.
 *
 * Returns an ID in [0, RPAL_NR_ID), or RPAL_INVALID_ID when every ID
 * is in use.
 */
static int rpal_alloc_service_id(void)
{
	int id;

	do {
		id = find_first_zero_bit(rpal_id_bitmap, RPAL_NR_ID);
		if (id == RPAL_NR_ID) {
			id = RPAL_INVALID_ID;
			break;
		}
	} while (test_and_set_bit(id, rpal_id_bitmap));

	return id;
}
+
+static bool is_valid_id(int id)
+{
+	return id >= 0 && id < RPAL_NR_ID;
+}
+
/*
 * Hand out the next unique 64-bit service key.
 *
 * Keys increment monotonically from RPAL_FIRST_KEY; the 64-bit space
 * cannot realistically be exhausted before reboot, but we still refuse
 * to hand out a key once the counter is about to wrap.
 *
 * Returns the key, or RPAL_INVALID_KEY on exhaustion.
 */
static u64 rpal_alloc_service_key(void)
{
	u64 key;

	/*
	 * Make sure we do not run out of keys.
	 * NOTE(review): the read and the fetch_inc below are not one
	 * atomic step, so two racing callers could both pass this check
	 * near the limit — benign in practice given the 64-bit space,
	 * but worth confirming.
	 */
	if (unlikely(atomic64_read(&service_key_counter) == _AC(-1, UL))) {
		rpal_err("key is exhausted\n");
		return RPAL_INVALID_KEY;
	}

	key = atomic64_fetch_inc(&service_key_counter);
	return key;
}
+
 struct rpal_service *rpal_get_service(struct rpal_service *rs)
 {
 	if (!rs)
@@ -37,6 +80,90 @@ void rpal_put_service(struct rpal_service *rs)
 		__rpal_put_service(rs);
 }
 
+static u32 get_hash_key(u64 key)
+{
+	return key % RPAL_NR_ID;
+}
+
+struct rpal_service *rpal_get_service_by_key(u64 key)
+{
+	struct rpal_service *rs, *rsp;
+	u32 hash_key = get_hash_key(key);
+
+	rs = NULL;
+	hash_for_each_possible(service_hash_table, rsp, hlist, hash_key) {
+		if (rsp->key == key) {
+			rs = rsp;
+			break;
+		}
+	}
+	return rpal_get_service(rs);
+}
+
+static void insert_service(struct rpal_service *rs)
+{
+	unsigned long flags;
+	int hash_key;
+
+	hash_key = get_hash_key(rs->key);
+
+	spin_lock_irqsave(&hash_table_lock, flags);
+	hash_add(service_hash_table, &rs->hlist, hash_key);
+	spin_unlock_irqrestore(&hash_table_lock, flags);
+}
+
+static void delete_service(struct rpal_service *rs)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&hash_table_lock, flags);
+	hash_del(&rs->hlist);
+	spin_unlock_irqrestore(&hash_table_lock, flags);
+}
+
+struct rpal_service *rpal_register_service(void)
+{
+	struct rpal_service *rs;
+
+	if (!rpal_inited)
+		return NULL;
+
+	rs = kmem_cache_zalloc(service_cache, GFP_KERNEL);
+	if (!rs)
+		goto alloc_fail;
+
+	rs->id = rpal_alloc_service_id();
+	if (!is_valid_id(rs->id))
+		goto id_fail;
+
+	rs->key = rpal_alloc_service_key();
+	if (unlikely(rs->key == RPAL_INVALID_KEY))
+		goto key_fail;
+
+	atomic_set(&rs->refcnt, 1);
+
+	insert_service(rs);
+
+	return rs;
+
+key_fail:
+	kmem_cache_free(service_cache, rs);
+id_fail:
+	rpal_free_service_id(rs->id);
+alloc_fail:
+	return NULL;
+}
+
/*
 * Unregister a service: unlink it from the lookup table and drop the
 * registration reference taken in rpal_register_service(). The object
 * is freed once the last outstanding reference is put. NULL is a
 * no-op.
 */
void rpal_unregister_service(struct rpal_service *rs)
{
	if (rs) {
		delete_service(rs);
		rpal_put_service(rs);
	}
}
+
 int __init rpal_service_init(void)
 {
 	service_cache = kmem_cache_create("rpal_service_cache",
@@ -47,6 +174,9 @@ int __init rpal_service_init(void)
 		return -1;
 	}
 
+	bitmap_zero(rpal_id_bitmap, RPAL_NR_ID);
+	atomic64_set(&service_key_counter, RPAL_FIRST_KEY);
+
 	return 0;
 }
 
diff --git a/include/linux/rpal.h b/include/linux/rpal.h
index 73468884cc5d..75c5acf33844 100644
--- a/include/linux/rpal.h
+++ b/include/linux/rpal.h
@@ -11,13 +11,40 @@
 
 #include <linux/sched.h>
 #include <linux/types.h>
+#include <linux/hashtable.h>
 #include <linux/atomic.h>
 
 #define RPAL_ERROR_MSG "rpal error: "
 #define rpal_err(x...) pr_err(RPAL_ERROR_MSG x)
 #define rpal_err_ratelimited(x...) pr_err_ratelimited(RPAL_ERROR_MSG x)
 
+/*
+ * The first 512GB is reserved due to mmap_min_addr.
+ * The last 512GB is dropped since stack will be initially
+ * allocated at TASK_SIZE_MAX.
+ */
+#define RPAL_NR_ID 254
+#define RPAL_INVALID_ID -1
+#define RPAL_FIRST_KEY _AC(1, UL)
+#define RPAL_INVALID_KEY _AC(0, UL)
+
+/*
+ * Each RPAL service has a 64-bit key as its unique identifier, and
+ * the 64-bit length ensures that the key will never repeat before
+ * the kernel reboot.
+ *
+ * Each RPAL service has an ID to indicate which 512GB virtual address
+ * space it can use. All alive RPAL processes have unique IDs, ensuring
+ * their address spaces do not overlap. When a process exits, its ID
+ * is released, allowing newly started RPAL services to reuse the ID.
+ */
struct rpal_service {
	/* Unique identifier for this service; never reused before reboot */
	u64 key;
	/* Index of the 512GB virtual address space slot owned by this service */
	int id;
	/* Linkage in the key -> service lookup hash table */
	struct hlist_node hlist;
	/* reference count of this struct */
	atomic_t refcnt;
};
@@ -40,4 +67,8 @@ struct rpal_service *rpal_get_service(struct rpal_service *rs);
  * @param rs The struct rpal_service to put.
  */
 void rpal_put_service(struct rpal_service *rs);
+
+void rpal_unregister_service(struct rpal_service *rs);
+struct rpal_service *rpal_register_service(void);
+struct rpal_service *rpal_get_service_by_key(u64 key);
 #endif
-- 
2.20.1


Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ