Message-Id: <20240828022350.71456-2-zhangwarden@gmail.com>
Date: Wed, 28 Aug 2024 10:23:49 +0800
From: Wardenjohn <zhangwarden@...il.com>
To: jpoimboe@...nel.org,
	mbenes@...e.cz,
	jikos@...nel.org,
	pmladek@...e.com,
	joe.lawrence@...hat.com
Cc: live-patching@...r.kernel.org,
	linux-kernel@...r.kernel.org,
	Wardenjohn <zhangwarden@...il.com>
Subject: [PATCH v4 1/2] Introduce klp_ops into klp_func structure

Before introducing the "using" feature, the klp transition code
needs an easier way to get the klp_ops of a klp_func.

This patch makes the following changes:

1. Add a klp_ops pointer to the klp_func structure, and rewrite
klp_find_ops() and the other code paths that look up the klp_ops
of a function.

2. Move the definition of struct klp_ops into
include/linux/livepatch.h.

3. Drop the global klp_ops list, along with the node list head of
struct klp_ops, since they no longer have any users.

With these changes, the klp_ops of a klp_func can be obtained
directly, klp_find_ops() becomes simpler and more straightforward,
and no global list has to be maintained.
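
To illustrate, a minimal sketch of the lookup before and after this
change (all names are taken from the diff below):

	struct klp_ops *ops;

	/* Before: scan the global klp_ops list for a matching old_func. */
	ops = klp_find_ops(func->old_func);

	/* After: each patched klp_func caches a pointer to its klp_ops. */
	ops = func->ops;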

Signed-off-by: Wardenjohn <zhangwarden@...il.com>

diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index 51a258c24ff5..d874aecc817b 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -22,6 +22,23 @@
 #define KLP_TRANSITION_UNPATCHED	 0
 #define KLP_TRANSITION_PATCHED		 1
 
+/**
+ * struct klp_ops - structure for tracking registered ftrace ops structs
+ *
+ * A single ftrace_ops is shared between all enabled replacement functions
+ * (klp_func structs) which have the same old_func.  This allows the switch
+ * between function versions to happen instantaneously by updating the klp_ops
+ * struct's func_stack list.  The winner is the klp_func at the top of the
+ * func_stack (front of the list).
+ *
+ * @func_stack:	list head for the stack of klp_func's (active func is on top)
+ * @fops:	registered ftrace ops struct
+ */
+struct klp_ops {
+	struct list_head func_stack;
+	struct ftrace_ops fops;
+};
+
 /**
  * struct klp_func - function structure for live patching
  * @old_name:	name of the function to be patched
@@ -32,6 +49,7 @@
  * @kobj:	kobject for sysfs resources
  * @node:	list node for klp_object func_list
  * @stack_node:	list node for klp_ops func_stack list
+ * @ops:	pointer to klp_ops struct for this function
  * @old_size:	size of the old function
  * @new_size:	size of the new function
  * @nop:        temporary patch to use the original code again; dyn. allocated
@@ -71,6 +89,7 @@ struct klp_func {
 	struct kobject kobj;
 	struct list_head node;
 	struct list_head stack_node;
+	struct klp_ops *ops;
 	unsigned long old_size, new_size;
 	bool nop;
 	bool patched;
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 52426665eecc..e4572bf34316 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -760,6 +760,8 @@ static int klp_init_func(struct klp_object *obj, struct klp_func *func)
 	if (!func->old_name)
 		return -EINVAL;
 
+	func->ops = NULL;
+
 	/*
 	 * NOPs get the address later. The patched module must be loaded,
 	 * see klp_init_object_loaded().
diff --git a/kernel/livepatch/patch.c b/kernel/livepatch/patch.c
index 90408500e5a3..8ab9c35570f4 100644
--- a/kernel/livepatch/patch.c
+++ b/kernel/livepatch/patch.c
@@ -20,18 +20,25 @@
 #include "patch.h"
 #include "transition.h"
 
-static LIST_HEAD(klp_ops);
 
 struct klp_ops *klp_find_ops(void *old_func)
 {
-	struct klp_ops *ops;
+	struct klp_patch *patch;
+	struct klp_object *obj;
 	struct klp_func *func;
 
-	list_for_each_entry(ops, &klp_ops, node) {
-		func = list_first_entry(&ops->func_stack, struct klp_func,
-					stack_node);
-		if (func->old_func == old_func)
-			return ops;
+	klp_for_each_patch(patch) {
+		klp_for_each_object(patch, obj) {
+			klp_for_each_func(obj, func) {
+				/*
+				 * Ignore entry where func->ops has not been
+				 * assigned yet. It is most likely the one
+				 * which is about to be created/added.
+				 */
+				if (func->old_func == old_func && func->ops)
+					return func->ops;
+			}
+		}
 	}
 
 	return NULL;
@@ -133,7 +140,7 @@ static void klp_unpatch_func(struct klp_func *func)
 	if (WARN_ON(!func->old_func))
 		return;
 
-	ops = klp_find_ops(func->old_func);
+	ops = func->ops;
 	if (WARN_ON(!ops))
 		return;
 
@@ -149,6 +156,6 @@ static void klp_unpatch_func(struct klp_func *func)
 
 		list_del_rcu(&func->stack_node);
-		list_del(&ops->node);
+		func->ops = NULL;
 		kfree(ops);
 	} else {
 		list_del_rcu(&func->stack_node);
@@ -168,7 +175,7 @@ static int klp_patch_func(struct klp_func *func)
 	if (WARN_ON(func->patched))
 		return -EINVAL;
 
-	ops = klp_find_ops(func->old_func);
+	ops = func->ops;
 	if (!ops) {
 		unsigned long ftrace_loc;
 
@@ -191,8 +198,6 @@ static int klp_patch_func(struct klp_func *func)
 				  FTRACE_OPS_FL_IPMODIFY |
 				  FTRACE_OPS_FL_PERMANENT;
 
-		list_add(&ops->node, &klp_ops);
-
 		INIT_LIST_HEAD(&ops->func_stack);
 		list_add_rcu(&func->stack_node, &ops->func_stack);
 
@@ -211,7 +216,7 @@ static int klp_patch_func(struct klp_func *func)
 			goto err;
 		}
 
-
+		func->ops = ops;
 	} else {
 		list_add_rcu(&func->stack_node, &ops->func_stack);
 	}
@@ -224,6 +229,6 @@ static int klp_patch_func(struct klp_func *func)
 	list_del_rcu(&func->stack_node);
-	list_del(&ops->node);
 	kfree(ops);
+	func->ops = NULL;
 	return ret;
 }
 
diff --git a/kernel/livepatch/patch.h b/kernel/livepatch/patch.h
index d5f2fbe373e0..21d0d20b7189 100644
--- a/kernel/livepatch/patch.h
+++ b/kernel/livepatch/patch.h
@@ -6,25 +6,6 @@
 #include <linux/list.h>
 #include <linux/ftrace.h>
 
-/**
- * struct klp_ops - structure for tracking registered ftrace ops structs
- *
- * A single ftrace_ops is shared between all enabled replacement functions
- * (klp_func structs) which have the same old_func.  This allows the switch
- * between function versions to happen instantaneously by updating the klp_ops
- * struct's func_stack list.  The winner is the klp_func at the top of the
- * func_stack (front of the list).
- *
- * @node:	node for the global klp_ops list
- * @func_stack:	list head for the stack of klp_func's (active func is on top)
- * @fops:	registered ftrace ops struct
- */
-struct klp_ops {
-	struct list_head node;
-	struct list_head func_stack;
-	struct ftrace_ops fops;
-};
-
 struct klp_ops *klp_find_ops(void *old_func);
 
 int klp_patch_object(struct klp_object *obj);
diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c
index ba069459c101..d9a3f9c7a93b 100644
--- a/kernel/livepatch/transition.c
+++ b/kernel/livepatch/transition.c
@@ -230,7 +230,7 @@ static int klp_check_stack_func(struct klp_func *func, unsigned long *entries,
 		 * Check for the to-be-patched function
 		 * (the previous func).
 		 */
-		ops = klp_find_ops(func->old_func);
+		ops = func->ops;
 
 		if (list_is_singular(&ops->func_stack)) {
 			/* original function */
-- 
2.18.2

