lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date:   Wed, 30 Aug 2017 17:38:44 -0400
From:   Jason Baron <jbaron@...mai.com>
To:     linux-kernel@...r.kernel.org, live-patching@...r.kernel.org
Cc:     jpoimboe@...hat.com, jeyu@...nel.org, jikos@...nel.org,
        mbenes@...e.cz, pmladek@...e.com
Subject: [PATCH v2 2/3] livepatch: add atomic replace

When doing cumulative patches, if patch A introduces a change to function 1,
and patch B reverts the change to function 1 and introduces changes to say
function 2 and 3 as well, the change that patch A introduced to function 1
is still present.

Introduce atomic replace, by first creating a 'no_op' klp_func for all
the functions that are reverted by patch B. The reason that 'no_op'
klp_funcs are created, instead of just unregistering directly from the ftrace
function hook, is to ensure that the consistency model is properly preserved.
By introducing the 'no_op' functions, 'atomic replace' leverages the existing
consistency model code. Then, after transition to the new code, 'atomic
replace' unregisters the ftrace handlers that are associated with the 'no_op'
klp_funcs, such that we run the original un-patched function with no
additional no_op function overhead.

Since 'atomic replace' has completely replaced all previous livepatch modules,
it explicitly disables all previous livepatch modules — in the example, patch A —
such that the livepatch module associated with patch A can be completely removed
(rmmod). Patch A is now in a permanently disabled state. But if it is removed
from the kernel with rmmod, it can be re-inserted (insmod), and act as an atomic
replace on top of patch B.

Signed-off-by: Jason Baron <jbaron@...mai.com>
Cc: Josh Poimboeuf <jpoimboe@...hat.com>
Cc: Jessica Yu <jeyu@...nel.org>
Cc: Jiri Kosina <jikos@...nel.org>
Cc: Miroslav Benes <mbenes@...e.cz>
Cc: Petr Mladek <pmladek@...e.com>
---
 include/linux/livepatch.h     |   6 ++
 kernel/livepatch/core.c       | 177 +++++++++++++++++++++++++++++++++++++++---
 kernel/livepatch/core.h       |   5 ++
 kernel/livepatch/patch.c      |  19 +++--
 kernel/livepatch/patch.h      |   4 +-
 kernel/livepatch/transition.c |  47 ++++++++++-
 6 files changed, 234 insertions(+), 24 deletions(-)

diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index 8d3df55..ee6d18b 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -50,6 +50,7 @@
  * @new_size:	size of the new function
  * @patched:	the func has been added to the klp_ops list
  * @transition:	the func is currently being applied or reverted
+ * @no_op:	this is a no_op function used to completely revert a function
  *
  * The patched and transition variables define the func's patching state.  When
  * patching, a func is always in one of the following states:
@@ -88,6 +89,7 @@ struct klp_func {
 	unsigned long old_size, new_size;
 	bool patched;
 	bool transition;
+	bool no_op;
 };
 
 /**
@@ -119,10 +121,12 @@ struct klp_object {
  * @mod:	reference to the live patch module
  * @objs:	object entries for kernel objects to be patched
  * @immediate:  patch all funcs immediately, bypassing safety mechanisms
+ * @replace:	replace all funcs, reverting functions that are no longer patched
  * @list:	list node for global list of registered patches
  * @kobj:	kobject for sysfs resources
  * @obj_list:	head of list for dynamically allocated struct klp_object
  * @enabled:	the patch is enabled (but operation may be incomplete)
+ * @replaced:	the patch has been replaced and cannot be re-enabled
  * @finish:	for waiting till it is safe to remove the patch module
  */
 struct klp_patch {
@@ -130,12 +134,14 @@ struct klp_patch {
 	struct module *mod;
 	struct klp_object *objs;
 	bool immediate;
+	bool replace;
 
 	/* internal */
 	struct list_head list;
 	struct kobject kobj;
 	struct list_head obj_list;
 	bool enabled;
+	bool replaced;
 	struct completion finish;
 };
 
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 6004be3..21cecc5 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -45,7 +45,7 @@
  */
 DEFINE_MUTEX(klp_mutex);
 
-static LIST_HEAD(klp_patches);
+LIST_HEAD(klp_patches);
 
 static struct kobject *klp_root_kobj;
 
@@ -351,6 +351,9 @@ static int __klp_enable_patch(struct klp_patch *patch)
 	if (klp_transition_patch)
 		return -EBUSY;
 
+	if (patch->replaced)
+		return -EINVAL;
+
 	if (WARN_ON(patch->enabled))
 		return -EINVAL;
 
@@ -600,13 +603,38 @@ static void klp_free_patch(struct klp_patch *patch)
 		list_del(&patch->list);
 }
 
-static int klp_init_func(struct klp_object *obj, struct klp_func *func)
+void klp_patch_free_no_ops(struct klp_patch *patch)
+{
+	struct klp_object *obj, *tmp_obj;
+	struct klp_func *func, *tmp_func;
+
+	klp_for_each_object(patch, obj) {
+		list_for_each_entry_safe(func, tmp_func, &obj->func_list,
+					 func_entry) {
+			list_del_init(&func->func_entry);
+			kobject_put(&func->kobj);
+			kfree(func->old_name);
+			kfree(func);
+		}
+		INIT_LIST_HEAD(&obj->func_list);
+	}
+	list_for_each_entry_safe(obj, tmp_obj, &patch->obj_list, obj_entry) {
+		list_del_init(&obj->obj_entry);
+		kobject_put(&obj->kobj);
+		kfree(obj->name);
+		kfree(obj);
+	}
+	INIT_LIST_HEAD(&patch->obj_list);
+}
+
+static int klp_init_func(struct klp_object *obj, struct klp_func *func,
+			 bool no_op)
 {
-	if (!func->old_name || !func->new_func)
+	if (!func->old_name || (!no_op && !func->new_func))
 		return -EINVAL;
 
-	INIT_LIST_HEAD(&func->stack_node);
 	INIT_LIST_HEAD(&func->func_entry);
+	INIT_LIST_HEAD(&func->stack_node);
 	func->patched = false;
 	func->transition = false;
 
@@ -670,19 +698,23 @@ static int klp_init_object_loaded(struct klp_patch *patch,
 	return 0;
 }
 
-static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
+static int klp_init_object(struct klp_patch *patch, struct klp_object *obj,
+			   bool no_op)
 {
 	struct klp_func *func;
 	int ret;
 	const char *name;
 
-	if (!obj->funcs)
+	if (!obj->funcs && !no_op)
 		return -EINVAL;
 
 	obj->patched = false;
 	obj->mod = NULL;
+	INIT_LIST_HEAD(&obj->obj_entry);
+	INIT_LIST_HEAD(&obj->func_list);
 
-	klp_find_object_module(obj);
+	if (!no_op)
+		klp_find_object_module(obj);
 
 	name = klp_is_module(obj) ? obj->name : "vmlinux";
 	ret = kobject_init_and_add(&obj->kobj, &klp_ktype_object,
@@ -690,8 +722,12 @@ static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
 	if (ret)
 		return ret;
 
+	if (no_op)
+		return 0;
+
 	klp_for_each_func(obj, func) {
-		ret = klp_init_func(obj, func);
+		func->no_op = false;
+		ret = klp_init_func(obj, func, false);
 		if (ret)
 			goto free;
 	}
@@ -710,6 +746,115 @@ static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
 	return ret;
 }
 
+static int klp_init_patch_no_ops(struct klp_patch *prev_patch,
+				 struct klp_patch *patch)
+{
+	struct klp_object *obj, *prev_obj;
+	struct klp_func *prev_func, *func;
+	int ret;
+	bool found, mod;
+
+	klp_for_each_object(prev_patch, prev_obj) {
+		klp_for_each_func(prev_obj, prev_func) {
+next_func:
+			klp_for_each_object(patch, obj) {
+				klp_for_each_func(obj, func) {
+					if ((strcmp(prev_func->old_name,
+						    func->old_name) == 0) &&
+						(prev_func->old_sympos ==
+							func->old_sympos)) {
+						goto next_func;
+					}
+				}
+			}
+			mod = klp_is_module(prev_obj);
+			found = false;
+			klp_for_each_object(patch, obj) {
+				if (mod) {
+					if (klp_is_module(obj) &&
+					    strcmp(prev_obj->name,
+						   obj->name) == 0) {
+						found = true;
+						break;
+					}
+				} else if (!klp_is_module(obj)) {
+					found = true;
+					break;
+				}
+			}
+			if (!found) {
+				obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+				if (!obj)
+					return -ENOMEM;
+				if (prev_obj->name) {
+					obj->name = kstrdup(prev_obj->name,
+							    GFP_KERNEL);
+					if (!obj->name) {
+						kfree(obj);
+						return -ENOMEM;
+					}
+				} else {
+					obj->name = NULL;
+				}
+				obj->funcs = NULL;
+				ret = klp_init_object(patch, obj, true);
+				if (ret) {
+					kfree(obj->name);
+					kfree(obj);
+					return ret;
+				}
+				obj->mod = prev_obj->mod;
+				list_add(&obj->obj_entry, &patch->obj_list);
+			}
+			func = kzalloc(sizeof(*func), GFP_KERNEL);
+			if (!func)
+				return -ENOMEM;
+			if (prev_func->old_name) {
+				func->old_name = kstrdup(prev_func->old_name,
+							 GFP_KERNEL);
+				if (!func->old_name) {
+					kfree(func);
+					return -ENOMEM;
+				}
+			} else {
+				func->old_name = NULL;
+			}
+			func->new_func = NULL;
+			func->old_sympos = prev_func->old_sympos;
+			ret = klp_init_func(obj, func, true);
+			func->immediate = prev_func->immediate;
+			func->old_addr = prev_func->old_addr;
+			func->old_size = prev_func->old_size;
+			func->new_size = 0;
+			func->no_op = true;
+			list_add(&func->func_entry, &obj->func_list);
+		}
+	}
+	return 0;
+}
+
+static int klp_init_no_ops(struct klp_patch *patch)
+{
+	struct klp_patch *prev_patch;
+	int ret = 0;
+
+	if (!patch->replace)
+		return 0;
+
+	prev_patch = patch;
+	while (prev_patch->list.prev != &klp_patches) {
+		prev_patch = list_prev_entry(prev_patch, list);
+
+		ret = klp_init_patch_no_ops(prev_patch, patch);
+		if (ret)
+			return ret;
+
+		if (prev_patch->replace)
+			break;
+	}
+	return 0;
+}
+
 static int klp_init_patch(struct klp_patch *patch)
 {
 	struct klp_object *obj;
@@ -721,6 +866,8 @@ static int klp_init_patch(struct klp_patch *patch)
 	mutex_lock(&klp_mutex);
 
 	patch->enabled = false;
+	patch->replaced = false;
+
 	init_completion(&patch->finish);
 
 	ret = kobject_init_and_add(&patch->kobj, &klp_ktype_patch,
@@ -732,20 +879,25 @@ static int klp_init_patch(struct klp_patch *patch)
 
 	INIT_LIST_HEAD(&patch->obj_list);
 	klp_for_each_object(patch, obj) {
-		INIT_LIST_HEAD(&obj->obj_entry);
-		INIT_LIST_HEAD(&obj->func_list);
-		ret = klp_init_object(patch, obj);
+		ret = klp_init_object(patch, obj, false);
 		if (ret)
 			goto free;
 	}
 
 	list_add_tail(&patch->list, &klp_patches);
 
+	ret = klp_init_no_ops(patch);
+	if (ret) {
+		list_del(&patch->list);
+		goto free;
+	}
+
 	mutex_unlock(&klp_mutex);
 
 	return 0;
 
 free:
+	klp_patch_free_no_ops(patch);
 	klp_free_objects_limited(patch, obj);
 
 	mutex_unlock(&klp_mutex);
@@ -780,6 +932,7 @@ int klp_unregister_patch(struct klp_patch *patch)
 		goto err;
 	}
 
+	klp_patch_free_no_ops(patch);
 	klp_free_patch(patch);
 
 	mutex_unlock(&klp_mutex);
@@ -933,7 +1086,7 @@ void klp_module_going(struct module *mod)
 			if (patch->enabled || patch == klp_transition_patch) {
 				pr_notice("reverting patch '%s' on unloading module '%s'\n",
 					  patch->mod->name, obj->mod->name);
-				klp_unpatch_object(obj);
+				klp_unpatch_object(obj, false);
 			}
 
 			klp_free_object_loaded(obj);
diff --git a/kernel/livepatch/core.h b/kernel/livepatch/core.h
index c74f24c..0705ac3 100644
--- a/kernel/livepatch/core.h
+++ b/kernel/livepatch/core.h
@@ -1,6 +1,11 @@
 #ifndef _LIVEPATCH_CORE_H
 #define _LIVEPATCH_CORE_H
 
+#include <linux/livepatch.h>
+
 extern struct mutex klp_mutex;
+extern struct list_head klp_patches;
+
+void klp_patch_free_no_ops(struct klp_patch *patch);
 
 #endif /* _LIVEPATCH_CORE_H */
diff --git a/kernel/livepatch/patch.c b/kernel/livepatch/patch.c
index 52c4e90..10b75e3 100644
--- a/kernel/livepatch/patch.c
+++ b/kernel/livepatch/patch.c
@@ -117,6 +117,8 @@ static void notrace klp_ftrace_handler(unsigned long ip,
 		}
 	}
 
+	if (func->no_op)
+		goto unlock;
 	klp_arch_set_pc(regs, (unsigned long)func->new_func);
 unlock:
 	preempt_enable_notrace();
@@ -235,15 +237,20 @@ static int klp_patch_func(struct klp_func *func)
 	return ret;
 }
 
-void klp_unpatch_object(struct klp_object *obj)
+void klp_unpatch_object(struct klp_object *obj, bool no_op)
 {
 	struct klp_func *func;
 
-	klp_for_each_func(obj, func)
+	klp_for_each_func(obj, func) {
+		if (no_op && !func->no_op)
+			continue;
+
 		if (func->patched)
 			klp_unpatch_func(func);
+	}
 
-	obj->patched = false;
+	if (!no_op)
+		obj->patched = false;
 }
 
 int klp_patch_object(struct klp_object *obj)
@@ -257,7 +264,7 @@ int klp_patch_object(struct klp_object *obj)
 	klp_for_each_func(obj, func) {
 		ret = klp_patch_func(func);
 		if (ret) {
-			klp_unpatch_object(obj);
+			klp_unpatch_object(obj, false);
 			return ret;
 		}
 	}
@@ -266,11 +273,11 @@ int klp_patch_object(struct klp_object *obj)
 	return 0;
 }
 
-void klp_unpatch_objects(struct klp_patch *patch)
+void klp_unpatch_objects(struct klp_patch *patch, bool no_op)
 {
 	struct klp_object *obj;
 
 	klp_for_each_object(patch, obj)
 		if (obj->patched)
-			klp_unpatch_object(obj);
+			klp_unpatch_object(obj, no_op);
 }
diff --git a/kernel/livepatch/patch.h b/kernel/livepatch/patch.h
index 0db2271..2e13c50 100644
--- a/kernel/livepatch/patch.h
+++ b/kernel/livepatch/patch.h
@@ -27,7 +27,7 @@ struct klp_ops {
 struct klp_ops *klp_find_ops(unsigned long old_addr);
 
 int klp_patch_object(struct klp_object *obj);
-void klp_unpatch_object(struct klp_object *obj);
-void klp_unpatch_objects(struct klp_patch *patch);
+void klp_unpatch_object(struct klp_object *obj, bool no_op);
+void klp_unpatch_objects(struct klp_patch *patch, bool no_op);
 
 #endif /* _LIVEPATCH_PATCH_H */
diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c
index b004a1f..d5e620e 100644
--- a/kernel/livepatch/transition.c
+++ b/kernel/livepatch/transition.c
@@ -21,6 +21,8 @@
 
 #include <linux/cpu.h>
 #include <linux/stacktrace.h>
+#include <linux/ftrace.h>
+#include <linux/delay.h>
 #include "core.h"
 #include "patch.h"
 #include "transition.h"
@@ -70,6 +72,7 @@ static void klp_synchronize_transition(void)
 	schedule_on_each_cpu(klp_sync);
 }
 
+
 /*
  * The transition to the target patch state is complete.  Clean up the data
  * structures.
@@ -81,14 +84,39 @@ static void klp_complete_transition(void)
 	struct task_struct *g, *task;
 	unsigned int cpu;
 	bool immediate_func = false;
+	bool no_op = false;
+	struct klp_patch *prev_patch;
+
+	/*
+	 * For replace patches, we disable all previous patches, and replace
+	 * the dynamic no-op functions by removing the ftrace hook. After
+	 * klp_synchronize_transition() is called it is safe to free the
+	 * dynamic no-op functions, done by klp_patch_free_no_ops().
+	 */
+	if (klp_target_state == KLP_PATCHED && klp_transition_patch->replace) {
+		prev_patch = klp_transition_patch;
+		while (prev_patch->list.prev != &klp_patches) {
+			prev_patch = list_prev_entry(prev_patch, list);
+			if (prev_patch->enabled) {
+				klp_unpatch_objects(prev_patch, false);
+				prev_patch->enabled = false;
+				prev_patch->replaced = true;
+				module_put(prev_patch->mod);
+			}
+		}
+		klp_unpatch_objects(klp_transition_patch, true);
+		no_op = true;
+	}
 
 	if (klp_target_state == KLP_UNPATCHED) {
 		/*
 		 * All tasks have transitioned to KLP_UNPATCHED so we can now
 		 * remove the new functions from the func_stack.
 		 */
-		klp_unpatch_objects(klp_transition_patch);
+		klp_unpatch_objects(klp_transition_patch, false);
+	}
 
+	if (klp_target_state == KLP_UNPATCHED || no_op) {
 		/*
 		 * Make sure klp_ftrace_handler() can no longer see functions
 		 * from this patch on the ops->func_stack.  Otherwise, after
@@ -130,6 +158,9 @@ static void klp_complete_transition(void)
 	}
 
 done:
+	if (no_op)
+		klp_patch_free_no_ops(klp_transition_patch);
+
 	klp_target_state = KLP_UNDEFINED;
 	klp_transition_patch = NULL;
 }
@@ -202,10 +233,18 @@ static int klp_check_stack_func(struct klp_func *func,
 		if (klp_target_state == KLP_UNPATCHED) {
 			 /*
 			  * Check for the to-be-unpatched function
-			  * (the func itself).
+			  * (the func itself). If we're unpatching
+			  * a no-op, then we're running the original
+			  * function. We never 'patch' a no-op function,
+			  * since we just remove the ftrace hook.
 			  */
-			func_addr = (unsigned long)func->new_func;
-			func_size = func->new_size;
+			if (func->no_op) {
+				func_addr = (unsigned long)func->old_addr;
+				func_size = func->old_size;
+			} else {
+				func_addr = (unsigned long)func->new_func;
+				func_size = func->new_size;
+			}
 		} else {
 			/*
 			 * Check for the to-be-patched function
-- 
2.6.1

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ