Date:	Mon,  4 May 2015 13:40:22 +0200
From:	Jiri Slaby <jslaby@...e.cz>
To:	live-patching@...r.kernel.org
Cc:	jpoimboe@...hat.com, sjenning@...hat.com, jkosina@...e.cz,
	vojtech@...e.cz, mingo@...hat.com, linux-kernel@...r.kernel.org,
	Jiri Slaby <jslaby@...e.cz>
Subject: [RFC kgr on klp 6/9] livepatch: do not allow failure while really patching

We separate the patching into two phases:
* preparation: this phase can fail, but such failures are easy to
  handle even in the presence of kgraft-like patching.
* patching: this phase cannot fail, so kgraft-like patching does not
  have to handle failures in a complicated way. See the sketch below.
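
Purely illustrative (not part of the patch): a minimal userspace C sketch
of the prepare/commit flow that __klp_enable_patch() follows after this
change: prepare everything first (may fail, roll back), then flip it live
(cannot fail). The demo_* helpers are hypothetical stand-ins for the real
primitives (klp_prepare_enable_object()/klp_enable_object()).

#include <stdio.h>

#define NOBJS 3

/* Phase 1: may fail; all allocations and ftrace setup belong here. */
static int demo_prepare(int i)
{
	printf("prepare object %d\n", i);
	return 0;	/* return non-zero here to simulate a failure */
}

/* Rollback of already prepared objects; only needed on phase 1 errors. */
static void demo_rollback(int nr_prepared)
{
	int i;

	for (i = 0; i < nr_prepared; i++)
		printf("rollback object %d\n", i);
}

/* Phase 2: must not fail; it only makes the prepared state visible. */
static void demo_commit(int i)
{
	printf("commit object %d\n", i);
}

int main(void)
{
	int i, ret;

	/* Preparation loop: stop at the first error and roll back. */
	for (i = 0; i < NOBJS; i++) {
		ret = demo_prepare(i);
		if (ret) {
			demo_rollback(i);
			return 1;
		}
	}

	/* Patching loop: nothing in here is allowed to fail any more. */
	for (i = 0; i < NOBJS; i++)
		demo_commit(i);

	return 0;
}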

Signed-off-by: Jiri Slaby <jslaby@...e.cz>
---
 include/linux/livepatch.h | 11 ++++++-
 include/linux/sched.h     |  5 +++
 kernel/livepatch/core.c   | 84 +++++++++++++++++++++++++++++++----------------
 3 files changed, 70 insertions(+), 30 deletions(-)

diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index 009f308ff756..add2b1bd1cce 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -53,9 +53,18 @@ struct klp_cmodel {
 			struct pt_regs *regs);
 };
 
+/**
+ * enum klp_state - state of patches, objects, functions
+ * @KLP_DISABLED: completely disabled
+ * @KLP_ENABLED: completely enabled (applied)
+ * @KLP_PREPARED: being applied
+ *
+ * @KLP_DISABLED & @KLP_ENABLED are part of the /sys ABI
+ */
 enum klp_state {
 	KLP_DISABLED,
-	KLP_ENABLED
+	KLP_ENABLED,
+	KLP_PREPARED,
 };
 
 /**
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4c0555261cb1..6b2f4c01c516 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -3086,6 +3086,11 @@ static inline void mm_update_next_owner(struct mm_struct *mm)
 #endif /* CONFIG_MEMCG */
 
 #if IS_ENABLED(CONFIG_LIVEPATCH)
+/*
+ * This function is called only when the given thread is not running
+ * any patched function. Therefore the flag might be cleared without
+ * klp_kgr_state_lock.
+ */
 static inline void klp_kgraft_mark_task_safe(struct task_struct *p)
 {
 	clear_tsk_thread_flag(p, TIF_KGR_IN_PROGRESS);
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index ab6a36688c93..87d94aadfebf 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -41,11 +41,13 @@
  * @node:	node for the global klp_ops list
  * @func_stack:	list head for the stack of klp_func's (active func is on top)
  * @fops:	registered ftrace ops struct
+ * @old_addr:   the address of the function which is being patched
  */
 struct klp_ops {
 	struct list_head node;
 	struct list_head func_stack;
 	struct ftrace_ops fops;
+	unsigned long old_addr;
 };
 
 /*
@@ -65,12 +67,9 @@ static struct kobject *klp_root_kobj;
 static struct klp_ops *klp_find_ops(unsigned long old_addr)
 {
 	struct klp_ops *ops;
-	struct klp_func *func;
 
 	list_for_each_entry(ops, &klp_ops, node) {
-		func = list_first_entry(&ops->func_stack, struct klp_func,
-					stack_node);
-		if (func->old_addr == old_addr)
+		if (ops->old_addr == old_addr)
 			return ops;
 	}
 
@@ -326,11 +325,8 @@ static void notrace klp_ftrace_handler(unsigned long ip,
 	rcu_read_lock();
 	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
 				      stack_node);
-	if (WARN_ON_ONCE(!func))
-		goto unlock;
-
-	func->stub(&ops->func_stack, func, regs);
-unlock:
+	if (func)
+		func->stub(&ops->func_stack, func, regs);
 	rcu_read_unlock();
 }
 
@@ -338,18 +334,20 @@ static void klp_disable_func(struct klp_func *func)
 {
 	struct klp_ops *ops;
 
-	WARN_ON(func->state != KLP_ENABLED);
+	WARN_ON(func->state == KLP_DISABLED);
 	WARN_ON(!func->old_addr);
 
 	ops = klp_find_ops(func->old_addr);
 	if (WARN_ON(!ops))
 		return;
 
-	if (list_is_singular(&ops->func_stack)) {
+	if (list_empty(&ops->func_stack) ||
+			list_is_singular(&ops->func_stack)) {
 		WARN_ON(unregister_ftrace_function(&ops->fops));
 		WARN_ON(ftrace_set_filter_ip(&ops->fops, func->old_addr, 1, 0));
 
-		list_del_rcu(&func->stack_node);
+		if (!list_empty(&ops->func_stack))
+			list_del_rcu(&func->stack_node);
 		list_del(&ops->node);
 		kfree(ops);
 	} else {
@@ -359,7 +357,7 @@ static void klp_disable_func(struct klp_func *func)
 	func->state = KLP_DISABLED;
 }
 
-static int klp_enable_func(struct klp_func *func)
+static int klp_prepare_enable_func(struct klp_func *func)
 {
 	struct klp_ops *ops;
 	int ret;
@@ -376,6 +374,7 @@ static int klp_enable_func(struct klp_func *func)
 		if (!ops)
 			return -ENOMEM;
 
+		ops->old_addr = func->old_addr;
 		ops->fops.func = klp_ftrace_handler;
 		ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS |
 				  FTRACE_OPS_FL_DYNAMIC |
@@ -384,7 +383,6 @@ static int klp_enable_func(struct klp_func *func)
 		list_add(&ops->node, &klp_ops);
 
 		INIT_LIST_HEAD(&ops->func_stack);
-		list_add_rcu(&func->stack_node, &ops->func_stack);
 
 		ret = ftrace_set_filter_ip(&ops->fops, func->old_addr, 0, 0);
 		if (ret) {
@@ -400,35 +398,43 @@ static int klp_enable_func(struct klp_func *func)
 			ftrace_set_filter_ip(&ops->fops, func->old_addr, 1, 0);
 			goto err;
 		}
-
-
-	} else {
-		list_add_rcu(&func->stack_node, &ops->func_stack);
 	}
 
-	func->state = KLP_ENABLED;
+	func->state = KLP_PREPARED;
 
 	return 0;
 
 err:
-	list_del_rcu(&func->stack_node);
 	list_del(&ops->node);
 	kfree(ops);
 	return ret;
 }
 
+static void klp_enable_func(struct klp_func *func)
+{
+	struct klp_ops *ops;
+
+	ops = klp_find_ops(func->old_addr);
+	if (WARN_ON(!ops)) /* it was just added in klp_prepare_enable_func() */
+		return;
+
+	list_add_rcu(&func->stack_node, &ops->func_stack);
+
+	func->state = KLP_ENABLED;
+}
+
 static void klp_disable_object(struct klp_object *obj)
 {
 	struct klp_func *func;
 
 	klp_for_each_func(obj, func)
-		if (func->state == KLP_ENABLED)
+		if (func->state != KLP_DISABLED)
 			klp_disable_func(func);
 
 	obj->state = KLP_DISABLED;
 }
 
-static int klp_enable_object(struct klp_object *obj)
+static int klp_prepare_enable_object(struct klp_object *obj)
 {
 	struct klp_func *func;
 	int ret;
@@ -440,17 +446,27 @@ static int klp_enable_object(struct klp_object *obj)
 		return -EINVAL;
 
 	klp_for_each_func(obj, func) {
-		ret = klp_enable_func(func);
+		ret = klp_prepare_enable_func(func);
 		if (ret) {
 			klp_disable_object(obj);
 			return ret;
 		}
 	}
-	obj->state = KLP_ENABLED;
+	obj->state = KLP_PREPARED;
 
 	return 0;
 }
 
+static void klp_enable_object(struct klp_object *obj)
+{
+	struct klp_func *func;
+
+	klp_for_each_func(obj, func)
+		klp_enable_func(func);
+
+	obj->state = KLP_ENABLED;
+}
+
 static int __klp_disable_patch(struct klp_patch *patch)
 {
 	struct klp_object *obj;
@@ -463,7 +479,7 @@ static int __klp_disable_patch(struct klp_patch *patch)
 	pr_notice("disabling patch '%s'\n", patch->mod->name);
 
 	klp_for_each_object(patch, obj) {
-		if (obj->state == KLP_ENABLED)
+		if (obj->state == KLP_PREPARED || obj->state == KLP_ENABLED)
 			klp_disable_object(obj);
 	}
 
@@ -526,11 +542,18 @@ static int __klp_enable_patch(struct klp_patch *patch)
 		if (!klp_is_object_loaded(obj))
 			continue;
 
-		ret = klp_enable_object(obj);
+		ret = klp_prepare_enable_object(obj);
 		if (ret)
 			goto unregister;
 	}
 
+	klp_for_each_object(patch, obj) {
+		if (!klp_is_object_loaded(obj))
+			continue;
+
+		klp_enable_object(obj);
+	}
+
 	patch->state = KLP_ENABLED;
 
 	return 0;
@@ -937,10 +960,13 @@ static void klp_module_notify_coming(struct klp_patch *patch,
 	pr_notice("applying patch '%s' to loading module '%s'\n",
 		  pmod->name, mod->name);
 
-	ret = klp_enable_object(obj);
-	if (!ret)
-		return;
+	ret = klp_prepare_enable_object(obj);
+	if (ret)
+		goto err;
+
+	klp_enable_object(obj);
 
+	return;
 err:
 	pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
 		pmod->name, mod->name, ret);
-- 
2.3.5

