Message-Id: <1458176139-17455-3-git-send-email-jeyu@redhat.com>
Date:	Wed, 16 Mar 2016 20:55:39 -0400
From:	Jessica Yu <jeyu@...hat.com>
To:	Josh Poimboeuf <jpoimboe@...hat.com>,
	Jiri Kosina <jikos@...nel.org>,
	Miroslav Benes <mbenes@...e.cz>,
	Petr Mladek <pmladek@...e.com>,
	Rusty Russell <rusty@...tcorp.com.au>
Cc:	Steven Rostedt <rostedt@...dmis.org>,
	live-patching@...r.kernel.org, linux-kernel@...r.kernel.org,
	Jessica Yu <jeyu@...hat.com>
Subject: [PATCH v3 2/2] livepatch/module: remove livepatch module notifier

Remove the livepatch module notifier in favor of directly enabling and
disabling patches to modules in the module loader. Hard-coding the
function calls ensures that ftrace_module_enable() is run before
klp_module_coming() during module load, and that klp_module_going() is
run before ftrace_release_mod() during module unload. This way, ftrace
and livepatch code is run in the correct order during the module
load/unload sequence without dependence on the module notifier call chain.
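
For clarity, a rough sketch of the resulting call ordering in the module
loader (illustrative only, condensed from the kernel/module.c hunks below,
not a literal excerpt):

	/* module load (prepare_coming_module) */
	ftrace_module_enable(mod);
	err = klp_module_coming(mod);
	if (err)
		return err;
	blocking_notifier_call_chain(&module_notify_list,
				     MODULE_STATE_COMING, mod);

	/* module unload (delete_module and the load_module error path) */
	blocking_notifier_call_chain(&module_notify_list,
				     MODULE_STATE_GOING, mod);
	klp_module_going(mod);
	ftrace_release_mod(mod);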

Signed-off-by: Jessica Yu <jeyu@...hat.com>
Reviewed-by: Petr Mladek <pmladek@...e.cz> 
Acked-by: Josh Poimboeuf <jpoimboe@...hat.com>
Acked-by: Rusty Russell <rusty@...tcorp.com.au>
---
 include/linux/livepatch.h |  13 ++++
 kernel/livepatch/core.c   | 147 ++++++++++++++++++++++------------------------
 kernel/module.c           |  10 ++++
 3 files changed, 94 insertions(+), 76 deletions(-)

diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index c056797..bd830d5 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -24,6 +24,8 @@
 #include <linux/module.h>
 #include <linux/ftrace.h>
 
+#if IS_ENABLED(CONFIG_LIVEPATCH)
+
 #include <asm/livepatch.h>
 
 enum klp_state {
@@ -132,4 +134,15 @@ int klp_unregister_patch(struct klp_patch *);
 int klp_enable_patch(struct klp_patch *);
 int klp_disable_patch(struct klp_patch *);
 
+/* Called from the module loader during module coming/going states */
+int klp_module_coming(struct module *mod);
+void klp_module_going(struct module *mod);
+
+#else /* !CONFIG_LIVEPATCH */
+
+static inline int klp_module_coming(struct module *mod) { return 0; }
+static inline void klp_module_going(struct module *mod) { }
+
+#endif /* CONFIG_LIVEPATCH */
+
 #endif /* _LINUX_LIVEPATCH_H_ */
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 780f00c..d68fbf6 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -99,12 +99,12 @@ static void klp_find_object_module(struct klp_object *obj)
 	/*
 	 * We do not want to block removal of patched modules and therefore
 	 * we do not take a reference here. The patches are removed by
-	 * a going module handler instead.
+	 * klp_module_going() instead.
 	 */
 	mod = find_module(obj->name);
 	/*
-	 * Do not mess work of the module coming and going notifiers.
-	 * Note that the patch might still be needed before the going handler
+	 * Do not mess work of klp_module_coming() and klp_module_going().
+	 * Note that the patch might still be needed before klp_module_going()
 	 * is called. Module functions can be called even in the GOING state
 	 * until mod->exit() finishes. This is especially important for
 	 * patches that modify semantic of the functions.
@@ -866,103 +866,108 @@ int klp_register_patch(struct klp_patch *patch)
 }
 EXPORT_SYMBOL_GPL(klp_register_patch);
 
-static int klp_module_notify_coming(struct klp_patch *patch,
-				     struct klp_object *obj)
+int klp_module_coming(struct module *mod)
 {
-	struct module *pmod = patch->mod;
-	struct module *mod = obj->mod;
 	int ret;
+	struct klp_patch *patch;
+	struct klp_object *obj;
 
-	ret = klp_init_object_loaded(patch, obj);
-	if (ret) {
-		pr_warn("failed to initialize patch '%s' for module '%s' (%d)\n",
-			pmod->name, mod->name, ret);
-		return ret;
-	}
+	if (WARN_ON(mod->state != MODULE_STATE_COMING))
+		return -EINVAL;
 
-	if (patch->state == KLP_DISABLED)
-		return 0;
+	mutex_lock(&klp_mutex);
+	/*
+	 * Each module has to know that klp_module_coming()
+	 * has been called. We never know what module will
+	 * get patched by a new patch.
+	 */
+	mod->klp_alive = true;
 
-	pr_notice("applying patch '%s' to loading module '%s'\n",
-		  pmod->name, mod->name);
+	list_for_each_entry(patch, &klp_patches, list) {
+		klp_for_each_object(patch, obj) {
+			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
+				continue;
 
-	ret = klp_enable_object(obj);
-	if (ret)
-		pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
-			pmod->name, mod->name, ret);
-	return ret;
-}
+			obj->mod = mod;
 
-static void klp_module_notify_going(struct klp_patch *patch,
-				    struct klp_object *obj)
-{
-	struct module *pmod = patch->mod;
-	struct module *mod = obj->mod;
+			ret = klp_init_object_loaded(patch, obj);
+			if (ret) {
+				pr_warn("failed to initialize patch '%s' for module '%s' (%d)\n",
+					patch->mod->name, obj->mod->name, ret);
+				goto err;
+			}
 
-	if (patch->state == KLP_DISABLED)
-		goto disabled;
+			if (patch->state == KLP_DISABLED)
+				break;
+
+			pr_notice("applying patch '%s' to loading module '%s'\n",
+				  patch->mod->name, obj->mod->name);
+
+			ret = klp_enable_object(obj);
+			if (ret) {
+				pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
+					patch->mod->name, obj->mod->name, ret);
+				goto err;
+			}
+
+			break;
+		}
+	}
 
-	pr_notice("reverting patch '%s' on unloading module '%s'\n",
-		  pmod->name, mod->name);
+	mutex_unlock(&klp_mutex);
 
-	klp_disable_object(obj);
+	return 0;
 
-disabled:
+err:
+	/*
+	 * If a patch is unsuccessfully applied, return
+	 * error to the module loader.
+	 */
+	pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n",
+		patch->mod->name, obj->mod->name, obj->mod->name);
+	mod->klp_alive = false;
 	klp_free_object_loaded(obj);
+	mutex_unlock(&klp_mutex);
+
+	return ret;
 }
 
-static int klp_module_notify(struct notifier_block *nb, unsigned long action,
-			     void *data)
+void klp_module_going(struct module *mod)
 {
-	int ret;
-	struct module *mod = data;
 	struct klp_patch *patch;
 	struct klp_object *obj;
 
-	if (action != MODULE_STATE_COMING && action != MODULE_STATE_GOING)
-		return 0;
+	if (WARN_ON(mod->state != MODULE_STATE_GOING &&
+		    mod->state != MODULE_STATE_COMING))
+		return;
 
 	mutex_lock(&klp_mutex);
-
 	/*
-	 * Each module has to know that the notifier has been called.
-	 * We never know what module will get patched by a new patch.
+	 * Each module has to know that klp_module_going()
+	 * has been called. We never know what module will
+	 * get patched by a new patch.
 	 */
-	if (action == MODULE_STATE_COMING)
-		mod->klp_alive = true;
-	else /* MODULE_STATE_GOING */
-		mod->klp_alive = false;
+	mod->klp_alive = false;
 
 	list_for_each_entry(patch, &klp_patches, list) {
 		klp_for_each_object(patch, obj) {
 			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
 				continue;
 
-			if (action == MODULE_STATE_COMING) {
-				obj->mod = mod;
-				ret = klp_module_notify_coming(patch, obj);
-				if (ret) {
-					obj->mod = NULL;
-					pr_warn("patch '%s' is in an inconsistent state!\n",
-						patch->mod->name);
-				}
-			} else /* MODULE_STATE_GOING */
-				klp_module_notify_going(patch, obj);
+			if (patch->state != KLP_DISABLED) {
+				pr_notice("reverting patch '%s' on unloading module '%s'\n",
+					  patch->mod->name, obj->mod->name);
+				klp_disable_object(obj);
+			}
 
+			klp_free_object_loaded(obj);
 			break;
 		}
 	}
 
 	mutex_unlock(&klp_mutex);
-
-	return 0;
 }
 
-static struct notifier_block klp_module_nb = {
-	.notifier_call = klp_module_notify,
-	.priority = INT_MIN+1, /* called late but before ftrace notifier */
-};
-
 static int __init klp_init(void)
 {
 	int ret;
@@ -973,21 +978,11 @@ static int __init klp_init(void)
 		return -EINVAL;
 	}
 
-	ret = register_module_notifier(&klp_module_nb);
-	if (ret)
-		return ret;
-
 	klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj);
-	if (!klp_root_kobj) {
-		ret = -ENOMEM;
-		goto unregister;
-	}
+	if (!klp_root_kobj)
+		return -ENOMEM;
 
 	return 0;
-
-unregister:
-	unregister_module_notifier(&klp_module_nb);
-	return ret;
 }
 
 module_init(klp_init);
diff --git a/kernel/module.c b/kernel/module.c
index a5e0089..041200c 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -53,6 +53,7 @@
 #include <asm/sections.h>
 #include <linux/tracepoint.h>
 #include <linux/ftrace.h>
+#include <linux/livepatch.h>
 #include <linux/async.h>
 #include <linux/percpu.h>
 #include <linux/kmemleak.h>
@@ -984,6 +985,7 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
 		mod->exit();
 	blocking_notifier_call_chain(&module_notify_list,
 				     MODULE_STATE_GOING, mod);
+	klp_module_going(mod);
 	ftrace_release_mod(mod);
 
 	async_synchronize_full();
@@ -3258,6 +3260,7 @@ fail:
 	module_put(mod);
 	blocking_notifier_call_chain(&module_notify_list,
 				     MODULE_STATE_GOING, mod);
+	klp_module_going(mod);
 	ftrace_release_mod(mod);
 	free_module(mod);
 	wake_up_all(&module_wq);
@@ -3344,7 +3347,13 @@ out:
 
 static int prepare_coming_module(struct module *mod)
 {
+	int err;
+
 	ftrace_module_enable(mod);
+	err = klp_module_coming(mod);
+	if (err)
+		return err;
+
 	blocking_notifier_call_chain(&module_notify_list,
 				     MODULE_STATE_COMING, mod);
 	return 0;
@@ -3496,6 +3505,7 @@ static int load_module(struct load_info *info, const char __user *uargs,
  coming_cleanup:
 	blocking_notifier_call_chain(&module_notify_list,
 				     MODULE_STATE_GOING, mod);
+	klp_module_going(mod);
 
  bug_cleanup:
 	/* module_bug_cleanup needs module_mutex protection */
-- 
2.4.3
