Message-Id: <a5c582babc6088a7da1121a3301ea29a5a58641d.1504128316.git.jbaron@akamai.com>
Date: Wed, 30 Aug 2017 17:38:43 -0400
From: Jason Baron <jbaron@...mai.com>
To: linux-kernel@...r.kernel.org, live-patching@...r.kernel.org
Cc: jpoimboe@...hat.com, jeyu@...nel.org, jikos@...nel.org,
mbenes@...e.cz, pmladek@...e.com
Subject: [PATCH v2 1/3] livepatch: Add dynamic klp_object and klp_func iterators
In preparation for introducing atomic replace, add iterators for klp_func
and klp_object so that objects and functions can be dynamically allocated
(as atomic replace will require). This patch is intended to be effectively
a no-op until atomic replace is introduced.
Signed-off-by: Jason Baron <jbaron@...mai.com>
Cc: Josh Poimboeuf <jpoimboe@...hat.com>
Cc: Jessica Yu <jeyu@...nel.org>
Cc: Jiri Kosina <jikos@...nel.org>
Cc: Miroslav Benes <mbenes@...e.cz>
Cc: Petr Mladek <pmladek@...e.com>
---
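For reference, a minimal usage sketch (not part of this patch, modeled on
samples/livepatch/livepatch-sample.c): a statically defined patch keeps the
same NULL-terminated objs[]/funcs[] arrays, and the reworked
klp_for_each_object()/klp_for_each_func() macros walk it exactly as before.
The patched symbol below is illustrative only, and the iteration assumes the
list heads have already been set up via klp_register_patch() (see the core.c
hunk below).

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/livepatch.h>

/* replacement function; the symbol patched here is illustrative only */
static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "this has been live patched\n");
	return 0;
}

static struct klp_func funcs[] = {
	{
		.old_name = "cmdline_proc_show",
		.new_func = livepatch_cmdline_proc_show,
	}, { }
};

static struct klp_object objs[] = {
	{
		/* NULL name means vmlinux */
		.funcs = funcs,
	}, { }
};

static struct klp_patch patch = {
	.mod = THIS_MODULE,
	.objs = objs,
};

/* both macros still terminate on the empty sentinel entries above */
static void dump_patch(void)
{
	struct klp_object *obj;
	struct klp_func *func;

	klp_for_each_object(&patch, obj)
		klp_for_each_func(obj, func)
			pr_info("%s: %s\n", obj->name ? obj->name : "vmlinux",
				func->old_name);
}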
include/linux/livepatch.h | 94 +++++++++++++++++++++++++++++++++++++++++++++--
kernel/livepatch/core.c | 4 ++
2 files changed, 94 insertions(+), 4 deletions(-)
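The dynamic side is only illustrated here, since nothing in this patch
allocates entries yet. Purely as a sketch of how the new obj_list/func_list
heads and obj_entry/func_entry nodes are expected to be used, a follow-up
atomic replace patch could thread dynamically allocated objects onto the
patch along these lines (the helper below is hypothetical and not part of
this series):

#include <linux/list.h>
#include <linux/livepatch.h>
#include <linux/slab.h>

/* hypothetical helper, for illustration only */
static struct klp_object *klp_alloc_object(struct klp_patch *patch,
					   const char *name)
{
	struct klp_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	obj->name = name;
	INIT_LIST_HEAD(&obj->func_list);
	/*
	 * A non-empty obj_entry is how obj_iter_next()/func_iter_init()
	 * distinguish a dynamically allocated object from a static one,
	 * and linking it here is what makes klp_for_each_object() visit
	 * the object once the static array has been exhausted.
	 */
	list_add_tail(&obj->obj_entry, &patch->obj_list);
	return obj;
}

Statically defined entries keep living in the fixed arrays, so existing
patch modules need no changes; dynamically created ones are only reachable
through the new list heads, which is why klp_init_patch() below initializes
obj_list before the walk and obj_entry/func_list for each object.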
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index 194991e..8d3df55 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -24,6 +24,7 @@
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/completion.h>
+#include <linux/list.h>
#if IS_ENABLED(CONFIG_LIVEPATCH)
@@ -44,6 +45,7 @@
* @old_addr: the address of the function being patched
* @kobj: kobject for sysfs resources
* @stack_node: list node for klp_ops func_stack list
+ * @func_entry: used to link struct klp_func to struct klp_object
* @old_size: size of the old function
* @new_size: size of the new function
* @patched: the func has been added to the klp_ops list
@@ -82,6 +84,7 @@ struct klp_func {
unsigned long old_addr;
struct kobject kobj;
struct list_head stack_node;
+ struct list_head func_entry;
unsigned long old_size, new_size;
bool patched;
bool transition;
@@ -92,6 +95,8 @@ struct klp_func {
* @name: module name (or NULL for vmlinux)
* @funcs: function entries for functions to be patched in the object
* @kobj: kobject for sysfs resources
+ * @func_list: head of list for dynamically allocated struct klp_func
+ * @obj_entry: used to link struct klp_object to struct klp_patch
* @mod: kernel module associated with the patched object
* (NULL for vmlinux)
* @patched: the object's funcs have been added to the klp_ops list
@@ -103,6 +108,8 @@ struct klp_object {
/* internal */
struct kobject kobj;
+ struct list_head func_list;
+ struct list_head obj_entry;
struct module *mod;
bool patched;
};
@@ -114,6 +121,7 @@ struct klp_object {
* @immediate: patch all funcs immediately, bypassing safety mechanisms
* @list: list node for global list of registered patches
* @kobj: kobject for sysfs resources
+ * @obj_list: head of list for dynamically allocated struct klp_object
* @enabled: the patch is enabled (but operation may be incomplete)
* @finish: for waiting till it is safe to remove the patch module
*/
@@ -126,17 +134,95 @@ struct klp_patch {
/* internal */
struct list_head list;
struct kobject kobj;
+ struct list_head obj_list;
bool enabled;
struct completion finish;
};
+static inline struct klp_object *obj_iter_next(struct klp_patch *patch,
+ struct klp_object *obj)
+{
+ struct klp_object *next_obj = NULL;
+
+ if (list_empty(&obj->obj_entry)) {
+ next_obj = obj + 1;
+ if (next_obj->funcs || next_obj->name)
+ goto out;
+ else
+ next_obj = NULL;
+ if (!list_empty(&patch->obj_list))
+ next_obj = container_of(patch->obj_list.next,
+ struct klp_object,
+ obj_entry);
+ goto out;
+ }
+ if (obj->obj_entry.next != &patch->obj_list)
+ next_obj = container_of(obj->obj_entry.next,
+ struct klp_object,
+ obj_entry);
+out:
+ return next_obj;
+}
+
+static inline struct klp_object *obj_iter_init(struct klp_patch *patch)
+{
+ if (patch->objs->funcs || patch->objs->name)
+ return patch->objs;
+ else
+ return NULL;
+}
+
#define klp_for_each_object(patch, obj) \
- for (obj = patch->objs; obj->funcs || obj->name; obj++)
+ for (obj = obj_iter_init(patch); obj; obj = obj_iter_next(patch, obj))
+
+static inline struct klp_func *func_iter_next(struct klp_object *obj,
+ struct klp_func *func)
+{
+ struct klp_func *next_func = NULL;
+
+ if (list_empty(&func->func_entry)) {
+ next_func = func + 1;
+ if (next_func->old_name || next_func->new_func ||
+ next_func->old_sympos)
+ goto out;
+ else
+ next_func = NULL;
+ if (!list_empty(&obj->func_list))
+ next_func = container_of(obj->func_list.next,
+ struct klp_func,
+ func_entry);
+ goto out;
+ }
+ if (func->func_entry.next != &obj->func_list)
+ next_func = container_of(func->func_entry.next,
+ struct klp_func,
+ func_entry);
+out:
+ return next_func;
+}
+
+static inline struct klp_func *func_iter_init(struct klp_object *obj)
+{
+ /* statically allocated */
+ if (list_empty(&obj->obj_entry)) {
+ if (obj->funcs->old_name || obj->funcs->new_func ||
+ obj->funcs->old_sympos)
+ return obj->funcs;
+ else
+ return NULL;
+ } else {
+ if (!list_empty(&obj->func_list))
+ return container_of(obj->func_list.next,
+ struct klp_func,
+ func_entry);
+ else
+ return NULL;
+ }
+}
#define klp_for_each_func(obj, func) \
- for (func = obj->funcs; \
- func->old_name || func->new_func || func->old_sympos; \
- func++)
+ for (func = func_iter_init(obj); func; \
+ func = func_iter_next(obj, func))
int klp_register_patch(struct klp_patch *);
int klp_unregister_patch(struct klp_patch *);
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index b9628e4..6004be3 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -606,6 +606,7 @@ static int klp_init_func(struct klp_object *obj, struct klp_func *func)
return -EINVAL;
INIT_LIST_HEAD(&func->stack_node);
+ INIT_LIST_HEAD(&func->func_entry);
func->patched = false;
func->transition = false;
@@ -729,7 +730,10 @@ static int klp_init_patch(struct klp_patch *patch)
return ret;
}
+ INIT_LIST_HEAD(&patch->obj_list);
klp_for_each_object(patch, obj) {
+ INIT_LIST_HEAD(&obj->obj_entry);
+ INIT_LIST_HEAD(&obj->func_list);
ret = klp_init_object(patch, obj);
if (ret)
goto free;
--
2.6.1