[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20240805064656.40017-2-zhangyongde.zyd@alibaba-inc.com>
Date: Mon, 5 Aug 2024 14:46:56 +0800
From: "zhangyongde.zyd" <zhangwarden@...il.com>
To: jpoimboe@...nel.org,
mbenes@...e.cz,
jikos@...nel.org,
pmladek@...e.com,
joe.lawrence@...hat.com
Cc: live-patching@...r.kernel.org,
linux-kernel@...r.kernel.org,
Wardenjohn <zhangwarden@...il.com>
Subject: [PATCH v2 1/1] livepatch: Add "using" attribute to klp_func to show which function is in use
From: Wardenjohn <zhangwarden@...il.com>
One system may contain more than one livepatch module. We can see
which patches are enabled. If several patches applied to one system
modify the same function, livepatch will use the function enabled
on top of the function stack. However, we cannot exactly know
which function of which patch is currently running.
This patch introduces a sysfs attribute "using" to klp_func.
For example, if several patches make changes to the function
"meminfo_proc_show", the attribute "enabled" of all the patches is 1.
With this attribute, we can easily know which patch the currently
running version belongs to.
The "using" attribute has three states. 0 is disabled, meaning that this
version of the function is not used. 1 is running, meaning that this
version of the function is now running. -1 is unknown, meaning that
this version of the function is under transition; some tasks are still
changing their running version of this function.
cat /sys/kernel/livepatch/<patch1>/<object1>/<function1,sympos>/using -> 0
means that function1 of patch1 is disabled.
cat /sys/kernel/livepatch/<patchN>/<object1>/<function1,sympos>/using -> 1
means that function1 of patchN is enabled.
cat /sys/kernel/livepatch/<patchN>/<object1>/<function1,sympos>/using -> -1
means that function1 of patchN is under transition and its state is unknown.
Signed-off-by: Wardenjohn <zhangwarden@...il.com>
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index 51a258c24ff5..fd8224969c5c 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -37,6 +37,7 @@
* @nop: temporary patch to use the original code again; dyn. allocated
* @patched: the func has been added to the klp_ops list
* @transition: the func is currently being applied or reverted
+ * @using: the func is on top of the klp_ops function stack and in use
*
* The patched and transition variables define the func's patching state. When
* patching, a func is always in one of the following states:
@@ -52,6 +53,12 @@
* patched=1 transition=1: patched, may be visible to some tasks
* patched=0 transition=1: unpatched, temporary ending state
* patched=0 transition=0: unpatched
+ *
+ * The 'using' flag shows whether this function is currently in use
+ *
+ * using=-1 (unknown): the function is now under transition
+ * using=1 (using): the function is now running
+ * using=0 (not used): the function is not used
*/
struct klp_func {
/* external */
@@ -75,6 +82,7 @@ struct klp_func {
bool nop;
bool patched;
bool transition;
+ int using;
};
struct klp_object;
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 52426665eecc..67630f9f1a21 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -349,6 +349,7 @@ int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs,
* /sys/kernel/livepatch/<patch>/<object>
* /sys/kernel/livepatch/<patch>/<object>/patched
* /sys/kernel/livepatch/<patch>/<object>/<function,sympos>
+ * /sys/kernel/livepatch/<patch>/<object>/<function,sympos>/using
*/
static int __klp_disable_patch(struct klp_patch *patch);
@@ -470,6 +471,22 @@ static struct attribute *klp_object_attrs[] = {
};
ATTRIBUTE_GROUPS(klp_object);
+static ssize_t using_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct klp_func *func;
+
+ func = container_of(kobj, struct klp_func, kobj);
+ return sysfs_emit(buf, "%d\n", func->using);
+}
+
+static struct kobj_attribute using_kobj_attr = __ATTR_RO(using);
+static struct attribute *klp_func_attrs[] = {
+ &using_kobj_attr.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(klp_func);
+
static void klp_free_object_dynamic(struct klp_object *obj)
{
kfree(obj->name);
@@ -631,6 +648,7 @@ static void klp_kobj_release_func(struct kobject *kobj)
static const struct kobj_type klp_ktype_func = {
.release = klp_kobj_release_func,
.sysfs_ops = &kobj_sysfs_ops,
+ .default_groups = klp_func_groups,
};
static void __klp_free_funcs(struct klp_object *obj, bool nops_only)
@@ -773,6 +791,7 @@ static int klp_init_func(struct klp_object *obj, struct klp_func *func)
INIT_LIST_HEAD(&func->stack_node);
func->patched = false;
func->transition = false;
+ func->using = 0;
/* The format for the sysfs directory is <function,sympos> where sympos
* is the nth occurrence of this symbol in kallsyms for the patched
@@ -903,6 +922,7 @@ static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
static void klp_init_func_early(struct klp_object *obj,
struct klp_func *func)
{
+ func->using = false;
kobject_init(&func->kobj, &klp_ktype_func);
list_add_tail(&func->node, &obj->func_list);
}
diff --git a/kernel/livepatch/patch.c b/kernel/livepatch/patch.c
index 90408500e5a3..bf4a8edbd888 100644
--- a/kernel/livepatch/patch.c
+++ b/kernel/livepatch/patch.c
@@ -104,7 +104,7 @@ static void notrace klp_ftrace_handler(unsigned long ip,
* original function.
*/
func = list_entry_rcu(func->stack_node.next,
- struct klp_func, stack_node);
+ struct klp_func, stack_node);
if (&func->stack_node == &ops->func_stack)
goto unlock;
@@ -127,6 +127,7 @@ static void notrace klp_ftrace_handler(unsigned long ip,
static void klp_unpatch_func(struct klp_func *func)
{
struct klp_ops *ops;
+ struct klp_func *stack_top_func;
if (WARN_ON(!func->patched))
return;
@@ -152,6 +153,10 @@ static void klp_unpatch_func(struct klp_func *func)
kfree(ops);
} else {
list_del_rcu(&func->stack_node);
+ // the unpatched function is removed; the new stack top is under transition
+ stack_top_func = list_first_entry(&ops->func_stack, struct klp_func,
+ stack_node);
+ stack_top_func->using = -1;
}
func->patched = false;
@@ -160,6 +165,7 @@ static void klp_unpatch_func(struct klp_func *func)
static int klp_patch_func(struct klp_func *func)
{
struct klp_ops *ops;
+ struct klp_func *stack_top_func;
int ret;
if (WARN_ON(!func->old_func))
@@ -170,6 +176,7 @@ static int klp_patch_func(struct klp_func *func)
ops = klp_find_ops(func->old_func);
if (!ops) {
+ // this function is being patched for the first time
unsigned long ftrace_loc;
ftrace_loc = ftrace_location((unsigned long)func->old_func);
@@ -211,11 +218,16 @@ static int klp_patch_func(struct klp_func *func)
goto err;
}
-
} else {
+ // stack_top_func is going to be in transition
+ stack_top_func = list_first_entry(&ops->func_stack, struct klp_func,
+ stack_node);
+ stack_top_func->using = -1;
+ // The newly patched function is the one being enabled
list_add_rcu(&func->stack_node, &ops->func_stack);
- }
+ }
+ func->using = -1;
func->patched = true;
return 0;
diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c
index ba069459c101..12241dabce6f 100644
--- a/kernel/livepatch/transition.c
+++ b/kernel/livepatch/transition.c
@@ -90,8 +90,9 @@ static void klp_synchronize_transition(void)
static void klp_complete_transition(void)
{
struct klp_object *obj;
- struct klp_func *func;
+ struct klp_func *func, *next_func, *stack_top_func;
struct task_struct *g, *task;
+ struct klp_ops *ops;
unsigned int cpu;
pr_debug("'%s': completing %s transition\n",
@@ -119,9 +120,35 @@ static void klp_complete_transition(void)
klp_synchronize_transition();
}
- klp_for_each_object(klp_transition_patch, obj)
- klp_for_each_func(obj, func)
- func->transition = false;
+ /*
+ * The transition is finished. The stack top function is now truly
+ * running. The previous function's 'using' is set to 0 as no task is
+ * using that function anymore.
+ *
+ * If this patch is being applied, all of its functions are in use.
+ * If it is being reverted, the function on top of each func stack is in use.
+ */
+ if (klp_target_state == KLP_TRANSITION_PATCHED)
+ klp_for_each_object(klp_transition_patch, obj)
+ klp_for_each_func(obj, func){
+ func->using = 1;
+ func->transition = false;
+ next_func = list_entry_rcu(func->stack_node.next,
+ struct klp_func, stack_node);
+ next_func->using = 0;
+ }
+ else
+ // for the unpatched func, if ops still exist, the stack top func is in use
+ klp_for_each_object(klp_transition_patch, obj)
+ klp_for_each_func(obj, func){
+ func->transition = false;
+ ops = klp_find_ops(func->old_func);
+ if (ops){
+ stack_top_func = list_first_entry(&ops->func_stack, struct klp_func,
+ stack_node);
+ stack_top_func->using = 1;
+ }
+ }
/* Prevent klp_ftrace_handler() from seeing KLP_TRANSITION_IDLE state */
if (klp_target_state == KLP_TRANSITION_PATCHED)
@@ -538,6 +565,7 @@ void klp_start_transition(void)
klp_transition_patch->mod->name,
klp_target_state == KLP_TRANSITION_PATCHED ? "patching" : "unpatching");
+
/*
* Mark all normal tasks as needing a patch state update. They'll
* switch either in klp_try_complete_transition() or as they exit the
@@ -633,6 +661,9 @@ void klp_init_transition(struct klp_patch *patch, int state)
*
* When unpatching, the funcs are already in the func_stack and so are
* already visible to the ftrace handler.
+ *
+ * When this patch is in transition, all functions of this patch will
+ * be set to unknown
*/
klp_for_each_object(patch, obj)
klp_for_each_func(obj, func)
--
2.18.2
Powered by blists - more mailing lists