Message-Id: <20240828022350.71456-3-zhangwarden@gmail.com>
Date: Wed, 28 Aug 2024 10:23:50 +0800
From: Wardenjohn <zhangwarden@...il.com>
To: jpoimboe@...nel.org,
mbenes@...e.cz,
jikos@...nel.org,
pmladek@...e.com,
joe.lawrence@...hat.com
Cc: live-patching@...r.kernel.org,
linux-kernel@...r.kernel.org,
Wardenjohn <zhangwarden@...il.com>
Subject: [PATCH v4 2/2] livepatch: Add "using" attribute to klp_func to show which function version is in use
A system may have more than one livepatch module loaded, and the "enabled"
attribute shows which patches are enabled. If several patches applied to
the same system modify the same function, livepatch runs the version on
top of the function stack. However, there is currently no way to tell
exactly which patch provides the version that is actually running.

This patch introduces a new sysfs attribute, "using", for klp_func. For
example, if several patches change the function "meminfo_proc_show", the
"enabled" attribute of all of those patches is 1. With the new attribute,
it is easy to tell which patch provides the version that is currently in
use.

The "using" attribute has three states. 0 (not used) means this version
of the function is not in use. 1 (in use) means this version is the one
currently running. -1 (unknown) means this version is in transition, i.e.
some tasks are still switching to or from it.

cat /sys/kernel/livepatch/<patch1>/<object1>/<function1,sympos>/using -> 0
means that function1 of patch1 is not in use.
cat /sys/kernel/livepatch/<patchN>/<object1>/<function1,sympos>/using -> 1
means that function1 of patchN is the version currently in use.
cat /sys/kernel/livepatch/<patchN>/<object1>/<function1,sympos>/using -> -1
means that function1 of patchN is in transition; its state is not yet known.
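
For illustration, the state of every patched function could be listed with
a small shell loop like the one below (just a sketch, assuming this patch
is applied; the actual paths depend on the patches loaded on the system):

  for f in /sys/kernel/livepatch/*/*/*/using; do
          [ -e "$f" ] || continue
          printf '%s: %s\n' "$f" "$(cat "$f")"
  done
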
Signed-off-by: Wardenjohn <zhangwarden@...il.com>
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index d874aecc817b..5a6bacebd66f 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -57,6 +57,7 @@ struct klp_ops {
* @nop: temporary patch to use the original code again; dyn. allocated
* @patched: the func has been added to the klp_ops list
* @transition: the func is currently being applied or reverted
+ * @using: tracks whether this func version is in use, not used, or in transition
*
* The patched and transition variables define the func's patching state. When
* patching, a func is always in one of the following states:
@@ -72,6 +73,12 @@ struct klp_ops {
* patched=1 transition=1: patched, may be visible to some tasks
* patched=0 transition=1: unpatched, temporary ending state
* patched=0 transition=0: unpatched
+ *
+ * The 'using' flag shows whether this version of the function is in use:
+ *
+ * using=-1 (unknown): this version is in transition
+ * using=1 (in use): this version is currently running
+ * using=0 (not used): this version is not in use
*/
struct klp_func {
/* external */
@@ -96,6 +103,7 @@ struct klp_func {
bool nop;
bool patched;
bool transition;
+ int using;
};
struct klp_object;
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index e4572bf34316..bc1b2085e3c5 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -349,6 +349,7 @@ int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs,
* /sys/kernel/livepatch/<patch>/<object>
* /sys/kernel/livepatch/<patch>/<object>/patched
* /sys/kernel/livepatch/<patch>/<object>/<function,sympos>
+ * /sys/kernel/livepatch/<patch>/<object>/<function,sympos>/using
*/
static int __klp_disable_patch(struct klp_patch *patch);
@@ -470,6 +471,22 @@ static struct attribute *klp_object_attrs[] = {
};
ATTRIBUTE_GROUPS(klp_object);
+static ssize_t using_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct klp_func *func;
+
+ func = container_of(kobj, struct klp_func, kobj);
+ return sysfs_emit(buf, "%d\n", func->using);
+}
+
+static struct kobj_attribute using_kobj_attr = __ATTR_RO(using);
+static struct attribute *klp_func_attrs[] = {
+ &using_kobj_attr.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(klp_func);
+
static void klp_free_object_dynamic(struct klp_object *obj)
{
kfree(obj->name);
@@ -631,6 +648,7 @@ static void klp_kobj_release_func(struct kobject *kobj)
static const struct kobj_type klp_ktype_func = {
.release = klp_kobj_release_func,
.sysfs_ops = &kobj_sysfs_ops,
+ .default_groups = klp_func_groups,
};
static void __klp_free_funcs(struct klp_object *obj, bool nops_only)
@@ -775,6 +793,7 @@ static int klp_init_func(struct klp_object *obj, struct klp_func *func)
INIT_LIST_HEAD(&func->stack_node);
func->patched = false;
func->transition = false;
+ func->using = 0;
/* The format for the sysfs directory is <function,sympos> where sympos
* is the nth occurrence of this symbol in kallsyms for the patched
diff --git a/kernel/livepatch/patch.c b/kernel/livepatch/patch.c
index 8ab9c35570f4..5138cedfcfaa 100644
--- a/kernel/livepatch/patch.c
+++ b/kernel/livepatch/patch.c
@@ -134,6 +134,7 @@ static void notrace klp_ftrace_handler(unsigned long ip,
static void klp_unpatch_func(struct klp_func *func)
{
struct klp_ops *ops;
+ struct klp_func *stack_top_func;
if (WARN_ON(!func->patched))
return;
@@ -160,6 +161,10 @@ static void klp_unpatch_func(struct klp_func *func)
kfree(ops);
} else {
list_del_rcu(&func->stack_node);
+ /* The unpatched func is removed; mark the new stack top as in transition. */
+ stack_top_func = list_first_entry(&ops->func_stack, struct klp_func,
+ stack_node);
+ stack_top_func->using = -1;
}
func->patched = false;
@@ -168,6 +173,7 @@ static void klp_unpatch_func(struct klp_func *func)
static int klp_patch_func(struct klp_func *func)
{
struct klp_ops *ops;
+ struct klp_func *stack_top_func;
int ret;
if (WARN_ON(!func->old_func))
@@ -219,10 +225,16 @@ static int klp_patch_func(struct klp_func *func)
func->ops = ops;
} else {
+ /* The current stack top is about to be superseded; mark it as in transition. */
+ stack_top_func = list_first_entry(&ops->func_stack, struct klp_func,
+ stack_node);
+ stack_top_func->using = -1;
+ /* The newly patched func goes on top of the stack and is being enabled. */
list_add_rcu(&func->stack_node, &ops->func_stack);
}
func->patched = true;
+ func->using = -1;
return 0;
diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c
index d9a3f9c7a93b..365dac635efe 100644
--- a/kernel/livepatch/transition.c
+++ b/kernel/livepatch/transition.c
@@ -90,8 +90,9 @@ static void klp_synchronize_transition(void)
static void klp_complete_transition(void)
{
struct klp_object *obj;
- struct klp_func *func;
+ struct klp_func *func, *next_func, *stack_top_func;
struct task_struct *g, *task;
+ struct klp_ops *ops;
unsigned int cpu;
pr_debug("'%s': completing %s transition\n",
@@ -119,9 +120,39 @@ static void klp_complete_transition(void)
klp_synchronize_transition();
}
- klp_for_each_object(klp_transition_patch, obj)
- klp_for_each_func(obj, func)
- func->transition = false;
+ /*
+ * The transition has finished. The func on top of each func_stack is
+ * now the one truly running, and the version it replaced is no longer
+ * used by any task, so mark that one as not used.
+ *
+ * When patching, every func of the transition patch is now in use.
+ * When unpatching, the func left on top of each stack is in use.
+ */
+ if (klp_target_state == KLP_TRANSITION_PATCHED) {
+ klp_for_each_object(klp_transition_patch, obj) {
+ klp_for_each_func(obj, func) {
+ func->using = 1;
+ func->transition = false;
+ next_func = list_entry_rcu(func->stack_node.next,
+ struct klp_func, stack_node);
+ if (!list_is_last(&func->stack_node, &func->ops->func_stack))
+ next_func->using = 0;
+ }
+ }
+ } else {
+ /* When unpatching, the func left on top of the stack, if any, is in use. */
+ klp_for_each_object(klp_transition_patch, obj) {
+ klp_for_each_func(obj, func) {
+ func->transition = false;
+ ops = func->ops;
+ if (ops) {
+ stack_top_func = list_first_entry(&ops->func_stack,
+ struct klp_func, stack_node);
+ stack_top_func->using = 1;
+ }
+ }
+ }
+ }
/* Prevent klp_ftrace_handler() from seeing KLP_TRANSITION_IDLE state */
if (klp_target_state == KLP_TRANSITION_PATCHED)
--
2.18.2