Message-Id: <20240920090404.52153-2-zhangwarden@gmail.com>
Date: Fri, 20 Sep 2024 17:04:03 +0800
From: Wardenjohn <zhangwarden@...il.com>
To: jpoimboe@...nel.org,
	mbenes@...e.cz,
	jikos@...nel.org,
	pmladek@...e.com,
	joe.lawrence@...hat.com
Cc: live-patching@...r.kernel.org,
	linux-kernel@...r.kernel.org,
	Wardenjohn <zhangwarden@...il.com>
Subject: [PATCH 1/2] livepatch: introduce 'order' sysfs interface to klp_patch

This feature exposes the order in which livepatches were applied.
With the 'order' sysfs interface of a klp_patch, the patch order can
be used to find out which patch's version of a function is currently
active.

After the discussion, we decided that a patch-level sysfs interface
is the only acceptable way to expose this information.

The interface works as follows:
cat /sys/kernel/livepatch/livepatch_1/order -> 1
means the livepatch_1 module is the 1st klp patch applied.

cat /sys/kernel/livepatch/livepatch_module/order -> N
means the livepatch_module is the Nth klp patch applied
to the system.
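
For illustration only (not part of this patch), a minimal userspace
sketch that walks /sys/kernel/livepatch and prints each loaded patch
together with the value of the proposed 'order' attribute. It assumes
only the sysfs layout described above:

	/* sketch: list livepatches by the proposed 'order' attribute */
	#include <dirent.h>
	#include <stdio.h>

	int main(void)
	{
		const char *base = "/sys/kernel/livepatch";
		char path[512];
		struct dirent *de;
		DIR *dir = opendir(base);

		if (!dir) {
			perror(base);
			return 1;
		}

		while ((de = readdir(dir)) != NULL) {
			FILE *f;
			int order;

			if (de->d_name[0] == '.')
				continue;

			snprintf(path, sizeof(path), "%s/%s/order",
				 base, de->d_name);
			f = fopen(path, "r");
			if (!f)
				continue;	/* entry without 'order' */
			if (fscanf(f, "%d", &order) == 1)
				printf("%s: patch #%d\n", de->d_name, order);
			fclose(f);
		}
		closedir(dir);
		return 0;
	}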

Suggested-by: Petr Mladek <pmladek@...e.com>
Suggested-by: Miroslav Benes <mbenes@...e.cz>
Suggested-by: Josh Poimboeuf <jpoimboe@...nel.org>
Signed-off-by: Wardenjohn <zhangwarden@...il.com>

diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index 51a258c24ff5..0fbbc1636ebe 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -154,6 +154,7 @@ struct klp_state {
  * @forced:	was involved in a forced transition
  * @free_work:	patch cleanup from workqueue-context
  * @finish:	for waiting till it is safe to remove the patch module
+ * @order:	the order in which this patch was applied to the system
  */
 struct klp_patch {
 	/* external */
@@ -170,6 +171,7 @@ struct klp_patch {
 	bool forced;
 	struct work_struct free_work;
 	struct completion finish;
+	int order;
 };
 
 #define klp_for_each_object_static(patch, obj) \
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 3c21c31796db..024853aa43a8 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -347,6 +347,7 @@ int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs,
  * /sys/kernel/livepatch/<patch>/transition
  * /sys/kernel/livepatch/<patch>/force
  * /sys/kernel/livepatch/<patch>/replace
+ * /sys/kernel/livepatch/<patch>/order
  * /sys/kernel/livepatch/<patch>/<object>
  * /sys/kernel/livepatch/<patch>/<object>/patched
  * /sys/kernel/livepatch/<patch>/<object>/<function,sympos>
@@ -452,15 +453,26 @@ static ssize_t replace_show(struct kobject *kobj,
 	return sysfs_emit(buf, "%d\n", patch->replace);
 }
 
+static ssize_t order_show(struct kobject *kobj,
+			struct kobj_attribute *attr, char *buf)
+{
+	struct klp_patch *patch;
+
+	patch = container_of(kobj, struct klp_patch, kobj);
+	return sysfs_emit(buf, "%d\n", patch->order);
+}
+
 static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
 static struct kobj_attribute transition_kobj_attr = __ATTR_RO(transition);
 static struct kobj_attribute force_kobj_attr = __ATTR_WO(force);
 static struct kobj_attribute replace_kobj_attr = __ATTR_RO(replace);
+static struct kobj_attribute order_kobj_attr = __ATTR_RO(order);
 static struct attribute *klp_patch_attrs[] = {
 	&enabled_kobj_attr.attr,
 	&transition_kobj_attr.attr,
 	&force_kobj_attr.attr,
 	&replace_kobj_attr.attr,
+	&order_kobj_attr.attr,
 	NULL
 };
 ATTRIBUTE_GROUPS(klp_patch);
diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c
index ba069459c101..73bce68d22f8 100644
--- a/kernel/livepatch/transition.c
+++ b/kernel/livepatch/transition.c
@@ -46,6 +46,15 @@ EXPORT_SYMBOL(klp_sched_try_switch_key);
 
 #endif /* CONFIG_PREEMPT_DYNAMIC && CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */
 
+static inline int klp_get_patch_order(struct klp_patch *patch)
+{
+	int order = 0;
+
+	klp_for_each_patch(patch)
+		order = order + 1;
+	return order;
+}
+
 /*
  * This work can be performed periodically to finish patching or unpatching any
  * "straggler" tasks which failed to transition in the first attempt.
@@ -591,6 +600,8 @@ void klp_init_transition(struct klp_patch *patch, int state)
 	pr_debug("'%s': initializing %s transition\n", patch->mod->name,
 		 klp_target_state == KLP_TRANSITION_PATCHED ? "patching" : "unpatching");
 
+	patch->order = klp_get_patch_order(patch);
+
 	/*
 	 * Initialize all tasks to the initial patch state to prepare them for
 	 * switching to the target state.
-- 
2.18.2

