Message-ID: <ZvKiPvID1K0dAHnq@pathway.suse.cz>
Date: Tue, 24 Sep 2024 13:27:58 +0200
From: Petr Mladek <pmladek@...e.com>
To: Wardenjohn <zhangwarden@...il.com>
Cc: jpoimboe@...nel.org, mbenes@...e.cz, jikos@...nel.org,
	joe.lawrence@...hat.com, live-patching@...r.kernel.org,
	linux-kernel@...r.kernel.org
Subject: Re: [PATCH 1/2] livepatch: introduce 'order' sysfs interface to klp_patch

On Fri 2024-09-20 17:04:03, Wardenjohn wrote:
> This feature provides livepatch patch order information.
> With the 'order' sysfs interface of a klp_patch, we can
> use the patch order to find out which patch's version of
> a function is currently active.
> 
> After the discussion, we decided that a patch-level sysfs
> interface is the only acceptable way to introduce this
> information.
> 
> This feature works like this:
> cat /sys/kernel/livepatch/livepatch_1/order -> 1
> means this livepatch_1 module is the 1st klp patch applied.
> 
> cat /sys/kernel/livepatch/livepatch_module/order -> N
> means this livepatch_module is the Nth klp patch applied
> to the system.
>
> --- a/kernel/livepatch/transition.c
> +++ b/kernel/livepatch/transition.c
> @@ -46,6 +46,15 @@ EXPORT_SYMBOL(klp_sched_try_switch_key);
>  
>  #endif /* CONFIG_PREEMPT_DYNAMIC && CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */
>  
> +static inline int klp_get_patch_order(struct klp_patch *patch)
> +{
> +	int order = 0;
> +
> +	klp_for_each_patch(patch)
> +		order = order + 1;
> +	return order;
> +}

This does not work well. It captures the position on the stack at the
time the livepatch is loaded. The value is not updated when any
livepatch gets removed, so it can become wrong.

I have reproduced the problem:

	# modprobe livepatch-sample
	# modprobe livepatch-shadow-fix1
	# cat /sys/kernel/livepatch/livepatch_sample/order
	1
	# cat /sys/kernel/livepatch/livepatch_shadow_fix1/order
	2

	# echo 0 >/sys/kernel/livepatch/livepatch_sample/enabled
	# rmmod livepatch_sample
	# cat /sys/kernel/livepatch/livepatch_shadow_fix1/order
	2

	# modprobe livepatch-sample
	# cat /sys/kernel/livepatch/livepatch_shadow_fix1/order
	2
	# cat /sys/kernel/livepatch/livepatch_sample/order
	2

BANG: The livepatches have the same order.
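
Listing the attribute for all loaded patches makes the collision
obvious (a quick check; the output below is reconstructed from the
sequence above):

	# grep -H . /sys/kernel/livepatch/*/order
	/sys/kernel/livepatch/livepatch_sample/order:2
	/sys/kernel/livepatch/livepatch_shadow_fix1/order:2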

I suggest replacing this with a global load counter. Something like:

diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index 51a258c24ff5..44a8887573bb 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -150,10 +150,11 @@ struct klp_state {
  * @list:	list node for global list of actively used patches
  * @kobj:	kobject for sysfs resources
  * @obj_list:	dynamic list of the object entries
+ * @load_counter:	sequence number in which the patch was loaded
  * @enabled:	the patch is enabled (but operation may be incomplete)
  * @forced:	was involved in a forced transition
  * @free_work:	patch cleanup from workqueue-context
  * @finish:	for waiting till it is safe to remove the patch module
  */
 struct klp_patch {
 	/* external */
@@ -166,6 +167,7 @@ struct klp_patch {
 	struct list_head list;
 	struct kobject kobj;
 	struct list_head obj_list;
+	int load_counter;
 	bool enabled;
 	bool forced;
 	struct work_struct free_work;
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 3c21c31796db..3a858477ae02 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -44,6 +44,9 @@ DEFINE_MUTEX(klp_mutex);
  */
 LIST_HEAD(klp_patches);
 
+/* The counter is incremented every time a new livepatch is loaded. */
+static int klp_load_counter;
+
 static struct kobject *klp_root_kobj;
 
 static bool klp_is_module(struct klp_object *obj)
@@ -347,6 +350,7 @@ int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs,
  * /sys/kernel/livepatch/<patch>/transition
  * /sys/kernel/livepatch/<patch>/force
  * /sys/kernel/livepatch/<patch>/replace
+ * /sys/kernel/livepatch/<patch>/load_counter
  * /sys/kernel/livepatch/<patch>/<object>
  * /sys/kernel/livepatch/<patch>/<object>/patched
  * /sys/kernel/livepatch/<patch>/<object>/<function,sympos>
@@ -452,15 +456,26 @@ static ssize_t replace_show(struct kobject *kobj,
 	return sysfs_emit(buf, "%d\n", patch->replace);
 }
 
+static ssize_t load_counter_show(struct kobject *kobj,
+			struct kobj_attribute *attr, char *buf)
+{
+	struct klp_patch *patch;
+
+	patch = container_of(kobj, struct klp_patch, kobj);
+	return sysfs_emit(buf, "%d\n", patch->load_counter);
+}
+
 static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
 static struct kobj_attribute transition_kobj_attr = __ATTR_RO(transition);
 static struct kobj_attribute force_kobj_attr = __ATTR_WO(force);
 static struct kobj_attribute replace_kobj_attr = __ATTR_RO(replace);
+static struct kobj_attribute load_counter_kobj_attr = __ATTR_RO(load_counter);
 static struct attribute *klp_patch_attrs[] = {
 	&enabled_kobj_attr.attr,
 	&transition_kobj_attr.attr,
 	&force_kobj_attr.attr,
 	&replace_kobj_attr.attr,
+	&load_counter_kobj_attr.attr,
 	NULL
 };
 ATTRIBUTE_GROUPS(klp_patch);
@@ -934,6 +949,7 @@ static void klp_init_patch_early(struct klp_patch *patch)
 	INIT_LIST_HEAD(&patch->list);
 	INIT_LIST_HEAD(&patch->obj_list);
 	kobject_init(&patch->kobj, &klp_ktype_patch);
+	patch->load_counter = klp_load_counter + 1;
 	patch->enabled = false;
 	patch->forced = false;
 	INIT_WORK(&patch->free_work, klp_free_patch_work_fn);
@@ -1050,6 +1066,7 @@ static int __klp_enable_patch(struct klp_patch *patch)
 	}
 
 	klp_start_transition();
+	klp_load_counter++;
 	patch->enabled = true;
 	klp_try_complete_transition();
 
-- 
2.46.1
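
With this sketch applied (untested), the reproducer above should give
distinct values, something like:

	# modprobe livepatch-sample
	# modprobe livepatch-shadow-fix1
	# echo 0 >/sys/kernel/livepatch/livepatch_sample/enabled
	# rmmod livepatch_sample
	# modprobe livepatch-sample
	# cat /sys/kernel/livepatch/livepatch_shadow_fix1/load_counter
	2
	# cat /sys/kernel/livepatch/livepatch_sample/load_counter
	3

The counter only grows, so a value is never reused after a livepatch
is removed and another one is loaded.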

Any better (shorter) name would be appreciated ;-)

Best Regards,
Petr
