Message-ID: <20191026011741.xywerjv62vdmz6sp@treble>
Date:   Fri, 25 Oct 2019 20:17:41 -0500
From:   Josh Poimboeuf <jpoimboe@...hat.com>
To:     Peter Zijlstra <peterz@...radead.org>
Cc:     Petr Mladek <pmladek@...e.com>, x86@...nel.org,
        linux-kernel@...r.kernel.org, rostedt@...dmis.org,
        mhiramat@...nel.org, bristot@...hat.com, jbaron@...mai.com,
        torvalds@...ux-foundation.org, tglx@...utronix.de,
        mingo@...nel.org, namit@...are.com, hpa@...or.com, luto@...nel.org,
        ard.biesheuvel@...aro.org, jeyu@...nel.org,
        live-patching@...r.kernel.org, Mark Rutland <mark.rutland@....com>
Subject: Re: [PATCH v4 15/16] module: Move where we mark modules RO,X

On Fri, Oct 25, 2019 at 12:06:12PM +0200, Peter Zijlstra wrote:
> On Fri, Oct 25, 2019 at 10:43:00AM +0200, Peter Zijlstra wrote:
> 
> > But none of that explains why apply_alternatives() is also delayed.
> > 
> > So I'm very tempted to just revert that patchset for doing it all
> > wrong.
> 
> And I've done just that. This includes Josh's validation patch, the
> revert, and my klp_apply_relocations_add() patches with the removal of
> module_disable_ro().
> 
> Josh, can you test or give me a clue on how to test? I need to run a few
> errands today, but I'll try and have a poke either tonight or tomorrow.
> 
>   git://git.kernel.org/pub/scm/linux/kernel/git/peterz/queue.git x86/rwx

I looked at this today.  A few potential tweaks:

- The new klp_apply_relocate_add() interface isn't needed.  Instead,
  apply_relocate_add() can use the module state to decide whether to
  text_poke() (see the sketch below).

- For robustness I think we need to apply vmlinux-specific klp
  relocations at the same time as normal relocations.
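
Putting those together, the intended flow would be roughly the
following (a sketch with my own annotations; the real changes are in
the diff below):

	/*
	 * 1) klp module loads:
	 *
	 *    load_module()
	 *      apply_relocations()
	 *        klp_write_relocations(mod, NULL)     // vmlinux-specific klp relas
	 *          apply_relocate_add()               // state != LIVE: memcpy
	 *
	 * 2) to-be-patched module loads (or is already loaded):
	 *
	 *    klp_init_object_loaded()
	 *      klp_write_relocations(patch->mod, obj) // module-specific klp relas
	 *        apply_relocate_add()                 // text_poke() + sync once
	 *                                             // the klp module is live
	 */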

Rough untested changes below.  I still need to finish changing
kpatch-build and then I'll need to do a LOT of testing.

I can take over the livepatch-specific patches if you want.  Or however
you want to do it.

diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index 7fc519b9b4e0..6a70213854f0 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -451,14 +451,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
 		       unsigned int symindex, unsigned int relsec,
 		       struct module *me)
 {
-	return __apply_relocate_add(sechdrs, strtab, symindex, relsec, me, memwrite);
-}
+	bool early = me->state != MODULE_STATE_LIVE;
 
-int klp_apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
-		       unsigned int symindex, unsigned int relsec,
-		       struct module *me)
-{
-	return __apply_relocate_add(sechdrs, strtab, symindex, relsec, me, s390_kernel_write);
+	return __apply_relocate_add(sechdrs, strtab, symindex, relsec, me,
+				    early ? memwrite : s390_kernel_write);
 }
 
 int module_finalize(const Elf_Ehdr *hdr,
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
index 5eee618a98c5..30174798ff79 100644
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -222,20 +222,14 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
 		   unsigned int symindex,
 		   unsigned int relsec,
 		   struct module *me)
-{
-	return __apply_relocate_add(sechdrs, strtab, symindex, relsec, me, memcpy);
-}
-
-int klp_apply_relocate_add(Elf64_Shdr *sechdrs,
-		   const char *strtab,
-		   unsigned int symindex,
-		   unsigned int relsec,
-		   struct module *me)
 {
 	int ret;
+	bool early = me->state != MODULE_STATE_LIVE;
+
+	ret = __apply_relocate_add(sechdrs, strtab, symindex, relsec, me,
+				   early ? memcpy : text_poke);
 
-	ret = __apply_relocate_add(sechdrs, strtab, symindex, relsec, me, text_poke);
-	if (!ret)
+	if (!ret && !early)
 		text_poke_sync();
 
 	return ret;
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index cc18f945bdb2..b00170696db2 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -214,12 +214,7 @@ void *klp_shadow_get_or_alloc(void *obj, unsigned long id,
 void klp_shadow_free(void *obj, unsigned long id, klp_shadow_dtor_t dtor);
 void klp_shadow_free_all(unsigned long id, klp_shadow_dtor_t dtor);
 
-
-extern int klp_apply_relocate_add(Elf64_Shdr *sechdrs,
-			      const char *strtab,
-			      unsigned int symindex,
-			      unsigned int relsec,
-			      struct module *me);
+int klp_write_relocations(struct module *mod, struct klp_object *obj);
 
 #else /* !CONFIG_LIVEPATCH */
 
@@ -229,6 +224,12 @@ static inline bool klp_patch_pending(struct task_struct *task) { return false; }
 static inline void klp_update_patch_state(struct task_struct *task) {}
 static inline void klp_copy_process(struct task_struct *child) {}
 
+static inline int klp_write_relocations(struct module *mod,
+					struct klp_object *obj)
+{
+	return 0;
+}
+
 #endif /* CONFIG_LIVEPATCH */
 
 #endif /* _LINUX_LIVEPATCH_H_ */
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 30395302a273..52eb91d0ee8d 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -256,27 +256,60 @@ static int klp_resolve_symbols(Elf_Shdr *relasec, struct module *pmod)
 	return 0;
 }
 
-int __weak klp_apply_relocate_add(Elf64_Shdr *sechdrs,
-			      const char *strtab,
-			      unsigned int symindex,
-			      unsigned int relsec,
-			      struct module *me)
-{
-	return apply_relocate_add(sechdrs, strtab, symindex, relsec, me);
-}
-
-static int klp_write_object_relocations(struct module *pmod,
-					struct klp_object *obj)
+/*
+ * This function is called for both vmlinux-specific and module-specific klp
+ * relocation sections:
+ *
+ * 1) When the klp module itself loads, the module code calls this function
+ *    to write vmlinux-specific klp relocations.  These relocations allow the
+ *    patched code/data to reference unexported vmlinux symbols.  They're
+ *    written as early as possible to ensure that other module init code
+ *    (e.g., jump_label_apply_nops) can access any non-exported vmlinux
+ *    symbols which might be referenced by the klp module's special sections.
+ *
+ * 2) When a to-be-patched module loads (or is already loaded when the
+ *    klp module loads), klp code calls this function to write klp relocations
+ *    which are specific to the module.  These relocations allow the patched
+ *    code/data to reference module symbols, both unexported and exported.
+ *    They also enable late module patching, which means the to-be-patched
+ *    module may be loaded *after* the klp module.
+ *
+ *    The following restrictions apply to module-specific relocation sections:
+ *
+ *    a) References to vmlinux symbols are not allowed.  Otherwise there might
+ *       be module init ordering issues, and crashes might occur in some of the
+ *       other kernel patching components like paravirt patching or jump
+ *       labels.  All references to vmlinux symbols should use either normal
+ *       relas (for exported symbols) or vmlinux-specific klp relas (for
+ *       unexported symbols).  This restriction is enforced in
+ *       klp_resolve_symbols().
+ *
+ *    b) Relocations to special sections like __jump_table and .altinstructions
+ *       aren't allowed.  In other words, there should never be a
+ *       .klp.rela.{module}.__jump_table section.  This will definitely cause
+ *       initialization ordering issues, as such special sections are processed
+ *       during the loading of the klp module itself, *not* the to-be-patched
+ *       module.  This means that, e.g., it's not currently possible to patch a
+ *       module function which uses a static key jump label, if you want to
+ *       have the replacement function also use the same static key.  In this
+ *       case, a non-static interface like static_key_enabled() can be used in
+ *       the new function instead.
+ *
+ *       On the other hand, a .klp.rela.vmlinux.__jump_table section is fine,
+ *       as it can be resolved early enough during the load of the klp module,
+ *       as described above.
+ */
+int klp_write_relocations(struct module *pmod, struct klp_object *obj)
 {
 	int i, cnt, ret = 0;
 	const char *objname, *secname;
 	char sec_objname[MODULE_NAME_LEN];
 	Elf_Shdr *sec;
 
-	if (WARN_ON(!klp_is_object_loaded(obj)))
+	if (WARN_ON(obj && !klp_is_object_loaded(obj)))
 		return -EINVAL;
 
-	objname = klp_is_module(obj) ? obj->name : "vmlinux";
+	objname = obj ? obj->name : "vmlinux";
 
 	/* For each klp relocation section */
 	for (i = 1; i < pmod->klp_info->hdr.e_shnum; i++) {
@@ -305,7 +338,7 @@ static int klp_write_object_relocations(struct module *pmod,
 		if (ret)
 			break;
 
-		ret = klp_apply_relocate_add(pmod->klp_info->sechdrs,
+		ret = apply_relocate_add(pmod->klp_info->sechdrs,
 					 pmod->core_kallsyms.strtab,
 					 pmod->klp_info->symndx, i, pmod);
 		if (ret)
@@ -733,16 +766,25 @@ static int klp_init_object_loaded(struct klp_patch *patch,
 	struct klp_func *func;
 	int ret;
 
-	mutex_lock(&text_mutex);
+	if (klp_is_module(obj)) {
+
+		/*
+		 * Only write module-specific relocations here.
+		 * vmlinux-specific relocations were already written during the
+		 * loading of the klp module.
+		 */
+
+		mutex_lock(&text_mutex);
+
+		ret = klp_write_relocations(patch->mod, obj);
+		if (ret) {
+			mutex_unlock(&text_mutex);
+			return ret;
+		}
 
-	ret = klp_write_object_relocations(patch->mod, obj);
-	if (ret) {
 		mutex_unlock(&text_mutex);
-		return ret;
 	}
 
-	mutex_unlock(&text_mutex);
-
 	klp_for_each_func(obj, func) {
 		ret = klp_find_object_symbol(obj->name, func->old_name,
 					     func->old_sympos,
diff --git a/kernel/module.c b/kernel/module.c
index fe5bd382759c..ff4347385f05 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2327,11 +2327,9 @@ static int apply_relocations(struct module *mod, const struct load_info *info)
 		if (!(info->sechdrs[infosec].sh_flags & SHF_ALLOC))
 			continue;
 
-		/* Livepatch relocation sections are applied by livepatch */
 		if (info->sechdrs[i].sh_flags & SHF_RELA_LIVEPATCH)
-			continue;
-
-		if (info->sechdrs[i].sh_type == SHT_REL)
+			err = klp_write_relocations(mod, NULL);
+		else if (info->sechdrs[i].sh_type == SHT_REL)
 			err = apply_relocate(info->sechdrs, info->strtab,
 					     info->index.sym, i, mod);
 		else if (info->sechdrs[i].sh_type == SHT_RELA)
@@ -3812,18 +3810,24 @@ static int load_module(struct load_info *info, const char __user *uargs,
 	/* Set up MODINFO_ATTR fields */
 	setup_modinfo(mod, info);
 
+	if (is_livepatch_module(mod)) {
+		err = copy_module_elf(mod, info);
+		if (err < 0)
+			goto free_modinfo;
+	}
+
 	/* Fix up syms, so that st_value is a pointer to location. */
 	err = simplify_symbols(mod, info);
 	if (err < 0)
-		goto free_modinfo;
+		goto free_elf_copy;
 
 	err = apply_relocations(mod, info);
 	if (err < 0)
-		goto free_modinfo;
+		goto free_elf_copy;
 
 	err = post_relocation(mod, info);
 	if (err < 0)
-		goto free_modinfo;
+		goto free_elf_copy;
 
 	flush_module_icache(mod);
 
@@ -3866,12 +3870,6 @@ static int load_module(struct load_info *info, const char __user *uargs,
 	if (err < 0)
 		goto coming_cleanup;
 
-	if (is_livepatch_module(mod)) {
-		err = copy_module_elf(mod, info);
-		if (err < 0)
-			goto sysfs_cleanup;
-	}
-
 	/* Get rid of temporary copy. */
 	free_copy(info);
 
@@ -3880,8 +3878,6 @@ static int load_module(struct load_info *info, const char __user *uargs,
 
 	return do_init_module(mod);
 
- sysfs_cleanup:
-	mod_sysfs_teardown(mod);
  coming_cleanup:
 	mod->state = MODULE_STATE_GOING;
 	destroy_params(mod->kp, mod->num_kp);
@@ -3901,6 +3897,8 @@ static int load_module(struct load_info *info, const char __user *uargs,
 	kfree(mod->args);
  free_arch_cleanup:
 	module_arch_cleanup(mod);
+ free_elf_copy:
+	free_module_elf(mod);
  free_modinfo:
 	free_modinfo(mod);
  free_unload:
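
For restriction (b) above, the workaround in the replacement function
would look something like this (hypothetical names, untested):

	#include <linux/jump_label.h>

	/* both resolved via ordinary module-specific klp relas: */
	extern struct static_key_false foo_key;
	extern void foo_slow_path(void);

	/*
	 * The original module function tests the key with
	 * static_branch_unlikely(&foo_key).  The jump label variant would
	 * need a .klp.rela.{module}.__jump_table section, which isn't
	 * allowed; reading the key with static_key_enabled() only needs an
	 * ordinary klp rela to foo_key.
	 */
	static void livepatch_foo(void)
	{
		if (static_key_enabled(&foo_key))
			foo_slow_path();
	}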
