Message-Id: <7c6e35df8180650e6d1147476e8b5d9fa8643edb.1598363608.git.christophe.leroy@csgroup.eu>
Date:   Tue, 25 Aug 2020 13:54:03 +0000 (UTC)
From:   Christophe Leroy <christophe.leroy@...roup.eu>
To:     Benjamin Herrenschmidt <benh@...nel.crashing.org>,
        Paul Mackerras <paulus@...ba.org>,
        Michael Ellerman <mpe@...erman.id.au>
Cc:     linux-kernel@...r.kernel.org, linuxppc-dev@...ts.ozlabs.org
Subject: [PATCH v1 5/9] powerpc/vdso: move to _install_special_mapping() and
 remove arch_vma_name()

Modelled on commit 2fea7f6c98f5 ("arm64: vdso: move to
_install_special_mapping and remove arch_vma_name").

Use the new _install_special_mapping() API added by
commit a62c34bd2a8a ("x86, mm: Improve _install_special_mapping
and fix x86 vdso naming"), which obsoletes install_special_mapping().

Also remove arch_vma_name(), as the VMA name is now provided by the new API.

Signed-off-by: Christophe Leroy <christophe.leroy@...roup.eu>
---
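For context, a minimal sketch (not part of the patch) of the pattern this
series converts the vDSO code to. Only struct vm_special_mapping and
_install_special_mapping() are real kernel API; the "[demo]" name,
demo_pages[] and map_demo() are hypothetical, for illustration only:

	#include <linux/cache.h>
	#include <linux/err.h>
	#include <linux/mm.h>
	#include <linux/mm_types.h>

	static struct page *demo_pages[2];	/* filled in at init time */

	static struct vm_special_mapping demo_spec __ro_after_init = {
		.name  = "[demo]",		/* shown in /proc/<pid>/maps */
		.pages = demo_pages,
	};

	static int map_demo(struct mm_struct *mm, unsigned long addr,
			    unsigned long len)
	{
		struct vm_area_struct *vma;

		/*
		 * _install_special_mapping() returns the new VMA (or an
		 * ERR_PTR), unlike install_special_mapping() which
		 * returned an int.
		 */
		vma = _install_special_mapping(mm, addr, len,
					       VM_READ | VM_EXEC |
					       VM_MAYREAD | VM_MAYWRITE |
					       VM_MAYEXEC,
					       &demo_spec);
		return IS_ERR(vma) ? PTR_ERR(vma) : 0;
	}

Because the VMA keeps a reference to the vm_special_mapping, the name is
reported directly when dumping the maps, so the per-arch arch_vma_name()
hook is no longer needed.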
 arch/powerpc/kernel/vdso.c | 59 +++++++++++++++++++-------------------
 1 file changed, 30 insertions(+), 29 deletions(-)

diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index bbb69832fd46..4ccfc0dc96b5 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -47,7 +47,6 @@
 
 static unsigned int vdso32_pages;
 static void *vdso32_kbase;
-static struct page **vdso32_pagelist;
 unsigned long vdso32_sigtramp;
 unsigned long vdso32_rt_sigtramp;
 
@@ -56,7 +55,6 @@ extern char vdso32_start, vdso32_end;
 extern char vdso64_start, vdso64_end;
 static void *vdso64_kbase = &vdso64_start;
 static unsigned int vdso64_pages;
-static struct page **vdso64_pagelist;
 #ifdef CONFIG_PPC64
 unsigned long vdso64_rt_sigtramp;
 #endif /* CONFIG_PPC64 */
@@ -117,6 +115,14 @@ struct lib64_elfinfo
 };
 
 
+static struct vm_special_mapping vdso32_spec __ro_after_init = {
+	.name = "[vdso]",
+};
+
+static struct vm_special_mapping vdso64_spec __ro_after_init = {
+	.name = "[vdso]",
+};
+
 /*
  * This is called from binfmt_elf, we create the special vma for the
  * vDSO and insert it into the mm struct tree
@@ -124,7 +130,8 @@ struct lib64_elfinfo
 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 {
 	struct mm_struct *mm = current->mm;
-	struct page **vdso_pagelist;
+	struct vm_special_mapping *vdso_spec;
+	struct vm_area_struct *vma;
 	unsigned long vdso_pages;
 	unsigned long vdso_base;
 	int rc;
@@ -133,11 +140,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 		return 0;
 
 	if (is_32bit_task()) {
-		vdso_pagelist = vdso32_pagelist;
+		vdso_spec = &vdso32_spec;
 		vdso_pages = vdso32_pages;
 		vdso_base = VDSO32_MBASE;
 	} else {
-		vdso_pagelist = vdso64_pagelist;
+		vdso_spec = &vdso64_spec;
 		vdso_pages = vdso64_pages;
 		/*
 		 * On 64bit we don't have a preferred map address. This
@@ -194,12 +201,12 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	 * It's fine to use that for setting breakpoints in the vDSO code
 	 * pages though.
 	 */
-	rc = install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
-				     VM_READ|VM_EXEC|
-				     VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
-				     vdso_pagelist);
-	if (rc) {
+	vma = _install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
+				       VM_READ | VM_EXEC | VM_MAYREAD |
+				       VM_MAYWRITE | VM_MAYEXEC, vdso_spec);
+	if (IS_ERR(vma)) {
 		current->mm->context.vdso_base = 0;
+		rc = PTR_ERR(vma);
 		goto fail_mmapsem;
 	}
 
@@ -211,15 +218,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	return rc;
 }
 
-const char *arch_vma_name(struct vm_area_struct *vma)
-{
-	if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso_base)
-		return "[vdso]";
-	return NULL;
-}
-
-
-
 #ifdef CONFIG_VDSO32
 static void * __init find_section32(Elf32_Ehdr *ehdr, const char *secname,
 				  unsigned long *size)
@@ -685,6 +683,7 @@ early_initcall(vdso_getcpu_init);
 static int __init vdso_init(void)
 {
 	int i;
+	struct page **pagelist;
 
 #ifdef CONFIG_PPC64
 	/*
@@ -740,27 +739,29 @@ static int __init vdso_init(void)
 
 	if (IS_ENABLED(CONFIG_VDSO32)) {
 		/* Make sure pages are in the correct state */
-		vdso32_pagelist = kcalloc(vdso32_pages + 1, sizeof(struct page *),
-					  GFP_KERNEL);
-		if (!vdso32_pagelist)
+		pagelist = kcalloc(vdso32_pages + 1, sizeof(struct page *), GFP_KERNEL);
+		if (!pagelist)
 			goto alloc_failed;
 
 		for (i = 0; i < vdso32_pages; i++)
-			vdso32_pagelist[i] = virt_to_page(vdso32_kbase + i * PAGE_SIZE);
+			pagelist[i] = virt_to_page(vdso32_kbase + i * PAGE_SIZE);
+
+		pagelist[i++] = virt_to_page(vdso_data);
 
-		vdso32_pagelist[i] = virt_to_page(vdso_data);
+		vdso32_spec.pages = pagelist;
 	}
 
 	if (IS_ENABLED(CONFIG_PPC64)) {
-		vdso64_pagelist = kcalloc(vdso64_pages + 1, sizeof(struct page *),
-					  GFP_KERNEL);
-		if (!vdso64_pagelist)
+		pagelist = kcalloc(vdso64_pages + 1, sizeof(struct page *), GFP_KERNEL);
+		if (!pagelist)
 			goto alloc_failed;
 
 		for (i = 0; i < vdso64_pages; i++)
-			vdso64_pagelist[i] = virt_to_page(vdso64_kbase + i * PAGE_SIZE);
+			pagelist[i] = virt_to_page(vdso64_kbase + i * PAGE_SIZE);
+
+		pagelist[i++] = virt_to_page(vdso_data);
 
-		vdso64_pagelist[i] = virt_to_page(vdso_data);
+		vdso64_spec.pages = pagelist;
 	}
 
 	smp_wmb();
-- 
2.25.0
