Message-Id: <20200131125403.654652162@infradead.org>
Date:   Fri, 31 Jan 2020 13:45:35 +0100
From:   Peter Zijlstra <peterz@...radead.org>
To:     Geert Uytterhoeven <geert@...ux-m68k.org>
Cc:     linux-m68k@...ts.linux-m68k.org, linux-kernel@...r.kernel.org,
        Will Deacon <will@...nel.org>,
        Peter Zijlstra <peterz@...radead.org>,
        Michael Schmitz <schmitzmic@...il.com>,
        Greg Ungerer <gerg@...ux-m68k.org>
Subject: [PATCH -v2 04/10] m68k,mm: Move the pointer table allocator to motorola.c

Only the Motorola MMU makes use of this allocator; including it for
Sun3/ColdFire is a waste of .text. Moving it also avoids build issues
when the code is subsequently made more Motorola specific.

Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
---
 arch/m68k/mm/memory.c   |  102 ------------------------------------------------
 arch/m68k/mm/motorola.c |  102 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 102 insertions(+), 102 deletions(-)
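
Note for reviewers: the functions being moved implement a small
mark-bits allocator. Each page holds PAGE_SIZE / PTABLE_SIZE == 8
pointer tables, and one byte of mark bits (stashed in page->index via
PD_MARKBITS()) records which of the eight slots are free. Below is a
minimal userspace sketch of the scheme; all names are illustrative,
not the kernel interface.

#include <stdio.h>
#include <stdlib.h>

#define SKETCH_PAGE_SIZE	4096
#define SKETCH_TABLE_SIZE	(SKETCH_PAGE_SIZE / 8)	/* 8 tables per page */

struct sketch_page {
	/* stands in for page->index; bit i set == slot i free */
	unsigned char markbits;
	unsigned char mem[SKETCH_PAGE_SIZE];
};

/* find-first-set allocation, mirroring the loop in get_pointer_table() */
static void *sketch_get_table(struct sketch_page *p)
{
	unsigned char tmp;
	unsigned int off;

	if (p->markbits == 0)
		return NULL;	/* page full; the kernel would allocate a fresh page */

	for (tmp = 1, off = 0; (p->markbits & tmp) == 0;
	     tmp <<= 1, off += SKETCH_TABLE_SIZE)
		;
	p->markbits &= ~tmp;	/* mark the slot busy */
	return p->mem + off;
}

static void sketch_free_table(struct sketch_page *p, void *table)
{
	unsigned int off = (unsigned char *)table - p->mem;
	unsigned char mask = 1 << (off / SKETCH_TABLE_SIZE);

	if (p->markbits & mask)
		abort();	/* double free, like the panic() in free_pointer_table() */
	p->markbits |= mask;
}

int main(void)
{
	struct sketch_page p = { .markbits = 0xff };	/* all 8 slots free */
	void *t = sketch_get_table(&p);

	printf("slot at offset %ld, mask now %#x\n",
	       (long)((unsigned char *)t - p.mem), p.markbits);
	sketch_free_table(&p, t);
	printf("freed, mask back to %#x\n", p.markbits);
	return 0;
}

The real allocator additionally remaps each page noncacheable via
mmu_page_ctor() and keeps pages with free slots at the front of
ptable_list; the sketch omits both.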

--- a/arch/m68k/mm/memory.c
+++ b/arch/m68k/mm/memory.c
@@ -22,108 +22,6 @@
 #include <asm/machdep.h>
 
 
-/* ++andreas: {get,free}_pointer_table rewritten to use unused fields from
-   struct page instead of separately kmalloced struct.  Stolen from
-   arch/sparc/mm/srmmu.c ... */
-
-typedef struct list_head ptable_desc;
-static LIST_HEAD(ptable_list);
-
-#define PD_PTABLE(page) ((ptable_desc *)&(virt_to_page(page)->lru))
-#define PD_PAGE(ptable) (list_entry(ptable, struct page, lru))
-#define PD_MARKBITS(dp) (*(unsigned char *)&PD_PAGE(dp)->index)
-
-#define PTABLE_SIZE (PTRS_PER_PMD * sizeof(pmd_t))
-
-void __init init_pointer_table(unsigned long ptable)
-{
-	ptable_desc *dp;
-	unsigned long page = ptable & PAGE_MASK;
-	unsigned char mask = 1 << ((ptable - page)/PTABLE_SIZE);
-
-	dp = PD_PTABLE(page);
-	if (!(PD_MARKBITS(dp) & mask)) {
-		PD_MARKBITS(dp) = 0xff;
-		list_add(dp, &ptable_list);
-	}
-
-	PD_MARKBITS(dp) &= ~mask;
-	pr_debug("init_pointer_table: %lx, %x\n", ptable, PD_MARKBITS(dp));
-
-	/* unreserve the page so it's possible to free that page */
-	__ClearPageReserved(PD_PAGE(dp));
-	init_page_count(PD_PAGE(dp));
-
-	return;
-}
-
-pmd_t *get_pointer_table (void)
-{
-	ptable_desc *dp = ptable_list.next;
-	unsigned char mask = PD_MARKBITS (dp);
-	unsigned char tmp;
-	unsigned int off;
-
-	/*
-	 * For a pointer table for a user process address space, a
-	 * table is taken from a page allocated for the purpose.  Each
-	 * page can hold 8 pointer tables.  The page is remapped in
-	 * virtual address space to be noncacheable.
-	 */
-	if (mask == 0) {
-		void *page;
-		ptable_desc *new;
-
-		if (!(page = (void *)get_zeroed_page(GFP_KERNEL)))
-			return NULL;
-
-		mmu_page_ctor(page);
-
-		new = PD_PTABLE(page);
-		PD_MARKBITS(new) = 0xfe;
-		list_add_tail(new, dp);
-
-		return (pmd_t *)page;
-	}
-
-	for (tmp = 1, off = 0; (mask & tmp) == 0; tmp <<= 1, off += PTABLE_SIZE)
-		;
-	PD_MARKBITS(dp) = mask & ~tmp;
-	if (!PD_MARKBITS(dp)) {
-		/* move to end of list */
-		list_move_tail(dp, &ptable_list);
-	}
-	return (pmd_t *) (page_address(PD_PAGE(dp)) + off);
-}
-
-int free_pointer_table (pmd_t *ptable)
-{
-	ptable_desc *dp;
-	unsigned long page = (unsigned long)ptable & PAGE_MASK;
-	unsigned char mask = 1 << (((unsigned long)ptable - page)/PTABLE_SIZE);
-
-	dp = PD_PTABLE(page);
-	if (PD_MARKBITS (dp) & mask)
-		panic ("table already free!");
-
-	PD_MARKBITS (dp) |= mask;
-
-	if (PD_MARKBITS(dp) == 0xff) {
-		/* all tables in page are free, free page */
-		list_del(dp);
-		mmu_page_dtor((void *)page);
-		free_page (page);
-		return 1;
-	} else if (ptable_list.next != dp) {
-		/*
-		 * move this descriptor to the front of the list, since
-		 * it has one or more free tables.
-		 */
-		list_move(dp, &ptable_list);
-	}
-	return 0;
-}
-
 /* invalidate page in both caches */
 static inline void clear040(unsigned long paddr)
 {
--- a/arch/m68k/mm/motorola.c
+++ b/arch/m68k/mm/motorola.c
@@ -67,6 +67,108 @@ void mmu_page_dtor(void *page)
 	cache_page(page);
 }
 
+/* ++andreas: {get,free}_pointer_table rewritten to use unused fields from
+   struct page instead of separately kmalloced struct.  Stolen from
+   arch/sparc/mm/srmmu.c ... */
+
+typedef struct list_head ptable_desc;
+static LIST_HEAD(ptable_list);
+
+#define PD_PTABLE(page) ((ptable_desc *)&(virt_to_page(page)->lru))
+#define PD_PAGE(ptable) (list_entry(ptable, struct page, lru))
+#define PD_MARKBITS(dp) (*(unsigned char *)&PD_PAGE(dp)->index)
+
+#define PTABLE_SIZE (PTRS_PER_PMD * sizeof(pmd_t))
+
+void __init init_pointer_table(unsigned long ptable)
+{
+	ptable_desc *dp;
+	unsigned long page = ptable & PAGE_MASK;
+	unsigned char mask = 1 << ((ptable - page)/PTABLE_SIZE);
+
+	dp = PD_PTABLE(page);
+	if (!(PD_MARKBITS(dp) & mask)) {
+		PD_MARKBITS(dp) = 0xff;
+		list_add(dp, &ptable_list);
+	}
+
+	PD_MARKBITS(dp) &= ~mask;
+	pr_debug("init_pointer_table: %lx, %x\n", ptable, PD_MARKBITS(dp));
+
+	/* unreserve the page so it's possible to free that page */
+	__ClearPageReserved(PD_PAGE(dp));
+	init_page_count(PD_PAGE(dp));
+
+	return;
+}
+
+pmd_t *get_pointer_table (void)
+{
+	ptable_desc *dp = ptable_list.next;
+	unsigned char mask = PD_MARKBITS (dp);
+	unsigned char tmp;
+	unsigned int off;
+
+	/*
+	 * For a pointer table for a user process address space, a
+	 * table is taken from a page allocated for the purpose.  Each
+	 * page can hold 8 pointer tables.  The page is remapped in
+	 * virtual address space to be noncacheable.
+	 */
+	if (mask == 0) {
+		void *page;
+		ptable_desc *new;
+
+		if (!(page = (void *)get_zeroed_page(GFP_KERNEL)))
+			return NULL;
+
+		mmu_page_ctor(page);
+
+		new = PD_PTABLE(page);
+		PD_MARKBITS(new) = 0xfe;
+		list_add_tail(new, dp);
+
+		return (pmd_t *)page;
+	}
+
+	for (tmp = 1, off = 0; (mask & tmp) == 0; tmp <<= 1, off += PTABLE_SIZE)
+		;
+	PD_MARKBITS(dp) = mask & ~tmp;
+	if (!PD_MARKBITS(dp)) {
+		/* move to end of list */
+		list_move_tail(dp, &ptable_list);
+	}
+	return (pmd_t *) (page_address(PD_PAGE(dp)) + off);
+}
+
+int free_pointer_table (pmd_t *ptable)
+{
+	ptable_desc *dp;
+	unsigned long page = (unsigned long)ptable & PAGE_MASK;
+	unsigned char mask = 1 << (((unsigned long)ptable - page)/PTABLE_SIZE);
+
+	dp = PD_PTABLE(page);
+	if (PD_MARKBITS (dp) & mask)
+		panic ("table already free!");
+
+	PD_MARKBITS (dp) |= mask;
+
+	if (PD_MARKBITS(dp) == 0xff) {
+		/* all tables in page are free, free page */
+		list_del(dp);
+		mmu_page_dtor((void *)page);
+		free_page (page);
+		return 1;
+	} else if (ptable_list.next != dp) {
+		/*
+		 * move this descriptor to the front of the list, since
+		 * it has one or more free tables.
+		 */
+		list_move(dp, &ptable_list);
+	}
+	return 0;
+}
+
 /* size of memory already mapped in head.S */
 extern __initdata unsigned long m68k_init_mapped_size;
 

