Message-Id: <20180306002538.1761-39-pasha.tatashin@oracle.com>
Date: Mon, 5 Mar 2018 19:25:11 -0500
From: Pavel Tatashin <pasha.tatashin@...cle.com>
To: steven.sistare@...cle.com, daniel.m.jordan@...cle.com,
linux-kernel@...r.kernel.org, Alexander.Levin@...rosoft.com,
dan.j.williams@...el.com, sathyanarayanan.kuppuswamy@...el.com,
pankaj.laxminarayan.bharadiya@...el.com, akuster@...sta.com,
cminyard@...sta.com, pasha.tatashin@...cle.com,
gregkh@...uxfoundation.org, stable@...r.kernel.org
Subject: [PATCH 4.1 38/65] kaiser: cleanups while trying for gold link
From: Hugh Dickins <hughd@...gle.com>

While trying to get our gold link to work, four cleanups:
matched the gdt_page declaration to its definition;
in fiddling unsuccessfully with PERCPU_INPUT(), lined up backslashes;
lined up the backslashes according to convention in percpu-defs.h;
deleted the unused irq_stack_pointer addition to irq_stack_union.

Sad to report that aligning backslashes does not appear to help gold
align to 8192: but while these did not help, they are worth keeping.

Signed-off-by: Hugh Dickins <hughd@...gle.com>
Acked-by: Jiri Kosina <jkosina@...e.cz>
Signed-off-by: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
(cherry picked from commit c52e55a2a82d3a44189810d35717d81cb4cf61d4)
Signed-off-by: Pavel Tatashin <pasha.tatashin@...cle.com>
---
arch/x86/include/asm/desc.h | 2 +-
arch/x86/include/asm/processor.h | 5 -----
include/asm-generic/vmlinux.lds.h | 18 ++++++++----------
include/linux/percpu-defs.h | 22 +++++++++++-----------
4 files changed, 20 insertions(+), 27 deletions(-)
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index 4e10d73cf018..880db91d9457 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -43,7 +43,7 @@ struct gdt_page {
struct desc_struct gdt[GDT_ENTRIES];
} __attribute__((aligned(PAGE_SIZE)));
-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
+DECLARE_PER_CPU_PAGE_ALIGNED_USER_MAPPED(struct gdt_page, gdt_page);
static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
{
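[Editor's note: the hunk above matches the declaration to the definition that the
KAISER series already uses for gdt_page. A minimal sketch of the pairing, for
orientation only; the abridged initializer and its placement in
arch/x86/kernel/cpu/common.c are assumptions of this sketch, not part of the patch:

	/* arch/x86/kernel/cpu/common.c (sketch): gdt_page is defined in the
	 * user-mapped, page-aligned per-cpu section, so the declaration in
	 * desc.h must use the matching _USER_MAPPED variant to name the
	 * same section.
	 */
	DEFINE_PER_CPU_PAGE_ALIGNED_USER_MAPPED(struct gdt_page, gdt_page) = {
		.gdt = {
			/* ... GDT_ENTRIES descriptors, elided ... */
		},
	};
]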
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 4abbd7d7cfb0..21c7924595e4 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -449,11 +449,6 @@ union irq_stack_union {
char gs_base[40];
unsigned long stack_canary;
};
-
- struct {
- char irq_stack_pointer[64];
- char unused[IRQ_STACK_SIZE - 64];
- };
};
DECLARE_PER_CPU_FIRST(union irq_stack_union, irq_stack_union) __visible;
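[Editor's note: with the unused irq_stack_pointer member removed, irq_stack_union
returns to its upstream shape. A sketch of the resulting 4.1-era layout, shown only
to make the deletion concrete:

	union irq_stack_union {
		char irq_stack[IRQ_STACK_SIZE];
		/*
		 * GCC hardcodes the stack canary at %gs:40; the 40-byte
		 * gs_base pad keeps stack_canary at that offset.
		 */
		struct {
			char gs_base[40];
			unsigned long stack_canary;
		};
	};
]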
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 88c02f965f2e..8e265917b0a9 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -715,16 +715,14 @@
*/
#define PERCPU_INPUT(cacheline) \
VMLINUX_SYMBOL(__per_cpu_start) = .; \
- \
- VMLINUX_SYMBOL(__per_cpu_user_mapped_start) = .; \
- *(.data..percpu..first) \
- . = ALIGN(cacheline); \
- *(.data..percpu..user_mapped) \
- *(.data..percpu..user_mapped..shared_aligned) \
- . = ALIGN(PAGE_SIZE); \
- *(.data..percpu..user_mapped..page_aligned) \
- VMLINUX_SYMBOL(__per_cpu_user_mapped_end) = .; \
- \
+ VMLINUX_SYMBOL(__per_cpu_user_mapped_start) = .; \
+ *(.data..percpu..first) \
+ . = ALIGN(cacheline); \
+ *(.data..percpu..user_mapped) \
+ *(.data..percpu..user_mapped..shared_aligned) \
+ . = ALIGN(PAGE_SIZE); \
+ *(.data..percpu..user_mapped..page_aligned) \
+ VMLINUX_SYMBOL(__per_cpu_user_mapped_end) = .; \
. = ALIGN(PAGE_SIZE); \
*(.data..percpu..page_aligned) \
. = ALIGN(cacheline); \
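[Editor's note: the re-indented block only moves backslashes; the
__per_cpu_user_mapped_start/end symbols it emits still bracket the per-cpu data
that KAISER maps into the shadow page tables. A sketch of how those symbols are
consumed elsewhere in the series; kaiser_init() and kaiser_add_user_map_early()
are taken from the other KAISER patches and their exact 4.1 form is an assumption
of this sketch:

	/* mm/kaiser.c (sketch): map each CPU's user-mapped per-cpu slice */
	extern char __per_cpu_user_mapped_start[], __per_cpu_user_mapped_end[];

	void __init kaiser_init(void)
	{
		int cpu;

		for_each_possible_cpu(cpu) {
			void *va = __per_cpu_user_mapped_start +
				   per_cpu_offset(cpu);
			unsigned long sz = __per_cpu_user_mapped_end -
					   __per_cpu_user_mapped_start;

			/* add this slice to the user (shadow) page tables */
			kaiser_add_user_map_early(va, sz, __PAGE_KERNEL);
		}
	}
]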
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index 141d0de913a9..7cecbdbf5c25 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -121,10 +121,10 @@
#define DEFINE_PER_CPU(type, name) \
DEFINE_PER_CPU_SECTION(type, name, "")
-#define DECLARE_PER_CPU_USER_MAPPED(type, name) \
+#define DECLARE_PER_CPU_USER_MAPPED(type, name) \
DECLARE_PER_CPU_SECTION(type, name, USER_MAPPED_SECTION)
-#define DEFINE_PER_CPU_USER_MAPPED(type, name) \
+#define DEFINE_PER_CPU_USER_MAPPED(type, name) \
DEFINE_PER_CPU_SECTION(type, name, USER_MAPPED_SECTION)
/*
@@ -156,11 +156,11 @@
DEFINE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
____cacheline_aligned_in_smp
-#define DECLARE_PER_CPU_SHARED_ALIGNED_USER_MAPPED(type, name) \
+#define DECLARE_PER_CPU_SHARED_ALIGNED_USER_MAPPED(type, name) \
DECLARE_PER_CPU_SECTION(type, name, USER_MAPPED_SECTION PER_CPU_SHARED_ALIGNED_SECTION) \
____cacheline_aligned_in_smp
-#define DEFINE_PER_CPU_SHARED_ALIGNED_USER_MAPPED(type, name) \
+#define DEFINE_PER_CPU_SHARED_ALIGNED_USER_MAPPED(type, name) \
DEFINE_PER_CPU_SECTION(type, name, USER_MAPPED_SECTION PER_CPU_SHARED_ALIGNED_SECTION) \
____cacheline_aligned_in_smp
@@ -185,18 +185,18 @@
/*
* Declaration/definition used for per-CPU variables that must be page aligned and need to be mapped in user mode.
*/
-#define DECLARE_PER_CPU_PAGE_ALIGNED_USER_MAPPED(type, name) \
- DECLARE_PER_CPU_SECTION(type, name, USER_MAPPED_SECTION"..page_aligned") \
- __aligned(PAGE_SIZE)
+#define DECLARE_PER_CPU_PAGE_ALIGNED_USER_MAPPED(type, name) \
+ DECLARE_PER_CPU_SECTION(type, name, USER_MAPPED_SECTION"..page_aligned") \
+ __aligned(PAGE_SIZE)
-#define DEFINE_PER_CPU_PAGE_ALIGNED_USER_MAPPED(type, name) \
- DEFINE_PER_CPU_SECTION(type, name, USER_MAPPED_SECTION"..page_aligned") \
- __aligned(PAGE_SIZE)
+#define DEFINE_PER_CPU_PAGE_ALIGNED_USER_MAPPED(type, name) \
+ DEFINE_PER_CPU_SECTION(type, name, USER_MAPPED_SECTION"..page_aligned") \
+ __aligned(PAGE_SIZE)
/*
* Declaration/definition used for per-CPU variables that must be read mostly.
*/
-#define DECLARE_PER_CPU_READ_MOSTLY(type, name) \
+#define DECLARE_PER_CPU_READ_MOSTLY(type, name) \
DECLARE_PER_CPU_SECTION(type, name, "..read_mostly")
#define DEFINE_PER_CPU_READ_MOSTLY(type, name) \
--
2.16.2