Message-Id: <20170421141305.25180-6-jslaby@suse.cz>
Date: Fri, 21 Apr 2017 16:12:42 +0200
From: Jiri Slaby <jslaby@suse.cz>
To: mingo@redhat.com
Cc: tglx@linutronix.de, hpa@zytor.com, x86@kernel.org,
	jpoimboe@redhat.com, linux-kernel@vger.kernel.org,
	Jiri Slaby <jslaby@suse.cz>
Subject: [PATCH v3 06/29] x86: assembly, annotate functions by ENTRY, not GLOBAL

GLOBAL is meant for global symbols, but not for functions. Use ENTRY,
which is dedicated to global functions:
* it aligns the function properly
* it is expected to have a corresponding ENDPROC too

startup_64, which uses .globl explicitly, is converted too.

Besides that, x86's custom GLOBAL macro is going to die very soon.
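
For reference, ENTRY/ENDPROC versus x86's GLOBAL look roughly like this
(a paraphrase of include/linux/linkage.h and arch/x86/include/asm/linkage.h
of this era; the real definitions use ASM_NL as the statement separator):

	/* x86's GLOBAL: a global label only (no alignment, no type) */
	#define GLOBAL(name)	\
		.globl name;	\
	name:

	/* generic ENTRY: also aligns the function entry */
	#define ENTRY(name)	\
		.globl name;	\
		ALIGN;		\
	name:

	/* ENDPROC: marks the symbol as a function; END sets its size */
	#define ENDPROC(name)		\
		.type name, @function;	\
		END(name)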
Signed-off-by: Jiri Slaby <jslaby@suse.cz>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: <x86@kernel.org>
---
 arch/x86/boot/copy.S             | 8 ++++----
 arch/x86/boot/pmjump.S           | 4 ++--
 arch/x86/entry/entry_64_compat.S | 3 +--
 arch/x86/kernel/ftrace_64.S      | 2 +-
 arch/x86/kernel/head_64.S        | 5 +++--
 5 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/arch/x86/boot/copy.S b/arch/x86/boot/copy.S
index 15d9f74b0008..030a7bde51da 100644
--- a/arch/x86/boot/copy.S
+++ b/arch/x86/boot/copy.S
@@ -17,7 +17,7 @@
 	.code16
 	.text
 
-GLOBAL(memcpy)
+ENTRY(memcpy)
 	pushw	%si
 	pushw	%di
 	movw	%ax, %di
@@ -33,7 +33,7 @@ GLOBAL(memcpy)
 	retl
 ENDPROC(memcpy)
 
-GLOBAL(memset)
+ENTRY(memset)
 	pushw	%di
 	movw	%ax, %di
 	movzbl	%dl, %eax
@@ -48,7 +48,7 @@ GLOBAL(memset)
 	retl
 ENDPROC(memset)
 
-GLOBAL(copy_from_fs)
+ENTRY(copy_from_fs)
 	pushw	%ds
 	pushw	%fs
 	popw	%ds
@@ -57,7 +57,7 @@ GLOBAL(copy_from_fs)
 	retl
 ENDPROC(copy_from_fs)
 
-GLOBAL(copy_to_fs)
+ENTRY(copy_to_fs)
 	pushw	%es
 	pushw	%fs
 	popw	%es
diff --git a/arch/x86/boot/pmjump.S b/arch/x86/boot/pmjump.S
index 3e0edc6d2a20..6528f78a79b5 100644
--- a/arch/x86/boot/pmjump.S
+++ b/arch/x86/boot/pmjump.S
@@ -23,7 +23,7 @@
 /*
  * void protected_mode_jump(u32 entrypoint, u32 bootparams);
  */
-GLOBAL(protected_mode_jump)
+ENTRY(protected_mode_jump)
 	movl	%edx, %esi		# Pointer to boot_params table
 
 	xorl	%ebx, %ebx
@@ -48,7 +48,7 @@ ENDPROC(protected_mode_jump)
 
 	.code32
 	.section ".text32","ax"
-GLOBAL(in_pm32)
+ENTRY(in_pm32)
 	# Set up data segments for flat 32-bit mode
 	movl	%ecx, %ds
 	movl	%ecx, %es
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index 14dc2f259e2f..ba6af2fba6a7 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -342,8 +342,7 @@ ENTRY(entry_INT80_compat)
 	jmp	restore_regs_and_iret
 ENDPROC(entry_INT80_compat)
 
-	ALIGN
-GLOBAL(stub32_clone)
+ENTRY(stub32_clone)
 	/*
 	 * The 32-bit clone ABI is: clone(..., int tls_val, int *child_tidptr).
 	 * The 64-bit clone ABI is: clone(..., int *child_tidptr, int tls_val).
diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S
index a915729c0246..76c774a5e792 100644
--- a/arch/x86/kernel/ftrace_64.S
+++ b/arch/x86/kernel/ftrace_64.S
@@ -314,7 +314,7 @@ ENTRY(ftrace_graph_caller)
 	retq
 ENDPROC(ftrace_graph_caller)
 
-GLOBAL(return_to_handler)
+ENTRY(return_to_handler)
 	subq	$24, %rsp
 
 	/* Save the return values */
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index ac9d327d2e42..e1da48448cc0 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -46,8 +46,7 @@ L3_START_KERNEL = pud_index(__START_KERNEL_map)
 	.text
 	__HEAD
 	.code64
-	.globl startup_64
-startup_64:
+ENTRY(startup_64)
 	/*
 	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
 	 * and someone has loaded an identity mapped page table
@@ -168,6 +167,8 @@ startup_64:
 .Lskip_fixup:
 	movq	$(early_level4_pgt - __START_KERNEL_map), %rax
 	jmp	1f
+ENDPROC(startup_64)
+
 ENTRY(secondary_startup_64)
 	/*
 	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
--
2.12.2
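
A sketch of what the startup_64 conversion expands to, assuming the
generic ENTRY definition above and x86's __ALIGN of the time
(.p2align 4, 0x90): besides gaining an ENDPROC, the entry point is now
aligned to 16 bytes, padded with NOPs:

	.globl startup_64
	.p2align 4, 0x90
startup_64: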