Message-Id: <20170421141305.25180-25-jslaby@suse.cz>
Date: Fri, 21 Apr 2017 16:13:01 +0200
From: Jiri Slaby <jslaby@...e.cz>
To: mingo@...hat.com
Cc: tglx@...utronix.de, hpa@...or.com, x86@...nel.org,
jpoimboe@...hat.com, linux-kernel@...r.kernel.org,
Jiri Slaby <jslaby@...e.cz>,
Matt Fleming <matt@...eblueprint.co.uk>,
Ard Biesheuvel <ard.biesheuvel@...aro.org>,
linux-efi@...r.kernel.org, xen-devel@...ts.xenproject.org
Subject: [PATCH v3 25/29] x86: assembly, make some functions local
There are a couple of assembly functions which are invoked only locally
in the file where they are defined. In C, we would mark them "static".
Annotate them here using SYM_FUNC_START_LOCAL (and switch their ENDPROC
to SYM_FUNC_END too).
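For illustration, every site in this patch follows the same pattern (a
minimal sketch using efi_exit32 as a stand-in for any of the affected
functions; the macro expansions described in the comments are
approximate, see linkage.h in this series for the authoritative
definitions):

	/* Before: ENTRY emits .globl plus alignment, so the symbol is
	 * visible to the linker even though it has no external callers.
	 */
	ENTRY(efi_exit32)
		movq	func_rt_ptr(%rip), %rax
		push	%rax
		mov	%rdi, %rax
		ret
	ENDPROC(efi_exit32)

	/* After: SYM_FUNC_START_LOCAL aligns and defines the symbol
	 * without .globl, keeping it local to the object file, while
	 * SYM_FUNC_END still sets the ELF type and size, roughly:
	 *
	 *	.type efi_exit32, @function
	 *	.size efi_exit32, . - efi_exit32
	 */
	SYM_FUNC_START_LOCAL(efi_exit32)
		movq	func_rt_ptr(%rip), %rax
		push	%rax
		mov	%rdi, %rax
		ret
	SYM_FUNC_END(efi_exit32)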
Signed-off-by: Jiri Slaby <jslaby@...e.cz>
Cc: "H. Peter Anvin" <hpa@...or.com>
Cc: Thomas Gleixner <tglx@...utronix.de>
Cc: Ingo Molnar <mingo@...hat.com>
Cc: x86@...nel.org
Cc: Matt Fleming <matt@...eblueprint.co.uk>
Cc: Ard Biesheuvel <ard.biesheuvel@...aro.org>
Cc: linux-efi@...r.kernel.org
Cc: xen-devel@...ts.xenproject.org
---
arch/x86/boot/compressed/efi_thunk_64.S | 8 ++++----
arch/x86/boot/pmjump.S | 4 ++--
arch/x86/entry/entry_64.S | 25 +++++++++++++------------
arch/x86/lib/copy_page_64.S | 4 ++--
arch/x86/lib/memcpy_64.S | 12 ++++++------
arch/x86/lib/memset_64.S | 8 ++++----
arch/x86/platform/efi/efi_thunk_64.S | 12 ++++++------
arch/x86/xen/xen-pvh.S | 4 ++--
8 files changed, 39 insertions(+), 38 deletions(-)
diff --git a/arch/x86/boot/compressed/efi_thunk_64.S b/arch/x86/boot/compressed/efi_thunk_64.S
index 86528f120962..c072711d8d62 100644
--- a/arch/x86/boot/compressed/efi_thunk_64.S
+++ b/arch/x86/boot/compressed/efi_thunk_64.S
@@ -98,12 +98,12 @@ ENTRY(efi64_thunk)
ret
ENDPROC(efi64_thunk)
-ENTRY(efi_exit32)
+SYM_FUNC_START_LOCAL(efi_exit32)
movq func_rt_ptr(%rip), %rax
push %rax
mov %rdi, %rax
ret
-ENDPROC(efi_exit32)
+SYM_FUNC_END(efi_exit32)
.code32
/*
@@ -111,7 +111,7 @@ ENDPROC(efi_exit32)
*
* The stack should represent the 32-bit calling convention.
*/
-ENTRY(efi_enter32)
+SYM_FUNC_START_LOCAL(efi_enter32)
movl $__KERNEL_DS, %eax
movl %eax, %ds
movl %eax, %es
@@ -171,7 +171,7 @@ ENTRY(efi_enter32)
btsl $X86_CR0_PG_BIT, %eax
movl %eax, %cr0
lret
-ENDPROC(efi_enter32)
+SYM_FUNC_END(efi_enter32)
.data
.balign 8
diff --git a/arch/x86/boot/pmjump.S b/arch/x86/boot/pmjump.S
index 6528f78a79b5..da86f4df8ffb 100644
--- a/arch/x86/boot/pmjump.S
+++ b/arch/x86/boot/pmjump.S
@@ -48,7 +48,7 @@ ENDPROC(protected_mode_jump)
.code32
.section ".text32","ax"
-ENTRY(in_pm32)
+SYM_FUNC_START_LOCAL(in_pm32)
# Set up data segments for flat 32-bit mode
movl %ecx, %ds
movl %ecx, %es
@@ -74,4 +74,4 @@ ENTRY(in_pm32)
lldt %cx
jmpl *%eax # Jump to the 32-bit entrypoint
-ENDPROC(in_pm32)
+SYM_FUNC_END(in_pm32)
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 213127a44c7c..ab71baad00fb 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -323,7 +323,7 @@ opportunistic_sysret_failed:
jmp restore_c_regs_and_iret
ENDPROC(entry_SYSCALL_64)
-ENTRY(stub_ptregs_64)
+SYM_FUNC_START_LOCAL(stub_ptregs_64)
/*
* Syscalls marked as needing ptregs land here.
* If we are on the fast path, we need to save the extra regs,
@@ -347,7 +347,7 @@ ENTRY(stub_ptregs_64)
1:
jmp *%rax /* Called from C */
-ENDPROC(stub_ptregs_64)
+SYM_FUNC_END(stub_ptregs_64)
.macro ptregs_stub func
ENTRY(ptregs_\func)
@@ -918,7 +918,8 @@ idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
* existing activation in its critical region -- if so, we pop the current
* activation and restart the handler using the previous one.
*/
-ENTRY(xen_do_hypervisor_callback) /* do_hypervisor_callback(struct *pt_regs) */
+/* do_hypervisor_callback(struct *pt_regs) */
+SYM_FUNC_START_LOCAL(xen_do_hypervisor_callback)
/*
* Since we don't modify %rdi, evtchn_do_upall(struct *pt_regs) will
@@ -936,7 +937,7 @@ ENTRY(xen_do_hypervisor_callback) /* do_hypervisor_callback(struct *pt_regs) */
call xen_maybe_preempt_hcall
#endif
jmp error_exit
-ENDPROC(xen_do_hypervisor_callback)
+SYM_FUNC_END(xen_do_hypervisor_callback)
/*
* Hypervisor uses this for application faults while it executes.
@@ -1020,7 +1021,7 @@ idtentry machine_check has_error_code=0 paranoid=1 do_sym=*machine_check_vec
* Use slow, but surefire "are we in kernel?" check.
* Return: ebx=0: need swapgs on exit, ebx=1: otherwise
*/
-ENTRY(paranoid_entry)
+SYM_FUNC_START_LOCAL(paranoid_entry)
cld
SAVE_C_REGS 8
SAVE_EXTRA_REGS 8
@@ -1033,7 +1034,7 @@ ENTRY(paranoid_entry)
SWAPGS
xorl %ebx, %ebx
1: ret
-ENDPROC(paranoid_entry)
+SYM_FUNC_END(paranoid_entry)
/*
* "Paranoid" exit path from exception stack. This is invoked
@@ -1047,7 +1048,7 @@ ENDPROC(paranoid_entry)
*
* On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it)
*/
-ENTRY(paranoid_exit)
+SYM_FUNC_START_LOCAL(paranoid_exit)
DISABLE_INTERRUPTS(CLBR_ANY)
TRACE_IRQS_OFF_DEBUG
testl %ebx, %ebx /* swapgs needed? */
@@ -1062,13 +1063,13 @@ paranoid_exit_restore:
RESTORE_C_REGS
REMOVE_PT_GPREGS_FROM_STACK 8
INTERRUPT_RETURN
-ENDPROC(paranoid_exit)
+SYM_FUNC_END(paranoid_exit)
/*
* Save all registers in pt_regs, and switch gs if needed.
* Return: EBX=0: came from user mode; EBX=1: otherwise
*/
-ENTRY(error_entry)
+SYM_FUNC_START_LOCAL(error_entry)
cld
SAVE_C_REGS 8
SAVE_EXTRA_REGS 8
@@ -1144,7 +1145,7 @@ ENTRY(error_entry)
mov %rax, %rsp
decl %ebx
jmp .Lerror_entry_from_usermode_after_swapgs
-ENDPROC(error_entry)
+SYM_FUNC_END(error_entry)
/*
@@ -1152,13 +1153,13 @@ ENDPROC(error_entry)
* 1: already in kernel mode, don't need SWAPGS
* 0: user gsbase is loaded, we need SWAPGS and standard preparation for return to usermode
*/
-ENTRY(error_exit)
+SYM_FUNC_START_LOCAL(error_exit)
DISABLE_INTERRUPTS(CLBR_ANY)
TRACE_IRQS_OFF
testl %ebx, %ebx
jnz retint_kernel
jmp retint_user
-ENDPROC(error_exit)
+SYM_FUNC_END(error_exit)
/* Runs on exception stack */
ENTRY(nmi)
diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
index e8508156c99d..e1ee50bc161a 100644
--- a/arch/x86/lib/copy_page_64.S
+++ b/arch/x86/lib/copy_page_64.S
@@ -20,7 +20,7 @@ ENTRY(copy_page)
ENDPROC(copy_page)
EXPORT_SYMBOL(copy_page)
-ENTRY(copy_page_regs)
+SYM_FUNC_START_LOCAL(copy_page_regs)
subq $2*8, %rsp
movq %rbx, (%rsp)
movq %r12, 1*8(%rsp)
@@ -85,4 +85,4 @@ ENTRY(copy_page_regs)
movq 1*8(%rsp), %r12
addq $2*8, %rsp
ret
-ENDPROC(copy_page_regs)
+SYM_FUNC_END(copy_page_regs)
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 4911b1c61aa8..728703c47d58 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -27,7 +27,7 @@
* rax original destination
*/
SYM_FUNC_START_ALIAS(__memcpy)
-ENTRY(memcpy)
+SYM_FUNC_START_LOCAL(memcpy)
ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \
"jmp memcpy_erms", X86_FEATURE_ERMS
@@ -39,7 +39,7 @@ ENTRY(memcpy)
movl %edx, %ecx
rep movsb
ret
-ENDPROC(memcpy)
+SYM_FUNC_END(memcpy)
SYM_FUNC_END_ALIAS(__memcpy)
EXPORT_SYMBOL(memcpy)
EXPORT_SYMBOL(__memcpy)
@@ -48,14 +48,14 @@ EXPORT_SYMBOL(__memcpy)
* memcpy_erms() - enhanced fast string memcpy. This is faster and
* simpler than memcpy. Use memcpy_erms when possible.
*/
-ENTRY(memcpy_erms)
+SYM_FUNC_START_LOCAL(memcpy_erms)
movq %rdi, %rax
movq %rdx, %rcx
rep movsb
ret
-ENDPROC(memcpy_erms)
+SYM_FUNC_END(memcpy_erms)
-ENTRY(memcpy_orig)
+SYM_FUNC_START_LOCAL(memcpy_orig)
movq %rdi, %rax
cmpq $0x20, %rdx
@@ -180,7 +180,7 @@ ENTRY(memcpy_orig)
.Lend:
retq
-ENDPROC(memcpy_orig)
+SYM_FUNC_END(memcpy_orig)
#ifndef CONFIG_UML
/*
diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
index 0d3a1d341e60..c63ae9987612 100644
--- a/arch/x86/lib/memset_64.S
+++ b/arch/x86/lib/memset_64.S
@@ -58,16 +58,16 @@ EXPORT_SYMBOL(__memset)
*
* rax original destination
*/
-ENTRY(memset_erms)
+SYM_FUNC_START_LOCAL(memset_erms)
movq %rdi,%r9
movb %sil,%al
movq %rdx,%rcx
rep stosb
movq %r9,%rax
ret
-ENDPROC(memset_erms)
+SYM_FUNC_END(memset_erms)
-ENTRY(memset_orig)
+SYM_FUNC_START_LOCAL(memset_orig)
movq %rdi,%r10
/* expand byte value */
@@ -138,4 +138,4 @@ ENTRY(memset_orig)
subq %r8,%rdx
jmp .Lafter_bad_alignment
.Lfinal:
-ENDPROC(memset_orig)
+SYM_FUNC_END(memset_orig)
diff --git a/arch/x86/platform/efi/efi_thunk_64.S b/arch/x86/platform/efi/efi_thunk_64.S
index ff85d28c50f2..d18697df1fe9 100644
--- a/arch/x86/platform/efi/efi_thunk_64.S
+++ b/arch/x86/platform/efi/efi_thunk_64.S
@@ -66,7 +66,7 @@ ENDPROC(efi64_thunk)
*
* This function must be invoked with a 1:1 mapped stack.
*/
-ENTRY(__efi64_thunk)
+SYM_FUNC_START_LOCAL(__efi64_thunk)
movl %ds, %eax
push %rax
movl %es, %eax
@@ -113,14 +113,14 @@ ENTRY(__efi64_thunk)
or %rcx, %rax
1:
ret
-ENDPROC(__efi64_thunk)
+SYM_FUNC_END(__efi64_thunk)
-ENTRY(efi_exit32)
+SYM_FUNC_START_LOCAL(efi_exit32)
movq func_rt_ptr(%rip), %rax
push %rax
mov %rdi, %rax
ret
-ENDPROC(efi_exit32)
+SYM_FUNC_END(efi_exit32)
.code32
/*
@@ -128,7 +128,7 @@ ENDPROC(efi_exit32)
*
* The stack should represent the 32-bit calling convention.
*/
-ENTRY(efi_enter32)
+SYM_FUNC_START_LOCAL(efi_enter32)
movl $__KERNEL_DS, %eax
movl %eax, %ds
movl %eax, %es
@@ -144,7 +144,7 @@ ENTRY(efi_enter32)
pushl %eax
lret
-ENDPROC(efi_enter32)
+SYM_FUNC_END(efi_enter32)
.data
.balign 8
diff --git a/arch/x86/xen/xen-pvh.S b/arch/x86/xen/xen-pvh.S
index fa5ba8565646..469102d9c812 100644
--- a/arch/x86/xen/xen-pvh.S
+++ b/arch/x86/xen/xen-pvh.S
@@ -54,7 +54,7 @@
 * charge of setting up its own stack, GDT and IDT.
*/
-ENTRY(pvh_start_xen)
+SYM_FUNC_START_LOCAL(pvh_start_xen)
cld
lgdt (_pa(gdt))
@@ -133,7 +133,7 @@ ENTRY(pvh_start_xen)
ljmp $__BOOT_CS, $_pa(startup_32)
#endif
-ENDPROC(pvh_start_xen)
+SYM_FUNC_END(pvh_start_xen)
.section ".init.data","aw"
.balign 8
--
2.12.2