Message-Id: <20220804152656.8840-2-knscarlet@gnuweeb.org>
Date: Thu, 4 Aug 2022 15:26:55 +0000
From: Kanna Scarlet <knscarlet@...weeb.org>
To: Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
Dave Hansen <dave.hansen@...ux.intel.com>,
"H. Peter Anvin" <hpa@...or.com>, x86@...nel.org
Cc: Kanna Scarlet <knscarlet@...weeb.org>,
Ard Biesheuvel <ardb@...nel.org>,
Bill Metzenthen <billm@...bpc.org.au>,
Brijesh Singh <brijesh.singh@....com>,
Joerg Roedel <jroedel@...e.de>,
Josh Poimboeuf <jpoimboe@...nel.org>,
"Kirill A. Shutemov" <kirill.shutemov@...ux.intel.com>,
Mark Rutland <mark.rutland@....com>,
Michael Roth <michael.roth@....com>,
Peter Zijlstra <peterz@...radead.org>,
Sean Christopherson <seanjc@...gle.com>,
Steven Rostedt <rostedt@...dmis.org>,
Ammar Faizi <ammarfaizi2@...weeb.org>,
GNU/Weeb Mailing List <gwml@...r.gnuweeb.org>,
Linux Kernel Mailing List <linux-kernel@...r.kernel.org>
Subject: [PATCH 1/1] x86: Replace mov $0, %reg with xor %reg, %reg
Replace mov $0, %reg with xor %reg, %reg: the xor form has a shorter
encoding, so the change saves a few bytes of code size.

For example:

  ba 00 00 00 00    movl $0x0,%edx    (5 bytes)
  31 d2             xorl %edx,%edx    (2 bytes)
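For reference, a minimal standalone snippet (a sketch, not part of this
patch; the file and label names below are made up for illustration) can
be assembled and disassembled to reproduce the size comparison:

  /* zero_demo.S - compare the two zeroing encodings (illustrative only) */
          .text
          .globl  zero_demo
  zero_demo:
          movl    $0, %edx                /* encodes as ba 00 00 00 00 (5 bytes) */
          xorl    %edx, %edx              /* encodes as 31 d2          (2 bytes) */
          ret

  $ as --64 -o zero_demo.o zero_demo.S
  $ objdump -d zero_demo.o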
Suggested-by: Ammar Faizi <ammarfaizi2@...weeb.org>
Signed-off-by: Kanna Scarlet <knscarlet@...weeb.org>
---
arch/x86/boot/compressed/head_64.S | 2 +-
arch/x86/boot/compressed/mem_encrypt.S | 2 +-
arch/x86/kernel/ftrace_32.S | 4 ++--
arch/x86/kernel/head_64.S | 2 +-
arch/x86/math-emu/div_Xsig.S | 2 +-
arch/x86/math-emu/reg_u_sub.S | 2 +-
6 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index d33f060900d2..39442e7f5993 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -666,7 +666,7 @@ SYM_CODE_START(trampoline_32bit_src)
movl %cr4, %eax
andl $X86_CR4_MCE, %eax
#else
- movl $0, %eax
+ xorl %eax, %eax
#endif
/* Enable PAE and LA57 (if required) paging modes */
diff --git a/arch/x86/boot/compressed/mem_encrypt.S b/arch/x86/boot/compressed/mem_encrypt.S
index a73e4d783cae..d1e4d3aa8395 100644
--- a/arch/x86/boot/compressed/mem_encrypt.S
+++ b/arch/x86/boot/compressed/mem_encrypt.S
@@ -111,7 +111,7 @@ SYM_CODE_START(startup32_vc_handler)
cmpl $0x72, 16(%esp)
jne .Lfail
- movl $0, %eax # Request CPUID[fn].EAX
+ xorl %eax, %eax # Request CPUID[fn].EAX
movl %ebx, %edx # CPUID fn
call sev_es_req_cpuid # Call helper
testl %eax, %eax # Check return code
diff --git a/arch/x86/kernel/ftrace_32.S b/arch/x86/kernel/ftrace_32.S
index a0ed0e4a2c0c..cff7decb58be 100644
--- a/arch/x86/kernel/ftrace_32.S
+++ b/arch/x86/kernel/ftrace_32.S
@@ -171,7 +171,7 @@ SYM_CODE_START(ftrace_graph_caller)
movl 3*4(%esp), %eax
/* Even with frame pointers, fentry doesn't have one here */
lea 4*4(%esp), %edx
- movl $0, %ecx
+ xorl %ecx, %ecx
subl $MCOUNT_INSN_SIZE, %eax
call prepare_ftrace_return
popl %edx
@@ -184,7 +184,7 @@ SYM_CODE_END(ftrace_graph_caller)
return_to_handler:
pushl %eax
pushl %edx
- movl $0, %eax
+ xorl %eax, %eax
call ftrace_return_to_handler
movl %eax, %ecx
popl %edx
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index d860d437631b..eeb06047e30a 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -184,7 +184,7 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
movq %cr4, %rcx
andl $X86_CR4_MCE, %ecx
#else
- movl $0, %ecx
+ xorl %ecx, %ecx
#endif
/* Enable PAE mode, PGE and LA57 */
diff --git a/arch/x86/math-emu/div_Xsig.S b/arch/x86/math-emu/div_Xsig.S
index 8c270ab415be..5767b4d23954 100644
--- a/arch/x86/math-emu/div_Xsig.S
+++ b/arch/x86/math-emu/div_Xsig.S
@@ -122,7 +122,7 @@ SYM_FUNC_START(div_Xsig)
movl XsigLL(%esi),%eax
rcrl %eax
movl %eax,FPU_accum_1
- movl $0,%eax
+ xorl %eax,%eax
rcrl %eax
movl %eax,FPU_accum_0
diff --git a/arch/x86/math-emu/reg_u_sub.S b/arch/x86/math-emu/reg_u_sub.S
index 4c900c29e4ff..130b49fa1ca2 100644
--- a/arch/x86/math-emu/reg_u_sub.S
+++ b/arch/x86/math-emu/reg_u_sub.S
@@ -212,7 +212,7 @@ L_must_be_zero:
L_shift_32:
movl %ebx,%eax
movl %edx,%ebx
- movl $0,%edx
+ xorl %edx,%edx
subw $32,EXP(%edi) /* Can get underflow here */
/* We need to shift left by 1 - 31 bits */
--
Kanna Scarlet