Message-ID: <1A02CCC5-8D03-44B0-A927-3BEC671C7744@zytor.com>
Date: Thu, 06 Mar 2025 06:14:27 -0800
From: "H. Peter Anvin" <hpa@...or.com>
To: Uros Bizjak <ubizjak@...il.com>, x86@...nel.org,
        linux-kernel@...r.kernel.org
CC: Thomas Gleixner <tglx@...utronix.de>, Ingo Molnar <mingo@...nel.org>,
        Borislav Petkov <bp@...en8.de>,
        Dave Hansen <dave.hansen@...ux.intel.com>
Subject: Re: [PATCH] x86/kexec: Merge x86_32 and x86_64 code using macros from asm.h

On March 5, 2025 8:26:37 AM PST, Uros Bizjak <ubizjak@...il.com> wrote:
>Merge common x86_32 and x86_64 code in crash_setup_regs()
>using macros from asm/asm.h.
>
>The compiled object files before and after the patch are unchanged.
>
>Signed-off-by: Uros Bizjak <ubizjak@...il.com>
>Cc: Thomas Gleixner <tglx@...utronix.de>
>Cc: Ingo Molnar <mingo@...nel.org>
>Cc: Borislav Petkov <bp@...en8.de>
>Cc: Dave Hansen <dave.hansen@...ux.intel.com>
>Cc: "H. Peter Anvin" <hpa@...or.com>
>---
> arch/x86/include/asm/asm.h   |  2 ++
> arch/x86/include/asm/kexec.h | 44 +++++++++++++++---------------------
> 2 files changed, 20 insertions(+), 26 deletions(-)
>
>diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
>index 975ae7a9397e..2bccc063d30b 100644
>--- a/arch/x86/include/asm/asm.h
>+++ b/arch/x86/include/asm/asm.h
>@@ -41,6 +41,8 @@
> #define _ASM_SUB	__ASM_SIZE(sub)
> #define _ASM_XADD	__ASM_SIZE(xadd)
> #define _ASM_MUL	__ASM_SIZE(mul)
>+#define _ASM_PUSHF	__ASM_SIZE(pushf)
>+#define _ASM_POP	__ASM_SIZE(pop)
> 
> #define _ASM_AX		__ASM_REG(ax)
> #define _ASM_BX		__ASM_REG(bx)
>diff --git a/arch/x86/include/asm/kexec.h b/arch/x86/include/asm/kexec.h
>index 8ad187462b68..56040ae6bda0 100644
>--- a/arch/x86/include/asm/kexec.h
>+++ b/arch/x86/include/asm/kexec.h
>@@ -18,6 +18,7 @@
> #include <linux/string.h>
> #include <linux/kernel.h>
> 
>+#include <asm/asm.h>
> #include <asm/page.h>
> #include <asm/ptrace.h>
> 
>@@ -71,29 +72,15 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
> 	if (oldregs) {
> 		memcpy(newregs, oldregs, sizeof(*newregs));
> 	} else {
>-#ifdef CONFIG_X86_32
>-		asm volatile("movl %%ebx,%0" : "=m"(newregs->bx));
>-		asm volatile("movl %%ecx,%0" : "=m"(newregs->cx));
>-		asm volatile("movl %%edx,%0" : "=m"(newregs->dx));
>-		asm volatile("movl %%esi,%0" : "=m"(newregs->si));
>-		asm volatile("movl %%edi,%0" : "=m"(newregs->di));
>-		asm volatile("movl %%ebp,%0" : "=m"(newregs->bp));
>-		asm volatile("movl %%eax,%0" : "=m"(newregs->ax));
>-		asm volatile("movl %%esp,%0" : "=m"(newregs->sp));
>-		asm volatile("movl %%ss, %%eax;" :"=a"(newregs->ss));
>-		asm volatile("movl %%cs, %%eax;" :"=a"(newregs->cs));
>-		asm volatile("movl %%ds, %%eax;" :"=a"(newregs->ds));
>-		asm volatile("movl %%es, %%eax;" :"=a"(newregs->es));
>-		asm volatile("pushfl; popl %0" :"=m"(newregs->flags));
>-#else
>-		asm volatile("movq %%rbx,%0" : "=m"(newregs->bx));
>-		asm volatile("movq %%rcx,%0" : "=m"(newregs->cx));
>-		asm volatile("movq %%rdx,%0" : "=m"(newregs->dx));
>-		asm volatile("movq %%rsi,%0" : "=m"(newregs->si));
>-		asm volatile("movq %%rdi,%0" : "=m"(newregs->di));
>-		asm volatile("movq %%rbp,%0" : "=m"(newregs->bp));
>-		asm volatile("movq %%rax,%0" : "=m"(newregs->ax));
>-		asm volatile("movq %%rsp,%0" : "=m"(newregs->sp));
>+		asm volatile(_ASM_MOV " %%" _ASM_BX ",%0" : "=m"(newregs->bx));
>+		asm volatile(_ASM_MOV " %%" _ASM_CX ",%0" : "=m"(newregs->cx));
>+		asm volatile(_ASM_MOV " %%" _ASM_DX ",%0" : "=m"(newregs->dx));
>+		asm volatile(_ASM_MOV " %%" _ASM_SI ",%0" : "=m"(newregs->si));
>+		asm volatile(_ASM_MOV " %%" _ASM_DI ",%0" : "=m"(newregs->di));
>+		asm volatile(_ASM_MOV " %%" _ASM_BP ",%0" : "=m"(newregs->bp));
>+		asm volatile(_ASM_MOV " %%" _ASM_AX ",%0" : "=m"(newregs->ax));
>+		asm volatile(_ASM_MOV " %%" _ASM_SP ",%0" : "=m"(newregs->sp));
>+#ifdef CONFIG_X86_64
> 		asm volatile("movq %%r8,%0" : "=m"(newregs->r8));
> 		asm volatile("movq %%r9,%0" : "=m"(newregs->r9));
> 		asm volatile("movq %%r10,%0" : "=m"(newregs->r10));
>@@ -102,10 +89,15 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
> 		asm volatile("movq %%r13,%0" : "=m"(newregs->r13));
> 		asm volatile("movq %%r14,%0" : "=m"(newregs->r14));
> 		asm volatile("movq %%r15,%0" : "=m"(newregs->r15));
>-		asm volatile("movl %%ss, %%eax;" :"=a"(newregs->ss));
>-		asm volatile("movl %%cs, %%eax;" :"=a"(newregs->cs));
>-		asm volatile("pushfq; popq %0" :"=m"(newregs->flags));
> #endif
>+		asm volatile("movl %%ss,%k0" :"=a"(newregs->ss));
>+		asm volatile("movl %%cs,%k0" :"=a"(newregs->cs));
>+#ifdef CONFIG_X86_32
>+		asm volatile("movl %%ds,%k0" :"=a"(newregs->ds));
>+		asm volatile("movl %%es,%k0" :"=a"(newregs->es));
>+#endif
>+		asm volatile(_ASM_PUSHF "\n\t"
>+			     _ASM_POP " %0" : "=m"(newregs->flags));
> 		newregs->ip = _THIS_IP_;
> 	}
> }
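
For readers less familiar with the asm.h helpers, here is a rough sketch of what one of the merged lines expands to. The exact macro definitions live in arch/x86/include/asm/asm.h; this only illustrates why the generated object code stays unchanged: __ASM_SIZE() appends the l/q operand-size suffix and __ASM_REG() selects the ebx- or rbx-style register name depending on CONFIG_X86_32 vs CONFIG_X86_64.

	asm volatile(_ASM_MOV " %%" _ASM_BX ",%0" : "=m"(newregs->bx));

	/*
	 * ... which preprocesses to the same statements the removed
	 * #ifdef branches spelled out explicitly:
	 *
	 *   32-bit: asm volatile("movl %%ebx,%0" : "=m"(newregs->bx));
	 *   64-bit: asm volatile("movq %%rbx,%0" : "=m"(newregs->bx));
	 */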

There is no reason to stick a size suffix on an instruction if it is unambiguous. pushf/popf normally never are, since stack operations are automatically promoted to the native word size.
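
In other words, the flags save could use the bare mnemonics and still assemble correctly on both targets, much as the existing native_save_fl() helper in asm/irqflags.h does with "pushf ; pop %0". A minimal sketch of that alternative (not part of the patch as posted):

	/*
	 * The assembler sizes stack operations to the native word size,
	 * so this becomes pushfl/popl on 32-bit and pushfq/popq on
	 * 64-bit without any __ASM_SIZE() wrapper.
	 */
	asm volatile("pushf\n\t"
		     "pop %0" : "=m"(newregs->flags));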
