Message-ID: <163938901816.23020.10705032850093391722.tip-bot2@tip-bot2>
Date: Mon, 13 Dec 2021 09:50:18 -0000
From: "tip-bot2 for Peter Zijlstra" <tip-bot2@...utronix.de>
To: linux-tip-commits@...r.kernel.org
Cc: "Peter Zijlstra (Intel)" <peterz@...radead.org>,
Josh Poimboeuf <jpoimboe@...hat.com>, x86@...nel.org,
linux-kernel@...r.kernel.org
Subject: [tip: x86/core] x86/uaccess: Remove .fixup usage

The following commit has been merged into the x86/core branch of tip:

Commit-ID: 99641e094d6ccf547b3eba833aea9a34fdf5681e
Gitweb: https://git.kernel.org/tip/99641e094d6ccf547b3eba833aea9a34fdf5681e
Author: Peter Zijlstra <peterz@...radead.org>
AuthorDate: Wed, 10 Nov 2021 11:01:12 +01:00
Committer: Peter Zijlstra <peterz@...radead.org>
CommitterDate: Sat, 11 Dec 2021 09:09:47 +01:00

x86/uaccess: Remove .fixup usage

For the !CC_HAS_ASM_GOTO_OUTPUT case (a.k.a. the legacy code path), remove
the .fixup usage by employing both EX_TYPE_EFAULT_REG and the
EX_FLAG_CLEAR_* flags.  As was already done for the X86_32 version of
__get_user_asm_u64(), use the "a" register for output, specifically so
that we can use EX_FLAG_CLEAR_AX.
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
Reviewed-by: Josh Poimboeuf <jpoimboe@...hat.com>
Link: https://lore.kernel.org/r/20211110101325.485154848@infradead.org
---
arch/x86/include/asm/uaccess.h | 39 ++++++++++++++-------------------
 1 file changed, 17 insertions(+), 22 deletions(-)

diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 33a6840..ac6233a 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -351,24 +351,22 @@ do { \
"1: movl %[lowbits],%%eax\n" \
"2: movl %[highbits],%%edx\n" \
"3:\n" \
- ".section .fixup,\"ax\"\n" \
- "4: mov %[efault],%[errout]\n" \
- " xorl %%eax,%%eax\n" \
- " xorl %%edx,%%edx\n" \
- " jmp 3b\n" \
- ".previous\n" \
- _ASM_EXTABLE_UA(1b, 4b) \
- _ASM_EXTABLE_UA(2b, 4b) \
+ _ASM_EXTABLE_TYPE_REG(1b, 3b, EX_TYPE_EFAULT_REG | \
+ EX_FLAG_CLEAR_AX_DX, \
+ %[errout]) \
+ _ASM_EXTABLE_TYPE_REG(2b, 3b, EX_TYPE_EFAULT_REG | \
+ EX_FLAG_CLEAR_AX_DX, \
+ %[errout]) \
: [errout] "=r" (retval), \
[output] "=&A"(x) \
: [lowbits] "m" (__m(__ptr)), \
[highbits] "m" __m(((u32 __user *)(__ptr)) + 1), \
- [efault] "i" (-EFAULT), "0" (retval)); \
+ "0" (retval)); \
})
#else
#define __get_user_asm_u64(x, ptr, retval) \
- __get_user_asm(x, ptr, retval, "q", "=r")
+ __get_user_asm(x, ptr, retval, "q")
#endif
#define __get_user_size(x, ptr, size, retval) \
@@ -379,14 +377,14 @@ do { \
__chk_user_ptr(ptr); \
switch (size) { \
case 1: \
- __get_user_asm(x_u8__, ptr, retval, "b", "=q"); \
+ __get_user_asm(x_u8__, ptr, retval, "b"); \
(x) = x_u8__; \
break; \
case 2: \
- __get_user_asm(x, ptr, retval, "w", "=r"); \
+ __get_user_asm(x, ptr, retval, "w"); \
break; \
case 4: \
- __get_user_asm(x, ptr, retval, "l", "=r"); \
+ __get_user_asm(x, ptr, retval, "l"); \
break; \
case 8: \
__get_user_asm_u64(x, ptr, retval); \
@@ -396,20 +394,17 @@ do { \
} \
} while (0)
-#define __get_user_asm(x, addr, err, itype, ltype) \
+#define __get_user_asm(x, addr, err, itype) \
asm volatile("\n" \
"1: mov"itype" %[umem],%[output]\n" \
"2:\n" \
- ".section .fixup,\"ax\"\n" \
- "3: mov %[efault],%[errout]\n" \
- " xorl %k[output],%k[output]\n" \
- " jmp 2b\n" \
- ".previous\n" \
- _ASM_EXTABLE_UA(1b, 3b) \
+ _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG | \
+ EX_FLAG_CLEAR_AX, \
+ %[errout]) \
: [errout] "=r" (err), \
- [output] ltype(x) \
+ [output] "=a" (x) \
: [umem] "m" (__m(addr)), \
- [efault] "i" (-EFAULT), "0" (err))
+ "0" (err))
#endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT
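For reference, a minimal usage sketch of the rewritten legacy-path macro
(illustrative only, not from the patch; example_get_u16(), dst and uptr
are invented names, and real callers go through __get_user_size() inside
uaccess.h rather than invoking __get_user_asm() directly).  The former
"ltype" argument is gone because the output operand is now hard-wired to
"=a", so that EX_FLAG_CLEAR_AX can zero it on a faulting access:

  #include <linux/types.h>        /* u16, __user */

  static int example_get_u16(u16 *dst, const u16 __user *uptr)
  {
          int err = 0;
          u16 val;

          /* was: __get_user_asm(val, uptr, err, "w", "=r"); */
          __get_user_asm(val, uptr, err, "w");
          *dst = val;
          return err;             /* 0 on success, -EFAULT on fault */
  }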