Message-Id: <20170601065843.2392-2-chris@chris-wilson.co.uk>
Date: Thu, 1 Jun 2017 07:58:41 +0100
From: Chris Wilson <chris@...is-wilson.co.uk>
To: linux-kernel@...r.kernel.org
Cc: x86@...nel.org, intel-gfx@...ts.freedesktop.org,
Chris Wilson <chris@...is-wilson.co.uk>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>,
"H. Peter Anvin" <hpa@...or.com>
Subject: [PATCH 1/3] x86-32: Teach copy_from_user to unroll .size=6/8
Two exception-handled register moves are faster to inline than a call
to __copy_user_ll(). We already apply this conversion for a get_user()
call, so for symmetry also apply the optimisation to constant-sized
copy_from_user() of 6 and 8 bytes.
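As an illustration (the structure and function below are invented for
this example, not taken from the kernel), a constant 8-byte copy such
as the following is now performed with two inlined 4-byte moves, each
with its own exception fixup, instead of a call to __copy_user_ll():

#include <linux/types.h>
#include <linux/uaccess.h>

/* Hypothetical 8-byte ioctl argument, for illustration only. */
struct example_arg {
	u32 handle;
	u32 flags;
};

static int example_copy_arg(struct example_arg *arg,
			    const void __user *uarg)
{
	/*
	 * sizeof(*arg) == 8 is a compile-time constant, so on x86-32
	 * raw_copy_from_user() takes the unrolled case 8 path added by
	 * this patch rather than calling out to __copy_user_ll().
	 */
	if (copy_from_user(arg, uarg, sizeof(*arg)))
		return -EFAULT;

	return 0;
}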
Signed-off-by: Chris Wilson <chris@...is-wilson.co.uk>
Cc: Thomas Gleixner <tglx@...utronix.de>
Cc: Ingo Molnar <mingo@...hat.com>
Cc: "H. Peter Anvin" <hpa@...or.com>
---
arch/x86/include/asm/uaccess_32.h | 25 +++++++++++++++++++++----
1 file changed, 21 insertions(+), 4 deletions(-)
diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
index aeda9bb8af50..44d17d1ab07c 100644
--- a/arch/x86/include/asm/uaccess_32.h
+++ b/arch/x86/include/asm/uaccess_32.h
@@ -23,30 +23,47 @@ static __always_inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
if (__builtin_constant_p(n)) {
- unsigned long ret;
+ unsigned long ret = 0;
switch (n) {
case 1:
- ret = 0;
__uaccess_begin();
__get_user_asm_nozero(*(u8 *)to, from, ret,
"b", "b", "=q", 1);
__uaccess_end();
return ret;
case 2:
- ret = 0;
__uaccess_begin();
__get_user_asm_nozero(*(u16 *)to, from, ret,
"w", "w", "=r", 2);
__uaccess_end();
return ret;
case 4:
- ret = 0;
__uaccess_begin();
__get_user_asm_nozero(*(u32 *)to, from, ret,
"l", "k", "=r", 4);
__uaccess_end();
return ret;
+ case 6:
+ __uaccess_begin();
+ __get_user_asm_nozero(*(u32 *)to, from, ret,
+ "l", "k", "=r", 6);
+ if (likely(!ret))
+ __get_user_asm_nozero(*(u16 *)(4 + (char *)to),
+ (u16 __user *)(4 + (char __user *)from),
+ ret, "w", "w", "=r", 2);
+ __uaccess_end();
+ return ret;
+ case 8:
+ __uaccess_begin();
+ __get_user_asm_nozero(*(u32 *)to, from, ret,
+ "l", "k", "=r", 8);
+ if (likely(!ret))
+ __get_user_asm_nozero(*(u32 *)(4 + (char *)to),
+ (u32 __user *)(4 + (char __user *)from),
+ ret, "l", "k", "=r", 4);
+ __uaccess_end();
+ return ret;
}
}
return __copy_user_ll(to, (__force const void *)from, n);
--
2.11.0