Message-Id: <20170601065843.2392-3-chris@chris-wilson.co.uk>
Date: Thu, 1 Jun 2017 07:58:42 +0100
From: Chris Wilson <chris@...is-wilson.co.uk>
To: linux-kernel@...r.kernel.org
Cc: x86@...nel.org, intel-gfx@...ts.freedesktop.org,
Chris Wilson <chris@...is-wilson.co.uk>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>,
"H. Peter Anvin" <hpa@...or.com>
Subject: [PATCH 2/3] x86-32: Expand static copy_to_user()

For known compile-time fixed sizes, teach x86-32 copy_to_user() to
convert the copy into simpler, inlined put_user() stores, similar to
the optimisation already applied to copy_from_user() and already used
by x86-64.
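
To illustrate the idea outside the kernel (a minimal user-space
sketch, not part of this patch; copy_fixed() and the chosen sizes are
hypothetical stand-ins): when n is a compile-time constant,
__builtin_constant_p(n) lets the compiler resolve the switch at build
time and emit a single inlined store instead of a call to the generic
copy routine.

  #include <stdint.h>
  #include <string.h>

  /* Hypothetical user-space analogue of the optimisation: with a
   * compile-time constant size, the switch is resolved at build time
   * and the generic memcpy() fallback is elided entirely.
   */
  static inline unsigned long copy_fixed(void *to, const void *from,
                                         unsigned long n)
  {
          if (__builtin_constant_p(n)) {
                  switch (n) {
                  case 1:
                          *(uint8_t *)to = *(const uint8_t *)from;
                          return 0;
                  case 4: {
                          uint32_t v;

                          memcpy(&v, from, 4);  /* one 4-byte load */
                          memcpy(to, &v, 4);    /* one 4-byte store */
                          return 0;
                  }
                  }
          }
          memcpy(to, from, n);  /* unknown size: generic path */
          return 0;
  }

  int main(void)
  {
          uint32_t src = 0xdeadbeef, dst = 0;

          /* sizeof(src) is constant, so this takes the inlined path */
          copy_fixed(&dst, &src, sizeof(src));
          return dst != src;
  }
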
Signed-off-by: Chris Wilson <chris@...is-wilson.co.uk>
Cc: Thomas Gleixner <tglx@...utronix.de>
Cc: Ingo Molnar <mingo@...hat.com>
Cc: "H. Peter Anvin" <hpa@...or.com>
---
arch/x86/include/asm/uaccess_32.h | 48 +++++++++++++++++++++++++++++++++++++++
1 file changed, 48 insertions(+)
diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
index 44d17d1ab07c..a02aa9db34ed 100644
--- a/arch/x86/include/asm/uaccess_32.h
+++ b/arch/x86/include/asm/uaccess_32.h
@@ -16,6 +16,54 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
 static __always_inline unsigned long __must_check
 raw_copy_to_user(void __user *to, const void *from, unsigned long n)
 {
+	if (__builtin_constant_p(n)) {
+		unsigned long ret = 0;
+
+		switch (n) {
+		case 1:
+			__uaccess_begin();
+			__put_user_asm(*(u8 *)from, to, ret,
+				       "b", "b", "iq", 1);
+			__uaccess_end();
+			return ret;
+		case 2:
+			__uaccess_begin();
+			__put_user_asm(*(u16 *)from, to, ret,
+				       "w", "w", "ir", 2);
+			__uaccess_end();
+			return ret;
+		case 4:
+			__uaccess_begin();
+			__put_user_asm(*(u32 *)from, to, ret,
+				       "l", "k", "ir", 4);
+			__uaccess_end();
+			return ret;
+		case 6:
+			__uaccess_begin();
+			__put_user_asm(*(u32 *)from, to, ret,
+				       "l", "k", "ir", 4);
+			if (likely(!ret)) {
+				asm("":::"memory");
+				__put_user_asm(*(u16 *)(4 + (char *)from),
+					       (u16 __user *)(4 + (char __user *)to),
+					       ret, "w", "w", "ir", 2);
+			}
+			__uaccess_end();
+			return ret;
+		case 8:
+			__uaccess_begin();
+			__put_user_asm(*(u32 *)from, to, ret,
+				       "l", "k", "ir", 4);
+			if (likely(!ret)) {
+				asm("":::"memory");
+				__put_user_asm(*(u32 *)(4 + (char *)from),
+					       (u32 __user *)(4 + (char __user *)to),
+					       ret, "l", "k", "ir", 4);
+			}
+			__uaccess_end();
+			return ret;
+		}
+	}
 	return __copy_user_ll((__force void *)to, from, n);
 }
 
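
With the fast path in place, a small fixed-size copy such as (caller
names here are hypothetical, for illustration only):

	u32 val = compute_val();

	if (copy_to_user(uptr, &val, sizeof(val)))
		return -EFAULT;

should now compile to a single inlined 4-byte __put_user_asm store
bracketed by __uaccess_begin()/__uaccess_end(), rather than a call to
__copy_user_ll(). In the 6- and 8-byte cases, the empty
asm("":::"memory") between the two stores is a compiler-only barrier:
it prevents the compiler from reordering or combining the two user
stores around the intervening check of ret from the first store.
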
--
2.11.0