Message-Id: <20170601065843.2392-4-chris@chris-wilson.co.uk>
Date: Thu, 1 Jun 2017 07:58:43 +0100
From: Chris Wilson <chris@...is-wilson.co.uk>
To: linux-kernel@...r.kernel.org
Cc: x86@...nel.org, intel-gfx@...ts.freedesktop.org,
Chris Wilson <chris@...is-wilson.co.uk>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>,
"H. Peter Anvin" <hpa@...or.com>
Subject: [PATCH 3/3] x86-64: Inline 6/12 byte copy_user
Extend the list of replacements for compile-time known sizes to include
6- and 12-byte copies. These expand to two movs (along with their
exception-table entries) and are cheaper to inline than the function
call, similar to the 10-byte copy already handled.
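As a minimal sketch (the struct and helper below are hypothetical, not
taken from the tree), a caller doing a constant-size 6-byte copy would
now get two inline movs, each with its own exception-table entry,
instead of a call to copy_user_generic():

	struct demo_req {
		u32	flags;	/* bytes 0-3: one 4-byte access */
		u16	index;	/* bytes 4-5: one 2-byte access */
	} __packed;		/* sizeof(struct demo_req) == 6 */

	static int demo_get_req(struct demo_req *req, const void __user *arg)
	{
		/* constant size 6 hits the new case in raw_copy_from_user() */
		if (copy_from_user(req, arg, sizeof(*req)))
			return -EFAULT;
		return 0;
	}

The 12-byte case is handled the same way, split into an 8-byte and a
4-byte access.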
Signed-off-by: Chris Wilson <chris@...is-wilson.co.uk>
Cc: Thomas Gleixner <tglx@...utronix.de>
Cc: Ingo Molnar <mingo@...hat.com>
Cc: "H. Peter Anvin" <hpa@...or.com>
---
arch/x86/include/asm/uaccess_64.h | 42 +++++++++++++++++++++++++++++++++++++++
1 file changed, 42 insertions(+)
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index c5504b9a472e..ff2d65baa988 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -71,6 +71,16 @@ raw_copy_from_user(void *dst, const void __user *src, unsigned long size)
ret, "l", "k", "=r", 4);
__uaccess_end();
return ret;
+ case 6:
+ __uaccess_begin();
+ __get_user_asm_nozero(*(u32 *)dst, (u32 __user *)src,
+ ret, "l", "k", "=r", 6);
+ if (likely(!ret))
+ __get_user_asm_nozero(*(u16 *)(4 + (char *)dst),
+ (u16 __user *)(4 + (char __user *)src),
+ ret, "w", "w", "=r", 2);
+ __uaccess_end();
+ return ret;
case 8:
__uaccess_begin();
__get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src,
@@ -87,6 +97,16 @@ raw_copy_from_user(void *dst, const void __user *src, unsigned long size)
ret, "w", "w", "=r", 2);
__uaccess_end();
return ret;
+ case 12:
+ __uaccess_begin();
+ __get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src,
+ ret, "q", "", "=r", 12);
+ if (likely(!ret))
+ __get_user_asm_nozero(*(u32 *)(8 + (char *)dst),
+ (u32 __user *)(8 + (char __user *)src),
+ ret, "l", "k", "=r", 4);
+ __uaccess_end();
+ return ret;
case 16:
__uaccess_begin();
__get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src,
@@ -128,6 +148,17 @@ raw_copy_to_user(void __user *dst, const void *src, unsigned long size)
ret, "l", "k", "ir", 4);
__uaccess_end();
return ret;
+ case 6:
+ __uaccess_begin();
+ __put_user_asm(*(u32 *)src, (u32 __user *)dst,
+ ret, "l", "k", "ir", 6);
+ if (likely(!ret)) {
+ asm("":::"memory");
+ __put_user_asm(2[(u16 *)src], 2 + (u16 __user *)dst,
+ ret, "w", "w", "ir", 2);
+ }
+ __uaccess_end();
+ return ret;
case 8:
__uaccess_begin();
__put_user_asm(*(u64 *)src, (u64 __user *)dst,
@@ -145,6 +176,17 @@ raw_copy_to_user(void __user *dst, const void *src, unsigned long size)
}
__uaccess_end();
return ret;
+ case 12:
+ __uaccess_begin();
+ __put_user_asm(*(u64 *)src, (u64 __user *)dst,
+ ret, "q", "", "er", 12);
+ if (likely(!ret)) {
+ asm("":::"memory");
+ __put_user_asm(2[(u32 *)src], 2 + (u32 __user *)dst,
+ ret, "l", "k", "ir", 4);
+ }
+ __uaccess_end();
+ return ret;
case 16:
__uaccess_begin();
__put_user_asm(*(u64 *)src, (u64 __user *)dst,
--
2.11.0