 arch/x86/include/asm/uaccess.h    |  96 +++++++++----------
 arch/x86/include/asm/uaccess_64.h | 191 ++++++++++++++++++--------------------
 fs/readdir.c                      |  22 +++--
 3 files changed, 149 insertions(+), 160 deletions(-)

diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index b5e58cc0c5e7..3f4c89deb7a1 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -12,6 +12,9 @@
 #include <asm/smap.h>
 #include <asm/extable.h>
 
+#define INLINE_COPY_TO_USER
+#define INLINE_COPY_FROM_USER
+
 /*
  * The fs value determines whether argument validity checking should be
  * performed or not.  If get_fs() == USER_DS, checking is performed, with
@@ -189,19 +192,14 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
 
 #ifdef CONFIG_X86_32
-#define __put_user_asm_u64(x, addr, err, errret)			\
-	asm volatile("\n"						\
-		     "1:	movl %%eax,0(%2)\n"			\
-		     "2:	movl %%edx,4(%2)\n"			\
-		     "3:"						\
-		     ".section .fixup,\"ax\"\n"				\
-		     "4:	movl %3,%0\n"				\
-		     "	jmp 3b\n"					\
-		     ".previous\n"					\
-		     _ASM_EXTABLE_UA(1b, 4b)				\
-		     _ASM_EXTABLE_UA(2b, 4b)				\
-		     : "=r" (err)					\
-		     : "A" (x), "r" (addr), "i" (errret), "0" (err))
+#define __put_user_goto_u64(x, addr, label)			\
+	asm volatile goto("\n"					\
+		     "1:	movl %%eax,0(%1)\n"		\
+		     "2:	movl %%edx,4(%1)\n"		\
+		     _ASM_EXTABLE_UA(1b, %l2)			\
+		     _ASM_EXTABLE_UA(2b, %l2)			\
+		     : : "A" (x), "r" (addr)			\
+		     : : label)
 
 #define __put_user_asm_ex_u64(x, addr)					\
 	asm volatile("\n"						\
@@ -216,8 +214,8 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
 	asm volatile("call __put_user_8" : "=a" (__ret_pu)	\
 		     : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
 #else
-#define __put_user_asm_u64(x, ptr, retval, errret) \
-	__put_user_asm(x, ptr, retval, "q", "", "er", errret)
+#define __put_user_goto_u64(x, ptr, label) \
+	__put_user_goto(x, ptr, "q", "", "er", label)
 #define __put_user_asm_ex_u64(x, addr)	\
 	__put_user_asm_ex(x, addr, "q", "", "er")
 #define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
@@ -278,23 +276,21 @@ extern void __put_user_8(void);
 	__builtin_expect(__ret_pu, 0);				\
 })
 
-#define __put_user_size(x, ptr, size, retval, errret)			\
+#define __put_user_size(x, ptr, size, label)				\
 do {									\
-	retval = 0;							\
 	__chk_user_ptr(ptr);						\
 	switch (size) {							\
 	case 1:								\
-		__put_user_asm(x, ptr, retval, "b", "b", "iq", errret);	\
+		__put_user_goto(x, ptr, "b", "b", "iq", label);		\
 		break;							\
 	case 2:								\
-		__put_user_asm(x, ptr, retval, "w", "w", "ir", errret);	\
+		__put_user_goto(x, ptr, "w", "w", "ir", label);		\
 		break;							\
 	case 4:								\
-		__put_user_asm(x, ptr, retval, "l", "k", "ir", errret);	\
+		__put_user_goto(x, ptr, "l", "k", "ir", label);		\
 		break;							\
 	case 8:								\
-		__put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval,	\
-				   errret);				\
+		__put_user_goto_u64((__typeof__(*ptr))(x), ptr, label);	\
 		break;							\
 	default:							\
 		__put_user_bad();					\
@@ -439,9 +435,12 @@ do {									\
 
 #define __put_user_nocheck(x, ptr, size)			\
 ({								\
-	int __pu_err;						\
+	__label__ __pu_label;					\
+	int __pu_err = -EFAULT;					\
 	__uaccess_begin();					\
-	__put_user_size((x), (ptr), (size), __pu_err, -EFAULT);	\
+	__put_user_size((x), (ptr), (size), __pu_label);	\
+	__pu_err = 0;						\
+__pu_label:							\
 	__uaccess_end();					\
 	__builtin_expect(__pu_err, 0);				\
 })
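The conversion above turns the error handling inside out: instead of the
asm writing an error code into an output register via a .fixup stub, the
asm lists a C label and the _ASM_EXTABLE_UA entry makes the page-fault
fixup resume execution at that label. A minimal user-space sketch of the
same shape, assuming a compiler with asm-goto support (probe_store() is
an invented name, and nothing here ever jumps to the label, since user
space has no exception table to redirect a faulting store):

	#include <stdio.h>

	static int probe_store(int val, int *addr)
	{
		asm goto("movl %0, %1\n"	/* the store that may fault */
			 : /* no outputs: success/failure is control flow */
			 : "r" (val), "m" (*addr)
			 : "memory"
			 : fault);
		return 0;		/* fell through: store succeeded */
	fault:
		return -1;		/* kernel code returns -EFAULT here */
	}

	int main(void)
	{
		int x = 0;

		if (probe_store(42, &x))
			return 1;
		printf("%d\n", x);	/* prints 42 */
		return 0;
	}

__put_user_nocheck() uses the same idea one level up: a local __label__
lets it default __pu_err to -EFAULT and clear it only when every store
falls through.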
@@ -466,17 +465,23 @@ struct __large_struct { unsigned long buf[100]; };
  * we do not write to any memory gcc knows about, so there are no
  * aliasing issues.
  */
-#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
-	asm volatile("\n"						\
-		     "1:	mov"itype" %"rtype"1,%2\n"		\
-		     "2:\n"						\
-		     ".section .fixup,\"ax\"\n"				\
-		     "3:	mov %3,%0\n"				\
-		     "	jmp 2b\n"					\
-		     ".previous\n"					\
-		     _ASM_EXTABLE_UA(1b, 3b)				\
-		     : "=r"(err)					\
-		     : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
+#define __put_user_goto(x, addr, itype, rtype, ltype, label)	\
+	asm volatile goto("\n"					\
+		"1:	mov"itype" %"rtype"0,%1\n"		\
+		_ASM_EXTABLE_UA(1b, %l2)			\
+		: : ltype(x), "m" (__m(addr))			\
+		: : label)
+
+#define __put_user_failed(x, addr, itype, rtype, ltype, errret)		\
+	({	__label__ __puflab;					\
+		int __pufret = errret;					\
+		__put_user_goto(x, addr, itype, rtype, ltype, __puflab); \
+		__pufret = 0;						\
+	__puflab: __pufret; })
+
+#define __put_user_asm(x, addr, retval, itype, rtype, ltype, errret)	do {	\
+	retval = __put_user_failed(x, addr, itype, rtype, ltype, errret);	\
+} while (0)
 
 #define __put_user_asm_ex(x, addr, itype, rtype, ltype)			\
 	asm volatile("1:	mov"itype" %"rtype"0,%1\n"		\
@@ -687,12 +692,6 @@ extern struct movsl_mask {
 
 #define ARCH_HAS_NOCACHE_UACCESS 1
 
-#ifdef CONFIG_X86_32
-# include <asm/uaccess_32.h>
-#else
-# include <asm/uaccess_64.h>
-#endif
-
 /*
  * We rely on the nested NMI work to allow atomic faults from the NMI path; the
  * nested NMI paths are careful to preserve CR2.
@@ -711,13 +710,8 @@ extern struct movsl_mask {
 #define user_access_begin()	__uaccess_begin()
 #define user_access_end()	__uaccess_end()
 
-#define unsafe_put_user(x, ptr, err_label)					\
-do {										\
-	int __pu_err;								\
-	__typeof__(*(ptr)) __pu_val = (x);					\
-	__put_user_size(__pu_val, (ptr), sizeof(*(ptr)), __pu_err, -EFAULT);	\
-	if (unlikely(__pu_err)) goto err_label;					\
-} while (0)
+#define unsafe_put_user(x, ptr, label)	\
+	__put_user_size((x), (ptr), sizeof(*(ptr)), label)
 
 #define unsafe_get_user(x, ptr, err_label)					\
 do {										\
@@ -728,5 +722,11 @@ do {										\
 	if (unlikely(__gu_err)) goto err_label;					\
 } while (0)
 
+#ifdef CONFIG_X86_32
+# include <asm/uaccess_32.h>
+#else
+# include <asm/uaccess_64.h>
+#endif
+
 #endif /* _ASM_X86_UACCESS_H */
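With unsafe_put_user() reduced to a bare __put_user_size(), each caller
supplies one local error label that serves any number of stores inside a
user_access_begin()/user_access_end() pair. A user-space skeleton of that
calling convention, with invented names (demo_put_user() fakes a fault on
a NULL pointer where the kernel would take a real page fault):

	#include <stdio.h>

	#define demo_put_user(x, ptr, label) do {		\
		if ((ptr) == NULL)	/* stand-in for a fault */ \
			goto label;				\
		*(ptr) = (x);					\
	} while (0)

	static int fill_record(int *d_ino, int *d_reclen)
	{
		/* one label serves every store, as in filldir() below */
		demo_put_user(1234, d_ino, efault);
		demo_put_user(24, d_reclen, efault);
		return 0;
	efault:
		return -14;		/* -EFAULT */
	}

	int main(void)
	{
		int ino, reclen;

		printf("%d\n", fill_record(&ino, &reclen));	/* 0 */
		printf("%d\n", fill_record(NULL, &reclen));	/* -14 */
		return 0;
	}

__put_user_failed() keeps the old error-code interface alive for the
remaining __put_user_asm() users by wrapping the goto form back into an
expression with a statement-expression and a local label.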
"=r", 16); - if (likely(!ret)) - __get_user_asm_nozero(*(u64 *)(8 + (char *)dst), - (u64 __user *)(8 + (char __user *)src), - ret, "q", "", "=r", 8); - __uaccess_end(); - return ret; - default: + if (!__builtin_constant_p(size) || size > 128) return copy_user_generic(dst, (__force void *)src, size); + + /* Small constant size: unroll */ + user_access_begin(); + while (size >= sizeof(unsigned long)) { + unsigned long val; + unsafe_get_user(val, (const unsigned long __user *) src, out); + *(unsigned long *)dst = val; + size -= sizeof(unsigned long); + src += sizeof(unsigned long); + dst += sizeof(unsigned long); } + + while (size >= sizeof(unsigned int)) { + unsigned int val; + unsafe_get_user(val, (const unsigned int __user *) src, out); + *(unsigned int *)dst = val; + size -= sizeof(unsigned int); + src += sizeof(unsigned int); + dst += sizeof(unsigned int); + } + + while (size >= sizeof(unsigned short)) { + unsigned short val; + unsafe_get_user(val, (const unsigned short __user *) src, out); + *(unsigned short *)dst = val; + size -= sizeof(unsigned short); + src += sizeof(unsigned short); + dst += sizeof(unsigned short); + } + + while (size >= sizeof(unsigned char)) { + unsigned char val; + unsafe_get_user(val, (const unsigned char __user *) src, out); + *(unsigned char *)dst = val; + size -= sizeof(unsigned char); + src += sizeof(unsigned char); + dst += sizeof(unsigned char); + } + user_access_end(); + return 0; + +out: + user_access_end(); + return size; } static __always_inline __must_check unsigned long raw_copy_to_user(void __user *dst, const void *src, unsigned long size) { - int ret = 0; - - if (!__builtin_constant_p(size)) - return copy_user_generic((__force void *)dst, src, size); - switch (size) { - case 1: - __uaccess_begin(); - __put_user_asm(*(u8 *)src, (u8 __user *)dst, - ret, "b", "b", "iq", 1); - __uaccess_end(); - return ret; - case 2: - __uaccess_begin(); - __put_user_asm(*(u16 *)src, (u16 __user *)dst, - ret, "w", "w", "ir", 2); - __uaccess_end(); - return ret; - case 4: - __uaccess_begin(); - __put_user_asm(*(u32 *)src, (u32 __user *)dst, - ret, "l", "k", "ir", 4); - __uaccess_end(); - return ret; - case 8: - __uaccess_begin(); - __put_user_asm(*(u64 *)src, (u64 __user *)dst, - ret, "q", "", "er", 8); - __uaccess_end(); - return ret; - case 10: - __uaccess_begin(); - __put_user_asm(*(u64 *)src, (u64 __user *)dst, - ret, "q", "", "er", 10); - if (likely(!ret)) { - asm("":::"memory"); - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst, - ret, "w", "w", "ir", 2); - } - __uaccess_end(); - return ret; - case 16: - __uaccess_begin(); - __put_user_asm(*(u64 *)src, (u64 __user *)dst, - ret, "q", "", "er", 16); - if (likely(!ret)) { - asm("":::"memory"); - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst, - ret, "q", "", "er", 8); - } - __uaccess_end(); - return ret; - default: + if (!__builtin_constant_p(size) || size > 128) return copy_user_generic((__force void *)dst, src, size); + + /* Small constant size: unroll */ + user_access_begin(); + while (size >= sizeof(unsigned long)) { + unsigned long val; + val = *(const unsigned long *)src; + unsafe_put_user(val, (unsigned long __user *) dst, out); + size -= sizeof(unsigned long); + src += sizeof(unsigned long); + dst += sizeof(unsigned long); + } + + while (size >= sizeof(unsigned int)) { + unsigned int val; + val = *(const unsigned int *)src; + unsafe_put_user(val, (unsigned int __user *) dst, out); + size -= sizeof(unsigned int); + src += sizeof(unsigned int); + dst += sizeof(unsigned int); + } + + while (size >= 
diff --git a/fs/readdir.c b/fs/readdir.c
index d97f548e6323..f1e159e17125 100644
--- a/fs/readdir.c
+++ b/fs/readdir.c
@@ -185,25 +185,27 @@ static int filldir(struct dir_context *ctx, const char *name, int namlen,
 	if (dirent) {
 		if (signal_pending(current))
 			return -EINTR;
-		if (__put_user(offset, &dirent->d_off))
-			goto efault;
 	}
+
+	user_access_begin();
+	if (dirent)
+		unsafe_put_user(offset, &dirent->d_off, efault_end);
 	dirent = buf->current_dir;
-	if (__put_user(d_ino, &dirent->d_ino))
-		goto efault;
-	if (__put_user(reclen, &dirent->d_reclen))
-		goto efault;
+	unsafe_put_user(d_ino, &dirent->d_ino, efault_end);
+	unsafe_put_user(reclen, &dirent->d_reclen, efault_end);
+	unsafe_put_user(0, dirent->d_name + namlen, efault_end);
+	unsafe_put_user(d_type, (char __user *) dirent + reclen - 1, efault_end);
+	user_access_end();
+
 	if (copy_to_user(dirent->d_name, name, namlen))
 		goto efault;
-	if (__put_user(0, dirent->d_name + namlen))
-		goto efault;
-	if (__put_user(d_type, (char __user *) dirent + reclen - 1))
-		goto efault;
 	buf->previous = dirent;
 	dirent = (void __user *)dirent + reclen;
 	buf->current_dir = dirent;
 	buf->count -= reclen;
 	return 0;
+efault_end:
+	user_access_end();
 efault:
 	buf->error = -EFAULT;
 	return -EFAULT;
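The filldir() conversion batches all the small stores under one
user_access_begin()/user_access_end() section, which matters because each
begin/end pair is a STAC/CLAC on x86. The stacked error labels rely on
plain C fall-through: efault_end closes the access section and then falls
into efault. A user-space skeleton, with invented names and injected
failures standing in for faults (the real copy_to_user() failure takes
the plain efault path because the unsafe section is already closed):

	#include <stdio.h>

	static int demo_fill(int fail_early, int fail_late)
	{
		if (fail_late)
			goto efault_end;  /* an unsafe_put_user() faulted */
		if (fail_early)
			goto efault;	  /* no unsafe section open here */
		return 0;
	efault_end:
		puts("user_access_end()");  /* close the unsafe section */
		/* fall through */
	efault:
		return -14;		    /* -EFAULT */
	}

	int main(void)
	{
		printf("%d\n", demo_fill(0, 0));  /* 0 */
		printf("%d\n", demo_fill(0, 1));  /* -14, with cleanup */
		printf("%d\n", demo_fill(1, 0));  /* -14, no cleanup */
		return 0;
	}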