diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 123135d60f72..7bb11d5a7f8f 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -77,15 +77,28 @@ static inline unsigned long __untagged_addr_remote(struct mm_struct *mm,
 #ifdef CONFIG_X86_64
 /*
- * On x86-64, we may have tag bits in the user pointer. Rather than
- * mask them off, just change the rules for __access_ok().
+ * The virtual address space is logically divided into a kernel
+ * half and a user half. When cast to a signed type, user pointers
+ * are positive and kernel pointers are negative.
+ */
+static inline bool ptr_in_user_half(void *ptr)
+{
+	return (long)ptr >= 0;
+}
+
+/*
+ * User pointers can have tag bits on x86-64. This scheme tolerates
+ * arbitrary values in those bits rather than masking them off.
+ *
+ * Enforce two rules:
+ * 1. 'ptr' must be in the user half of the address space
+ * 2. 'ptr+size' must not overflow (back into the kernel half)
  *
- * Make the rule be that 'ptr+size' must not overflow, and must not
- * have the high bit set. Compilers generally understand about
- * unsigned overflow and the CF bit and generate reasonable code for
- * this. Although it looks like the combination confuses at least
- * clang (and instead of just doing an "add" followed by a test of
- * SF and CF, you'll see that unnecessary comparison).
+ * Compilers generally understand about unsigned overflow and the CF
+ * bit and generate reasonable code for this. Although it looks like
+ * the combination confuses at least clang (and instead of just doing
+ * an "add" followed by a test of SF and CF, you'll see that
+ * unnecessary comparison).
* * For the common case of small sizes that can be checked at compile * time, don't even bother with the addition, and just check that the @@ -93,11 +106,16 @@ static inline unsigned long __untagged_addr_remote(struct mm_struct *mm, */ static inline bool __access_ok(const void __user *ptr, unsigned long size) { + /* + * Check only the pointer (not ptr+size) for small accesses. + * This is OK because the kernel address space begins with a + * >=PAGE_SIZE guard hole. + */ if (__builtin_constant_p(size <= PAGE_SIZE) && size <= PAGE_SIZE) { - return (long)ptr >= 0; + return ptr_in_user_half(ptr); } else { unsigned long sum = size + (unsigned long)ptr; - return (long) sum >= 0 && sum >= (unsigned long)ptr; + return ptr_in_user_half(ptr) && sum >= (unsigned long)ptr; } } #define __access_ok __access_ok