[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <201701162222.PbaH7OVB%fengguang.wu@intel.com>
Date: Mon, 16 Jan 2017 22:59:43 +0800
From: kbuild test robot <lkp@...el.com>
To: Dmitry Safonov <dsafonov@...tuozzo.com>
Cc: kbuild-all@...org, linux-kernel@...r.kernel.org,
0x7f454c46@...il.com, Dmitry Safonov <dsafonov@...tuozzo.com>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>,
"H. Peter Anvin" <hpa@...or.com>,
Andy Lutomirski <luto@...nel.org>,
Borislav Petkov <bp@...e.de>, x86@...nel.org,
linux-mm@...ck.org
Subject: Re: [PATCHv2 3/5] x86/mm: fix native mmap() in compat bins and
vice-versa
Hi Dmitry,
[auto build test WARNING on tip/x86/core]
[also build test WARNING on v4.10-rc4 next-20170116]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]
url: https://github.com/0day-ci/linux/commits/Dmitry-Safonov/Fix-compatible-mmap-return-pointer-over-4Gb/20170116-204523
config: x86_64-randconfig-ne0-01162147 (attached as .config)
compiler: gcc-6 (Debian 6.2.0-3) 6.2.0 20160901
reproduce:
# save the attached .config to linux build tree
make ARCH=x86_64
All warnings (new ones prefixed by >>):
In file included from include/uapi/linux/stddef.h:1:0,
from include/linux/stddef.h:4,
from include/uapi/linux/posix_types.h:4,
from include/uapi/linux/types.h:13,
from include/linux/types.h:5,
from include/uapi/linux/capability.h:16,
from include/linux/capability.h:15,
from include/linux/sched.h:15,
from arch/x86/kernel/sys_x86_64.c:2:
arch/x86/kernel/sys_x86_64.c: In function 'find_start_end':
arch/x86/kernel/sys_x86_64.c:131:8: error: implicit declaration of function 'in_compat_syscall' [-Werror=implicit-function-declaration]
if (!in_compat_syscall()) {
^
include/linux/compiler.h:149:30: note: in definition of macro '__trace_if'
if (__builtin_constant_p(!!(cond)) ? !!(cond) : \
^~~~
>> arch/x86/kernel/sys_x86_64.c:131:3: note: in expansion of macro 'if'
if (!in_compat_syscall()) {
^~
cc1: some warnings being treated as errors
vim +/if +131 arch/x86/kernel/sys_x86_64.c
1 #include <linux/errno.h>
> 2 #include <linux/sched.h>
3 #include <linux/syscalls.h>
4 #include <linux/mm.h>
5 #include <linux/fs.h>
6 #include <linux/smp.h>
7 #include <linux/sem.h>
8 #include <linux/msg.h>
9 #include <linux/shm.h>
10 #include <linux/stat.h>
11 #include <linux/mman.h>
12 #include <linux/file.h>
13 #include <linux/utsname.h>
14 #include <linux/personality.h>
15 #include <linux/random.h>
16 #include <linux/uaccess.h>
17 #include <linux/elf.h>
18
19 #include <asm/ia32.h>
20 #include <asm/syscalls.h>
21
22 /*
23 * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
24 */
25 static unsigned long get_align_mask(void)
26 {
27 /* handle 32- and 64-bit case with a single conditional */
28 if (va_align.flags < 0 || !(va_align.flags & (2 - mmap_is_ia32())))
29 return 0;
30
31 if (!(current->flags & PF_RANDOMIZE))
32 return 0;
33
34 return va_align.mask;
35 }
36
37 /*
38 * To avoid aliasing in the I$ on AMD F15h, the bits defined by the
39 * va_align.bits, [12:upper_bit), are set to a random value instead of
40 * zeroing them. This random value is computed once per boot. This form
41 * of ASLR is known as "per-boot ASLR".
42 *
43 * To achieve this, the random value is added to the info.align_offset
44 * value before calling vm_unmapped_area() or ORed directly to the
45 * address.
46 */
47 static unsigned long get_align_bits(void)
48 {
49 return va_align.bits & get_align_mask();
50 }
51
/*
 * Round @addr up to the alignment boundary, then OR in the per-boot
 * random bits (see get_align_bits()).
 */
unsigned long align_vdso_addr(unsigned long addr)
{
	const unsigned long mask = get_align_mask();
	const unsigned long aligned = (addr + mask) & ~mask;

	return aligned | get_align_bits();
}
58
59 static int __init control_va_addr_alignment(char *str)
60 {
61 /* guard against enabling this on other CPU families */
62 if (va_align.flags < 0)
63 return 1;
64
65 if (*str == 0)
66 return 1;
67
68 if (*str == '=')
69 str++;
70
71 if (!strcmp(str, "32"))
72 va_align.flags = ALIGN_VA_32;
73 else if (!strcmp(str, "64"))
74 va_align.flags = ALIGN_VA_64;
75 else if (!strcmp(str, "off"))
76 va_align.flags = 0;
77 else if (!strcmp(str, "on"))
78 va_align.flags = ALIGN_VA_32 | ALIGN_VA_64;
79 else
80 return 0;
81
82 return 1;
83 }
84 __setup("align_va_addr", control_va_addr_alignment);
85
86 SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
87 unsigned long, prot, unsigned long, flags,
88 unsigned long, fd, unsigned long, off)
89 {
90 long error;
91 error = -EINVAL;
92 if (off & ~PAGE_MASK)
93 goto out;
94
95 error = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
96 out:
97 return error;
98 }
99
100 static void find_start_end(unsigned long flags, unsigned long *begin,
101 unsigned long *end)
102 {
103 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
104 /* This is usually used needed to map code in small
105 model, so it needs to be in the first 31bit. Limit
106 it to that. This means we need to move the
107 unmapped base down for this case. This can give
108 conflicts with the heap, but we assume that glibc
109 malloc knows how to fall back to mmap. Give it 1GB
110 of playground for now. -AK */
111 *begin = 0x40000000;
112 *end = 0x80000000;
113 if (current->flags & PF_RANDOMIZE) {
114 *begin = randomize_page(*begin, 0x02000000);
115 }
116 return;
117 }
118
119 if (!test_thread_flag(TIF_ADDR32)) {
120 #ifdef CONFIG_COMPAT
121 /* 64-bit native binary doing compat 32-bit syscall */
122 if (in_compat_syscall()) {
123 *begin = mmap_legacy_base(arch_compat_rnd(),
124 IA32_PAGE_OFFSET);
125 *end = IA32_PAGE_OFFSET;
126 return;
127 }
128 #endif
129 } else {
130 /* 32-bit binary doing 64-bit syscall */
> 131 if (!in_compat_syscall()) {
132 *begin = mmap_legacy_base(arch_native_rnd(),
133 IA32_PAGE_OFFSET);
134 *end = TASK_SIZE_MAX;
---
0-DAY kernel test infrastructure Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all Intel Corporation
Download attachment ".config.gz" of type "application/gzip" (28444 bytes)
Powered by blists - more mailing lists