diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index ac601c4b9f57..356bf4b4273b 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -2457,6 +2457,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 	spia_pedr=
 	spia_peddr=
 
+	stack_guard_gap=	[MM]
+			override the default stack gap protection. The value
+			is in page units and it defines how many pages prior
+			to (for stacks growing down) resp. after (for stacks
+			growing up) the main stack are reserved for no other
+			mapping. Default value is 256 pages.
+
 	stacktrace	[FTRACE]
 			Enabled the stack tracer on boot up.
 
diff --git a/Makefile b/Makefile
index df4aefae8171..efe5c9704afa 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 3
 PATCHLEVEL = 2
-SUBLEVEL = 89
+SUBLEVEL = 90
 EXTRAVERSION =
 NAME = Saber-toothed Squirrel
 
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index 01e8715e26d9..b9abe5bb437b 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -1147,7 +1147,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
 		/* At this point:  (!vma || addr < vma->vm_end). */
 		if (limit - len < addr)
 			return -ENOMEM;
-		if (!vma || addr + len <= vma->vm_start)
+		if (!vma || addr + len <= vm_start_gap(vma))
 			return addr;
 		addr = vma->vm_end;
 		vma = vma->vm_next;
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index 44b628e4d6ea..4497b5ef688e 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -30,7 +30,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 {
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
-	unsigned long start_addr;
+	unsigned long start_addr, vm_start;
 	int do_align = 0;
 	int aliasing = cache_is_vipt_aliasing();
 
@@ -62,7 +62,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 	if (len > mm->cached_hole_size) {
@@ -96,15 +96,17 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 			}
 			return -ENOMEM;
 		}
-		if (!vma || addr + len <= vma->vm_start) {
+		if (vma)
+			vm_start = vm_start_gap(vma);
+		if (!vma || addr + len <= vm_start) {
 			/*
 			 * Remember the place where we stopped the search:
 			 */
 			mm->free_area_cache = addr + len;
 			return addr;
 		}
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
+		if (addr + mm->cached_hole_size < vm_start)
+			mm->cached_hole_size = vm_start - addr;
 		addr = vma->vm_end;
 		if (do_align)
 			addr = COLOUR_ALIGN(addr, pgoff);
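The parameter documented above is consumed at boot time. As a purely illustrative usage (bootloader syntax varies by distribution), widening the gap to 1024 pages, i.e. 4 MiB with 4 KiB pages, would be passed on the kernel command line as:

    stack_guard_gap=1024

Setting it to 1 page restores roughly the pre-patch single-guard-page behaviour; leaving it unset keeps the new 256-page (1 MiB) default.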
diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
index 385fd30b142f..96eca58b9679 100644
--- a/arch/frv/mm/elf-fdpic.c
+++ b/arch/frv/mm/elf-fdpic.c
@@ -74,7 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(current->mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			goto success;
 	}
 
@@ -89,7 +89,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
 			for (; vma; vma = vma->vm_next) {
 				if (addr > limit)
 					break;
-				if (addr + len <= vma->vm_start)
+				if (addr + len <= vm_start_gap(vma))
 					goto success;
 				addr = vma->vm_end;
 			}
@@ -104,7 +104,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
 		for (; vma; vma = vma->vm_next) {
 			if (addr > limit)
 				break;
-			if (addr + len <= vma->vm_start)
+			if (addr + len <= vm_start_gap(vma))
 				goto success;
 			addr = vma->vm_end;
 		}
diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
index 609d50056a6c..77c0aff5aaf0 100644
--- a/arch/ia64/kernel/sys_ia64.c
+++ b/arch/ia64/kernel/sys_ia64.c
@@ -27,7 +27,8 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
 	long map_shared = (flags & MAP_SHARED);
 	unsigned long start_addr, align_mask = PAGE_SIZE - 1;
 	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma;
+	struct vm_area_struct *vma, *prev;
+	unsigned long prev_end;
 
 	if (len > RGN_MAP_LIMIT)
 		return -ENOMEM;
@@ -58,7 +59,17 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
   full_search:
 	start_addr = addr = (addr + align_mask) & ~align_mask;
 
-	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
+	for (vma = find_vma_prev(mm, addr, &prev); ; prev = vma,
+						vma = vma->vm_next) {
+		if (prev) {
+			prev_end = vm_end_gap(prev);
+			if (addr < prev_end) {
+				addr = (prev_end + align_mask) & ~align_mask;
+				/* If vma already violates gap, forget it */
+				if (vma && addr > vma->vm_start)
+					addr = vma->vm_start;
+			}
+		}
 		/* At this point:  (!vma || addr < vma->vm_end). */
 		if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
 			if (start_addr != TASK_UNMAPPED_BASE) {
@@ -68,12 +79,11 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
 			}
 			return -ENOMEM;
 		}
-		if (!vma || addr + len <= vma->vm_start) {
+		if (!vma || addr + len <= vm_start_gap(vma)) {
 			/* Remember the address where we stopped this search:  */
 			mm->free_area_cache = addr + len;
 			return addr;
 		}
-		addr = (vma->vm_end + align_mask) & ~align_mask;
 	}
 }
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
index 5ca674b74737..66a1ec0a2467 100644
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
@@ -171,9 +171,9 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
 		/* At this point:  (!vmm || addr < vmm->vm_end). */
 		if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
 			return -ENOMEM;
-		if (!vmm || (addr + len) <= vmm->vm_start)
+		if (!vmm || (addr + len) <= vm_start_gap(vmm))
 			return addr;
-		addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
+		addr = ALIGN(vm_end_gap(vmm), HPAGE_SIZE);
 	}
 }
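The frv, ia64 and parisc hunks all teach the same bottom-up first-fit walk to respect the guard gap: bump the candidate address past the previous mapping's gap-extended end, then accept it only if the request also clears the next mapping's gap-extended start. The following is a toy userspace rendering of that loop; 'struct area', 'guard_gap' and the function names are simplified stand-ins, not the kernel's types or API.

    #include <stdio.h>

    struct area {
        unsigned long start, end;       /* [start, end) */
        int grows_down;                 /* stack-like mapping? */
        const struct area *next;        /* list sorted by start */
    };

    static const unsigned long guard_gap = 256UL * 4096;   /* 1 MiB */

    /* lowest address 'a' claims, counting the gap below a grows-down stack */
    static unsigned long gap_start(const struct area *a)
    {
        if (!a->grows_down)
            return a->start;
        return a->start >= guard_gap ? a->start - guard_gap : 0;
    }

    /* first-fit search at or above 'hint'; returns 0 when nothing fits */
    static unsigned long find_hole(const struct area *a, unsigned long hint,
                                   unsigned long len, unsigned long limit)
    {
        unsigned long addr = hint;

        for (;; a = a->next) {
            if (limit - len < addr)
                return 0;                       /* out of address space */
            if (!a || addr + len <= gap_start(a))
                return addr;                    /* hole found */
            if (addr < a->end)
                addr = a->end;                  /* skip this mapping */
        }
    }

    int main(void)
    {
        /* one ordinary mapping, then a stack guarded 1 MiB below it */
        struct area stack = { 0x7f0000000, 0x7f0010000, 1, NULL };
        struct area text  = { 0x400000, 0x600000, 0, &stack };

        printf("hole at %#lx\n",
               find_hole(&text, 0x400000, 0x200000, 0x800000000));
        return 0;
    }

The kernel versions additionally re-clamp the address when an existing mapping already sits inside another's gap ("If vma already violates gap, forget it"), so legacy layouts keep working.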
diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
index 302d779d5b0d..a79ddcfec1c7 100644
--- a/arch/mips/mm/mmap.c
+++ b/arch/mips/mm/mmap.c
@@ -70,6 +70,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
 	unsigned long addr = addr0;
+	unsigned long vm_start;
 	int do_color_align;
 
 	if (unlikely(len > TASK_SIZE))
 		return -ENOMEM;
@@ -103,7 +104,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
 
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 
@@ -118,7 +119,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
 			/* At this point:  (!vma || addr < vma->vm_end). */
 			if (TASK_SIZE - len < addr)
 				return -ENOMEM;
-			if (!vma || addr + len <= vma->vm_start)
+			if (!vma || addr + len <= vm_start_gap(vma))
 				return addr;
 			addr = vma->vm_end;
 			if (do_color_align)
@@ -145,7 +146,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
 		/* make sure it can fit in the remaining address space */
 		if (likely(addr > len)) {
 			vma = find_vma(mm, addr - len);
-			if (!vma || addr <= vma->vm_start) {
+			if (!vma || addr <= vm_start_gap(vma)) {
 				/* cache the address as a hint for next time */
 				return mm->free_area_cache = addr - len;
 			}
@@ -165,20 +166,22 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
 		 * return with success:
 		 */
 		vma = find_vma(mm, addr);
-		if (likely(!vma || addr + len <= vma->vm_start)) {
+		if (vma)
+			vm_start = vm_start_gap(vma);
+		if (likely(!vma || addr + len <= vm_start)) {
 			/* cache the address as a hint for next time */
 			return mm->free_area_cache = addr;
 		}
 
 		/* remember the largest hole we saw so far */
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
+		if (addr + mm->cached_hole_size < vm_start)
+			mm->cached_hole_size = vm_start - addr;
 
 		/* try just below the current vma->vm_start */
-		addr = vma->vm_start - len;
+		addr = vm_start - len;
 		if (do_color_align)
 			addr = COLOUR_ALIGN_DOWN(addr, pgoff);
-	} while (likely(len < vma->vm_start));
+	} while (likely(len < vm_start));
 
 bottomup:
 	/*
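The COLOUR_ALIGN / COLOUR_ALIGN_DOWN calls that the ARM, MIPS and SH loops keep re-applying exist because those CPUs have virtually-indexed caches: a shared mapping must be placed so its virtual address and its file offset agree modulo the aliasing window, or the same data can occupy two cache lines. A minimal sketch of the rounding, assuming an example 16 KiB window (the real mask is CPU-specific):

    #include <stdio.h>

    #define PAGE_SHIFT 12
    /* assumed 16 KiB aliasing window; illustrative, not a real CPU value */
    static const unsigned long shm_align_mask = 0x3fff;

    static unsigned long colour_align(unsigned long addr, unsigned long pgoff)
    {
        unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;

        /* round up to the window, then add the colour the offset demands */
        return ((addr + shm_align_mask) & ~shm_align_mask) + off;
    }

    int main(void)
    {
        /* page offset 1 must land 4 KiB into a 16 KiB window */
        printf("%#lx\n", colour_align(0x10000, 1));     /* prints 0x11000 */
        return 0;
    }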
diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
index 7ea75d14aa65..1d4ac8d7cdd4 100644
--- a/arch/parisc/kernel/sys_parisc.c
+++ b/arch/parisc/kernel/sys_parisc.c
@@ -35,17 +35,27 @@
 static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
 {
-	struct vm_area_struct *vma;
+	struct vm_area_struct *vma, *prev;
+	unsigned long prev_end;
 
 	addr = PAGE_ALIGN(addr);
 
-	for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
+	for (vma = find_vma_prev(current->mm, addr, &prev); ; prev = vma,
+						vma = vma->vm_next) {
+		if (prev) {
+			prev_end = vm_end_gap(prev);
+			if (addr < prev_end) {
+				addr = prev_end;
+				/* If vma already violates gap, forget it */
+				if (vma && addr > vma->vm_start)
+					addr = vma->vm_start;
+			}
+		}
 		/* At this point:  (!vma || addr < vma->vm_end). */
 		if (TASK_SIZE - len < addr)
 			return -ENOMEM;
-		if (!vma || addr + len <= vma->vm_start)
+		if (!vma || addr + len <= vm_start_gap(vma))
 			return addr;
-		addr = vma->vm_end;
 	}
 }
 
@@ -70,22 +80,32 @@ static int get_offset(struct address_space *mapping)
 static unsigned long get_shared_area(struct address_space *mapping,
 		unsigned long addr, unsigned long len, unsigned long pgoff)
 {
-	struct vm_area_struct *vma;
+	struct vm_area_struct *vma, *prev;
+	unsigned long prev_end;
 	int offset = mapping ? get_offset(mapping) : 0;
 
 	offset = (offset + (pgoff << PAGE_SHIFT)) & 0x3FF000;
 
 	addr = DCACHE_ALIGN(addr - offset) + offset;
 
-	for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
+	for (vma = find_vma_prev(current->mm, addr, &prev); ; prev = vma,
+						vma = vma->vm_next) {
+		if (prev) {
+			prev_end = vm_end_gap(prev);
+			if (addr < prev_end) {
+				addr = DCACHE_ALIGN(prev_end - offset) + offset;
+				if (addr < prev_end)	/* handle wraparound */
+					return -ENOMEM;
+				/* If vma already violates gap, forget it */
+				if (vma && addr > vma->vm_start)
+					addr = vma->vm_start;
+			}
+		}
 		/* At this point:  (!vma || addr < vma->vm_end). */
 		if (TASK_SIZE - len < addr)
 			return -ENOMEM;
-		if (!vma || addr + len <= vma->vm_start)
+		if (!vma || addr + len <= vm_start_gap(vma))
 			return addr;
-		addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
-		if (addr < vma->vm_end) /* handle wraparound */
-			return -ENOMEM;
 	}
 }
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index 73709f7ce92c..57654c9b70f8 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
 	if ((mm->task_size - len) < addr)
 		return 0;
 	vma = find_vma(mm, addr);
-	return (!vma || (addr + len) <= vma->vm_start);
+	return (!vma || (addr + len) <= vm_start_gap(vma));
 }
 
 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
@@ -227,7 +227,7 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
 					      int psize, int use_cache)
 {
 	struct vm_area_struct *vma;
-	unsigned long start_addr, addr;
+	unsigned long start_addr, addr, vm_start;
 	struct slice_mask mask;
 	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
 
@@ -256,7 +256,9 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
 			addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
 			continue;
 		}
-		if (!vma || addr + len <= vma->vm_start) {
+		if (vma)
+			vm_start = vm_start_gap(vma);
+		if (!vma || addr + len <= vm_start) {
 			/*
 			 * Remember the place where we stopped the search:
 			 */
@@ -264,8 +266,8 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
 			mm->free_area_cache = addr + len;
 			return addr;
 		}
-		if (use_cache && (addr + mm->cached_hole_size) < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
+		if (use_cache && (addr + mm->cached_hole_size) < vm_start)
+			mm->cached_hole_size = vm_start - addr;
 		addr = vma->vm_end;
 	}
 
@@ -284,7 +286,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
 					     int psize, int use_cache)
 {
 	struct vm_area_struct *vma;
-	unsigned long addr;
+	unsigned long addr, vm_start;
 	struct slice_mask mask;
 	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
 
@@ -336,7 +338,9 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
 		 * return with success:
 		 */
 		vma = find_vma(mm, addr);
-		if (!vma || (addr + len) <= vma->vm_start) {
+		if (vma)
+			vm_start = vm_start_gap(vma);
+		if (!vma || (addr + len) <= vm_start) {
 			/* remember the address as a hint for next time */
 			if (use_cache)
 				mm->free_area_cache = addr;
@@ -344,11 +348,11 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
 		}
 
 		/* remember the largest hole we saw so far */
-		if (use_cache && (addr + mm->cached_hole_size) < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
+		if (use_cache && (addr + mm->cached_hole_size) < vm_start)
+			mm->cached_hole_size = vm_start - addr;
 
 		/* try just below the current vma->vm_start */
-		addr = vma->vm_start;
+		addr = vm_start;
 	}
 
 	/*
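Nearly every loop in this patch updates the same pair of per-mm fields: free_area_cache (where the last scan stopped) and cached_hole_size (the largest hole skipped below that point). A toy version of that bookkeeping, with 'struct mm_cache' standing in for the two mm_struct fields (names mirror the kernel's but the struct is illustrative):

    #include <assert.h>

    struct mm_cache {
        unsigned long free_area_cache;  /* where the last scan stopped */
        unsigned long cached_hole_size; /* largest hole skipped below it */
    };

    static unsigned long scan_start(struct mm_cache *mm, unsigned long len,
                                    unsigned long unmapped_base)
    {
        /* no skipped hole was bigger than cached_hole_size, so a larger
         * request cannot fit below the cache point: resume from it */
        if (len > mm->cached_hole_size)
            return mm->free_area_cache;

        /* a smaller request might fit a hole we skipped earlier:
         * rescan from the bottom and start measuring holes again */
        mm->cached_hole_size = 0;
        return unmapped_base;
    }

    int main(void)
    {
        struct mm_cache mm = { 0x500000, 0x4000 };

        assert(scan_start(&mm, 0x8000, 0x100000) == 0x500000);
        assert(scan_start(&mm, 0x2000, 0x100000) == 0x100000);
        assert(mm.cached_hole_size == 0);
        return 0;
    }

This is why the conversion is careful to compare against the gap-adjusted vm_start: a hole's usable size shrinks once the guard gap is counted, and caching the un-shrunk size would hand out addresses inside the gap.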
diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
index afeb710ec5c3..22eff46d8ef5 100644
--- a/arch/sh/mm/mmap.c
+++ b/arch/sh/mm/mmap.c
@@ -47,7 +47,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 {
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
-	unsigned long start_addr;
+	unsigned long start_addr, vm_start;
 	int do_colour_align;
 
 	if (flags & MAP_FIXED) {
@@ -75,7 +75,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 
@@ -106,15 +106,17 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 			}
 			return -ENOMEM;
 		}
-		if (likely(!vma || addr + len <= vma->vm_start)) {
+		if (vma)
+			vm_start = vm_start_gap(vma);
+		if (likely(!vma || addr + len <= vm_start)) {
 			/*
 			 * Remember the place where we stopped the search:
 			 */
 			mm->free_area_cache = addr + len;
 			return addr;
 		}
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
+		if (addr + mm->cached_hole_size < vm_start)
+			mm->cached_hole_size = vm_start - addr;
 
 		addr = vma->vm_end;
 		if (do_colour_align)
@@ -130,6 +132,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 	struct vm_area_struct *vma;
 	struct mm_struct *mm = current->mm;
 	unsigned long addr = addr0;
+	unsigned long vm_start;
 	int do_colour_align;
 
 	if (flags & MAP_FIXED) {
@@ -158,7 +161,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 
@@ -179,7 +182,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 	/* make sure it can fit in the remaining address space */
 	if (likely(addr > len)) {
 		vma = find_vma(mm, addr-len);
-		if (!vma || addr <= vma->vm_start) {
+		if (!vma || addr <= vm_start_gap(vma)) {
 			/* remember the address as a hint for next time */
 			return (mm->free_area_cache = addr-len);
 		}
@@ -199,20 +202,22 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 		 * return with success:
 		 */
 		vma = find_vma(mm, addr);
-		if (likely(!vma || addr+len <= vma->vm_start)) {
+		if (vma)
+			vm_start = vm_start_gap(vma);
+		if (likely(!vma || addr + len <= vm_start)) {
 			/* remember the address as a hint for next time */
 			return (mm->free_area_cache = addr);
 		}
 
 		/* remember the largest hole we saw so far */
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
+		if (addr + mm->cached_hole_size < vm_start)
+			mm->cached_hole_size = vm_start - addr;
 
 		/* try just below the current vma->vm_start */
-		addr = vma->vm_start-len;
+		addr = vm_start-len;
 		if (do_colour_align)
 			addr = COLOUR_ALIGN_DOWN(addr, pgoff);
-	} while (likely(len < vma->vm_start));
+	} while (likely(len < vm_start));
 
 bottomup:
 	/*
diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
index 42b282fa6112..eeae89bada55 100644
--- a/arch/sparc/kernel/sys_sparc_32.c
+++ b/arch/sparc/kernel/sys_sparc_32.c
@@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
 		}
 		if (TASK_SIZE - PAGE_SIZE - len < addr)
 			return -ENOMEM;
-		if (!vmm || addr + len <= vmm->vm_start)
+		if (!vmm || addr + len <= vm_start_gap(vmm))
 			return addr;
 		addr = vmm->vm_end;
 		if (flags & MAP_SHARED)
(!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } @@ -181,15 +181,17 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi } return -ENOMEM; } - if (likely(!vma || addr + len <= vma->vm_start)) { + if (vma) + vm_start = vm_start_gap(vma); + if (likely(!vma || addr + len <= vm_start)) { /* * Remember the place where we stopped the search: */ mm->free_area_cache = addr + len; return addr; } - if (addr + mm->cached_hole_size < vma->vm_start) - mm->cached_hole_size = vma->vm_start - addr; + if (addr + mm->cached_hole_size < vm_start) + mm->cached_hole_size = vm_start - addr; addr = vma->vm_end; if (do_color_align) @@ -205,7 +207,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, struct vm_area_struct *vma; struct mm_struct *mm = current->mm; unsigned long task_size = STACK_TOP32; - unsigned long addr = addr0; + unsigned long addr = addr0, vm_start; int do_color_align; /* This should only ever run for 32-bit processes. */ @@ -237,7 +239,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, vma = find_vma(mm, addr); if (task_size - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } @@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, /* make sure it can fit in the remaining address space */ if (likely(addr > len)) { vma = find_vma(mm, addr-len); - if (!vma || addr <= vma->vm_start) { + if (!vma || addr <= vm_start_gap(vma)) { /* remember the address as a hint for next time */ return (mm->free_area_cache = addr-len); } @@ -278,20 +280,22 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, * return with success: */ vma = find_vma(mm, addr); - if (likely(!vma || addr+len <= vma->vm_start)) { + if (vma) + vm_start = vm_start_gap(vma); + if (likely(!vma || addr + len <= vm_start)) { /* remember the address as a hint for next time */ return (mm->free_area_cache = addr); } /* remember the largest hole we saw so far */ - if (addr + mm->cached_hole_size < vma->vm_start) - mm->cached_hole_size = vma->vm_start - addr; + if (addr + mm->cached_hole_size < vm_start) + mm->cached_hole_size = vm_start - addr; /* try just below the current vma->vm_start */ - addr = vma->vm_start-len; + addr = vm_start - len; if (do_color_align) addr = COLOUR_ALIGN_DOWN(addr, pgoff); - } while (likely(len < vma->vm_start)); + } while (likely(len < vm_start)); bottomup: /* diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c index 07e14535375c..e13e85dbfcd9 100644 --- a/arch/sparc/mm/hugetlbpage.c +++ b/arch/sparc/mm/hugetlbpage.c @@ -33,7 +33,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp, struct mm_struct *mm = current->mm; struct vm_area_struct * vma; unsigned long task_size = TASK_SIZE; - unsigned long start_addr; + unsigned long start_addr, vm_start; if (test_thread_flag(TIF_32BIT)) task_size = STACK_TOP32; @@ -67,15 +67,17 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp, } return -ENOMEM; } - if (likely(!vma || addr + len <= vma->vm_start)) { + if (vma) + vm_start = vm_start_gap(vma); + if (likely(!vma || addr + len <= vm_start)) { /* * Remember the place where we stopped the search: */ mm->free_area_cache = addr + len; return addr; } - if (addr + mm->cached_hole_size < vma->vm_start) - mm->cached_hole_size = vma->vm_start - addr; + if (addr + mm->cached_hole_size < vm_start) + 
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index 07e14535375c..e13e85dbfcd9 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -33,7 +33,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct * vma;
 	unsigned long task_size = TASK_SIZE;
-	unsigned long start_addr;
+	unsigned long start_addr, vm_start;
 
 	if (test_thread_flag(TIF_32BIT))
 		task_size = STACK_TOP32;
@@ -67,15 +67,17 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
 			}
 			return -ENOMEM;
 		}
-		if (likely(!vma || addr + len <= vma->vm_start)) {
+		if (vma)
+			vm_start = vm_start_gap(vma);
+		if (likely(!vma || addr + len <= vm_start)) {
 			/*
 			 * Remember the place where we stopped the search:
 			 */
 			mm->free_area_cache = addr + len;
 			return addr;
 		}
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
+		if (addr + mm->cached_hole_size < vm_start)
+			mm->cached_hole_size = vm_start - addr;
 
 		addr = ALIGN(vma->vm_end, HPAGE_SIZE);
 	}
 
@@ -90,6 +92,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 	struct vm_area_struct *vma;
 	struct mm_struct *mm = current->mm;
 	unsigned long addr = addr0;
+	unsigned long vm_start;
 
 	/* This should only ever run for 32-bit processes.  */
 	BUG_ON(!test_thread_flag(TIF_32BIT));
@@ -106,7 +109,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 	/* make sure it can fit in the remaining address space */
 	if (likely(addr > len)) {
 		vma = find_vma(mm, addr-len);
-		if (!vma || addr <= vma->vm_start) {
+		if (!vma || addr <= vm_start_gap(vma)) {
 			/* remember the address as a hint for next time */
 			return (mm->free_area_cache = addr-len);
 		}
@@ -124,18 +127,20 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 		 * return with success:
 		 */
 		vma = find_vma(mm, addr);
-		if (likely(!vma || addr+len <= vma->vm_start)) {
+		if (vma)
+			vm_start = vm_start_gap(vma);
+		if (likely(!vma || addr + len <= vm_start)) {
 			/* remember the address as a hint for next time */
 			return (mm->free_area_cache = addr);
 		}
 
 		/* remember the largest hole we saw so far */
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
+		if (addr + mm->cached_hole_size < vm_start)
+			mm->cached_hole_size = vm_start - addr;
 
 		/* try just below the current vma->vm_start */
-		addr = (vma->vm_start-len) & HPAGE_MASK;
-	} while (likely(len < vma->vm_start));
+		addr = (vm_start - len) & HPAGE_MASK;
+	} while (likely(len < vm_start));
 
 bottomup:
 	/*
@@ -182,7 +187,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 		addr = ALIGN(addr, HPAGE_SIZE);
 		vma = find_vma(mm, addr);
 		if (task_size - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 	if (mm->get_unmapped_area == arch_get_unmapped_area)
diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
index 42cfcba4e1ef..184e0339c056 100644
--- a/arch/tile/mm/hugetlbpage.c
+++ b/arch/tile/mm/hugetlbpage.c
@@ -159,7 +159,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
 	struct hstate *h = hstate_file(file);
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
-	unsigned long start_addr;
+	unsigned long start_addr, vm_start;
 
 	if (len > mm->cached_hole_size) {
 		start_addr = mm->free_area_cache;
@@ -185,12 +185,14 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
 			}
 			return -ENOMEM;
 		}
-		if (!vma || addr + len <= vma->vm_start) {
+		if (vma)
+			vm_start = vm_start_gap(vma);
+		if (!vma || addr + len <= vm_start) {
 			mm->free_area_cache = addr + len;
 			return addr;
 		}
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
+		if (addr + mm->cached_hole_size < vm_start)
+			mm->cached_hole_size = vm_start - addr;
 		addr = ALIGN(vma->vm_end, huge_page_size(h));
 	}
 }
@@ -204,6 +206,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
 	struct vm_area_struct *vma, *prev_vma;
 	unsigned long base = mm->mmap_base, addr = addr0;
 	unsigned long largest_hole = mm->cached_hole_size;
+	unsigned long vm_start;
 	int first_time = 1;
 
 	/* don't allow allocations above current base */
@@ -234,9 +237,10 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
 		/*
 		 * new region fits between prev_vma->vm_end and
-		 * vma->vm_start, use it:
+		 * vm_start, use it:
 		 */
-		if (addr + len <= vma->vm_start &&
+		vm_start = vm_start_gap(vma);
+		if (addr + len <= vm_start &&
 		    (!prev_vma || (addr >= prev_vma->vm_end))) {
 			/* remember the address as a hint for next time */
 			mm->cached_hole_size = largest_hole;
@@ -251,13 +255,13 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
 		}
 
 		/* remember the largest hole we saw so far */
-		if (addr + largest_hole < vma->vm_start)
-			largest_hole = vma->vm_start - addr;
+		if (addr + largest_hole < vm_start)
+			largest_hole = vm_start - addr;
 
 		/* try just below the current vma->vm_start */
-		addr = (vma->vm_start - len) & huge_page_mask(h);
+		addr = (vm_start - len) & huge_page_mask(h);
 
-	} while (len <= vma->vm_start);
+	} while (len <= vm_start);
 
 fail:
 	/*
@@ -312,7 +316,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 		addr = ALIGN(addr, huge_page_size(h));
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 	if (current->mm->get_unmapped_area == arch_get_unmapped_area)
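The *_topdown() functions converted above all follow one mirrored pattern: start from the highest candidate, and on each miss re-anchor the next attempt just below the blocking mapping's gap-adjusted start. A toy rendering of that walk (simplified types, not the kernel's; gap_start is assumed precomputed):

    #include <stdio.h>

    struct area {
        unsigned long start, end;
        unsigned long gap_start;        /* start minus any guard gap */
        const struct area *next;        /* list sorted by start */
    };

    /* find_vma() semantics: first area with addr < end */
    static const struct area *find_first(const struct area *a,
                                         unsigned long addr)
    {
        while (a && addr >= a->end)
            a = a->next;
        return a;
    }

    static unsigned long find_hole_topdown(const struct area *list,
                                           unsigned long base,
                                           unsigned long len)
    {
        unsigned long addr = base - len;        /* highest candidate first */
        const struct area *a;

        do {
            a = find_first(list, addr);
            if (!a || addr + len <= a->gap_start)
                return addr;                    /* fits below 'a' */
            if (a->gap_start < len)
                return 0;                       /* would underflow: give up */
            addr = a->gap_start - len;          /* retry just below the gap */
        } while (1);
    }

    int main(void)
    {
        /* one stack whose guard gap reaches 1 MiB below its vm_start */
        struct area stack = { 0x7ff00000, 0x7ff20000, 0x7fe00000, NULL };

        printf("%#lx\n", find_hole_topdown(&stack, 0x7ff00000, 0x10000));
        return 0;
    }

In the kernel, a failed top-down pass falls back to the bottom-up allocator ("bottomup:" labels above) rather than returning 0.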
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index cdb2fc9235b7..0dbfff8e1a58 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -126,7 +126,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 {
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
-	unsigned long start_addr;
+	unsigned long start_addr, vm_start;
 	unsigned long begin, end;
 
 	if (flags & MAP_FIXED)
@@ -141,7 +141,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(mm, addr);
 		if (end - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 	if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
@@ -172,15 +172,17 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 			}
 			return -ENOMEM;
 		}
-		if (!vma || addr + len <= vma->vm_start) {
+		if (vma)
+			vm_start = vm_start_gap(vma);
+		if (!vma || addr + len <= vm_start) {
 			/*
 			 * Remember the place where we stopped the search:
 			 */
 			mm->free_area_cache = addr + len;
 			return addr;
 		}
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
+		if (addr + mm->cached_hole_size < vm_start)
+			mm->cached_hole_size = vm_start - addr;
 
 		addr = vma->vm_end;
 		addr = align_addr(addr, filp, 0);
@@ -196,6 +198,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 	struct vm_area_struct *vma;
 	struct mm_struct *mm = current->mm;
 	unsigned long addr = addr0;
+	unsigned long vm_start;
 
 	/* requested length too big for entire address space */
 	if (len > TASK_SIZE)
@@ -213,7 +216,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 
@@ -232,7 +235,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 					  ALIGN_TOPDOWN);
 
 		vma = find_vma(mm, tmp_addr);
-		if (!vma || tmp_addr + len <= vma->vm_start)
+		if (!vma || tmp_addr + len <= vm_start_gap(vma))
 			/* remember the address as a hint for next time */
 			return mm->free_area_cache = tmp_addr;
 	}
@@ -251,17 +254,19 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 		 * return with success:
 		 */
 		vma = find_vma(mm, addr);
-		if (!vma || addr+len <= vma->vm_start)
+		if (vma)
+			vm_start = vm_start_gap(vma);
+		if (!vma || addr + len <= vm_start)
 			/* remember the address as a hint for next time */
 			return mm->free_area_cache = addr;
 
 		/* remember the largest hole we saw so far */
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
+		if (addr + mm->cached_hole_size < vm_start)
+			mm->cached_hole_size = vm_start - addr;
 
 		/* try just below the current vma->vm_start */
-		addr = vma->vm_start-len;
-	} while (len < vma->vm_start);
+		addr = vm_start - len;
+	} while (len < vm_start);
 
 bottomup:
 	/*
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index df7d12c9af24..67b87605e223 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -277,7 +277,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
 	struct hstate *h = hstate_file(file);
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
-	unsigned long start_addr;
+	unsigned long start_addr, vm_start;
 
 	if (len > mm->cached_hole_size) {
 		start_addr = mm->free_area_cache;
@@ -303,12 +303,14 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
 			}
 			return -ENOMEM;
 		}
-		if (!vma || addr + len <= vma->vm_start) {
+		if (vma)
+			vm_start = vm_start_gap(vma);
+		if (!vma || addr + len <= vm_start) {
 			mm->free_area_cache = addr + len;
 			return addr;
 		}
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
+		if (addr + mm->cached_hole_size < vm_start)
+			mm->cached_hole_size = vm_start - addr;
 		addr = ALIGN(vma->vm_end, huge_page_size(h));
 	}
 }
@@ -322,6 +324,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
 	struct vm_area_struct *vma, *prev_vma;
 	unsigned long base = mm->mmap_base, addr = addr0;
 	unsigned long largest_hole = mm->cached_hole_size;
+	unsigned long vm_start;
 	int first_time = 1;
 
 	/* don't allow allocations above current base */
@@ -351,7 +354,8 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
 		 * new region fits between prev_vma->vm_end and
 		 * vma->vm_start, use it:
 		 */
-		if (addr + len <= vma->vm_start &&
+		vm_start = vm_start_gap(vma);
+		if (addr + len <= vm_start &&
 		    (!prev_vma || (addr >= prev_vma->vm_end))) {
 			/* remember the address as a hint for next time */
 			mm->cached_hole_size = largest_hole;
@@ -365,12 +369,12 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
 		}
 
 		/* remember the largest hole we saw so far */
-		if (addr + largest_hole < vma->vm_start)
-			largest_hole = vma->vm_start - addr;
+		if (addr + largest_hole < vm_start)
+			largest_hole = vm_start - addr;
 
 		/* try just below the current vma->vm_start */
-		addr = (vma->vm_start - len) & huge_page_mask(h);
-	} while (len <= vma->vm_start);
+		addr = (vm_start - len) & huge_page_mask(h);
+	} while (len <= vm_start);
 
 fail:
 	/*
@@ -426,7 +430,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 		addr = ALIGN(addr, huge_page_size(h));
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 	if (mm->get_unmapped_area == arch_get_unmapped_area)
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 55573322d1bb..99c51d6cb925 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -150,7 +150,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 		addr = ALIGN(addr, huge_page_size(h));
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 
@@ -176,7 +176,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 			return -ENOMEM;
 		}
 
-		if (!vma || addr + len <= vma->vm_start)
+		if (!vma || addr + len <= vm_start_gap(vma))
 			return addr;
 		addr = ALIGN(vma->vm_end, huge_page_size(h));
 	}
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index de404f204b6a..6037a132fc7f 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -230,11 +230,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
 
 	/* We don't show the stack guard page in /proc/maps */
 	start = vma->vm_start;
-	if (stack_guard_page_start(vma, start))
-		start += PAGE_SIZE;
 	end = vma->vm_end;
-	if (stack_guard_page_end(vma, end))
-		end -= PAGE_SIZE;
 
 	seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
 			start,
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 16394da01d2d..19f9043e3692 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1015,34 +1015,6 @@ int set_page_dirty(struct page *page);
 int set_page_dirty_lock(struct page *page);
 int clear_page_dirty_for_io(struct page *page);
 
-/* Is the vma a continuation of the stack vma above it? */
-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
-{
-	return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
-}
-
-static inline int stack_guard_page_start(struct vm_area_struct *vma,
-					     unsigned long addr)
-{
-	return (vma->vm_flags & VM_GROWSDOWN) &&
-		(vma->vm_start == addr) &&
-		!vma_growsdown(vma->vm_prev, addr);
-}
-
-/* Is the vma a continuation of the stack vma below it? */
-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
-{
-	return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
-}
-
-static inline int stack_guard_page_end(struct vm_area_struct *vma,
-					   unsigned long addr)
-{
-	return (vma->vm_flags & VM_GROWSUP) &&
-		(vma->vm_end == addr) &&
-		!vma_growsup(vma->vm_next, addr);
-}
-
 extern unsigned long move_page_tables(struct vm_area_struct *vma,
 		unsigned long old_addr, struct vm_area_struct *new_vma,
 		unsigned long new_addr, unsigned long len);
@@ -1462,6 +1434,7 @@ unsigned long ra_submit(struct file_ra_state *ra,
 			struct address_space *mapping,
 			struct file *filp);
 
+extern unsigned long stack_guard_gap;
 /* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
 extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
 
@@ -1490,6 +1463,30 @@ static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * m
 	return vma;
 }
 
+static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
+{
+	unsigned long vm_start = vma->vm_start;
+
+	if (vma->vm_flags & VM_GROWSDOWN) {
+		vm_start -= stack_guard_gap;
+		if (vm_start > vma->vm_start)
+			vm_start = 0;
+	}
+	return vm_start;
+}
+
+static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
+{
+	unsigned long vm_end = vma->vm_end;
+
+	if (vma->vm_flags & VM_GROWSUP) {
+		vm_end += stack_guard_gap;
+		if (vm_end < vma->vm_end)
+			vm_end = -PAGE_SIZE;
+	}
+	return vm_end;
+}
+
 static inline unsigned long vma_pages(struct vm_area_struct *vma)
 {
 	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
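The two helpers added to include/linux/mm.h are the heart of the patch: every allocator now asks "where does this VMA effectively start/end once its guard gap is counted" instead of reading vm_start/vm_end directly. A self-contained userspace check of their arithmetic, including both wraparound clamps (assumes a 64-bit unsigned long and 4 KiB pages; 'struct vma' is a minimal stand-in, not the kernel type):

    #include <assert.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define VM_GROWSDOWN 0x1UL
    #define VM_GROWSUP   0x2UL

    struct vma {
        unsigned long vm_start, vm_end, vm_flags;
    };

    static unsigned long stack_guard_gap = 256UL << 12;     /* 256 pages */

    static unsigned long vm_start_gap(const struct vma *vma)
    {
        unsigned long vm_start = vma->vm_start;

        if (vma->vm_flags & VM_GROWSDOWN) {
            vm_start -= stack_guard_gap;
            if (vm_start > vma->vm_start)       /* wrapped below 0 */
                vm_start = 0;
        }
        return vm_start;
    }

    static unsigned long vm_end_gap(const struct vma *vma)
    {
        unsigned long vm_end = vma->vm_end;

        if (vma->vm_flags & VM_GROWSUP) {
            vm_end += stack_guard_gap;
            if (vm_end < vma->vm_end)           /* wrapped past ~0UL */
                vm_end = -PAGE_SIZE;
        }
        return vm_end;
    }

    int main(void)
    {
        /* ordinary stack: the gap extends 1 MiB below vm_start */
        struct vma stack = { 0x7ffffff00000, 0x7ffffff21000, VM_GROWSDOWN };
        assert(vm_start_gap(&stack) == 0x7ffffff00000 - (256UL << 12));

        /* stack too close to address 0: subtraction wraps, clamp to 0 */
        struct vma low = { 0x80000, 0xa0000, VM_GROWSDOWN };
        assert(vm_start_gap(&low) == 0);

        /* grows-up segment near the top: addition wraps, clamp high */
        struct vma top = { -2 * PAGE_SIZE, -PAGE_SIZE, VM_GROWSUP };
        assert(vm_end_gap(&top) == -PAGE_SIZE);

        printf("gap helpers behave as expected\n");
        return 0;
    }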
stack_guard_page_end(vma, addr+PAGE_SIZE); -} - /** * __get_user_pages() - pin user pages in memory * @tsk: task_struct of target task @@ -1761,11 +1755,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, int ret; unsigned int fault_flags = 0; - /* For mlock, just skip the stack guard page. */ - if (foll_flags & FOLL_MLOCK) { - if (stack_guard_page(vma, start)) - goto next_page; - } if (foll_flags & FOLL_WRITE) fault_flags |= FAULT_FLAG_WRITE; if (nonblocking) @@ -3122,40 +3111,6 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, } /* - * This is like a special single-page "expand_{down|up}wards()", - * except we must first make sure that 'address{-|+}PAGE_SIZE' - * doesn't hit another vma. - */ -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address) -{ - address &= PAGE_MASK; - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) { - struct vm_area_struct *prev = vma->vm_prev; - - /* - * Is there a mapping abutting this one below? - * - * That's only ok if it's the same stack mapping - * that has gotten split.. - */ - if (prev && prev->vm_end == address) - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM; - - return expand_downwards(vma, address - PAGE_SIZE); - } - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) { - struct vm_area_struct *next = vma->vm_next; - - /* As VM_GROWSDOWN but s/below/above/ */ - if (next && next->vm_start == address + PAGE_SIZE) - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM; - - return expand_upwards(vma, address + PAGE_SIZE); - } - return 0; -} - -/* * We enter with non-exclusive mmap_sem (to exclude vma changes, * but allow concurrent faults), and pte mapped but not yet locked. * We return with mmap_sem still held, but pte unmapped and unlocked. @@ -3174,10 +3129,6 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, if (vma->vm_flags & VM_SHARED) return VM_FAULT_SIGBUS; - /* Check if we need to add a guard page to the stack */ - if (check_stack_guard_page(vma, address) < 0) - return VM_FAULT_SIGSEGV; - /* Use the zero-page for reads */ if (!(flags & FAULT_FLAG_WRITE)) { entry = pte_mkspecial(pfn_pte(my_zero_pfn(address), diff --git a/mm/mmap.c b/mm/mmap.c index 94f4e3444ae5..c7cbb405d5b3 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -245,6 +245,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk) unsigned long rlim, retval; unsigned long newbrk, oldbrk; struct mm_struct *mm = current->mm; + struct vm_area_struct *next; unsigned long min_brk; down_write(&mm->mmap_sem); @@ -289,7 +290,8 @@ SYSCALL_DEFINE1(brk, unsigned long, brk) } /* Check against existing mmap mappings. */ - if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE)) + next = find_vma(mm, oldbrk); + if (next && newbrk + PAGE_SIZE > vm_start_gap(next)) goto out; /* Ok, looks good - let it rip. 
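With the per-fault check_stack_guard_page() logic deleted, the protection now lives entirely in the placement and expansion paths. A hypothetical userspace probe of the resulting behaviour is sketched below. Everything about it is environment-dependent: it assumes 4 KiB pages, a raised stack limit (e.g. ulimit -s unlimited), and that mmap honours the address hint, none of which is guaranteed; it is meant to show the shape of an experiment, not to be a reliable test.

    #include <stdio.h>
    #include <stdint.h>
    #include <sys/mman.h>

    static uintptr_t landmark;

    static void recurse(void)
    {
        volatile char buf[4096];

        buf[0] = 1;                     /* fault in one new stack page */
        if ((uintptr_t)buf > landmark + 512 * 4096)
            recurse();                  /* still well above the gap */
        buf[0] = 2;                     /* keep the frame live */
    }

    int main(void)
    {
        char probe;
        void *p;

        /* ask for a mapping ~64 MiB below the current stack page; the
         * kernel may place it elsewhere, so print what we actually got */
        p = mmap((void *)((uintptr_t)&probe - 64UL * 1024 * 1024), 4096,
                 PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return 1;
        landmark = (uintptr_t)p;
        printf("landmark %p, stack near %p\n", p, (void *)&probe);
        recurse();
        puts("stopped ~2 MiB above the landmark, outside the gap");
        return 0;
    }

On a patched kernel, letting the recursion run closer than stack_guard_gap to the landmark would make the stack expansion fail and the process take SIGSEGV, instead of the stack silently abutting the foreign mapping.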
diff --git a/mm/mmap.c b/mm/mmap.c
index 94f4e3444ae5..c7cbb405d5b3 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -245,6 +245,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
 	unsigned long rlim, retval;
 	unsigned long newbrk, oldbrk;
 	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *next;
 	unsigned long min_brk;
 
 	down_write(&mm->mmap_sem);
@@ -289,7 +290,8 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
 	}
 
 	/* Check against existing mmap mappings. */
-	if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
+	next = find_vma(mm, oldbrk);
+	if (next && newbrk + PAGE_SIZE > vm_start_gap(next))
 		goto out;
 
 	/* Ok, looks good - let it rip. */
@@ -1368,8 +1370,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		unsigned long len, unsigned long pgoff, unsigned long flags)
 {
 	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma;
-	unsigned long start_addr;
+	struct vm_area_struct *vma, *prev;
+	unsigned long start_addr, vm_start, prev_end;
 
 	if (len > TASK_SIZE - mmap_min_addr)
 		return -ENOMEM;
@@ -1379,9 +1381,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 
 	if (addr) {
 		addr = PAGE_ALIGN(addr);
-		vma = find_vma(mm, addr);
+		vma = find_vma_prev(mm, addr, &prev);
 		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)) &&
+		    (!prev || addr >= vm_end_gap(prev)))
 			return addr;
 	}
 	if (len > mm->cached_hole_size) {
@@ -1392,7 +1395,17 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	}
 
 full_search:
-	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
+	for (vma = find_vma_prev(mm, addr, &prev); ; prev = vma,
+						vma = vma->vm_next) {
+		if (prev) {
+			prev_end = vm_end_gap(prev);
+			if (addr < prev_end) {
+				addr = prev_end;
+				/* If vma already violates gap, forget it */
+				if (vma && addr > vma->vm_start)
+					addr = vma->vm_start;
+			}
+		}
 		/* At this point:  (!vma || addr < vma->vm_end). */
 		if (TASK_SIZE - len < addr) {
 			/*
@@ -1407,16 +1420,16 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 			}
 			return -ENOMEM;
 		}
-		if (!vma || addr + len <= vma->vm_start) {
+		vm_start = vma ? vm_start_gap(vma) : TASK_SIZE;
+		if (addr + len <= vm_start) {
 			/*
 			 * Remember the place where we stopped the search:
 			 */
 			mm->free_area_cache = addr + len;
 			return addr;
 		}
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
-		addr = vma->vm_end;
+		if (addr + mm->cached_hole_size < vm_start)
+			mm->cached_hole_size = vm_start - addr;
 	}
 }
 #endif
@@ -1442,9 +1455,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 			  const unsigned long len, const unsigned long pgoff,
 			  const unsigned long flags)
 {
-	struct vm_area_struct *vma;
+	struct vm_area_struct *vma, *prev;
 	struct mm_struct *mm = current->mm;
 	unsigned long addr = addr0;
+	unsigned long vm_start, prev_end;
 	unsigned long low_limit = max(PAGE_SIZE, mmap_min_addr);
 
 	/* requested length too big for entire address space */
@@ -1457,9 +1471,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 	/* requesting a specific address */
 	if (addr) {
 		addr = PAGE_ALIGN(addr);
-		vma = find_vma(mm, addr);
+		vma = find_vma_prev(mm, addr, &prev);
 		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)) &&
+		    (!prev || addr >= vm_end_gap(prev)))
 			return addr;
 	}
 
@@ -1474,8 +1489,9 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 
 	/* make sure it can fit in the remaining address space */
 	if (addr >= low_limit + len) {
-		vma = find_vma(mm, addr-len);
-		if (!vma || addr <= vma->vm_start)
+		vma = find_vma_prev(mm, addr-len, &prev);
+		if ((!vma || addr <= vm_start_gap(vma)) &&
+		    (!prev || addr-len >= vm_end_gap(prev)))
 			/* remember the address as a hint for next time */
 			return (mm->free_area_cache = addr-len);
 	}
@@ -1491,18 +1507,21 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 		 * else if new region fits below vma->vm_start,
 		 * return with success:
 		 */
-		vma = find_vma(mm, addr);
-		if (!vma || addr+len <= vma->vm_start)
+		vma = find_vma_prev(mm, addr, &prev);
+		vm_start = vma ? vm_start_gap(vma) : mm->mmap_base;
+		prev_end = prev ? vm_end_gap(prev) : low_limit;
+
+		if (addr + len <= vm_start && addr >= prev_end)
 			/* remember the address as a hint for next time */
 			return (mm->free_area_cache = addr);
 
 		/* remember the largest hole we saw so far */
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
+		if (addr + mm->cached_hole_size < vm_start)
+			mm->cached_hole_size = vm_start - addr;
 
 		/* try just below the current vma->vm_start */
-		addr = vma->vm_start-len;
-	} while (vma->vm_start >= low_limit + len);
+		addr = vm_start - len;
+	} while (vm_start >= low_limit + len);
 
 bottomup:
 	/*
@@ -1607,39 +1626,27 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
 
 EXPORT_SYMBOL(find_vma);
 
-/* Same as find_vma, but also return a pointer to the previous VMA in *pprev. */
+/*
+ * Same as find_vma, but also return a pointer to the previous VMA in *pprev.
+ */
 struct vm_area_struct *
 find_vma_prev(struct mm_struct *mm, unsigned long addr,
 			struct vm_area_struct **pprev)
 {
-	struct vm_area_struct *vma = NULL, *prev = NULL;
-	struct rb_node *rb_node;
-	if (!mm)
-		goto out;
-
-	/* Guard against addr being lower than the first VMA */
-	vma = mm->mmap;
-
-	/* Go through the RB tree quickly. */
-	rb_node = mm->mm_rb.rb_node;
-
-	while (rb_node) {
-		struct vm_area_struct *vma_tmp;
-		vma_tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);
+	struct vm_area_struct *vma;
 
-		if (addr < vma_tmp->vm_end) {
-			rb_node = rb_node->rb_left;
-		} else {
-			prev = vma_tmp;
-			if (!prev->vm_next || (addr < prev->vm_next->vm_end))
-				break;
+	vma = find_vma(mm, addr);
+	if (vma) {
+		*pprev = vma->vm_prev;
+	} else {
+		struct rb_node *rb_node = mm->mm_rb.rb_node;
+		*pprev = NULL;
+		while (rb_node) {
+			*pprev = rb_entry(rb_node, struct vm_area_struct, vm_rb);
 			rb_node = rb_node->rb_right;
 		}
 	}
-
-out:
-	*pprev = prev;
-	return prev ? prev->vm_next : vma;
+	return vma;
 }
 
 /*
@@ -1647,21 +1654,19 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
  * update accounting. This is shared with both the
  * grow-up and grow-down cases.
  */
-static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow)
+static int acct_stack_growth(struct vm_area_struct *vma,
+			     unsigned long size, unsigned long grow)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	struct rlimit *rlim = current->signal->rlim;
-	unsigned long new_start, actual_size;
+	unsigned long new_start;
 
 	/* address space limit tests */
 	if (!may_expand_vm(mm, grow))
 		return -ENOMEM;
 
 	/* Stack limit test */
-	actual_size = size;
-	if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
-		actual_size -= PAGE_SIZE;
-	if (actual_size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
+	if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
 		return -ENOMEM;
 
 	/* mlock limit tests */
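The find_vma_prev() rewrite above drops the bespoke rbtree walk and leans on find_vma() plus the vm_prev back-pointer; only the "address above every mapping" case still descends the tree (rightmost node). A behavioural sketch over a doubly linked list standing in for the rbtree (toy types, not kernel API):

    #include <assert.h>
    #include <stddef.h>

    struct area {
        unsigned long start, end;
        struct area *next, *prev;       /* doubly linked, sorted */
    };

    /* return the first mapping ending above addr and, via *pprev,
     * the mapping before it (or NULL) */
    static struct area *find_area_prev(struct area *list, unsigned long addr,
                                       struct area **pprev)
    {
        struct area *a = list, *last = NULL;

        while (a && addr >= a->end) {   /* find_vma() walk */
            last = a;
            a = a->next;
        }
        *pprev = a ? a->prev : last;    /* above the last mapping? */
        return a;
    }

    int main(void)
    {
        struct area hi = { 0x3000, 0x4000, NULL, NULL };
        struct area lo = { 0x1000, 0x2000, &hi, NULL };
        struct area *prev;

        hi.prev = &lo;
        assert(find_area_prev(&lo, 0x2800, &prev) == &hi && prev == &lo);
        assert(find_area_prev(&lo, 0x5000, &prev) == NULL && prev == &hi);
        return 0;
    }

The allocators need the previous VMA so they can test both sides of a candidate hole: the next mapping's vm_start_gap() and the previous mapping's vm_end_gap(). Note also the acct_stack_growth() hunk above: the old code quietly subtracted one guard page from the accounted size, which is no longer needed once the gap lives outside the stack VMA.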
@@ -1703,32 +1708,43 @@
  */
 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 {
-	int error;
+	struct vm_area_struct *next;
+	unsigned long gap_addr;
+	int error = 0;
 
 	if (!(vma->vm_flags & VM_GROWSUP))
 		return -EFAULT;
 
-	/*
-	 * We must make sure the anon_vma is allocated
-	 * so that the anon_vma locking is not a noop.
-	 */
+	/* Guard against exceeding limits of the address space. */
+	address &= PAGE_MASK;
+	if (address >= TASK_SIZE)
+		return -ENOMEM;
+	address += PAGE_SIZE;
+
+	/* Enforce stack_guard_gap */
+	gap_addr = address + stack_guard_gap;
+
+	/* Guard against overflow */
+	if (gap_addr < address || gap_addr > TASK_SIZE)
+		gap_addr = TASK_SIZE;
+
+	next = vma->vm_next;
+	if (next && next->vm_start < gap_addr) {
+		if (!(next->vm_flags & VM_GROWSUP))
+			return -ENOMEM;
+		/* Check that both stack segments have the same anon_vma? */
+	}
+
+	/* We must make sure the anon_vma is allocated. */
 	if (unlikely(anon_vma_prepare(vma)))
 		return -ENOMEM;
-	vma_lock_anon_vma(vma);
 
 	/*
 	 * vma->vm_start/vm_end cannot change under us because the caller
 	 * is required to hold the mmap_sem in read mode.  We need the
 	 * anon_vma lock to serialize against concurrent expand_stacks.
-	 * Also guard against wrapping around to address 0.
 	 */
-	if (address < PAGE_ALIGN(address+4))
-		address = PAGE_ALIGN(address+4);
-	else {
-		vma_unlock_anon_vma(vma);
-		return -ENOMEM;
-	}
-	error = 0;
+	vma_lock_anon_vma(vma);
 
 	/* Somebody else might have raced and expanded it already */
 	if (address > vma->vm_end) {
@@ -1758,27 +1774,36 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 int expand_downwards(struct vm_area_struct *vma,
 				   unsigned long address)
 {
+	struct vm_area_struct *prev;
+	unsigned long gap_addr;
 	int error;
 
-	/*
-	 * We must make sure the anon_vma is allocated
-	 * so that the anon_vma locking is not a noop.
-	 */
-	if (unlikely(anon_vma_prepare(vma)))
-		return -ENOMEM;
-
 	address &= PAGE_MASK;
 	error = security_file_mmap(NULL, 0, 0, 0, address, 1);
 	if (error)
 		return error;
 
-	vma_lock_anon_vma(vma);
+	/* Enforce stack_guard_gap */
+	gap_addr = address - stack_guard_gap;
+	if (gap_addr > address)
+		return -ENOMEM;
+	prev = vma->vm_prev;
+	if (prev && prev->vm_end > gap_addr) {
+		if (!(prev->vm_flags & VM_GROWSDOWN))
+			return -ENOMEM;
+		/* Check that both stack segments have the same anon_vma? */
+	}
+
+	/* We must make sure the anon_vma is allocated. */
+	if (unlikely(anon_vma_prepare(vma)))
+		return -ENOMEM;
 
 	/*
 	 * vma->vm_start/vm_end cannot change under us because the caller
 	 * is required to hold the mmap_sem in read mode.  We need the
 	 * anon_vma lock to serialize against concurrent expand_stacks.
 	 */
+	vma_lock_anon_vma(vma);
 
 	/* Somebody else might have raced and expanded it already */
 	if (address < vma->vm_start) {
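Both expansion paths above lean on the standard unsigned-wraparound idiom: unsigned arithmetic wraps by definition, so a sum smaller than an addend (or a difference larger than the minuend) proves the operation overflowed. A minimal illustration of the two checks, with the same 256-page gap:

    #include <assert.h>

    int main(void)
    {
        unsigned long gap = 256UL << 12;

        /* expand_upwards(): gap_addr = address + gap near the top */
        unsigned long address = (unsigned long)-4096;
        unsigned long up = address + gap;
        assert(up < address);           /* wrapped: clamp to TASK_SIZE */

        /* expand_downwards(): gap_addr = address - gap near zero */
        address = 4096;
        unsigned long down = address - gap;
        assert(down > address);         /* wrapped: fail with -ENOMEM */

        return 0;
    }

The asymmetry is deliberate: growing up past the top of the address space is clamped and then checked against the next mapping, while growing down past zero simply cannot be satisfied and is rejected outright.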
@@ -1802,6 +1827,22 @@ int expand_downwards(struct vm_area_struct *vma,
 	return error;
 }
 
+/* enforced gap between the expanding stack and other mappings. */
+unsigned long stack_guard_gap = 256UL << PAGE_SHIFT;
+
+static int __init cmdline_parse_stack_guard_gap(char *p)
+{
+	unsigned long val;
+	char *endptr;
+
+	val = simple_strtoul(p, &endptr, 10);
+	if (!*endptr)
+		stack_guard_gap = val << PAGE_SHIFT;
+
+	return 0;
+}
+__setup("stack_guard_gap=", cmdline_parse_stack_guard_gap);
+
 #ifdef CONFIG_STACK_GROWSUP
 int expand_stack(struct vm_area_struct *vma, unsigned long address)
 {
diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c
--- a/net/rxrpc/ar-key.c
+++ b/net/rxrpc/ar-key.c
@@ -217,7 +217,7 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ,
 				       unsigned *_toklen)
 {
 	const __be32 *xdr = *_xdr;
-	unsigned toklen = *_toklen, n_parts, loop, tmp;
+	unsigned int toklen = *_toklen, n_parts, loop, tmp, paddedlen;
 
 	/* there must be at least one name, and at least #names+1 length
 	 * words */
@@ -245,16 +245,16 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ,
 		toklen -= 4;
 		if (tmp <= 0 || tmp > AFSTOKEN_STRING_MAX)
 			return -EINVAL;
+		paddedlen = (tmp + 3) & ~3;
+		if (paddedlen > toklen)
+			return -EINVAL;
 		princ->name_parts[loop] = kmalloc(tmp + 1, GFP_KERNEL);
 		if (!princ->name_parts[loop])
 			return -ENOMEM;
 		memcpy(princ->name_parts[loop], xdr, tmp);
 		princ->name_parts[loop][tmp] = 0;
-		tmp = (tmp + 3) & ~3;
-		toklen -= tmp;
-		xdr += tmp >> 2;
+		toklen -= paddedlen;
+		xdr += paddedlen >> 2;
 	}
 
@@ -263,16 +263,16 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ,
 	toklen -= 4;
 	if (tmp <= 0 || tmp > AFSTOKEN_K5_REALM_MAX)
 		return -EINVAL;
+	paddedlen = (tmp + 3) & ~3;
+	if (paddedlen > toklen)
+		return -EINVAL;
 	princ->realm = kmalloc(tmp + 1, GFP_KERNEL);
 	if (!princ->realm)
 		return -ENOMEM;
 	memcpy(princ->realm, xdr, tmp);
 	princ->realm[tmp] = 0;
-	tmp = (tmp + 3) & ~3;
-	toklen -= tmp;
-	xdr += tmp >> 2;
+	toklen -= paddedlen;
+	xdr += paddedlen >> 2;
 
 	_debug("PRINC: %s@%s", princ->name_parts[0], princ->realm);
@@ -289,7 +289,7 @@ static int rxrpc_krb5_decode_tagged_data(struct krb5_tagged_data *td,
 				       unsigned *_toklen)
 {
 	const __be32 *xdr = *_xdr;
-	unsigned toklen = *_toklen, len;
+	unsigned int toklen = *_toklen, len, paddedlen;
 
 	/* there must be at least one tag and one length word */
 	if (toklen <= 8)
@@ -303,6 +303,9 @@ static int rxrpc_krb5_decode_tagged_data(struct krb5_tagged_data *td,
 	toklen -= 8;
 	if (len > max_data_size)
 		return -EINVAL;
+	paddedlen = (len + 3) & ~3;
+	if (paddedlen > toklen)
+		return -EINVAL;
 	td->data_len = len;
 
 	if (len > 0) {
@@ -310,9 +313,8 @@ static int rxrpc_krb5_decode_tagged_data(struct krb5_tagged_data *td,
 		if (!td->data)
 			return -ENOMEM;
 		memcpy(td->data, xdr, len);
-		len = (len + 3) & ~3;
-		toklen -= len;
-		xdr += len >> 2;
+		toklen -= paddedlen;
+		xdr += paddedlen >> 2;
 	}
 
 	_debug("tag %x len %x", td->tag, td->data_len);
@@ -384,7 +386,7 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen,
 				    const __be32 **_xdr, unsigned *_toklen)
 {
 	const __be32 *xdr = *_xdr;
-	unsigned toklen = *_toklen, len;
+	unsigned int toklen = *_toklen, len, paddedlen;
 
 	/* there must be at least one length word */
 	if (toklen <= 4)
@@ -396,6 +398,9 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen,
 	toklen -= 4;
 	if (len > AFSTOKEN_K5_TIX_MAX)
 		return -EINVAL;
+	paddedlen = (len + 3) & ~3;
+	if (paddedlen > toklen)
+		return -EINVAL;
 	*_tktlen = len;
 
 	_debug("ticket len %u", len);
@@ -405,9 +410,8 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen,
 		if (!*_ticket)
 			return -ENOMEM;
 		memcpy(*_ticket, xdr, len);
-		len = (len + 3) & ~3;
-		toklen -= len;
-		xdr += len >> 2;
+		toklen -= paddedlen;
+		xdr += paddedlen >> 2;
 	}
 
 	*_xdr = xdr;
@@ -551,7 +555,7 @@ static int rxrpc_instantiate_xdr(struct key *key, const void *data, size_t datal
 {
 	const __be32 *xdr = data, *token;
 	const char *cp;
-	unsigned len, tmp, loop, ntoken, toklen, sec_ix;
+	unsigned int len, paddedlen, loop, ntoken, toklen, sec_ix;
 	int ret;
 
 	_enter(",{%x,%x,%x,%x},%zu",
@@ -576,22 +580,21 @@ static int rxrpc_instantiate_xdr(struct key *key, const void *data, size_t datal
 	if (len < 1 || len > AFSTOKEN_CELL_MAX)
 		goto not_xdr;
 	datalen -= 4;
-	tmp = (len + 3) & ~3;
-	if (tmp > datalen)
+	paddedlen = (len + 3) & ~3;
+	if (paddedlen > datalen)
 		goto not_xdr;
 	cp = (const char *) xdr;
 	for (loop = 0; loop < len; loop++)
 		if (!isprint(cp[loop]))
 			goto not_xdr;
-	if (len < tmp)
-		for (; loop < tmp; loop++)
-			if (cp[loop])
-				goto not_xdr;
+	for (; loop < paddedlen; loop++)
+		if (cp[loop])
+			goto not_xdr;
 	_debug("cellname: [%u/%u] '%*.*s'",
-	       len, tmp, len, len, (const char *) xdr);
-	datalen -= tmp;
-	xdr += tmp >> 2;
+	       len, paddedlen, len, len, (const char *) xdr);
+	datalen -= paddedlen;
+	xdr += paddedlen >> 2;
 
 	/* get the token count */
 	if (datalen < 12)
@@ -612,10 +615,11 @@ static int rxrpc_instantiate_xdr(struct key *key, const void *data, size_t datal
 		sec_ix = ntohl(*xdr);
 		datalen -= 4;
 		_debug("token: [%x/%zx] %x", toklen, datalen, sec_ix);
-		if (toklen < 20 || toklen > datalen)
+		paddedlen = (toklen + 3) & ~3;
+		if (toklen < 20 || toklen > datalen || paddedlen > datalen)
 			goto not_xdr;
-		datalen -= (toklen + 3) & ~3;
-		xdr += (toklen + 3) >> 2;
+		datalen -= paddedlen;
+		xdr += paddedlen >> 2;
 	} while (--loop > 0);
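The rxrpc hunks all fix one recurring bug: each XDR element's length was checked against its own per-field maximum, but the 4-byte-padded length was subtracted from the remaining token size only after the data had been copied. A padded length exceeding the bytes actually left makes the unsigned counter wrap and the cursor walk past the end of the buffer. A toy reduction of the vulnerable and fixed flows (illustrative values; the real decoders copy data between the check and the subtraction):

    #include <assert.h>

    static int decode_vulnerable(unsigned int *toklen, unsigned int len)
    {
        if (len > 65535)
            return -1;              /* only the per-field max is checked */
        /* ...a memcpy of 'len' bytes would already over-read here... */
        len = (len + 3) & ~3;
        *toklen -= len;             /* may wrap past zero */
        return 0;
    }

    static int decode_fixed(unsigned int *toklen, unsigned int len)
    {
        unsigned int paddedlen = (len + 3) & ~3;

        if (len > 65535 || paddedlen > *toklen)
            return -1;              /* reject before consuming anything */
        *toklen -= paddedlen;
        return 0;
    }

    int main(void)
    {
        unsigned int a = 10, b = 10;    /* 10 bytes left, 13 claimed */

        assert(decode_vulnerable(&a, 13) == 0 && a > 10);   /* wrapped */
        assert(decode_fixed(&b, 13) == -1 && b == 10);      /* rejected */
        return 0;
    }

Computing paddedlen once and validating it against the remaining length before any allocation or copy is the whole fix; the cellname hunk additionally drops a redundant "if (len < tmp)" guard, since the padding loop is a no-op when len is already aligned.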