From c529dfa6bdfc643a9c3debb4b61b9b0c13b0862e Mon Sep 17 00:00:00 2001
From: Aaron Lu <aaron.lu@intel.com>
Date: Thu, 17 Nov 2016 15:11:08 +0800
Subject: [PATCH] mremap: add a 2s delay for MAP_FIXED case

Add a 2s delay for MAP_FIXED case to enlarge the race window so that
we can hit the race in user space.

Signed-off-by: Aaron Lu <aaron.lu@intel.com>
---
 fs/exec.c          |  2 +-
 include/linux/mm.h |  2 +-
 mm/mremap.c        | 19 ++++++++++++-------
 3 files changed, 14 insertions(+), 9 deletions(-)

diff --git a/fs/exec.c b/fs/exec.c
index 4e497b9ee71e..1e49ce9a23bd 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -619,7 +619,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
 	 * process cleanup to remove whatever mess we made.
 	 */
 	if (length != move_page_tables(vma, old_start,
-				       vma, new_start, length, false))
+				       vma, new_start, length, false, false))
 		return -ENOMEM;
 
 	lru_add_drain();
diff --git a/include/linux/mm.h b/include/linux/mm.h
index a92c8d73aeaf..5e35fe3d914a 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1392,7 +1392,7 @@ int vma_is_stack_for_current(struct vm_area_struct *vma);
 extern unsigned long move_page_tables(struct vm_area_struct *vma,
 		unsigned long old_addr, struct vm_area_struct *new_vma,
 		unsigned long new_addr, unsigned long len,
-		bool need_rmap_locks);
+		bool need_rmap_locks, bool delay);
 extern unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
 			      unsigned long end, pgprot_t newprot,
 			      int dirty_accountable, int prot_numa);
diff --git a/mm/mremap.c b/mm/mremap.c
index da22ad2a5678..8e35279ca622 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -22,6 +22,7 @@
 #include <linux/mmu_notifier.h>
 #include <linux/uaccess.h>
 #include <linux/mm-arch-hooks.h>
+#include <linux/delay.h>
 
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
@@ -166,7 +167,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 unsigned long move_page_tables(struct vm_area_struct *vma,
 		unsigned long old_addr, struct vm_area_struct *new_vma,
 		unsigned long new_addr, unsigned long len,
-		bool need_rmap_locks)
+		bool need_rmap_locks, bool delay)
 {
 	unsigned long extent, next, old_end;
 	pmd_t *old_pmd, *new_pmd;
@@ -224,8 +225,11 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 			  new_vma, new_pmd, new_addr, need_rmap_locks);
 		need_flush = true;
 	}
-	if (likely(need_flush))
+	if (likely(need_flush)) {
+		if (delay)
+			msleep(2000);
 		flush_tlb_range(vma, old_end-len, old_addr);
+	}
 
 	mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);
 
@@ -234,7 +238,8 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 
 static unsigned long move_vma(struct vm_area_struct *vma,
 		unsigned long old_addr, unsigned long old_len,
-		unsigned long new_len, unsigned long new_addr, bool *locked)
+		unsigned long new_len, unsigned long new_addr,
+		bool *locked, bool delay)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	struct vm_area_struct *new_vma;
@@ -273,7 +278,7 @@ static unsigned long move_vma(struct vm_area_struct *vma,
 		return -ENOMEM;
 
 	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
-				     need_rmap_locks);
+				     need_rmap_locks, delay);
 	if (moved_len < old_len) {
 		err = -ENOMEM;
 	} else if (vma->vm_ops && vma->vm_ops->mremap) {
@@ -287,7 +292,7 @@
 		 * and then proceed to unmap new area instead of old.
 		 */
 		move_page_tables(new_vma, new_addr, vma, old_addr, moved_len,
-				 true);
+				 true, delay);
 		vma = new_vma;
 		old_len = new_len;
 		old_addr = new_addr;
@@ -442,7 +447,7 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
 	if (offset_in_page(ret))
 		goto out1;
 
-	ret = move_vma(vma, addr, old_len, new_len, new_addr, locked);
+	ret = move_vma(vma, addr, old_len, new_len, new_addr, locked, true);
 	if (!(offset_in_page(ret)))
 		goto out;
 out1:
@@ -576,7 +581,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
 			goto out;
 		}
 
-		ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked);
+		ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked, false);
 	}
 out:
 	if (offset_in_page(ret)) {
-- 
2.5.5