Message-ID: <202102030953.0zx5H2qj-lkp@intel.com>
Date: Wed, 3 Feb 2021 09:25:59 +0800
From: kernel test robot <lkp@...el.com>
To: "Liam R. Howlett" <Liam.Howlett@...cle.com>
Cc: kbuild-all@...ts.01.org, linux-kernel@...r.kernel.org
Subject: [rcu:willy-maple 142/202] mm/mmap.c:2479:5: warning: no previous prototype for 'do_mas_align_munmap'
tree: https://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git willy-maple
head: 7e346d2845b4bd77663394f39fa70456e0084c86
commit: e8ea5d32f9de65efb14c3b01f8897913817ec0e5 [142/202] mm/mmap: Add do_mas_munmap() and wraper for __do_munmap()
config: alpha-defconfig (attached as .config)
compiler: alpha-linux-gcc (GCC) 9.3.0
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # https://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git/commit/?id=e8ea5d32f9de65efb14c3b01f8897913817ec0e5
        git remote add rcu https://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
        git fetch --no-tags rcu willy-maple
        git checkout e8ea5d32f9de65efb14c3b01f8897913817ec0e5
        # save the attached .config to linux build tree
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-9.3.0 make.cross ARCH=alpha
If you fix the issue, kindly add the following tag as appropriate:
Reported-by: kernel test robot <lkp@...el.com>
All warnings (new ones prefixed by >>):
>> mm/mmap.c:2479:5: warning: no previous prototype for 'do_mas_align_munmap' [-Wmissing-prototypes]
2479 | int do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma,
| ^~~~~~~~~~~~~~~~~~~
mm/mmap.c:2603:5: warning: no previous prototype for 'do_mas_munmap' [-Wmissing-prototypes]
2603 | int do_mas_munmap(struct ma_state *mas, struct mm_struct *mm,
| ^~~~~~~~~~~~~
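
This is the usual -Wmissing-prototypes report: both functions have external
linkage but no declaration is in scope at their definitions. A minimal sketch
of the conventional fix is below, assuming the helpers are intended to be
called from outside mm/mmap.c; the header name mm/internal.h is only an
assumed location, not taken from this report:

/* Assumed location: a header already included by mm/mmap.c, e.g. mm/internal.h */
int do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma,
			struct mm_struct *mm, unsigned long start,
			unsigned long end, struct list_head *uf, bool downgrade);

do_mas_munmap() at line 2603 would need a matching declaration (its parameter
list is truncated in the warning above, so it is not guessed here). If the
functions are in fact only called from within mm/mmap.c, marking them static
would be the simpler way to silence the warning.
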
vim +/do_mas_align_munmap +2479 mm/mmap.c
2466
2467 /* do_mas_align_munmap() - munmap the aligned region from @start to @end.
2468 *
2469 * @mas: The maple_state, ideally set up to alter the correct tree location.
2470 * @vma: The starting vm_area_struct
2471 * @mm: The mm_struct
2472 * @start: The aligned start address to munmap.
2473 * @end: The aligned end address to munmap.
2474 * @uf: The userfaultfd list_head
2475 * @downgrade: Set to true to attempt a write downgrade of the mmap_lock
2476 *
2477 * Return: 1 on success and the mmap_lock was downgraded, 0 on success, or a negative error number.
2478 */
> 2479 int do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma,
2480 struct mm_struct *mm, unsigned long start,
2481 unsigned long end, struct list_head *uf, bool downgrade)
2482 {
2483 struct vm_area_struct *prev, *last;
2484 /* we have start < vma->vm_end */
2485
2486 /*
2487 * If we need to split any vma, do it now to save pain later.
2488 *
2489 * Note: mremap's move_vma VM_ACCOUNT handling assumes a partially
2490 * unmapped vm_area_struct will remain in use: so lower split_vma
2491 * places tmp vma above, and higher split_vma places tmp vma below.
2492 */
2493 if (start > vma->vm_start) {
2494 int error;
2495 /*
2496 * Make sure that map_count on return from munmap() will
2497 * not exceed its limit; but let map_count go just above
2498 * its limit temporarily, to help free resources as expected.
2499 */
2500 if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count)
2501 return -ENOMEM;
2502
2503 error = __split_vma(mm, vma, start, 0);
2504 if (error)
2505 return error;
2506 prev = vma;
2507 vma = vma_next(mm, prev);
2508 mas->index = start;
2509 mas_reset(mas);
2510 } else {
2511 prev = vma->vm_prev;
2512 }
2513
2514 if (vma->vm_end >= end)
2515 last = vma;
2516 else
2517 last = find_vma_intersection(mm, end - 1, end);
2518
2519 /* Does it split the last one? */
2520 if (last && end < last->vm_end) {
2521 int error = __split_vma(mm, last, end, 1);
2522 if (error)
2523 return error;
2524 vma = vma_next(mm, prev);
2525 mas_reset(mas);
2526 }
2527
2528
2529 if (unlikely(uf)) {
2530 /*
2531 * If userfaultfd_unmap_prep returns an error the vmas
2532 * will remain split, but userland will get a
2533 * highly unexpected error anyway. This is no
2534 * different than the case where the first of the two
2535 * __split_vma fails, but we don't undo the first
2536 * split, even though we could. This failure is
2537 * unlikely enough that it's not worth optimizing for.
2538 */
2539 int error = userfaultfd_unmap_prep(vma, start, end, uf);
2540
2541 if (error)
2542 return error;
2543 }
2544
2545 /*
2546 * unlock any mlock()ed ranges before detaching vmas, count the number
2547 * of VMAs to be dropped, and return the tail entry of the affected
2548 * area.
2549 */
2550 mm->map_count -= unlock_range(vma, &last, end);
2551 /* Drop removed area from the tree */
2552 mas_store_gfp(mas, NULL, GFP_KERNEL);
2553
2554 /* Detach vmas from the MM linked list */
2555 vma->vm_prev = NULL;
2556 if (prev)
2557 prev->vm_next = last->vm_next;
2558 else
2559 mm->mmap = last->vm_next;
2560
2561 if (last->vm_next) {
2562 last->vm_next->vm_prev = prev;
2563 last->vm_next = NULL;
2564 } else
2565 mm->highest_vm_end = prev ? vm_end_gap(prev) : 0;
2566
2567 /*
2568 * Do not downgrade mmap_lock if we are next to VM_GROWSDOWN or
2569 * VM_GROWSUP VMA. Such VMAs can change their size under
2570 * down_read(mmap_lock) and collide with the VMA we are about to unmap.
2571 */
2572 if (downgrade) {
2573 if (last && (last->vm_flags & VM_GROWSDOWN))
2574 downgrade = false;
2575 else if (prev && (prev->vm_flags & VM_GROWSUP))
2576 downgrade = false;
2577 else
2578 mmap_write_downgrade(mm);
2579 }
2580
2581 unmap_region(mm, vma, prev, start, end);
2582
2583 /* Fix up all other VM information */
2584 remove_vma_list(mm, vma);
2585
2586 return downgrade ? 1 : 0;
2587 }
2588
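
The return convention at line 2586, "return downgrade ? 1 : 0;", follows the
existing __do_munmap() behaviour mentioned in the commit subject: 1 means the
unmap succeeded and mmap_lock was downgraded to a read lock, 0 means success
with the write lock still held, and a negative value is an error. Below is a
hedged sketch of how a caller consumes that value; the function name
example_munmap() is made up for illustration, and the locking pattern mirrors
the existing __vm_munmap() helper:

/* Illustrative only: handling the 0 / 1 / negative return convention. */
static int example_munmap(struct mm_struct *mm, unsigned long start,
			  size_t len, struct list_head *uf)
{
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	/* Ask for a downgrade so the unmap work can finish under a read lock. */
	ret = __do_munmap(mm, start, len, uf, true);
	if (ret == 1) {
		/* Unmap succeeded and the helper already downgraded mmap_lock. */
		mmap_read_unlock(mm);
		ret = 0;
	} else {
		/* Error, or success without a downgrade: write lock still held. */
		mmap_write_unlock(mm);
	}
	return ret;
}
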
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org