Message-ID: <202110161709.wdDmN8he-lkp@intel.com>
Date: Sat, 16 Oct 2021 17:36:13 +0800
From: kernel test robot <lkp@...el.com>
To: Vineet Gupta <vgupta@...opsys.com>
Cc: llvm@...ts.linux.dev, kbuild-all@...ts.01.org,
linux-kernel@...r.kernel.org
Subject: [vgupta-arc:topic-zol-remove 79/188] mm/memory.c:3717:21: error: no
previous prototype for function 'do_anonymous_page'
tree: https://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc.git topic-zol-remove
head: 5d273f5d5109b942d3be84a4db0ffe05feb901d4
commit: 94f784d9992cf0c171122f44767c549fa4f353cb [79/188] xxx: disable address space randomization, fault around
config: riscv-buildonly-randconfig-r005-20211016 (attached as .config)
compiler: clang version 14.0.0 (https://github.com/llvm/llvm-project a49f5386ce6b091da66ea7c3a1d9a588d53becf7)
reproduce (this is a W=1 build):
wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
chmod +x ~/bin/make.cross
# install riscv cross compiling tool for clang build
# apt-get install binutils-riscv64-linux-gnu
# https://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc.git/commit/?id=94f784d9992cf0c171122f44767c549fa4f353cb
git remote add vgupta-arc https://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc.git
git fetch --no-tags vgupta-arc topic-zol-remove
git checkout 94f784d9992cf0c171122f44767c549fa4f353cb
# save the attached .config to linux build tree
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross W=1 ARCH=riscv
If you fix the issue, kindly add the following tag as appropriate:
Reported-by: kernel test robot <lkp@...el.com>
All errors (new ones prefixed by >>):
>> mm/memory.c:3717:21: error: no previous prototype for function 'do_anonymous_page' [-Werror,-Wmissing-prototypes]
noinline vm_fault_t do_anonymous_page(struct vm_fault *vmf)
^
mm/memory.c:3717:10: note: declare 'static' if the function is not intended to be used outside of this translation unit
noinline vm_fault_t do_anonymous_page(struct vm_fault *vmf)
^
static
>> mm/memory.c:3833:21: error: no previous prototype for function '__do_fault' [-Werror,-Wmissing-prototypes]
noinline vm_fault_t __do_fault(struct vm_fault *vmf)
^
mm/memory.c:3833:10: note: declare 'static' if the function is not intended to be used outside of this translation unit
noinline vm_fault_t __do_fault(struct vm_fault *vmf)
^
static
>> mm/memory.c:4157:21: error: no previous prototype for function 'do_read_fault' [-Werror,-Wmissing-prototypes]
noinline vm_fault_t do_read_fault(struct vm_fault *vmf)
^
mm/memory.c:4157:10: note: declare 'static' if the function is not intended to be used outside of this translation unit
noinline vm_fault_t do_read_fault(struct vm_fault *vmf)
^
static
>> mm/memory.c:4186:21: error: no previous prototype for function 'do_cow_fault' [-Werror,-Wmissing-prototypes]
noinline vm_fault_t do_cow_fault(struct vm_fault *vmf)
^
mm/memory.c:4186:10: note: declare 'static' if the function is not intended to be used outside of this translation unit
noinline vm_fault_t do_cow_fault(struct vm_fault *vmf)
^
static
>> mm/memory.c:4224:21: error: no previous prototype for function 'do_shared_fault' [-Werror,-Wmissing-prototypes]
noinline vm_fault_t do_shared_fault(struct vm_fault *vmf)
^
mm/memory.c:4224:10: note: declare 'static' if the function is not intended to be used outside of this translation unit
noinline vm_fault_t do_shared_fault(struct vm_fault *vmf)
^
static
>> mm/memory.c:4267:21: error: no previous prototype for function 'do_fault' [-Werror,-Wmissing-prototypes]
noinline vm_fault_t do_fault(struct vm_fault *vmf)
^
mm/memory.c:4267:10: note: declare 'static' if the function is not intended to be used outside of this translation unit
noinline vm_fault_t do_fault(struct vm_fault *vmf)
^
static
>> mm/memory.c:4506:21: error: no previous prototype for function 'handle_pte_fault' [-Werror,-Wmissing-prototypes]
noinline vm_fault_t handle_pte_fault(struct vm_fault *vmf)
^
mm/memory.c:4506:10: note: declare 'static' if the function is not intended to be used outside of this translation unit
noinline vm_fault_t handle_pte_fault(struct vm_fault *vmf)
^
static
>> mm/memory.c:4610:21: error: no previous prototype for function '__handle_mm_fault' [-Werror,-Wmissing-prototypes]
noinline vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
^
mm/memory.c:4610:10: note: declare 'static' if the function is not intended to be used outside of this translation unit
noinline vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
^
static
8 errors generated.
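
All of the flagged functions appear to have had their `static` qualifier dropped when `noinline` was added, so each definition now has external linkage with no prior declaration in scope, which is exactly what -Wmissing-prototypes (reported here as an error via -Werror) complains about. A minimal, self-contained sketch of how the diagnostic fires and the two usual ways to silence it -- a hypothetical demo.c, not kernel code:

    /* demo.c - hypothetical standalone example, not kernel code.
     * Build with:  cc -Wmissing-prototypes -c demo.c
     */

    int no_prototype(int x)            /* warns: no previous prototype for 'no_prototype' */
    {
            return x + 1;
    }

    static int file_local(int x)       /* fix 1: internal linkage, no warning */
    {
            return x + 1;
    }

    int declared_first(int x);         /* fix 2: a prototype visible before... */

    int declared_first(int x)          /* ...the definition, so no warning */
    {
            return x + 1;
    }

In mm/memory.c the compiler note's suggestion corresponds to fix 1, e.g. writing `static noinline vm_fault_t do_anonymous_page(struct vm_fault *vmf)`, which keeps the noinline annotation while restoring internal linkage; the prototype alternative is sketched after the quoted source below.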
vim +/do_anonymous_page +3717 mm/memory.c
3711
3712 /*
3713 * We enter with non-exclusive mmap_lock (to exclude vma changes,
3714 * but allow concurrent faults), and pte mapped but not yet locked.
3715 * We return with mmap_lock still held, but pte unmapped and unlocked.
3716 */
> 3717 noinline vm_fault_t do_anonymous_page(struct vm_fault *vmf)
3718 {
3719 struct vm_area_struct *vma = vmf->vma;
3720 struct page *page;
3721 vm_fault_t ret = 0;
3722 pte_t entry;
3723
3724 /* File mapping without ->vm_ops ? */
3725 if (vma->vm_flags & VM_SHARED)
3726 return VM_FAULT_SIGBUS;
3727
3728 /*
3729 * Use pte_alloc() instead of pte_alloc_map(). We can't run
3730 * pte_offset_map() on pmds where a huge pmd might be created
3731 * from a different thread.
3732 *
3733 * pte_alloc_map() is safe to use under mmap_write_lock(mm) or when
3734 * parallel threads are excluded by other means.
3735 *
3736 * Here we only have mmap_read_lock(mm).
3737 */
3738 if (pte_alloc(vma->vm_mm, vmf->pmd))
3739 return VM_FAULT_OOM;
3740
3741 /* See comment in handle_pte_fault() */
3742 if (unlikely(pmd_trans_unstable(vmf->pmd)))
3743 return 0;
3744
3745 /* Use the zero-page for reads */
3746 if (!(vmf->flags & FAULT_FLAG_WRITE) &&
3747 !mm_forbids_zeropage(vma->vm_mm)) {
3748 entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address),
3749 vma->vm_page_prot));
3750 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
3751 vmf->address, &vmf->ptl);
3752 if (!pte_none(*vmf->pte)) {
3753 update_mmu_tlb(vma, vmf->address, vmf->pte);
3754 goto unlock;
3755 }
3756 ret = check_stable_address_space(vma->vm_mm);
3757 if (ret)
3758 goto unlock;
3759 /* Deliver the page fault to userland, check inside PT lock */
3760 if (userfaultfd_missing(vma)) {
3761 pte_unmap_unlock(vmf->pte, vmf->ptl);
3762 return handle_userfault(vmf, VM_UFFD_MISSING);
3763 }
3764 goto setpte;
3765 }
3766
3767 /* Allocate our own private page. */
3768 if (unlikely(anon_vma_prepare(vma)))
3769 goto oom;
3770 page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
3771 if (!page)
3772 goto oom;
3773
3774 if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL))
3775 goto oom_free_page;
3776 cgroup_throttle_swaprate(page, GFP_KERNEL);
3777
3778 /*
3779 * The memory barrier inside __SetPageUptodate makes sure that
3780 * preceding stores to the page contents become visible before
3781 * the set_pte_at() write.
3782 */
3783 __SetPageUptodate(page);
3784
3785 entry = mk_pte(page, vma->vm_page_prot);
3786 entry = pte_sw_mkyoung(entry);
3787 if (vma->vm_flags & VM_WRITE)
3788 entry = pte_mkwrite(pte_mkdirty(entry));
3789
3790 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
3791 &vmf->ptl);
3792 if (!pte_none(*vmf->pte)) {
3793 update_mmu_cache(vma, vmf->address, vmf->pte);
3794 goto release;
3795 }
3796
3797 ret = check_stable_address_space(vma->vm_mm);
3798 if (ret)
3799 goto release;
3800
3801 /* Deliver the page fault to userland, check inside PT lock */
3802 if (userfaultfd_missing(vma)) {
3803 pte_unmap_unlock(vmf->pte, vmf->ptl);
3804 put_page(page);
3805 return handle_userfault(vmf, VM_UFFD_MISSING);
3806 }
3807
3808 inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
3809 page_add_new_anon_rmap(page, vma, vmf->address, false);
3810 lru_cache_add_inactive_or_unevictable(page, vma);
3811 setpte:
3812 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
3813
3814 /* No need to invalidate - it was non-present before */
3815 update_mmu_cache(vma, vmf->address, vmf->pte);
3816 unlock:
3817 pte_unmap_unlock(vmf->pte, vmf->ptl);
3818 return ret;
3819 release:
3820 put_page(page);
3821 goto unlock;
3822 oom_free_page:
3823 put_page(page);
3824 oom:
3825 return VM_FAULT_OOM;
3826 }
3827
3828 /*
3829 * The mmap_lock must have been held on entry, and may have been
3830 * released depending on flags and vma->vm_ops->fault() return value.
3831 * See filemap_fault() and __lock_page_or_retry().
3832 */
> 3833 noinline vm_fault_t __do_fault(struct vm_fault *vmf)
3834 {
3835 struct vm_area_struct *vma = vmf->vma;
3836 vm_fault_t ret;
3837
3838 /*
3839 * Preallocate pte before we take page_lock because this might lead to
3840 * deadlocks for memcg reclaim which waits for pages under writeback:
3841 * lock_page(A)
3842 * SetPageWriteback(A)
3843 * unlock_page(A)
3844 * lock_page(B)
3845 * lock_page(B)
3846 * pte_alloc_one
3847 * shrink_page_list
3848 * wait_on_page_writeback(A)
3849 * SetPageWriteback(B)
3850 * unlock_page(B)
3851 * # flush A, B to clear the writeback
3852 */
3853 if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) {
3854 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
3855 if (!vmf->prealloc_pte)
3856 return VM_FAULT_OOM;
3857 smp_wmb(); /* See comment in __pte_alloc() */
3858 }
3859
3860 ret = vma->vm_ops->fault(vmf);
3861 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY |
3862 VM_FAULT_DONE_COW)))
3863 return ret;
3864
3865 if (unlikely(PageHWPoison(vmf->page))) {
3866 if (ret & VM_FAULT_LOCKED)
3867 unlock_page(vmf->page);
3868 put_page(vmf->page);
3869 vmf->page = NULL;
3870 return VM_FAULT_HWPOISON;
3871 }
3872
3873 if (unlikely(!(ret & VM_FAULT_LOCKED)))
3874 lock_page(vmf->page);
3875 else
3876 VM_BUG_ON_PAGE(!PageLocked(vmf->page), vmf->page);
3877
3878 return ret;
3879 }
3880
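If the commit intentionally leaves these fault handlers with external linkage (for instance so the noinline symbols remain individually visible), the other way to satisfy -Wmissing-prototypes is to give each one a declaration that is in scope at its definition. A sketch of such declarations follows; the choice of mm/internal.h as their home is only an assumption, and the trailing parameters of __handle_mm_fault, which are truncated in the log above, are taken from the mainline definition:

    /* Sketch only: hypothetical declarations, not something the commit adds.
     * The location (e.g. mm/internal.h, included by mm/memory.c) is an assumption.
     */
    vm_fault_t do_anonymous_page(struct vm_fault *vmf);
    vm_fault_t __do_fault(struct vm_fault *vmf);
    vm_fault_t do_read_fault(struct vm_fault *vmf);
    vm_fault_t do_cow_fault(struct vm_fault *vmf);
    vm_fault_t do_shared_fault(struct vm_fault *vmf);
    vm_fault_t do_fault(struct vm_fault *vmf);
    vm_fault_t handle_pte_fault(struct vm_fault *vmf);
    vm_fault_t __handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
                                 unsigned int flags); /* trailing args assumed from mainline */
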
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org