[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <202507312342.dmLxVgli-lkp@intel.com>
Date: Fri, 1 Aug 2025 00:19:52 +0800
From: kernel test robot <lkp@...el.com>
To: Balbir Singh <balbirs@...dia.com>, linux-mm@...ck.org
Cc: oe-kbuild-all@...ts.linux.dev, linux-kernel@...r.kernel.org,
Balbir Singh <balbirs@...dia.com>,
Karol Herbst <kherbst@...hat.com>, Lyude Paul <lyude@...hat.com>,
Danilo Krummrich <dakr@...nel.org>,
David Airlie <airlied@...il.com>, Simona Vetter <simona@...ll.ch>,
Jérôme Glisse <jglisse@...hat.com>,
Shuah Khan <skhan@...uxfoundation.org>,
David Hildenbrand <david@...hat.com>,
Barry Song <baohua@...nel.org>,
Baolin Wang <baolin.wang@...ux.alibaba.com>,
Ryan Roberts <ryan.roberts@....com>,
Matthew Wilcox <willy@...radead.org>, Peter Xu <peterx@...hat.com>,
Zi Yan <ziy@...dia.com>, Kefeng Wang <wangkefeng.wang@...wei.com>,
Jane Chu <jane.chu@...cle.com>,
Alistair Popple <apopple@...dia.com>,
Donet Tom <donettom@...ux.ibm.com>,
Mika Penttilä <mpenttil@...hat.com>,
Matthew Brost <matthew.brost@...el.com>,
Francois Dugast <francois.dugast@...el.com>,
Ralph Campbell <rcampbell@...dia.com>
Subject: Re: [v2 03/11] mm/migrate_device: THP migration of zone device pages
Hi Balbir,
kernel test robot noticed the following build warnings:
[auto build test WARNING on akpm-mm/mm-everything]
[also build test WARNING on next-20250731]
[cannot apply to akpm-mm/mm-nonmm-unstable shuah-kselftest/next shuah-kselftest/fixes linus/master v6.16]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting a patch, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]
url: https://github.com/intel-lab-lkp/linux/commits/Balbir-Singh/mm-zone_device-support-large-zone-device-private-folios/20250730-172600
base: https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-everything
patch link: https://lore.kernel.org/r/20250730092139.3890844-4-balbirs%40nvidia.com
patch subject: [v2 03/11] mm/migrate_device: THP migration of zone device pages
config: x86_64-randconfig-122-20250731 (https://download.01.org/0day-ci/archive/20250731/202507312342.dmLxVgli-lkp@intel.com/config)
compiler: gcc-12 (Debian 12.2.0-14+deb12u1) 12.2.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20250731/202507312342.dmLxVgli-lkp@intel.com/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add the following tags
| Reported-by: kernel test robot <lkp@...el.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202507312342.dmLxVgli-lkp@intel.com/
sparse warnings: (new ones prefixed by >>)
>> mm/migrate_device.c:769:13: sparse: sparse: incorrect type in assignment (different base types) @@ expected int [assigned] ret @@ got restricted vm_fault_t @@
mm/migrate_device.c:769:13: sparse: expected int [assigned] ret
mm/migrate_device.c:769:13: sparse: got restricted vm_fault_t
mm/migrate_device.c:130:25: sparse: sparse: context imbalance in 'migrate_vma_collect_huge_pmd' - unexpected unlock
mm/migrate_device.c:815:16: sparse: sparse: context imbalance in 'migrate_vma_insert_huge_pmd_page' - different lock contexts for basic block
vim +769 mm/migrate_device.c
689
690 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
691 /**
692 * migrate_vma_insert_huge_pmd_page: Insert a huge folio into @migrate->vma->vm_mm
693 * at @addr. folio is already allocated as a part of the migration process with
694 * large page.
695 *
696 * @folio needs to be initialized and setup after it's allocated. The code bits
697 * here follow closely the code in __do_huge_pmd_anonymous_page(). This API does
698 * not support THP zero pages.
699 *
700 * @migrate: migrate_vma arguments
701 * @addr: address where the folio will be inserted
702 * @folio: folio to be inserted at @addr
703 * @src: src pfn which is being migrated
704 * @pmdp: pointer to the pmd
705 */
706 static int migrate_vma_insert_huge_pmd_page(struct migrate_vma *migrate,
707 unsigned long addr,
708 struct page *page,
709 unsigned long *src,
710 pmd_t *pmdp)
711 {
712 struct vm_area_struct *vma = migrate->vma;
713 gfp_t gfp = vma_thp_gfp_mask(vma);
714 struct folio *folio = page_folio(page);
715 int ret;
716 spinlock_t *ptl;
717 pgtable_t pgtable;
718 pmd_t entry;
719 bool flush = false;
720 unsigned long i;
721
722 VM_WARN_ON_FOLIO(!folio, folio);
723 VM_WARN_ON_ONCE(!pmd_none(*pmdp) && !is_huge_zero_pmd(*pmdp));
724
725 if (!thp_vma_suitable_order(vma, addr, HPAGE_PMD_ORDER))
726 return -EINVAL;
727
728 ret = anon_vma_prepare(vma);
729 if (ret)
730 return ret;
731
732 folio_set_order(folio, HPAGE_PMD_ORDER);
733 folio_set_large_rmappable(folio);
734
735 if (mem_cgroup_charge(folio, migrate->vma->vm_mm, gfp)) {
736 count_vm_event(THP_FAULT_FALLBACK);
737 count_mthp_stat(HPAGE_PMD_ORDER, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
738 ret = -ENOMEM;
739 goto abort;
740 }
741
742 __folio_mark_uptodate(folio);
743
744 pgtable = pte_alloc_one(vma->vm_mm);
745 if (unlikely(!pgtable))
746 goto abort;
747
748 if (folio_is_device_private(folio)) {
749 swp_entry_t swp_entry;
750
751 if (vma->vm_flags & VM_WRITE)
752 swp_entry = make_writable_device_private_entry(
753 page_to_pfn(page));
754 else
755 swp_entry = make_readable_device_private_entry(
756 page_to_pfn(page));
757 entry = swp_entry_to_pmd(swp_entry);
758 } else {
759 if (folio_is_zone_device(folio) &&
760 !folio_is_device_coherent(folio)) {
761 goto abort;
762 }
763 entry = folio_mk_pmd(folio, vma->vm_page_prot);
764 if (vma->vm_flags & VM_WRITE)
765 entry = pmd_mkwrite(pmd_mkdirty(entry), vma);
766 }
767
768 ptl = pmd_lock(vma->vm_mm, pmdp);
> 769 ret = check_stable_address_space(vma->vm_mm);
770 if (ret)
771 goto abort;
772
773 /*
774 * Check for userfaultfd but do not deliver the fault. Instead,
775 * just back off.
776 */
777 if (userfaultfd_missing(vma))
778 goto unlock_abort;
779
780 if (!pmd_none(*pmdp)) {
781 if (!is_huge_zero_pmd(*pmdp))
782 goto unlock_abort;
783 flush = true;
784 } else if (!pmd_none(*pmdp))
785 goto unlock_abort;
786
787 add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
788 folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE);
789 if (!folio_is_zone_device(folio))
790 folio_add_lru_vma(folio, vma);
791 folio_get(folio);
792
793 if (flush) {
794 pte_free(vma->vm_mm, pgtable);
795 flush_cache_page(vma, addr, addr + HPAGE_PMD_SIZE);
796 pmdp_invalidate(vma, addr, pmdp);
797 } else {
798 pgtable_trans_huge_deposit(vma->vm_mm, pmdp, pgtable);
799 mm_inc_nr_ptes(vma->vm_mm);
800 }
801 set_pmd_at(vma->vm_mm, addr, pmdp, entry);
802 update_mmu_cache_pmd(vma, addr, pmdp);
803
804 spin_unlock(ptl);
805
806 count_vm_event(THP_FAULT_ALLOC);
807 count_mthp_stat(HPAGE_PMD_ORDER, MTHP_STAT_ANON_FAULT_ALLOC);
808 count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC);
809
810 return 0;
811
812 unlock_abort:
813 spin_unlock(ptl);
814 abort:
815 for (i = 0; i < HPAGE_PMD_NR; i++)
816 src[i] &= ~MIGRATE_PFN_MIGRATE;
817 return 0;
818 }
819 #else /* !CONFIG_ARCH_ENABLE_THP_MIGRATION */
820 static int migrate_vma_insert_huge_pmd_page(struct migrate_vma *migrate,
821 unsigned long addr,
822 struct page *page,
823 unsigned long *src,
824 pmd_t *pmdp)
825 {
826 return 0;
827 }
828 #endif
829
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
Powered by blists - more mailing lists