Message-ID: <202312210042.xQEiqlEh-lkp@intel.com>
Date: Thu, 21 Dec 2023 00:47:51 +0800
From: kernel test robot <lkp@...el.com>
To: Kinsey Ho <kinseyho@...gle.com>,
Andrew Morton <akpm@...ux-foundation.org>
Cc: oe-kbuild-all@...ts.linux.dev,
Linux Memory Management List <linux-mm@...ck.org>,
linux-kernel@...r.kernel.org, yuzhao@...gle.com,
Kinsey Ho <kinseyho@...gle.com>,
Aneesh Kumar K V <aneesh.kumar@...ux.ibm.com>
Subject: Re: [PATCH mm-unstable v1 4/4] mm/mglru: remove
CONFIG_TRANSPARENT_HUGEPAGE
Hi Kinsey,
kernel test robot noticed the following build errors:
[auto build test ERROR on akpm-mm/mm-everything]
url: https://github.com/intel-lab-lkp/linux/commits/Kinsey-Ho/mm-mglru-add-CONFIG_ARCH_HAS_HW_PTE_YOUNG/20231220-120318
base: https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-everything
patch link: https://lore.kernel.org/r/20231220040037.883811-5-kinseyho%40google.com
patch subject: [PATCH mm-unstable v1 4/4] mm/mglru: remove CONFIG_TRANSPARENT_HUGEPAGE
config: arm-randconfig-002-20231220 (https://download.01.org/0day-ci/archive/20231221/202312210042.xQEiqlEh-lkp@intel.com/config)
compiler: arm-linux-gnueabi-gcc (GCC) 13.2.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20231221/202312210042.xQEiqlEh-lkp@intel.com/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version
of the same patch/commit), kindly add the following tags:
| Reported-by: kernel test robot <lkp@...el.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202312210042.xQEiqlEh-lkp@intel.com/
All errors (new ones prefixed by >>):
mm/vmscan.c: In function 'walk_pmd_range_locked':
>> mm/vmscan.c:3455:21: error: implicit declaration of function 'pmd_dirty'; did you mean 'pte_dirty'? [-Werror=implicit-function-declaration]
3455 | if (pmd_dirty(pmd[i]) && !folio_test_dirty(folio) &&
| ^~~~~~~~~
| pte_dirty
cc1: some warnings being treated as errors
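
Editorial note: the undeclared symbol is the arch-specific pmd_dirty(), which this
arm randconfig (no transparent hugepages) does not provide once the
CONFIG_TRANSPARENT_HUGEPAGE guards are removed from the caller. As a hedged sketch
only, not a confirmed fix for this series: one common way such breakage is avoided
is a generic fallback in include/linux/pgtable.h, guarded so that architectures
that do define pmd_dirty() keep their own version.

	/*
	 * Sketch (assumption, not part of this report): a generic fallback for
	 * architectures that do not define pmd_dirty(), so that callers such as
	 * walk_pmd_range_locked() compile on every config.  Architectures that
	 * provide their own pmd_dirty() are unaffected, since they #define the
	 * symbol before this point in the header.
	 */
	#ifndef pmd_dirty
	static inline int pmd_dirty(pmd_t pmd)
	{
		/* without a hardware dirty bit, conservatively report "not dirty" */
		return 0;
	}
	#endif
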
vim +3455 mm/vmscan.c
bd74fdaea146029 Yu Zhao 2022-09-18 3394
b5ff4133617d0ec T.J. Alumbaugh 2023-01-18 3395 static void walk_pmd_range_locked(pud_t *pud, unsigned long addr, struct vm_area_struct *vma,
b5ff4133617d0ec T.J. Alumbaugh 2023-01-18 3396 struct mm_walk *args, unsigned long *bitmap, unsigned long *first)
bd74fdaea146029 Yu Zhao 2022-09-18 3397 {
bd74fdaea146029 Yu Zhao 2022-09-18 3398 int i;
bd74fdaea146029 Yu Zhao 2022-09-18 3399 pmd_t *pmd;
bd74fdaea146029 Yu Zhao 2022-09-18 3400 spinlock_t *ptl;
bd74fdaea146029 Yu Zhao 2022-09-18 3401 struct lru_gen_mm_walk *walk = args->private;
bd74fdaea146029 Yu Zhao 2022-09-18 3402 struct mem_cgroup *memcg = lruvec_memcg(walk->lruvec);
bd74fdaea146029 Yu Zhao 2022-09-18 3403 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec);
bd74fdaea146029 Yu Zhao 2022-09-18 3404 int old_gen, new_gen = lru_gen_from_seq(walk->max_seq);
bd74fdaea146029 Yu Zhao 2022-09-18 3405
bd74fdaea146029 Yu Zhao 2022-09-18 3406 VM_WARN_ON_ONCE(pud_leaf(*pud));
bd74fdaea146029 Yu Zhao 2022-09-18 3407
bd74fdaea146029 Yu Zhao 2022-09-18 3408 /* try to batch at most 1+MIN_LRU_BATCH+1 entries */
b5ff4133617d0ec T.J. Alumbaugh 2023-01-18 3409 if (*first == -1) {
b5ff4133617d0ec T.J. Alumbaugh 2023-01-18 3410 *first = addr;
b5ff4133617d0ec T.J. Alumbaugh 2023-01-18 3411 bitmap_zero(bitmap, MIN_LRU_BATCH);
bd74fdaea146029 Yu Zhao 2022-09-18 3412 return;
bd74fdaea146029 Yu Zhao 2022-09-18 3413 }
bd74fdaea146029 Yu Zhao 2022-09-18 3414
b5ff4133617d0ec T.J. Alumbaugh 2023-01-18 3415 i = addr == -1 ? 0 : pmd_index(addr) - pmd_index(*first);
bd74fdaea146029 Yu Zhao 2022-09-18 3416 if (i && i <= MIN_LRU_BATCH) {
bd74fdaea146029 Yu Zhao 2022-09-18 3417 __set_bit(i - 1, bitmap);
bd74fdaea146029 Yu Zhao 2022-09-18 3418 return;
bd74fdaea146029 Yu Zhao 2022-09-18 3419 }
bd74fdaea146029 Yu Zhao 2022-09-18 3420
b5ff4133617d0ec T.J. Alumbaugh 2023-01-18 3421 pmd = pmd_offset(pud, *first);
bd74fdaea146029 Yu Zhao 2022-09-18 3422
bd74fdaea146029 Yu Zhao 2022-09-18 3423 ptl = pmd_lockptr(args->mm, pmd);
bd74fdaea146029 Yu Zhao 2022-09-18 3424 if (!spin_trylock(ptl))
bd74fdaea146029 Yu Zhao 2022-09-18 3425 goto done;
bd74fdaea146029 Yu Zhao 2022-09-18 3426
bd74fdaea146029 Yu Zhao 2022-09-18 3427 arch_enter_lazy_mmu_mode();
bd74fdaea146029 Yu Zhao 2022-09-18 3428
bd74fdaea146029 Yu Zhao 2022-09-18 3429 do {
bd74fdaea146029 Yu Zhao 2022-09-18 3430 unsigned long pfn;
bd74fdaea146029 Yu Zhao 2022-09-18 3431 struct folio *folio;
b5ff4133617d0ec T.J. Alumbaugh 2023-01-18 3432
b5ff4133617d0ec T.J. Alumbaugh 2023-01-18 3433 /* don't round down the first address */
b5ff4133617d0ec T.J. Alumbaugh 2023-01-18 3434 addr = i ? (*first & PMD_MASK) + i * PMD_SIZE : *first;
bd74fdaea146029 Yu Zhao 2022-09-18 3435
bd74fdaea146029 Yu Zhao 2022-09-18 3436 pfn = get_pmd_pfn(pmd[i], vma, addr);
bd74fdaea146029 Yu Zhao 2022-09-18 3437 if (pfn == -1)
bd74fdaea146029 Yu Zhao 2022-09-18 3438 goto next;
bd74fdaea146029 Yu Zhao 2022-09-18 3439
bd74fdaea146029 Yu Zhao 2022-09-18 3440 if (!pmd_trans_huge(pmd[i])) {
bd02df412cbb9a6 T.J. Alumbaugh 2023-05-22 3441 if (should_clear_pmd_young())
bd74fdaea146029 Yu Zhao 2022-09-18 3442 pmdp_test_and_clear_young(vma, addr, pmd + i);
bd74fdaea146029 Yu Zhao 2022-09-18 3443 goto next;
bd74fdaea146029 Yu Zhao 2022-09-18 3444 }
bd74fdaea146029 Yu Zhao 2022-09-18 3445
bd74fdaea146029 Yu Zhao 2022-09-18 3446 folio = get_pfn_folio(pfn, memcg, pgdat, walk->can_swap);
bd74fdaea146029 Yu Zhao 2022-09-18 3447 if (!folio)
bd74fdaea146029 Yu Zhao 2022-09-18 3448 goto next;
bd74fdaea146029 Yu Zhao 2022-09-18 3449
bd74fdaea146029 Yu Zhao 2022-09-18 3450 if (!pmdp_test_and_clear_young(vma, addr, pmd + i))
bd74fdaea146029 Yu Zhao 2022-09-18 3451 goto next;
bd74fdaea146029 Yu Zhao 2022-09-18 3452
bd74fdaea146029 Yu Zhao 2022-09-18 3453 walk->mm_stats[MM_LEAF_YOUNG]++;
bd74fdaea146029 Yu Zhao 2022-09-18 3454
bd74fdaea146029 Yu Zhao 2022-09-18 @3455 if (pmd_dirty(pmd[i]) && !folio_test_dirty(folio) &&
bd74fdaea146029 Yu Zhao 2022-09-18 3456 !(folio_test_anon(folio) && folio_test_swapbacked(folio) &&
bd74fdaea146029 Yu Zhao 2022-09-18 3457 !folio_test_swapcache(folio)))
bd74fdaea146029 Yu Zhao 2022-09-18 3458 folio_mark_dirty(folio);
bd74fdaea146029 Yu Zhao 2022-09-18 3459
bd74fdaea146029 Yu Zhao 2022-09-18 3460 old_gen = folio_update_gen(folio, new_gen);
bd74fdaea146029 Yu Zhao 2022-09-18 3461 if (old_gen >= 0 && old_gen != new_gen)
bd74fdaea146029 Yu Zhao 2022-09-18 3462 update_batch_size(walk, folio, old_gen, new_gen);
bd74fdaea146029 Yu Zhao 2022-09-18 3463 next:
bd74fdaea146029 Yu Zhao 2022-09-18 3464 i = i > MIN_LRU_BATCH ? 0 : find_next_bit(bitmap, MIN_LRU_BATCH, i) + 1;
bd74fdaea146029 Yu Zhao 2022-09-18 3465 } while (i <= MIN_LRU_BATCH);
bd74fdaea146029 Yu Zhao 2022-09-18 3466
bd74fdaea146029 Yu Zhao 2022-09-18 3467 arch_leave_lazy_mmu_mode();
bd74fdaea146029 Yu Zhao 2022-09-18 3468 spin_unlock(ptl);
bd74fdaea146029 Yu Zhao 2022-09-18 3469 done:
b5ff4133617d0ec T.J. Alumbaugh 2023-01-18 3470 *first = -1;
bd74fdaea146029 Yu Zhao 2022-09-18 3471 }
bd74fdaea146029 Yu Zhao 2022-09-18 3472
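
Editorial note on the context above: walk_pmd_range_locked() batches up to
MIN_LRU_BATCH PMD entries by recording their offsets from *first in a bitmap,
then replays the whole batch in one pass under the PMD lock. The following
standalone illustration of that bookkeeping is an editorial sketch with
hypothetical names and constants (walk_pmd_batched, BATCH, process_one), not
kernel code.

	/*
	 * Sketch of the batching idea: addresses arrive one at a time; offsets
	 * from the first address are queued in a bitmap, and the queue is
	 * replayed when an out-of-range address (or -1 as a flush) arrives.
	 */
	#include <stdio.h>
	#include <stdint.h>

	#define PMD_SHIFT	21			/* assume 2 MiB PMD entries */
	#define PMD_SIZE	(1UL << PMD_SHIFT)
	#define PMD_MASK	(~(PMD_SIZE - 1))
	#define BATCH		64			/* stand-in for MIN_LRU_BATCH */

	static unsigned long batch_first = -1UL;
	static uint64_t batch_bitmap;			/* bit i-1 set => offset i queued */

	static unsigned long pmd_index(unsigned long addr)
	{
		return addr >> PMD_SHIFT;
	}

	static void process_one(unsigned long addr)	/* stands in for the locked loop body */
	{
		printf("process PMD at %#lx\n", addr);
	}

	static void walk_pmd_batched(unsigned long addr)
	{
		unsigned long i;

		if (batch_first == -1UL) {		/* first call: start a new batch */
			batch_first = addr;
			batch_bitmap = 0;
			return;
		}

		i = addr == -1UL ? 0 : pmd_index(addr) - pmd_index(batch_first);
		if (i && i <= BATCH) {			/* nearby entry: just queue it */
			batch_bitmap |= 1ULL << (i - 1);
			return;
		}

		/* out of range (or explicit flush): replay everything queued so far */
		do {
			process_one(i ? (batch_first & PMD_MASK) + i * PMD_SIZE : batch_first);
			if (i > BATCH) {		/* the out-of-range entry itself */
				i = 0;
				continue;
			}
			/* advance to the next queued offset, if any */
			while (++i <= BATCH && !(batch_bitmap & (1ULL << (i - 1))))
				;
		} while (i <= BATCH);

		batch_first = -1UL;			/* the batch is done */
	}

	int main(void)
	{
		walk_pmd_batched(0x40000000);		/* starts a batch */
		walk_pmd_batched(0x40200000);		/* queued: offset 1 */
		walk_pmd_batched(0x40600000);		/* queued: offset 3 */
		walk_pmd_batched(-1UL);			/* flush: replays the batch */
		return 0;
	}
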
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki