[<prev] [next>] [<thread-prev] [day] [month] [year] [list]
Message-ID: <202212080754.4woszUWv-lkp@intel.com>
Date: Thu, 8 Dec 2022 07:46:11 +0800
From: kernel test robot <lkp@...el.com>
To: "Vishal Moola (Oracle)" <vishal.moola@...il.com>,
linux-mm@...ck.org
Cc: oe-kbuild-all@...ts.linux.dev, damon@...ts.linux.dev,
linux-kernel@...r.kernel.org, akpm@...ux-foundation.org,
sj@...nel.org, "Vishal Moola (Oracle)" <vishal.moola@...il.com>
Subject: Re: [PATCH 1/3] madvise: Convert madvise_cold_or_pageout_pte_range()
to use folios
Hi Vishal,
Thank you for the patch! Here is something to improve:
[auto build test ERROR on linus/master]
[also build test ERROR on v6.1-rc8]
[cannot apply to akpm-mm/mm-everything next-20221207]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting a patch, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]
url: https://github.com/intel-lab-lkp/linux/commits/Vishal-Moola-Oracle/Convert-deactivate_page-to-deactivate_folio/20221207-082339
patch link: https://lore.kernel.org/r/20221207002158.418789-2-vishal.moola%40gmail.com
patch subject: [PATCH 1/3] madvise: Convert madvise_cold_or_pageout_pte_range() to use folios
config: x86_64-randconfig-a011
compiler: gcc-11 (Debian 11.3.0-8) 11.3.0
reproduce (this is a W=1 build):
# https://github.com/intel-lab-lkp/linux/commit/670bf0d6d5222d0c92e72b7869d25fa64f582082
git remote add linux-review https://github.com/intel-lab-lkp/linux
git fetch --no-tags linux-review Vishal-Moola-Oracle/Convert-deactivate_page-to-deactivate_folio/20221207-082339
git checkout 670bf0d6d5222d0c92e72b7869d25fa64f582082
# save the config file
mkdir build_dir && cp config build_dir/.config
make W=1 O=build_dir ARCH=x86_64 SHELL=/bin/bash
If you fix the issue, kindly add the following tag where applicable
| Reported-by: kernel test robot <lkp@...el.com>
All errors (new ones prefixed by >>):
In file included from include/linux/mm.h:6,
from include/linux/mman.h:5,
from mm/madvise.c:9:
mm/madvise.c: In function 'madvise_cold_or_pageout_pte_range':
>> mm/madvise.c:464:57: error: passing argument 1 of 'dump_page' from incompatible pointer type [-Werror=incompatible-pointer-types]
464 | VM_BUG_ON_PAGE(folio_test_large(folio), folio);
| ^~~~~
| |
| struct folio *
include/linux/mmdebug.h:21:35: note: in definition of macro 'VM_BUG_ON_PAGE'
21 | dump_page(page, "VM_BUG_ON_PAGE(" __stringify(cond)")");\
| ^~~~
include/linux/mmdebug.h:12:29: note: expected 'struct page *' but argument is of type 'struct folio *'
12 | void dump_page(struct page *page, const char *reason);
| ~~~~~~~~~~~~~^~~~
cc1: some warnings being treated as errors
vim +/dump_page +464 mm/madvise.c
323
324 static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
325 unsigned long addr, unsigned long end,
326 struct mm_walk *walk)
327 {
328 struct madvise_walk_private *private = walk->private;
329 struct mmu_gather *tlb = private->tlb;
330 bool pageout = private->pageout;
331 struct mm_struct *mm = tlb->mm;
332 struct vm_area_struct *vma = walk->vma;
333 pte_t *orig_pte, *pte, ptent;
334 spinlock_t *ptl;
335 struct folio *folio = NULL;
336 struct page *page = NULL;
337 LIST_HEAD(folio_list);
338
339 if (fatal_signal_pending(current))
340 return -EINTR;
341
342 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
343 if (pmd_trans_huge(*pmd)) {
344 pmd_t orig_pmd;
345 unsigned long next = pmd_addr_end(addr, end);
346
347 tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
348 ptl = pmd_trans_huge_lock(pmd, vma);
349 if (!ptl)
350 return 0;
351
352 orig_pmd = *pmd;
353 if (is_huge_zero_pmd(orig_pmd))
354 goto huge_unlock;
355
356 if (unlikely(!pmd_present(orig_pmd))) {
357 VM_BUG_ON(thp_migration_supported() &&
358 !is_pmd_migration_entry(orig_pmd));
359 goto huge_unlock;
360 }
361
362 folio = pfn_folio(pmd_pfn(orig_pmd));
363
364 /* Do not interfere with other mappings of this folio */
365 if (folio_mapcount(folio) != 1)
366 goto huge_unlock;
367
368 if (next - addr != HPAGE_PMD_SIZE) {
369 int err;
370
371 folio_get(folio);
372 spin_unlock(ptl);
373 folio_lock(folio);
374 err = split_folio(folio);
375 folio_unlock(folio);
376 folio_put(folio);
377 if (!err)
378 goto regular_folio;
379 return 0;
380 }
381
382 if (pmd_young(orig_pmd)) {
383 pmdp_invalidate(vma, addr, pmd);
384 orig_pmd = pmd_mkold(orig_pmd);
385
386 set_pmd_at(mm, addr, pmd, orig_pmd);
387 tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
388 }
389
390 folio_clear_referenced(folio);
391 folio_test_clear_young(folio);
392 if (pageout) {
393 if (!folio_isolate_lru(folio)) {
394 if (folio_test_unevictable(folio))
395 folio_putback_lru(folio);
396 else
397 list_add(&folio->lru, &folio_list);
398 }
399 } else
400 deactivate_page(&folio->page);
401 huge_unlock:
402 spin_unlock(ptl);
403 if (pageout)
404 reclaim_pages(&folio_list);
405 return 0;
406 }
407
408 regular_folio:
409 if (pmd_trans_unstable(pmd))
410 return 0;
411 #endif
412 tlb_change_page_size(tlb, PAGE_SIZE);
413 orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
414 flush_tlb_batched_pending(mm);
415 arch_enter_lazy_mmu_mode();
416 for (; addr < end; pte++, addr += PAGE_SIZE) {
417 ptent = *pte;
418
419 if (pte_none(ptent))
420 continue;
421
422 if (!pte_present(ptent))
423 continue;
424
425 page = vm_normal_page(vma, addr, ptent);
426 if (!page || is_zone_device_page(page))
427 continue;
428 folio = page_folio(page);
429
430 /*
431 * Creating a THP page is expensive so split it only if we
432 * are sure it's worth. Split it if we are only owner.
433 */
434 if (folio_test_large(folio)) {
435 if (folio_mapcount(folio) != 1)
436 break;
437 folio_get(folio);
438 if (!folio_trylock(folio)) {
439 folio_put(folio);
440 break;
441 }
442 pte_unmap_unlock(orig_pte, ptl);
443 if (split_folio(folio)) {
444 folio_unlock(folio);
445 folio_put(folio);
446 orig_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
447 break;
448 }
449 folio_unlock(folio);
450 folio_put(folio);
451 orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
452 pte--;
453 addr -= PAGE_SIZE;
454 continue;
455 }
456
457 /*
458 * Do not interfere with other mappings of this folio and
459 * non-LRU folio.
460 */
461 if (!folio_test_lru(folio))
462 continue;
463
> 464 VM_BUG_ON_PAGE(folio_test_large(folio), folio);
465
466 if (pte_young(ptent)) {
467 ptent = ptep_get_and_clear_full(mm, addr, pte,
468 tlb->fullmm);
469 ptent = pte_mkold(ptent);
470 set_pte_at(mm, addr, pte, ptent);
471 tlb_remove_tlb_entry(tlb, pte, addr);
472 }
473
474 /*
475 * We are deactivating a folio for accelerating reclaiming.
476 * VM couldn't reclaim the folio unless we clear PG_young.
477 * As a side effect, it makes confuse idle-page tracking
478 * because they will miss recent referenced history.
479 */
480 folio_clear_referenced(folio);
481 folio_test_clear_young(folio);
482 if (pageout) {
483 if (!folio_isolate_lru(folio)) {
484 if (folio_test_unevictable(folio))
485 folio_putback_lru(folio);
486 else
487 list_add(&folio->lru, &folio_list);
488 }
489 } else
490 deactivate_page(&folio->page);
491 }
492
493 arch_leave_lazy_mmu_mode();
494 pte_unmap_unlock(orig_pte, ptl);
495 if (pageout)
496 reclaim_pages(&folio_list);
497 cond_resched();
498
499 return 0;
500 }
501
--
0-DAY CI Kernel Test Service
https://01.org/lkp
View attachment "config" of type "text/plain" (158601 bytes)
Powered by blists - more mailing lists