Message-ID: <202602091006.0jXoavPW-lkp@intel.com>
Date: Mon, 9 Feb 2026 10:12:02 +0800
From: kernel test robot <lkp@...el.com>
To: Nhat Pham <nphamcs@...il.com>, linux-mm@...ck.org
Cc: llvm@...ts.linux.dev, oe-kbuild-all@...ts.linux.dev,
akpm@...ux-foundation.org, hannes@...xchg.org, hughd@...gle.com,
yosry.ahmed@...ux.dev, mhocko@...nel.org, roman.gushchin@...ux.dev,
shakeel.butt@...ux.dev, muchun.song@...ux.dev, len.brown@...el.com,
chengming.zhou@...ux.dev, kasong@...cent.com, chrisl@...nel.org,
huang.ying.caritas@...il.com, ryan.roberts@....com,
shikemeng@...weicloud.com, viro@...iv.linux.org.uk,
baohua@...nel.org, bhe@...hat.com, osalvador@...e.de,
lorenzo.stoakes@...cle.com, christophe.leroy@...roup.eu,
pavel@...nel.org, kernel-team@...a.com,
linux-kernel@...r.kernel.org, cgroups@...r.kernel.org,
linux-pm@...r.kernel.org, peterx@...hat.com, riel@...riel.com,
joshua.hahnjy@...il.com
Subject: Re: [PATCH v3 18/20] memcg: swap: only charge physical swap slots
Hi Nhat,
kernel test robot noticed the following build errors:
[auto build test ERROR on linus/master]
[also build test ERROR on v6.19]
[cannot apply to akpm-mm/mm-everything tj-cgroup/for-next tip/smp/core next-20260205]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patches, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]
url: https://github.com/intel-lab-lkp/linux/commits/Nhat-Pham/swap-rearrange-the-swap-header-file/20260209-065842
base: linus/master
patch link: https://lore.kernel.org/r/20260208215839.87595-19-nphamcs%40gmail.com
patch subject: [PATCH v3 18/20] memcg: swap: only charge physical swap slots
config: sparc64-defconfig (https://download.01.org/0day-ci/archive/20260209/202602091006.0jXoavPW-lkp@intel.com/config)
compiler: clang version 20.1.8 (https://github.com/llvm/llvm-project 87f0227cb60147a26a1eeb4fb06e3b505e9c7261)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20260209/202602091006.0jXoavPW-lkp@intel.com/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add the following tags
| Reported-by: kernel test robot <lkp@...el.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202602091006.0jXoavPW-lkp@intel.com/
All errors (new ones prefixed by >>):
>> mm/vswap.c:637:2: error: call to undeclared function 'mem_cgroup_clear_swap'; ISO C99 and later do not support implicit function declarations [-Wimplicit-function-declaration]
637 | mem_cgroup_clear_swap(entry, 1);
| ^
mm/vswap.c:637:2: note: did you mean 'mem_cgroup_uncharge_swap'?
include/linux/swap.h:658:20: note: 'mem_cgroup_uncharge_swap' declared here
658 | static inline void mem_cgroup_uncharge_swap(swp_entry_t entry,
| ^
>> mm/vswap.c:661:2: error: call to undeclared function 'mem_cgroup_record_swap'; ISO C99 and later do not support implicit function declarations [-Wimplicit-function-declaration]
661 | mem_cgroup_record_swap(folio, entry);
| ^
mm/vswap.c:661:2: note: did you mean 'mem_cgroup_uncharge_swap'?
include/linux/swap.h:658:20: note: 'mem_cgroup_uncharge_swap' declared here
658 | static inline void mem_cgroup_uncharge_swap(swp_entry_t entry,
| ^
2 errors generated.
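Both errors are implicit-declaration failures: mm/vswap.c calls
mem_cgroup_clear_swap() and mem_cgroup_record_swap(), but no declaration is
visible in this configuration (sparc64-defconfig, which presumably leaves the
relevant memcg support disabled). A minimal sketch of the usual shape of such
a fix follows, assuming the helpers sit behind CONFIG_MEMCG in
include/linux/swap.h next to mem_cgroup_uncharge_swap(); the signatures are
inferred from the call sites below and this is not the author's actual patch:

/*
 * Hedged sketch, not the real patch: declare the helpers when memcg is
 * built in, and provide no-op stubs otherwise, mirroring the existing
 * mem_cgroup_uncharge_swap() pattern at include/linux/swap.h:658.
 * Signatures are inferred from the call sites in mm/vswap.c.
 */
#ifdef CONFIG_MEMCG
void mem_cgroup_record_swap(struct folio *folio, swp_entry_t entry);
void mem_cgroup_clear_swap(swp_entry_t entry, unsigned int nr);
#else
static inline void mem_cgroup_record_swap(struct folio *folio,
					  swp_entry_t entry)
{
}

static inline void mem_cgroup_clear_swap(swp_entry_t entry, unsigned int nr)
{
}
#endif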
vim +/mem_cgroup_clear_swap +637 mm/vswap.c
528
529 /*
530 * Callers need to handle races with other operations themselves.
531 *
532 * Specifically, this function is safe to call in contexts where the swap
533 * entry has been added to the swap cache and the associated folio is locked.
534 * We cannot race with other accessors, and the swap entry is guaranteed to be
535 * valid the whole time (since swap cache implies one refcount).
536 *
537 * We cannot assume that the backends will be of the same type,
538 * contiguous, etc. We might have a large folio coalesced from subpages with
539 * mixed backends, which is only rectified when the folio is reclaimed.
540 */
541 static void release_backing(swp_entry_t entry, int nr)
542 {
543 struct vswap_cluster *cluster = NULL;
544 struct swp_desc *desc;
545 unsigned long flush_nr, phys_swap_start = 0, phys_swap_end = 0;
546 unsigned long phys_swap_released = 0;
547 unsigned int phys_swap_type = 0;
548 bool need_flushing_phys_swap = false;
549 swp_slot_t flush_slot;
550 int i;
551
552 VM_WARN_ON(!entry.val);
553
554 rcu_read_lock();
555 for (i = 0; i < nr; i++) {
556 desc = vswap_iter(&cluster, entry.val + i);
557 VM_WARN_ON(!desc);
558
559 /*
560 * We batch contiguous physical swap slots for more efficient
561 * freeing.
562 */
563 if (phys_swap_start != phys_swap_end &&
564 (desc->type != VSWAP_SWAPFILE ||
565 swp_slot_type(desc->slot) != phys_swap_type ||
566 swp_slot_offset(desc->slot) != phys_swap_end)) {
567 need_flushing_phys_swap = true;
568 flush_slot = swp_slot(phys_swap_type, phys_swap_start);
569 flush_nr = phys_swap_end - phys_swap_start;
570 phys_swap_start = phys_swap_end = 0;
571 }
572
573 if (desc->type == VSWAP_ZSWAP && desc->zswap_entry) {
574 zswap_entry_free(desc->zswap_entry);
575 } else if (desc->type == VSWAP_SWAPFILE) {
576 phys_swap_released++;
577 if (!phys_swap_start) {
578 /* start a new contiguous range of phys swap */
579 phys_swap_start = swp_slot_offset(desc->slot);
580 phys_swap_end = phys_swap_start + 1;
581 phys_swap_type = swp_slot_type(desc->slot);
582 } else {
583 /* extend the current contiguous range of phys swap */
584 phys_swap_end++;
585 }
586 }
587
588 desc->slot.val = 0;
589
590 if (need_flushing_phys_swap) {
591 spin_unlock(&cluster->lock);
592 cluster = NULL;
593 swap_slot_free_nr(flush_slot, flush_nr);
594 need_flushing_phys_swap = false;
595 }
596 }
597 if (cluster)
598 spin_unlock(&cluster->lock);
599 rcu_read_unlock();
600
601 /* Flush any remaining physical swap range */
602 if (phys_swap_start) {
603 flush_slot = swp_slot(phys_swap_type, phys_swap_start);
604 flush_nr = phys_swap_end - phys_swap_start;
605 swap_slot_free_nr(flush_slot, flush_nr);
606 }
607
608 if (phys_swap_released)
609 mem_cgroup_uncharge_swap(entry, phys_swap_released);
610 }
611
612 /*
613 * Entered with the cluster locked, but might unlock the cluster.
614 * This is because several operations, such as releasing physical swap slots
615 * (i.e. swap_slot_free_nr()) require the cluster to be unlocked to avoid
616 * deadlocks.
617 *
618 * This is safe, because:
619 *
620 * 1. The swap entry to be freed has refcnt (swap count and swapcache pin)
621 * down to 0, so no one can change its internal state.
622 *
623 * 2. The swap entry to be freed still holds a refcnt to the cluster, keeping
624 * the cluster itself valid.
625 *
626 * We will exit the function with the cluster re-locked.
627 */
628 static void vswap_free(struct vswap_cluster *cluster, struct swp_desc *desc,
629 swp_entry_t entry)
630 {
631 /* Clear shadow if present */
632 if (xa_is_value(desc->shadow))
633 desc->shadow = NULL;
634 spin_unlock(&cluster->lock);
635
636 release_backing(entry, 1);
> 637 mem_cgroup_clear_swap(entry, 1);
638
639 /* erase forward mapping and release the virtual slot for reallocation */
640 spin_lock(&cluster->lock);
641 release_vswap_slot(cluster, entry.val);
642 }
643
644 /**
645 * folio_alloc_swap - allocate virtual swap space for a folio.
646 * @folio: the folio.
647 *
648 * Return: 0 if the allocation succeeded, -ENOMEM if the allocation failed.
649 */
650 int folio_alloc_swap(struct folio *folio)
651 {
652 swp_entry_t entry;
653
654 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
655 VM_BUG_ON_FOLIO(!folio_test_uptodate(folio), folio);
656
657 entry = vswap_alloc(folio);
658 if (!entry.val)
659 return -ENOMEM;
660
> 661 mem_cgroup_record_swap(folio, entry);
662 swap_cache_add_folio(folio, entry, NULL);
663
664 return 0;
665 }
666
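An aside on the excerpt above: release_backing() batches contiguous physical
swap slots of the same type so that swap_slot_free_nr() runs once per
contiguous run rather than once per slot. Below is a self-contained sketch of
that run-coalescing pattern; struct slot, free_run() and release_slots() are
hypothetical stand-ins for swp_slot_type()/swp_slot_offset() and
swap_slot_free_nr(), and the cluster-lock juggling of the original is elided.
Illustration only, not kernel code:

#include <stdio.h>

/* Hypothetical stand-in for a physical swap slot (type + offset). */
struct slot {
	unsigned int type;
	unsigned long offset;
};

/* Stand-in for swap_slot_free_nr(): free 'nr' slots starting at 'start'. */
static void free_run(unsigned int type, unsigned long start, unsigned long nr)
{
	printf("free: type=%u start=%lu nr=%lu\n", type, start, nr);
}

/* Coalesce consecutive slots into maximal contiguous runs before freeing,
 * mirroring the batching in release_backing(). */
static void release_slots(const struct slot *slots, int n)
{
	unsigned long start = 0, end = 0; /* start == end: no open run */
	unsigned int type = 0;
	int i;

	for (i = 0; i < n; i++) {
		/* Flush the open run if this slot cannot extend it. */
		if (start != end &&
		    (slots[i].type != type || slots[i].offset != end)) {
			free_run(type, start, end - start);
			start = end = 0;
		}
		if (start == end) {
			/* Start a new contiguous run. */
			type = slots[i].type;
			start = slots[i].offset;
			end = start + 1;
		} else {
			/* Extend the current contiguous run. */
			end++;
		}
	}
	/* Flush the trailing run, as the original does after its loop. */
	if (start != end)
		free_run(type, start, end - start);
}

int main(void)
{
	const struct slot s[] = {
		{ 0, 10 }, { 0, 11 }, { 0, 12 },
		{ 1, 5 }, { 1, 6 },
		{ 0, 40 },
	};

	release_slots(s, sizeof(s) / sizeof(s[0]));
	return 0;
}

Running this frees three runs: (type 0, offsets 10..12), (type 1, 5..6) and
(type 0, 40). Note the sketch tracks an open run with start != end rather
than a zero sentinel, which keeps offset 0 unambiguous; the
!phys_swap_start test in the original may be worth double-checking for the
offset-0 case.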
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki