Message-ID: <202410291853.lBOeTPTK-lkp@intel.com>
Date: Tue, 29 Oct 2024 18:24:19 +0800
From: kernel test robot <lkp@...el.com>
To: Zi Yan <ziy@...dia.com>, linux-mm@...ck.org,
	"Matthew Wilcox (Oracle)" <willy@...radead.org>
Cc: llvm@...ts.linux.dev, oe-kbuild-all@...ts.linux.dev,
	Ryan Roberts <ryan.roberts@....com>,
	Hugh Dickins <hughd@...gle.com>,
	"Kirill A . Shutemov" <kirill.shutemov@...ux.intel.com>,
	David Hildenbrand <david@...hat.com>,
	Yang Shi <yang@...amperecomputing.com>,
	Miaohe Lin <linmiaohe@...wei.com>,
	Kefeng Wang <wangkefeng.wang@...wei.com>,
	Yu Zhao <yuzhao@...gle.com>, John Hubbard <jhubbard@...dia.com>,
	linux-kernel@...r.kernel.org, Zi Yan <ziy@...dia.com>
Subject: Re: [PATCH v1 1/3] mm/huge_memory: buddy allocator like folio_split()

Hi Zi,

kernel test robot noticed the following build warnings:

[auto build test WARNING on akpm-mm/mm-everything]
[also build test WARNING on next-20241029]
[cannot apply to linus/master v6.12-rc5]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patches, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]
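
For example, something along these lines records the base tree information in
the generated patch (the revision range and output directory here are only
placeholders):

  git format-patch --base=auto -v2 -o outgoing/ HEAD~3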

url:    https://github.com/intel-lab-lkp/linux/commits/Zi-Yan/mm-huge_memory-buddy-allocator-like-folio_split/20241029-021200
base:   https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-everything
patch link:    https://lore.kernel.org/r/20241028180932.1319265-2-ziy%40nvidia.com
patch subject: [PATCH v1 1/3] mm/huge_memory: buddy allocator like folio_split()
config: x86_64-kexec (https://download.01.org/0day-ci/archive/20241029/202410291853.lBOeTPTK-lkp@intel.com/config)
compiler: clang version 19.1.2 (https://github.com/llvm/llvm-project 7ba7d8e2f7b6445b60679da826210cdde29eaf8b)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20241029/202410291853.lBOeTPTK-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add the following tags
| Reported-by: kernel test robot <lkp@...el.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202410291853.lBOeTPTK-lkp@intel.com/
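
These trailers normally go at the end of the fixing commit's message, next to
your Signed-off-by, for example (subject, body and sign-off are placeholders):

  mm/huge_memory: <short summary of the fix>

  <description of the change>

  Reported-by: kernel test robot <lkp@...el.com>
  Closes: https://lore.kernel.org/oe-kbuild-all/202410291853.lBOeTPTK-lkp@intel.com/
  Signed-off-by: <name> <address>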

All warnings (new ones prefixed by >>):

   In file included from mm/huge_memory.c:8:
   In file included from include/linux/mm.h:2213:
   include/linux/vmstat.h:504:43: warning: arithmetic between different enumeration types ('enum zone_stat_item' and 'enum numa_stat_item') [-Wenum-enum-conversion]
     504 |         return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
         |                            ~~~~~~~~~~~~~~~~~~~~~ ^
     505 |                            item];
         |                            ~~~~
   include/linux/vmstat.h:511:43: warning: arithmetic between different enumeration types ('enum zone_stat_item' and 'enum numa_stat_item') [-Wenum-enum-conversion]
     511 |         return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
         |                            ~~~~~~~~~~~~~~~~~~~~~ ^
     512 |                            NR_VM_NUMA_EVENT_ITEMS +
         |                            ~~~~~~~~~~~~~~~~~~~~~~
   include/linux/vmstat.h:518:36: warning: arithmetic between different enumeration types ('enum node_stat_item' and 'enum lru_list') [-Wenum-enum-conversion]
     518 |         return node_stat_name(NR_LRU_BASE + lru) + 3; // skip "nr_"
         |                               ~~~~~~~~~~~ ^ ~~~
   include/linux/vmstat.h:524:43: warning: arithmetic between different enumeration types ('enum zone_stat_item' and 'enum numa_stat_item') [-Wenum-enum-conversion]
     524 |         return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
         |                            ~~~~~~~~~~~~~~~~~~~~~ ^
     525 |                            NR_VM_NUMA_EVENT_ITEMS +
         |                            ~~~~~~~~~~~~~~~~~~~~~~
   In file included from mm/huge_memory.c:18:
   include/linux/mm_inline.h:47:41: warning: arithmetic between different enumeration types ('enum node_stat_item' and 'enum lru_list') [-Wenum-enum-conversion]
      47 |         __mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages);
         |                                    ~~~~~~~~~~~ ^ ~~~
   include/linux/mm_inline.h:49:22: warning: arithmetic between different enumeration types ('enum zone_stat_item' and 'enum lru_list') [-Wenum-enum-conversion]
      49 |                                 NR_ZONE_LRU_BASE + lru, nr_pages);
         |                                 ~~~~~~~~~~~~~~~~ ^ ~~~
>> mm/huge_memory.c:3342:6: warning: variable 'nr_dropped' set but not used [-Wunused-but-set-variable]
    3342 |         int nr_dropped = 0;
         |             ^
   mm/huge_memory.c:3806:12: warning: unused function 'folio_split' [-Wunused-function]
    3806 | static int folio_split(struct folio *folio, unsigned int new_order,
         |            ^~~~~~~~~~~
   8 warnings generated.
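
The enum warnings come from pre-existing header code; the nr_dropped warning is
the new one introduced by this patch: the variable is incremented in the shmem
beyond-EOF branch but its value is never read afterwards (likely the
shmem_uncharge() call used by the existing split path was dropped in the
refactor). A minimal userspace sketch of both warning classes, with all names
made up for illustration:

  /*
   * sketch.c: clang -Wenum-enum-conversion -Wunused-but-set-variable -c sketch.c
   */
  enum a_items { A_FIRST, NR_A_ITEMS };
  enum b_items { B_FIRST, NR_B_ITEMS };

  static const char *names[2];

  const char *pick_name(enum b_items item)
  {
  	/* -Wenum-enum-conversion: arithmetic between two different enum types */
  	return names[NR_A_ITEMS + item];
  }

  int count_dropped(int n)
  {
  	int nr_dropped = 0;	/* -Wunused-but-set-variable: written here ... */
  	int i;

  	for (i = 0; i < n; i++)
  		nr_dropped++;	/* ... and here, but the value is never read */
  	return 0;
  }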


vim +/nr_dropped +3342 mm/huge_memory.c

  3292	
  3293	#define for_each_folio_until_end_safe(iter, iter2, start, end)	\
  3294		for (iter = start, iter2 = folio_next(start);		\
  3295		     iter != end;					\
  3296		     iter = iter2, iter2 = folio_next(iter2))
  3297	
  3298	/*
  3299	 * It splits an unmapped @folio into lower order smaller folios in two
  3300	 * ways.
  3301	 * 1. uniform split: the given @folio is split into multiple @new_order small
  3302	 *    folios, where all small folios have the same order. This is done when
  3303	 *    uniform_split is true.
  3304	 * 2. buddy allocator like split: the given @folio is split in half, and the
  3305	 *    half containing the given page is split in half again, repeatedly, until
  3306	 *    the folio containing @page reaches @new_order. This is done when
  3307	 *    uniform_split is false.
  3308	 *
  3309	 * The high-level flow for these two methods is:
  3310	 * 1. uniform split: a single __split_folio_to_order() is called to split the
  3311	 *    @folio into @new_order, then we traverse all the resulting folios one by
  3312	 *    one in PFN ascending order and perform stats, unfreeze, adding to list,
  3313	 *    and file mapping index operations.
  3314	 * 2. buddy allocator like split: in general, folio_order - @new_order calls to
  3315	 *    __split_folio_to_order() are made in the for loop to split the @folio
  3316	 *    by one order at a time. The resulting small folios are processed
  3317	 *    as in the traversal in 1, except for the one containing
  3318	 *    @page, which is split in the next iteration of the for loop.
  3319	 *
  3320	 * After splitting, the caller's folio reference will be transferred to the
  3321	 * folio containing @page. The other folios may be freed if they are not mapped.
  3322	 *
  3323	 * In terms of locking, after splitting,
  3324	 * 1. uniform split leaves @page (or the folio that contains it) locked;
  3325	 * 2. buddy allocator like split leaves @folio locked.
  3326	 *
  3327	 * If @list is null, tail pages will be added to LRU list, otherwise, to @list.
  3328	 */
  3329	static int __folio_split_without_mapping(struct folio *folio, int new_order,
  3330			struct page *page, struct list_head *list, pgoff_t end,
  3331			struct xa_state *xas, struct address_space *mapping,
  3332			bool uniform_split)
  3333	{
  3334		struct lruvec *lruvec;
  3335		struct address_space *swap_cache = NULL;
  3336		struct folio *origin_folio = folio;
  3337		struct folio *next_folio = folio_next(folio);
  3338		struct folio *new_folio;
  3339		struct folio *next;
  3340		int order = folio_order(folio);
  3341		int split_order = order - 1;
> 3342		int nr_dropped = 0;
  3343	
  3344		if (folio_test_anon(folio) && folio_test_swapcache(folio)) {
  3345			if (!uniform_split)
  3346				return -EINVAL;
  3347	
  3348			swap_cache = swap_address_space(folio->swap);
  3349			xa_lock(&swap_cache->i_pages);
  3350		}
  3351	
  3352		if (folio_test_anon(folio))
  3353			mod_mthp_stat(order, MTHP_STAT_NR_ANON, -1);
  3354	
  3355		/* lock lru list/PageCompound, ref frozen by page_ref_freeze */
  3356		lruvec = folio_lruvec_lock(folio);
  3357	
  3358		/*
  3359		 * split to new_order one order at a time. For uniform split,
  3360		 * intermediate orders are skipped
  3361		 */
  3362		for (split_order = order - 1; split_order >= new_order; split_order--) {
  3363			int old_order = folio_order(folio);
  3364			struct folio *release;
  3365			struct folio *end_folio = folio_next(folio);
  3366			int status;
  3367	
  3368			if (folio_test_anon(folio) && split_order == 1)
  3369				continue;
  3370			if (uniform_split && split_order != new_order)
  3371				continue;
  3372	
  3373			if (mapping) {
  3374				/*
  3375				 * uniform split has xas_split_alloc() called before
  3376				 * irq is disabled, since xas_nomem() might not be
  3377				 * able to allocate enough memory.
  3378				 */
  3379				if (uniform_split)
  3380					xas_split(xas, folio, old_order);
  3381				else {
  3382					xas_set_order(xas, folio->index, split_order);
  3383					xas_set_err(xas, -ENOMEM);
  3384					if (xas_nomem(xas, 0))
  3385						xas_split(xas, folio, old_order);
  3386					else
  3387						return -ENOMEM;
  3388				}
  3389			}
  3390	
  3391			split_page_memcg(&folio->page, old_order, split_order);
  3392			split_page_owner(&folio->page, old_order, split_order);
  3393			pgalloc_tag_split(folio, old_order, split_order);
  3394	
  3395			status = __split_folio_to_order(folio, split_order);
  3396	
  3397			if (status < 0)
  3398				return status;
  3399	
  3400			/*
  3401			 * Iterate through after-split folios and perform related
  3402			 * operations. But in buddy allocator like split, the folio
  3403			 * containing the specified page is skipped until its order
  3404			 * is new_order, since the folio will be worked on in the next
  3405			 * iteration.
  3406			 */
  3407			for_each_folio_until_end_safe(release, next, folio, end_folio) {
  3408				if (page_in_folio_offset(page, release) >= 0) {
  3409					folio = release;
  3410					if (split_order != new_order)
  3411						continue;
  3412				}
  3413				if (folio_test_anon(release))
  3414					mod_mthp_stat(folio_order(release),
  3415							MTHP_STAT_NR_ANON, 1);
  3416	
  3417				/*
  3418				 * Unfreeze refcount first. Additional reference from
  3419				 * page cache.
  3420				 */
  3421				folio_ref_unfreeze(release,
  3422					1 + ((!folio_test_anon(origin_folio) ||
  3423					     folio_test_swapcache(origin_folio)) ?
  3424						     folio_nr_pages(release) : 0));
  3425	
  3426				if (release != origin_folio)
  3427					lru_add_page_tail(origin_folio, &release->page,
  3428							lruvec, list);
  3429	
  3430				/* Some pages can be beyond EOF: drop them from page cache */
  3431				if (release->index >= end) {
  3432					if (shmem_mapping(origin_folio->mapping))
  3433						nr_dropped++;
  3434					else if (folio_test_clear_dirty(release))
  3435						folio_account_cleaned(release,
  3436							inode_to_wb(origin_folio->mapping->host));
  3437					__filemap_remove_folio(release, NULL);
  3438					folio_put(release);
  3439				} else if (!folio_test_anon(release)) {
  3440					__xa_store(&origin_folio->mapping->i_pages,
  3441							release->index, &release->page, 0);
  3442				} else if (swap_cache) {
  3443					__xa_store(&swap_cache->i_pages,
  3444							swap_cache_index(release->swap),
  3445							&release->page, 0);
  3446				}
  3447			}
  3448		}
  3449	
  3450		unlock_page_lruvec(lruvec);
  3451	
  3452		if (folio_test_anon(origin_folio)) {
  3453			if (folio_test_swapcache(origin_folio))
  3454				xa_unlock(&swap_cache->i_pages);
  3455		} else
  3456			xa_unlock(&mapping->i_pages);
  3457	
  3458		/* Caller disabled irqs, so they are still disabled here */
  3459		local_irq_enable();
  3460	
  3461		remap_page(origin_folio, 1 << order,
  3462				folio_test_anon(origin_folio) ?
  3463					RMP_USE_SHARED_ZEROPAGE : 0);
  3464	
  3465		/*
  3466		 * At this point, folio should contain the specified page, so that it
  3467		 * will be left to the caller to unlock it.
  3468		 */
  3469		for_each_folio_until_end_safe(new_folio, next, origin_folio, next_folio) {
  3470			if (uniform_split && new_folio == folio)
  3471				continue;
  3472			if (!uniform_split && new_folio == origin_folio)
  3473				continue;
  3474	
  3475			folio_unlock(new_folio);
  3476			/*
  3477			 * Subpages may be freed if there wasn't any mapping
  3478			 * like if add_to_swap() is running on a lru page that
  3479			 * had its mapping zapped. And freeing these pages
  3480			 * requires taking the lru_lock so we do the put_page
  3481			 * of the tail pages after the split is complete.
  3482			 */
  3483			free_page_and_swap_cache(&new_folio->page);
  3484		}
  3485		return 0;
  3486	}
  3487	
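
As a rough illustration of the non-uniform path above, here is a userspace
sketch (not kernel code; it only walks the folio orders the loop at line 3362
steps through, ignoring locking, refcounts and mapping updates):

  #include <stdio.h>

  /*
   * Hypothetical walk-through of the buddy-allocator-like split: each pass
   * halves the folio that still contains @page and releases its buddy, so
   * splitting an order-9 folio down to new_order 0 leaves one folio of each
   * order 8..1 plus two order-0 folios (the real code additionally skips
   * order 1 for anonymous folios).
   */
  int main(void)
  {
  	int order = 9, new_order = 0, split_order;

  	for (split_order = order - 1; split_order >= new_order; split_order--)
  		printf("order %d -> keep the order-%d half with @page, release the other half\n",
  		       split_order + 1, split_order);
  	return 0;
  }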

-- 
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
