Message-ID: <202502151925.c1S00bk3-lkp@intel.com>
Date: Sat, 15 Feb 2025 20:07:19 +0800
From: kernel test robot <lkp@...el.com>
To: Vincent Donnefort <vdonnefort@...gle.com>
Cc: oe-kbuild-all@...ts.linux.dev, linux-kernel@...r.kernel.org,
	"Steven Rostedt (Google)" <rostedt@...dmis.org>
Subject: kernel/trace/ring_buffer.c:6265 __rb_map_vma() warn: unsigned '_x'
 is never less than zero.

tree:   https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git master
head:   7ff71e6d923969d933e1ba7e0db857782d36cd19
commit: 117c39200d9d760cbd5944bb89efb7b9c51965aa ring-buffer: Introducing ring-buffer mapping functions
date:   9 months ago
config: riscv-randconfig-r073-20250213 (https://download.01.org/0day-ci/archive/20250215/202502151925.c1S00bk3-lkp@intel.com/config)
compiler: riscv32-linux-gcc (GCC) 14.2.0

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add the following tags:
| Reported-by: kernel test robot <lkp@...el.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202502151925.c1S00bk3-lkp@intel.com/

New smatch warnings:
kernel/trace/ring_buffer.c:6265 __rb_map_vma() warn: unsigned '_x' is never less than zero.

Old smatch warnings:
include/linux/mm.h:1306 virt_to_folio() warn: unsigned '_x' is never less than zero.
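
For context, this class of smatch message fires when macro expansion leaves behind a
comparison of an unsigned temporary against zero, which can never be true. A minimal
sketch of the pattern (hypothetical macro and limit for illustration only, not the
actual virt_to_page()/__pa() chain on riscv32 that introduces '_x' here):

	/*
	 * Hypothetical range check: "_x < 0" can never be true for an
	 * unsigned long, which is exactly what smatch warns about once
	 * the macro is expanded at the call site.
	 */
	#define addr_in_range(addr) ({					\
		unsigned long _x = (unsigned long)(addr);		\
		!(_x < 0 || _x >= (1UL << 30));				\
	})

The dead comparison is optimized away, so the warning is typically about a check
written with signed values in mind rather than a functional bug.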

vim +/_x +6265 kernel/trace/ring_buffer.c

  6212	
  6213	/*
  6214	 *   +--------------+  pgoff == 0
  6215	 *   |   meta page  |
  6216	 *   +--------------+  pgoff == 1
  6217	 *   | subbuffer 0  |
  6218	 *   |              |
  6219	 *   +--------------+  pgoff == (1 + (1 << subbuf_order))
  6220	 *   | subbuffer 1  |
  6221	 *   |              |
  6222	 *         ...
  6223	 */
  6224	#ifdef CONFIG_MMU
  6225	static int __rb_map_vma(struct ring_buffer_per_cpu *cpu_buffer,
  6226				struct vm_area_struct *vma)
  6227	{
  6228		unsigned long nr_subbufs, nr_pages, vma_pages, pgoff = vma->vm_pgoff;
  6229		unsigned int subbuf_pages, subbuf_order;
  6230		struct page **pages;
  6231		int p = 0, s = 0;
  6232		int err;
  6233	
  6234		/* Refuse MAP_PRIVATE or writable mappings */
  6235		if (vma->vm_flags & VM_WRITE || vma->vm_flags & VM_EXEC ||
  6236		    !(vma->vm_flags & VM_MAYSHARE))
  6237			return -EPERM;
  6238	
  6239		/*
  6240		 * Make sure the mapping cannot become writable later. Also tell the VM
  6241		 * to not touch these pages (VM_DONTCOPY | VM_DONTEXPAND).
  6242		 */
  6243		vm_flags_mod(vma, VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP,
  6244			     VM_MAYWRITE);
  6245	
  6246		lockdep_assert_held(&cpu_buffer->mapping_lock);
  6247	
  6248		subbuf_order = cpu_buffer->buffer->subbuf_order;
  6249		subbuf_pages = 1 << subbuf_order;
  6250	
  6251		nr_subbufs = cpu_buffer->nr_pages + 1; /* + reader-subbuf */
  6252		nr_pages = ((nr_subbufs) << subbuf_order) - pgoff + 1; /* + meta-page */
  6253	
  6254		vma_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
  6255		if (!vma_pages || vma_pages > nr_pages)
  6256			return -EINVAL;
  6257	
  6258		nr_pages = vma_pages;
  6259	
  6260		pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
  6261		if (!pages)
  6262			return -ENOMEM;
  6263	
  6264		if (!pgoff) {
> 6265			pages[p++] = virt_to_page(cpu_buffer->meta_page);
  6266	
  6267			/*
  6268			 * TODO: Align sub-buffers on their size, once
  6269			 * vm_insert_pages() supports the zero-page.
  6270			 */
  6271		} else {
  6272			/* Skip the meta-page */
  6273			pgoff--;
  6274	
  6275			if (pgoff % subbuf_pages) {
  6276				err = -EINVAL;
  6277				goto out;
  6278			}
  6279	
  6280			s += pgoff / subbuf_pages;
  6281		}
  6282	
  6283		while (p < nr_pages) {
  6284			struct page *page = virt_to_page(cpu_buffer->subbuf_ids[s]);
  6285			int off = 0;
  6286	
  6287			if (WARN_ON_ONCE(s >= nr_subbufs)) {
  6288				err = -EINVAL;
  6289				goto out;
  6290			}
  6291	
  6292			for (; off < (1 << (subbuf_order)); off++, page++) {
  6293				if (p >= nr_pages)
  6294					break;
  6295	
  6296				pages[p++] = page;
  6297			}
  6298			s++;
  6299		}
  6300	
  6301		err = vm_insert_pages(vma, vma->vm_start, pages, &nr_pages);
  6302	
  6303	out:
  6304		kfree(pages);
  6305	
  6306		return err;
  6307	}
  6308	#else
  6309	static int __rb_map_vma(struct ring_buffer_per_cpu *cpu_buffer,
  6310				struct vm_area_struct *vma)
  6311	{
  6312		return -EOPNOTSUPP;
  6313	}
  6314	#endif
  6315	
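
For reference, the pgoff handling above only accepts offsets that land on the
meta page or on a sub-buffer boundary. With a hypothetical subbuf_order of 1
(i.e. subbuf_pages == 2), the arithmetic works out as:

	pgoff == 0  ->  mapping starts at the meta page
	pgoff == 1  ->  sub-buffer 0   (1 - 1 = 0;  0 % 2 == 0;  s = 0 / 2 = 0)
	pgoff == 2  ->  -EINVAL        (2 - 1 = 1;  1 % 2 != 0)
	pgoff == 3  ->  sub-buffer 1   (3 - 1 = 2;  2 % 2 == 0;  s = 2 / 2 = 1)

which matches the layout sketched in the comment before __rb_map_vma().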

-- 
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
