Message-ID: <202501242000.A2sKqaCL-lkp@intel.com>
Date: Fri, 24 Jan 2025 20:38:07 +0800
From: kernel test robot <lkp@...el.com>
To: Jens Axboe <axboe@...nel.dk>
Cc: oe-kbuild-all@...ts.linux.dev, linux-kernel@...r.kernel.org
Subject: io_uring/register.c:554:9: sparse: sparse: cast to non-scalar

tree:   https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git master
head:   bc8198dc7ebc492ec3e9fa1617dcdfbe98e73b17
commit: 2c5aae129f427f83eeba5efbfb4e60a777cd073c io_uring/register: document io_register_resize_rings() shared mem usage
date:   9 days ago
config: alpha-randconfig-r112-20250124 (https://download.01.org/0day-ci/archive/20250124/202501242000.A2sKqaCL-lkp@intel.com/config)
compiler: alpha-linux-gcc (GCC) 14.2.0
reproduce: (https://download.01.org/0day-ci/archive/20250124/202501242000.A2sKqaCL-lkp@intel.com/reproduce)
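
A rough way to reproduce locally (a sketch only; the "reproduce" link above has the
exact, authoritative steps) is to save the linked randconfig as .config and run sparse
via C=1 with an alpha cross toolchain and sparse installed:

	make ARCH=alpha CROSS_COMPILE=alpha-linux- olddefconfig
	make ARCH=alpha CROSS_COMPILE=alpha-linux- C=1 io_uring/register.o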

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add the following tags:
| Reported-by: kernel test robot <lkp@...el.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202501242000.A2sKqaCL-lkp@intel.com/

sparse warnings: (new ones prefixed by >>)
   io_uring/register.c: note: in included file:
   include/linux/io_uring_types.h:190:37: sparse: sparse: array of flexible structures
>> io_uring/register.c:554:9: sparse: sparse: cast to non-scalar
>> io_uring/register.c:554:9: sparse: sparse: cast from non-scalar
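
The flagged statement at register.c:554 copies rings->sq_flags with
READ_ONCE()/WRITE_ONCE(). sq_flags is declared as an atomic_t in
include/linux/io_uring_types.h, i.e. a one-member struct rather than a plain
integer, and the alpha __READ_ONCE() implementation appears to finish with a
(typeof(x)) cast of a local copy; when typeof(x) is a struct, that is a cast
to/from a non-scalar type, which is what sparse reports here. A minimal sketch
of the pattern (hypothetical names, not taken from the tree):

	#include <linux/compiler.h>		/* READ_ONCE()/WRITE_ONCE() */

	struct flags_word {			/* stand-in for atomic_t */
		int counter;
	};

	struct demo_rings {
		struct flags_word sq_flags;
	};

	static void copy_flags(struct demo_rings *dst, struct demo_rings *src)
	{
		/*
		 * READ_ONCE()/WRITE_ONCE() of a struct-typed lvalue: on alpha
		 * sparse reports "cast to non-scalar" / "cast from non-scalar"
		 * for the struct cast hidden in the macro expansion.
		 */
		WRITE_ONCE(dst->sq_flags, READ_ONCE(src->sq_flags));
	}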

vim +554 io_uring/register.c

   393	
   394	#define swap_old(ctx, o, n, field)		\
   395		do {					\
   396			(o).field = (ctx)->field;	\
   397			(ctx)->field = (n).field;	\
   398		} while (0)
   399	
   400	#define RESIZE_FLAGS	(IORING_SETUP_CQSIZE | IORING_SETUP_CLAMP)
   401	#define COPY_FLAGS	(IORING_SETUP_NO_SQARRAY | IORING_SETUP_SQE128 | \
   402				 IORING_SETUP_CQE32 | IORING_SETUP_NO_MMAP)
   403	
   404	static int io_register_resize_rings(struct io_ring_ctx *ctx, void __user *arg)
   405	{
   406		struct io_ring_ctx_rings o = { }, n = { }, *to_free = NULL;
   407		size_t size, sq_array_offset;
   408		struct io_uring_params p;
   409		unsigned i, tail;
   410		void *ptr;
   411		int ret;
   412	
   413		/* for single issuer, must be owner resizing */
   414		if (ctx->flags & IORING_SETUP_SINGLE_ISSUER &&
   415		    current != ctx->submitter_task)
   416			return -EEXIST;
   417		/* limited to DEFER_TASKRUN for now */
   418		if (!(ctx->flags & IORING_SETUP_DEFER_TASKRUN))
   419			return -EINVAL;
   420		if (copy_from_user(&p, arg, sizeof(p)))
   421			return -EFAULT;
   422		if (p.flags & ~RESIZE_FLAGS)
   423			return -EINVAL;
   424	
   425		/* properties that are always inherited */
   426		p.flags |= (ctx->flags & COPY_FLAGS);
   427	
   428		ret = io_uring_fill_params(p.sq_entries, &p);
   429		if (unlikely(ret))
   430			return ret;
   431	
   432		/* nothing to do, but copy params back */
   433		if (p.sq_entries == ctx->sq_entries && p.cq_entries == ctx->cq_entries) {
   434			if (copy_to_user(arg, &p, sizeof(p)))
   435				return -EFAULT;
   436			return 0;
   437		}
   438	
   439		size = rings_size(p.flags, p.sq_entries, p.cq_entries,
   440					&sq_array_offset);
   441		if (size == SIZE_MAX)
   442			return -EOVERFLOW;
   443	
   444		if (!(p.flags & IORING_SETUP_NO_MMAP))
   445			n.rings = io_pages_map(&n.ring_pages, &n.n_ring_pages, size);
   446		else
   447			n.rings = __io_uaddr_map(&n.ring_pages, &n.n_ring_pages,
   448							p.cq_off.user_addr, size);
   449		if (IS_ERR(n.rings))
   450			return PTR_ERR(n.rings);
   451	
   452		/*
   453		 * At this point n.rings is shared with userspace, just like o.rings
   454		 * is as well. While we don't expect userspace to modify it while
   455		 * a resize is in progress, and it's most likely that userspace will
   456		 * shoot itself in the foot if it does, we can't always assume good
   457		 * intent... Use read/write once helpers from here on to indicate the
   458		 * shared nature of it.
   459		 */
   460		WRITE_ONCE(n.rings->sq_ring_mask, p.sq_entries - 1);
   461		WRITE_ONCE(n.rings->cq_ring_mask, p.cq_entries - 1);
   462		WRITE_ONCE(n.rings->sq_ring_entries, p.sq_entries);
   463		WRITE_ONCE(n.rings->cq_ring_entries, p.cq_entries);
   464	
   465		if (copy_to_user(arg, &p, sizeof(p))) {
   466			io_register_free_rings(&p, &n);
   467			return -EFAULT;
   468		}
   469	
   470		if (p.flags & IORING_SETUP_SQE128)
   471			size = array_size(2 * sizeof(struct io_uring_sqe), p.sq_entries);
   472		else
   473			size = array_size(sizeof(struct io_uring_sqe), p.sq_entries);
   474		if (size == SIZE_MAX) {
   475			io_register_free_rings(&p, &n);
   476			return -EOVERFLOW;
   477		}
   478	
   479		if (!(p.flags & IORING_SETUP_NO_MMAP))
   480			ptr = io_pages_map(&n.sqe_pages, &n.n_sqe_pages, size);
   481		else
   482			ptr = __io_uaddr_map(&n.sqe_pages, &n.n_sqe_pages,
   483						p.sq_off.user_addr,
   484						size);
   485		if (IS_ERR(ptr)) {
   486			io_register_free_rings(&p, &n);
   487			return PTR_ERR(ptr);
   488		}
   489	
   490		/*
   491		 * If using SQPOLL, park the thread
   492		 */
   493		if (ctx->sq_data) {
   494			mutex_unlock(&ctx->uring_lock);
   495			io_sq_thread_park(ctx->sq_data);
   496			mutex_lock(&ctx->uring_lock);
   497		}
   498	
   499		/*
   500		 * We'll do the swap. Grab the ctx->resize_lock, which will exclude
   501		 * any new mmap's on the ring fd. Clear out existing mappings to prevent
   502		 * mmap from seeing them, as we'll unmap them. Any attempt to mmap
   503		 * existing rings beyond this point will fail. Not that it could proceed
   504		 * at this point anyway, as the io_uring mmap side needs to grab the
   505		 * ctx->resize_lock as well. Likewise, hold the completion lock over the
   506		 * duration of the actual swap.
   507		 */
   508		mutex_lock(&ctx->resize_lock);
   509		spin_lock(&ctx->completion_lock);
   510		o.rings = ctx->rings;
   511		ctx->rings = NULL;
   512		o.sq_sqes = ctx->sq_sqes;
   513		ctx->sq_sqes = NULL;
   514	
   515		/*
   516		 * Now copy SQ and CQ entries, if any. If either of the destination
   517		 * rings can't hold what is already there, then fail the operation.
   518		 */
   519		n.sq_sqes = ptr;
   520		tail = READ_ONCE(o.rings->sq.tail);
   521		if (tail - READ_ONCE(o.rings->sq.head) > p.sq_entries)
   522			goto overflow;
   523		for (i = READ_ONCE(o.rings->sq.head); i < tail; i++) {
   524			unsigned src_head = i & (ctx->sq_entries - 1);
   525			unsigned dst_head = i & (p.sq_entries - 1);
   526	
   527			n.sq_sqes[dst_head] = o.sq_sqes[src_head];
   528		}
   529		WRITE_ONCE(n.rings->sq.head, READ_ONCE(o.rings->sq.head));
   530		WRITE_ONCE(n.rings->sq.tail, READ_ONCE(o.rings->sq.tail));
   531	
   532		tail = READ_ONCE(o.rings->cq.tail);
   533		if (tail - READ_ONCE(o.rings->cq.head) > p.cq_entries) {
   534	overflow:
   535			/* restore old rings, and return -EOVERFLOW via cleanup path */
   536			ctx->rings = o.rings;
   537			ctx->sq_sqes = o.sq_sqes;
   538			to_free = &n;
   539			ret = -EOVERFLOW;
   540			goto out;
   541		}
   542		for (i = READ_ONCE(o.rings->cq.head); i < tail; i++) {
   543			unsigned src_head = i & (ctx->cq_entries - 1);
   544			unsigned dst_head = i & (p.cq_entries - 1);
   545	
   546			n.rings->cqes[dst_head] = o.rings->cqes[src_head];
   547		}
   548		WRITE_ONCE(n.rings->cq.head, READ_ONCE(o.rings->cq.head));
   549		WRITE_ONCE(n.rings->cq.tail, READ_ONCE(o.rings->cq.tail));
   550		/* invalidate cached cqe refill */
   551		ctx->cqe_cached = ctx->cqe_sentinel = NULL;
   552	
   553		WRITE_ONCE(n.rings->sq_dropped, READ_ONCE(o.rings->sq_dropped));
 > 554		WRITE_ONCE(n.rings->sq_flags, READ_ONCE(o.rings->sq_flags));
   555		WRITE_ONCE(n.rings->cq_flags, READ_ONCE(o.rings->cq_flags));
   556		WRITE_ONCE(n.rings->cq_overflow, READ_ONCE(o.rings->cq_overflow));
   557	
   558		/* all done, store old pointers and assign new ones */
   559		if (!(ctx->flags & IORING_SETUP_NO_SQARRAY))
   560			ctx->sq_array = (u32 *)((char *)n.rings + sq_array_offset);
   561	
   562		ctx->sq_entries = p.sq_entries;
   563		ctx->cq_entries = p.cq_entries;
   564	
   565		ctx->rings = n.rings;
   566		ctx->sq_sqes = n.sq_sqes;
   567		swap_old(ctx, o, n, n_ring_pages);
   568		swap_old(ctx, o, n, n_sqe_pages);
   569		swap_old(ctx, o, n, ring_pages);
   570		swap_old(ctx, o, n, sqe_pages);
   571		to_free = &o;
   572		ret = 0;
   573	out:
   574		spin_unlock(&ctx->completion_lock);
   575		mutex_unlock(&ctx->resize_lock);
   576		io_register_free_rings(&p, to_free);
   577	
   578		if (ctx->sq_data)
   579			io_sq_thread_unpark(ctx->sq_data);
   580	
   581		return ret;
   582	}
   583	
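
If the warning is worth silencing rather than just noting, one option (an
untested sketch, not a proposed patch) would be to copy the atomic_t field
through the atomic accessors instead of READ_ONCE()/WRITE_ONCE(), so the access
stays a plain int and no struct cast is generated; the neighbouring
sq_dropped/cq_flags/cq_overflow copies are plain u32 fields and are not flagged:

	atomic_set(&n.rings->sq_flags, atomic_read(&o.rings->sq_flags));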

-- 
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
