Date:   Fri, 8 Oct 2021 06:06:05 +0800
From:   kernel test robot <lkp@...el.com>
To:     Kanchan Joshi <joshi.k@...sung.com>
Cc:     kbuild-all@...ts.01.org, linux-kernel@...r.kernel.org,
        Jens Axboe <axboe@...nel.dk>, Anuj Gupta <anuj20.g@...sung.com>
Subject: [axboe-block:nvme-passthru-wip 14/19]
 drivers/nvme/host/ioctl.c:158:5: warning: no previous prototype for
 'nvme_rq_map_user_fixedb'

tree:   https://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git nvme-passthru-wip
head:   9c18980ac90053bcdb21594eae48935d89bf389c
commit: 2e6a09df7c3f2f4161b9d6aa691f2801b2428eae [14/19] nvme: enable passthrough with fixed-buffer
config: sparc-buildonly-randconfig-r006-20211007 (attached as .config)
compiler: sparc-linux-gcc (GCC) 11.2.0
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # https://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git/commit/?id=2e6a09df7c3f2f4161b9d6aa691f2801b2428eae
        git remote add axboe-block https://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
        git fetch --no-tags axboe-block nvme-passthru-wip
        git checkout 2e6a09df7c3f2f4161b9d6aa691f2801b2428eae
        # save the attached .config to linux build tree
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-11.2.0 make.cross ARCH=sparc 

If you fix the issue, kindly add the following tag as appropriate:
Reported-by: kernel test robot <lkp@...el.com>

All warnings (new ones prefixed by >>):

   In file included from drivers/nvme/host/nvme.h:19,
                    from drivers/nvme/host/ioctl.c:8:
   include/linux/io_uring.h:55:5: warning: no previous prototype for 'io_uring_cmd_import_fixed' [-Wmissing-prototypes]
      55 | int io_uring_cmd_import_fixed(void *ubuf, unsigned long len,
         |     ^~~~~~~~~~~~~~~~~~~~~~~~~
   drivers/nvme/host/ioctl.c: In function 'nvme_pt_task_cb':
   drivers/nvme/host/ioctl.c:47:19: warning: cast to pointer from integer of different size [-Wint-to-pointer-cast]
      47 |         ptcmd64 = (void __user *) bcmd->unused2[0];
         |                   ^
   drivers/nvme/host/ioctl.c:62:58: warning: cast to pointer from integer of different size [-Wint-to-pointer-cast]
      62 |                 struct nvme_passthru_cmd __user *ptcmd = (void *)bcmd->unused2[0];
         |                                                          ^
   drivers/nvme/host/ioctl.c: At top level:
>> drivers/nvme/host/ioctl.c:158:5: warning: no previous prototype for 'nvme_rq_map_user_fixedb' [-Wmissing-prototypes]
     158 | int nvme_rq_map_user_fixedb(struct request_queue *q, struct request *rq,
         |     ^~~~~~~~~~~~~~~~~~~~~~~
   drivers/nvme/host/ioctl.c: In function 'nvme_ns_async_ioctl':
   drivers/nvme/host/ioctl.c:534:29: warning: cast to pointer from integer of different size [-Wint-to-pointer-cast]
     534 |         void __user *argp = (void __user *) bcmd->unused2[0];
         |                             ^
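
For context on the new warning: -Wmissing-prototypes fires at line 158
because nvme_rq_map_user_fixedb has external linkage but no declaration
is visible before its definition in ioctl.c. A minimal sketch of one
usual remedy, assuming the helper is meant to be called from other nvme
files (the nvme.h placement is an assumption; if the helper is only used
inside ioctl.c, marking the definition static would silence the warning
just as well):

	/* hypothetical declaration, e.g. in drivers/nvme/host/nvme.h */
	int nvme_rq_map_user_fixedb(struct request_queue *q,
			struct request *rq, void __user *ubuf,
			unsigned long len, gfp_t gfp_mask,
			struct io_uring_cmd *ioucmd);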


vim +/nvme_rq_map_user_fixedb +158 drivers/nvme/host/ioctl.c

    39	
    40	static void nvme_pt_task_cb(struct io_uring_cmd *ioucmd)
    41	{
    42		struct uring_cmd_data *ucd;
    43		struct nvme_passthru_cmd64 __user *ptcmd64 = NULL;
    44		struct block_uring_cmd *bcmd;
    45	
    46		bcmd = (struct block_uring_cmd *) &ioucmd->pdu;
  > 47		ptcmd64 = (void __user *) bcmd->unused2[0];
    48		ucd = (struct uring_cmd_data *) nvme_ioucmd_data_addr(ioucmd);
    49	
    50		if (ucd->meta) {
    51			void __user *umeta = nvme_to_user_ptr(ptcmd64->metadata);
    52	
    53			if (!ucd->status)
    54				if (copy_to_user(umeta, ucd->meta, ptcmd64->metadata_len))
    55					ucd->status = -EFAULT;
    56			kfree(ucd->meta);
    57		}
    58		if (likely(bcmd->ioctl_cmd == NVME_IOCTL_IO64_CMD)) {
    59			if (put_user(ucd->result, &ptcmd64->result))
    60				ucd->status = -EFAULT;
    61		} else {
    62			struct nvme_passthru_cmd __user *ptcmd = (void *)bcmd->unused2[0];
    63	
    64			if (put_user(ucd->result, &ptcmd->result))
    65				ucd->status = -EFAULT;
    66		}
    67		io_uring_cmd_done(ioucmd, ucd->status);
    68	}
    69	
    70	static void nvme_end_async_pt(struct request *req, blk_status_t err)
    71	{
    72		struct io_uring_cmd *ioucmd;
    73		struct uring_cmd_data *ucd;
    74		struct bio *bio;
    75	
    76		ioucmd = req->end_io_data;
    77		ucd = (struct uring_cmd_data *) nvme_ioucmd_data_addr(ioucmd);
    78		/* extract bio before reusing the same field for status */
    79		bio = ucd->bio;
    80	
    81		if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
    82			ucd->status = -EINTR;
    83		else
    84			ucd->status = nvme_req(req)->status;
    85		ucd->result = le64_to_cpu(nvme_req(req)->result.u64);
    86	
    87		/* this takes care of setting up task-work */
    88		io_uring_cmd_complete_in_task(ioucmd, nvme_pt_task_cb);
    89	
    90		/* we can unmap pages, free bio and request */
    91		blk_rq_unmap_user(bio);
    92		blk_mq_free_request(req);
    93	}
    94	
    95	static void nvme_setup_uring_cmd_data(struct request *rq,
    96			struct io_uring_cmd *ioucmd, void *meta, bool write)
    97	{
    98		struct uring_cmd_data *ucd;
    99	
   100		ucd = (struct uring_cmd_data *) nvme_ioucmd_data_addr(ioucmd);
   101		/* to free bio on completion, as req->bio will be null at that time */
   102		ucd->bio = rq->bio;
   103		/* meta update is required only for read requests */
   104		if (meta && !write)
   105			ucd->meta = meta;
   106		rq->end_io_data = ioucmd;
   107	}
   108	
   109	static void *nvme_add_user_metadata(struct bio *bio, void __user *ubuf,
   110			unsigned len, u32 seed, bool write)
   111	{
   112		struct bio_integrity_payload *bip;
   113		int ret = -ENOMEM;
   114		void *buf;
   115	
   116		buf = kmalloc(len, GFP_KERNEL);
   117		if (!buf)
   118			goto out;
   119	
   120		ret = -EFAULT;
   121		if (write && copy_from_user(buf, ubuf, len))
   122			goto out_free_meta;
   123	
   124		bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
   125		if (IS_ERR(bip)) {
   126			ret = PTR_ERR(bip);
   127			goto out_free_meta;
   128		}
   129	
   130		bip->bip_iter.bi_size = len;
   131		bip->bip_iter.bi_sector = seed;
   132		ret = bio_integrity_add_page(bio, virt_to_page(buf), len,
   133				offset_in_page(buf));
   134		if (ret == len)
   135			return buf;
   136		ret = -ENOMEM;
   137	out_free_meta:
   138		kfree(buf);
   139	out:
   140		return ERR_PTR(ret);
   141	}
   142	static inline bool nvme_is_fixedb_passthru(struct io_uring_cmd *ioucmd)
   143	{
   144		struct block_uring_cmd *bcmd;
   145	
   146		if (!ioucmd)
   147			return false;
   148		bcmd = (struct block_uring_cmd *)&ioucmd->pdu;
   149		if (bcmd && ((bcmd->ioctl_cmd == NVME_IOCTL_IO_CMD_FIXED) ||
   150					(bcmd->ioctl_cmd == NVME_IOCTL_IO64_CMD_FIXED)))
   151			return true;
   152		return false;
   153	}
   154	/*
   155	 * Unlike blk_rq_map_user () this is only for fixed-buffer async passthrough.
   156	 * And hopefully faster as well.
   157	 */
 > 158	int nvme_rq_map_user_fixedb(struct request_queue *q, struct request *rq,
   159			     void __user *ubuf, unsigned long len, gfp_t gfp_mask,
   160			     struct io_uring_cmd *ioucmd)
   161	{
   162		struct iov_iter iter;
   163		size_t iter_count, nr_segs;
   164		struct bio *bio;
   165		int ret;
   166	
   167		/*
   168		 * Talk to io_uring to obtain BVEC iterator for the buffer.
   169		 * And use that iterator to form bio/request.
   170		 */
   171		ret = io_uring_cmd_import_fixed(ubuf, len, rq_data_dir(rq), &iter,
   172				ioucmd);
   173		if (unlikely(ret < 0))
   174			return ret;
   175		iter_count = iov_iter_count(&iter);
   176		nr_segs = iter.nr_segs;
   177	
   178		if (!iter_count || (iter_count >> 9) > queue_max_hw_sectors(q))
   179			return -EINVAL;
   180		if (nr_segs > queue_max_segments(q))
   181			return -EINVAL;
   182		/* no iovecs to alloc, as we already have a BVEC iterator */
   183		bio = bio_kmalloc(gfp_mask, 0);
   184		if (!bio)
   185			return -ENOMEM;
   186	
   187		bio->bi_opf |= req_op(rq);
   188		ret = bio_iov_iter_get_pages(bio, &iter);
   189		if (ret)
   190			goto out_free;
   191	
   192		blk_rq_bio_prep(rq, bio, nr_segs);
   193		return 0;
   194	
   195	out_free:
   196		bio_release_pages(bio, false);
   197		bio_put(bio);
   198		return ret;
   199	}
   200	
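
The -Wint-to-pointer-cast warnings at lines 47, 62 and 534 are specific
to 32-bit configurations like this sparc build, where the 64-bit value
in bcmd->unused2[0] is wider than a pointer. A sketch of one
conventional way to make the conversion explicit, assuming the field
really carries a 64-bit user-space address, is the kernel's
u64_to_user_ptr() helper:

	/* sketch only: assumes bcmd->unused2[0] holds a __u64 user address */
	struct nvme_passthru_cmd64 __user *ptcmd64 =
			u64_to_user_ptr(bcmd->unused2[0]);

The same pattern would cover the casts at lines 62 and 534, which
convert the same field to nvme_passthru_cmd and void __user pointers.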

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org

