Message-ID: <202209080901.CB3IS8LC-lkp@intel.com>
Date: Thu, 8 Sep 2022 09:07:13 +0800
From: kernel test robot <lkp@...el.com>
To: Logan Gunthorpe <logang@...tatee.com>
Cc: llvm@...ts.linux.dev, kbuild-all@...ts.01.org,
linux-kernel@...r.kernel.org, Christoph Hellwig <hch@....de>,
John Hubbard <jhubbard@...dia.com>
Subject: [sbates130272-p2pmem:p2pdma_user_cmb_v10 6/8]
block/blk-map.c:261:11: error: implicit declaration of function
'iov_iter_get_pages_alloc_flags' is invalid in C99
tree: https://github.com/sbates130272/linux-p2pmem.git p2pdma_user_cmb_v10
head: efd6fef685a844fb4eec90383861b3a0bd48545f
commit: d931dbbbcc766ef83297fffb65d1194c6dbef235 [6/8] block: set FOLL_PCI_P2PDMA in bio_map_user_iov()
config: i386-randconfig-a013
compiler: clang version 14.0.6 (https://github.com/llvm/llvm-project f28c006a5895fc0e329fe15fead81e37457cb1d1)
reproduce (this is a W=1 build):
wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
chmod +x ~/bin/make.cross
# https://github.com/sbates130272/linux-p2pmem/commit/d931dbbbcc766ef83297fffb65d1194c6dbef235
git remote add sbates130272-p2pmem https://github.com/sbates130272/linux-p2pmem.git
git fetch --no-tags sbates130272-p2pmem p2pdma_user_cmb_v10
git checkout d931dbbbcc766ef83297fffb65d1194c6dbef235
# save the config file
mkdir build_dir && cp config build_dir/.config
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross W=1 O=build_dir ARCH=i386 SHELL=/bin/bash
If you fix the issue, kindly add the following tag where applicable:
Reported-by: kernel test robot <lkp@...el.com>
All errors (new ones prefixed by >>):
>> block/blk-map.c:261:11: error: implicit declaration of function 'iov_iter_get_pages_alloc_flags' is invalid in C99 [-Werror,-Wimplicit-function-declaration]
bytes = iov_iter_get_pages_alloc_flags(iter, &pages, LONG_MAX,
^
block/blk-map.c:258:16: warning: variable 'added' set but not used [-Wunused-but-set-variable]
size_t offs, added = 0;
^
1 warning and 1 error generated.
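
The error indicates that no declaration of iov_iter_get_pages_alloc_flags() is
visible to block/blk-map.c in this build, i.e. the prerequisite iov_iter change
that is supposed to introduce the helper does not appear to be in effect here.
For reference, a prototype of the shape the call site expects would look
roughly like the sketch below; the exact signature is inferred from the call at
line 261 and may differ from what the series actually adds:

	/*
	 * Hypothetical prototype, inferred from the call site in
	 * bio_map_user_iov(); shown only to illustrate what the compiler
	 * cannot see, not the series' actual declaration.
	 */
	ssize_t iov_iter_get_pages_alloc_flags(struct iov_iter *i,
			struct page ***pages, size_t maxsize,
			size_t *start, unsigned int gup_flags);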
vim +/iov_iter_get_pages_alloc_flags +261 block/blk-map.c
233
234 static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
235 gfp_t gfp_mask)
236 {
237 unsigned int max_sectors = queue_max_hw_sectors(rq->q);
238 unsigned int nr_vecs = iov_iter_npages(iter, BIO_MAX_VECS);
239 unsigned int gup_flags = 0;
240 struct bio *bio;
241 int ret;
242 int j;
243
244 if (!iov_iter_count(iter))
245 return -EINVAL;
246
247 bio = bio_kmalloc(nr_vecs, gfp_mask);
248 if (!bio)
249 return -ENOMEM;
250 bio_init(bio, NULL, bio->bi_inline_vecs, nr_vecs, req_op(rq));
251
252 if (blk_queue_pci_p2pdma(rq->q))
253 gup_flags |= FOLL_PCI_P2PDMA;
254
255 while (iov_iter_count(iter)) {
256 struct page **pages;
257 ssize_t bytes;
258 size_t offs, added = 0;
259 int npages;
260
> 261 bytes = iov_iter_get_pages_alloc_flags(iter, &pages, LONG_MAX,
262 &offs, gup_flags);
263 if (unlikely(bytes <= 0)) {
264 ret = bytes ? bytes : -EFAULT;
265 goto out_unmap;
266 }
267
268 npages = DIV_ROUND_UP(offs + bytes, PAGE_SIZE);
269
270 if (unlikely(offs & queue_dma_alignment(rq->q)))
271 j = 0;
272 else {
273 for (j = 0; j < npages; j++) {
274 struct page *page = pages[j];
275 unsigned int n = PAGE_SIZE - offs;
276 bool same_page = false;
277
278 if (n > bytes)
279 n = bytes;
280
281 if (!bio_add_hw_page(rq->q, bio, page, n, offs,
282 max_sectors, &same_page)) {
283 if (same_page)
284 put_page(page);
285 break;
286 }
287
288 added += n;
289 bytes -= n;
290 offs = 0;
291 }
292 }
293 /*
294 * release the pages we didn't map into the bio, if any
295 */
296 while (j < npages)
297 put_page(pages[j++]);
298 kvfree(pages);
299 /* couldn't stuff something into bio? */
300 if (bytes) {
301 iov_iter_revert(iter, bytes);
302 break;
303 }
304 }
305
306 ret = blk_rq_append_bio(rq, bio);
307 if (ret)
308 goto out_unmap;
309 return 0;
310
311 out_unmap:
312 bio_release_pages(bio, false);
313 bio_uninit(bio);
314 kfree(bio);
315 return ret;
316 }
317
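
The -Wunused-but-set-variable warning is separate from the build error: 'added'
is incremented in the inner loop but never read afterwards. Assuming no later
patch in the series starts using it, a minimal (untested) sketch of one way to
silence the warning would be to drop the variable, e.g.:

--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -255,7 +255,7 @@
 	while (iov_iter_count(iter)) {
 		struct page **pages;
 		ssize_t bytes;
-		size_t offs, added = 0;
+		size_t offs;
 		int npages;
 
 		bytes = iov_iter_get_pages_alloc_flags(iter, &pages, LONG_MAX,
@@ -285,7 +285,6 @@
 				break;
 			}
 
-			added += n;
 			bytes -= n;
 			offs = 0;
 		}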
--
0-DAY CI Kernel Test Service
https://01.org/lkp
View attachment "config" of type "text/plain" (153154 bytes)