Message-ID: <202210051257.HUS5JhpI-lkp@intel.com>
Date: Wed, 5 Oct 2022 12:41:13 +0800
From: kernel test robot <lkp@...el.com>
To: Jason Gunthorpe <jgg@...dia.com>
Cc: kbuild-all@...ts.01.org, linux-kernel@...r.kernel.org,
Yi Liu <yi.l.liu@...el.com>, Nicolin Chen <nicolinc@...dia.com>
Subject: [jgunthorpe:vfio_iommufd 9/26]
drivers/iommu/iommufd/io_pagetable.c:453:5: error: conflicting types for
'iopt_access_pages'; have 'int(struct io_pagetable *, long unsigned int,
long unsigned int, struct page **, unsigned int)'
tree: https://github.com/jgunthorpe/linux vfio_iommufd
head: c639c250f6f5b991f23efa0e76fe5ca689eae450
commit: 1452b430c462a242e482338e43961cda539a8ee2 [9/26] iommufd: Data structure to provide IOVA to PFN mapping
config: s390-allmodconfig
compiler: s390-linux-gcc (GCC) 12.1.0
reproduce (this is a W=1 build):
wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
chmod +x ~/bin/make.cross
# https://github.com/jgunthorpe/linux/commit/1452b430c462a242e482338e43961cda539a8ee2
git remote add jgunthorpe https://github.com/jgunthorpe/linux
git fetch --no-tags jgunthorpe vfio_iommufd
git checkout 1452b430c462a242e482338e43961cda539a8ee2
# save the config file
mkdir build_dir && cp config build_dir/.config
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-12.1.0 make.cross W=1 O=build_dir ARCH=s390 SHELL=/bin/bash drivers/iommu/iommufd/
If you fix the issue, kindly add the following tag where applicable:
| Reported-by: kernel test robot <lkp@...el.com>
Note: the jgunthorpe/vfio_iommufd HEAD c639c250f6f5b991f23efa0e76fe5ca689eae450 builds fine.
It only hurts bisectability.
All errors/warnings (new ones prefixed by >>):
>> drivers/iommu/iommufd/io_pagetable.c:453:5: error: conflicting types for 'iopt_access_pages'; have 'int(struct io_pagetable *, long unsigned int, long unsigned int, struct page **, unsigned int)'
453 | int iopt_access_pages(struct io_pagetable *iopt, unsigned long iova,
| ^~~~~~~~~~~~~~~~~
In file included from drivers/iommu/iommufd/io_pagetable.c:11:
include/linux/iommufd.h:18:5: note: previous declaration of 'iopt_access_pages' with type 'int(struct io_pagetable *, long unsigned int, long unsigned int, struct page **, bool)' {aka 'int(struct io_pagetable *, long unsigned int, long unsigned int, struct page **, _Bool)'}
18 | int iopt_access_pages(struct io_pagetable *iopt, unsigned long iova,
| ^~~~~~~~~~~~~~~~~
In file included from include/linux/linkage.h:7,
from include/linux/kernel.h:17,
from include/linux/cpumask.h:10,
from include/linux/smp.h:13,
from include/linux/lockdep.h:14,
from drivers/iommu/iommufd/io_pagetable.c:12:
drivers/iommu/iommufd/io_pagetable.c:529:22: error: conflicting types for 'iopt_access_pages'; have 'int(struct io_pagetable *, long unsigned int, long unsigned int, struct page **, unsigned int)'
529 | EXPORT_SYMBOL_NS_GPL(iopt_access_pages, IOMMUFD);
| ^~~~~~~~~~~~~~~~~
include/linux/export.h:87:28: note: in definition of macro '___EXPORT_SYMBOL'
87 | extern typeof(sym) sym; \
| ^~~
include/linux/export.h:153:41: note: in expansion of macro '__EXPORT_SYMBOL'
153 | #define EXPORT_SYMBOL_NS_GPL(sym, ns) __EXPORT_SYMBOL(sym, "_gpl", __stringify(ns))
| ^~~~~~~~~~~~~~~
drivers/iommu/iommufd/io_pagetable.c:529:1: note: in expansion of macro 'EXPORT_SYMBOL_NS_GPL'
529 | EXPORT_SYMBOL_NS_GPL(iopt_access_pages, IOMMUFD);
| ^~~~~~~~~~~~~~~~~~~~
include/linux/iommufd.h:18:5: note: previous declaration of 'iopt_access_pages' with type 'int(struct io_pagetable *, long unsigned int, long unsigned int, struct page **, bool)' {aka 'int(struct io_pagetable *, long unsigned int, long unsigned int, struct page **, _Bool)'}
18 | int iopt_access_pages(struct io_pagetable *iopt, unsigned long iova,
| ^~~~~~~~~~~~~~~~~
>> drivers/iommu/iommufd/io_pagetable.c:578:5: warning: no previous prototype for 'iopt_access_rw' [-Wmissing-prototypes]
578 | int iopt_access_rw(struct io_pagetable *iopt, unsigned long iova, void *data,
| ^~~~~~~~~~~~~~
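
The conflict is between the declaration at include/linux/iommufd.h:18, whose last
parameter is a bool, and the definition at io_pagetable.c:453, which takes an
unsigned int flags; the warning at line 578 additionally means iopt_access_rw()
has no declaration visible to this translation unit at all. Below is a minimal
sketch of header declarations that would match the definitions quoted further
down, assuming the flags-taking definitions are the intended interface (the
header contents beyond line 18 are not part of this report):

    /*
     * Sketch for include/linux/iommufd.h: prototypes aligned with the
     * definitions in io_pagetable.c. The existing declaration of
     * iopt_access_pages() ending in 'bool' is what conflicts with the
     * 'unsigned int flags' definition.
     */
    struct io_pagetable;
    struct page;

    int iopt_access_pages(struct io_pagetable *iopt, unsigned long iova,
                          unsigned long length, struct page **out_pages,
                          unsigned int flags);
    int iopt_access_rw(struct io_pagetable *iopt, unsigned long iova, void *data,
                       unsigned long length, unsigned int flags);
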
vim +453 drivers/iommu/iommufd/io_pagetable.c
435
436 /**
437 * iopt_access_pages() - Return a list of pages under the iova
438 * @iopt: io_pagetable to act on
439 * @iova: Starting IOVA
440 * @length: Number of bytes to access
441 * @out_pages: Output page list
442 * @flags: IOMMUFD_ACCESS_RW_* flags
443 *
444 * Reads @length bytes starting at iova and returns the struct page * pointers.
445 * These can be kmap'd by the caller for CPU access.
446 *
447 * The caller must perform iopt_unaccess_pages() when done to balance this.
448 *
449 * iova can be unaligned from PAGE_SIZE. The first returned byte starts at
450 * page_to_phys(out_pages[0]) + (iova % PAGE_SIZE). The caller promises not to
451 * touch memory outside the requested iova slice.
452 */
> 453 int iopt_access_pages(struct io_pagetable *iopt, unsigned long iova,
454 unsigned long length, struct page **out_pages,
455 unsigned int flags)
456 {
457 unsigned long cur_iova = iova;
458 unsigned long last_iova;
459 struct iopt_area *area;
460 int rc;
461
462 if (!length)
463 return -EINVAL;
464 if (check_add_overflow(iova, length - 1, &last_iova))
465 return -EOVERFLOW;
466
467 down_read(&iopt->iova_rwsem);
468 for (area = iopt_area_iter_first(iopt, iova, last_iova); area;
469 area = iopt_area_iter_next(area, iova, last_iova)) {
470 unsigned long last = min(last_iova, iopt_area_last_iova(area));
471 unsigned long last_index;
472 unsigned long index;
473
474 /* Need contiguous areas in the access */
475 if (iopt_area_iova(area) > cur_iova || !area->pages ||
476 area->prevent_users) {
477 rc = -EINVAL;
478 goto out_remove;
479 }
480
481 /*
482 * The API can only return aligned pages, so the starting point
483 * must be at a page boundary.
484 */
485 if ((cur_iova - (iopt_area_iova(area) - area->page_offset)) %
486 PAGE_SIZE) {
487 rc = -EINVAL;
488 goto out_remove;
489 }
490
491 index = iopt_area_iova_to_index(area, cur_iova);
492 last_index = iopt_area_iova_to_index(area, last);
493
494 /*
495 * and an interior ending point must be at a page boundary
496 */
497 if (last != last_iova &&
498 (iopt_area_last_iova(area) - cur_iova + 1) % PAGE_SIZE) {
499 rc = -EINVAL;
500 goto out_remove;
501 }
502
503 mutex_lock(&area->pages->mutex);
504 rc = iopt_pages_add_access(area->pages, index, last_index,
505 out_pages, flags);
506 if (rc) {
507 mutex_unlock(&area->pages->mutex);
508 goto out_remove;
509 }
510 area->num_accesses++;
511 mutex_unlock(&area->pages->mutex);
512 if (last == last_iova)
513 break;
514 cur_iova = last + 1;
515 out_pages += last_index - index;
516 }
517 if (cur_iova != last_iova)
518 goto out_remove;
519
520 up_read(&iopt->iova_rwsem);
521 return 0;
522
523 out_remove:
524 if (cur_iova != iova)
525 iopt_unaccess_pages(iopt, iova, cur_iova - iova);
526 up_read(&iopt->iova_rwsem);
527 return rc;
528 }
529 EXPORT_SYMBOL_NS_GPL(iopt_access_pages, IOMMUFD);
530
531 /**
532 * iopt_unaccess_pages() - Undo iopt_access_pages
533 * @iopt: io_pagetable to act on
534 * @iova: Starting IOVA
535 * @length: Number of bytes to stop accessing
536 *
537 * Releases the struct pages obtained from iopt_access_pages(). The caller must
538 * stop accessing them first; the iova/length must exactly match the access call.
539 */
540 void iopt_unaccess_pages(struct io_pagetable *iopt, unsigned long iova,
541 unsigned long length)
542 {
543 unsigned long cur_iova = iova;
544 unsigned long last_iova;
545 struct iopt_area *area;
546
547 if (WARN_ON(!length) ||
548 WARN_ON(check_add_overflow(iova, length - 1, &last_iova)))
549 return;
550
551 down_read(&iopt->iova_rwsem);
552 for (area = iopt_area_iter_first(iopt, iova, last_iova); area;
553 area = iopt_area_iter_next(area, iova, last_iova)) {
554 unsigned long last = min(last_iova, iopt_area_last_iova(area));
555
556 iopt_pages_remove_access(area,
557 iopt_area_iova_to_index(area, cur_iova),
558 iopt_area_iova_to_index(area, last));
559 if (last == last_iova)
560 break;
561 cur_iova = last + 1;
562 }
563 up_read(&iopt->iova_rwsem);
564 }
565 EXPORT_SYMBOL_NS_GPL(iopt_unaccess_pages, IOMMUFD);
566
567 /**
568 * iopt_access_rw() - Read or write data under the iova
569 * @iopt: io_pagetable to act on
570 * @iova: Starting IOVA
571 * @data: Kernel buffer to copy to/from
572 * @length: Number of bytes to access
573 *
574 * Copy data between @data and the range given by IOVA/length. If flags
575 * indicates IOMMUFD_ACCESS_RW_KTHREAD then a large copy can be optimized
576 * by changing it into copy_to/from_user().
577 */
> 578 int iopt_access_rw(struct io_pagetable *iopt, unsigned long iova, void *data,
579 unsigned long length, unsigned int flags)
580 {
581 unsigned long cur_iova = iova;
582 struct iopt_area *area;
583 unsigned long last_iova;
584 int rc;
585
586 if (!length)
587 return -EINVAL;
588 if (check_add_overflow(iova, length - 1, &last_iova))
589 return -EOVERFLOW;
590
591 down_read(&iopt->iova_rwsem);
592 for (area = iopt_area_iter_first(iopt, iova, last_iova); area;
593 area = iopt_area_iter_next(area, iova, last_iova)) {
594 unsigned long last = min(last_iova, iopt_area_last_iova(area));
595 unsigned long bytes;
596
597 /* Need contiguous areas in the access */
598 if (iopt_area_iova(area) > cur_iova || !area->pages ||
599 area->prevent_users) {
600 rc = -EINVAL;
601 break;
602 }
603
604 bytes = (last - cur_iova) + 1;
605 rc = iopt_pages_rw_access(area->pages,
606 iopt_area_start_byte(area, cur_iova),
607 data, bytes, flags);
608 if (rc)
609 break;
610
611 cur_iova = last + 1;
612 data += bytes;
613 }
614 up_read(&iopt->iova_rwsem);
615 return rc;
616 }
617 EXPORT_SYMBOL_NS_GPL(iopt_access_rw, IOMMUFD);
618
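As a usage reference for the kernel-doc quoted above, here is a minimal
caller-side sketch of the access/unaccess contract. The function and parameter
names (example_peek_byte, out) are hypothetical, passing 0 as flags is assumed
to request read-only access, and iova is assumed to satisfy the page-boundary
checks enforced inside iopt_access_pages(); only the pairing with
iopt_unaccess_pages() and the iova % PAGE_SIZE offset rule come from the
documentation above.

    /*
     * Illustrative only. The first accessed byte lives at
     * page_to_phys(out_pages[0]) + (iova % PAGE_SIZE), and every
     * iopt_access_pages() must be balanced by an iopt_unaccess_pages()
     * with exactly the same iova/length.
     * Needs <linux/highmem.h> for kmap_local_page()/kunmap_local().
     */
    static int example_peek_byte(struct io_pagetable *iopt, unsigned long iova,
                                 u8 *out)
    {
            struct page *pages[1];
            void *vaddr;
            int rc;

            /* A single byte never crosses a page, so one out_pages slot suffices */
            rc = iopt_access_pages(iopt, iova, 1, pages, 0 /* read-only, assumed */);
            if (rc)
                    return rc;

            vaddr = kmap_local_page(pages[0]);
            *out = *((u8 *)vaddr + (iova % PAGE_SIZE));
            kunmap_local(vaddr);

            /* Must exactly match the iova/length passed to iopt_access_pages() */
            iopt_unaccess_pages(iopt, iova, 1);
            return 0;
    }
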
--
0-DAY CI Kernel Test Service
https://01.org/lkp