Message-ID: <0b649477-8e61-b330-aba5-eba4761a56d5@nvidia.com>
Date: Tue, 24 Jan 2023 11:23:45 -0800
From: John Hubbard <jhubbard@...dia.com>
To: David Howells <dhowells@...hat.com>,
Al Viro <viro@...iv.linux.org.uk>,
Christoph Hellwig <hch@...radead.org>
CC: Matthew Wilcox <willy@...radead.org>, Jens Axboe <axboe@...nel.dk>,
"Jan Kara" <jack@...e.cz>, Jeff Layton <jlayton@...nel.org>,
David Hildenbrand <david@...hat.com>,
Jason Gunthorpe <jgg@...dia.com>,
Logan Gunthorpe <logang@...tatee.com>,
<linux-fsdevel@...r.kernel.org>, <linux-block@...r.kernel.org>,
<linux-kernel@...r.kernel.org>
Subject: Re: [PATCH v9 1/8] iov_iter: Define flags to qualify page extraction.

On 1/24/23 09:01, David Howells wrote:
> Define flags to qualify page extraction to pass into iov_iter_*_pages*()
> rather than passing in FOLL_* flags.
>
> For now only a flag to allow peer-to-peer DMA is supported.
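
For readers following along, the intended call pattern ends up looking
roughly like this (untested sketch, not code from this series; the helper
name and variables are made up for illustration):

#include <linux/blkdev.h>
#include <linux/uio.h>

/*
 * Sketch of a block-layer caller: opt in to P2PDMA-capable pages with
 * the new iov_iter-specific flag instead of passing FOLL_PCI_P2PDMA.
 */
static ssize_t example_extract_pages(struct request_queue *q,
				     struct iov_iter *iter,
				     struct page **pages,
				     unsigned int nr_pages, size_t *offset)
{
	unsigned int extraction_flags = 0;

	if (blk_queue_pci_p2pdma(q))
		extraction_flags |= ITER_ALLOW_P2PDMA;

	/* iov_iter translates this to FOLL_PCI_P2PDMA internally. */
	return iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages,
				  offset, extraction_flags);
}

So callers never have to touch gup's FOLL_* namespace directly.
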
>
> Signed-off-by: David Howells <dhowells@...hat.com>
> cc: Al Viro <viro@...iv.linux.org.uk>
> cc: Christoph Hellwig <hch@...radead.org>
> cc: Jens Axboe <axboe@...nel.dk>
> cc: Logan Gunthorpe <logang@...tatee.com>
> cc: linux-fsdevel@...r.kernel.org
> cc: linux-block@...r.kernel.org
> ---
>
> Notes:
> ver #9)
> - Change extract_flags to extraction_flags.

Reviewed-by: John Hubbard <jhubbard@...dia.com>

thanks,
--
John Hubbard
NVIDIA

>
> ver #7)
> - Don't use FOLL_* as a parameter, but rather define constants
> specifically to use with iov_iter_*_pages*().
> - Drop the I/O direction constants for now.
>
> block/bio.c | 6 +++---
> block/blk-map.c | 8 ++++----
> include/linux/uio.h | 7 +++++--
> lib/iov_iter.c | 14 ++++++++------
> 4 files changed, 20 insertions(+), 15 deletions(-)
>
> diff --git a/block/bio.c b/block/bio.c
> index ab59a491a883..683444e6b711 100644
> --- a/block/bio.c
> +++ b/block/bio.c
> @@ -1249,7 +1249,7 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
> unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
> struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
> struct page **pages = (struct page **)bv;
> - unsigned int gup_flags = 0;
> + unsigned int extraction_flags = 0;
> ssize_t size, left;
> unsigned len, i = 0;
> size_t offset, trim;
> @@ -1264,7 +1264,7 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
> pages += entries_left * (PAGE_PTRS_PER_BVEC - 1);
>
> if (bio->bi_bdev && blk_queue_pci_p2pdma(bio->bi_bdev->bd_disk->queue))
> - gup_flags |= FOLL_PCI_P2PDMA;
> + extraction_flags |= ITER_ALLOW_P2PDMA;
>
> /*
> * Each segment in the iov is required to be a block size multiple.
> @@ -1275,7 +1275,7 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
> */
> size = iov_iter_get_pages(iter, pages,
> UINT_MAX - bio->bi_iter.bi_size,
> - nr_pages, &offset, gup_flags);
> + nr_pages, &offset, extraction_flags);
> if (unlikely(size <= 0))
> return size ? size : -EFAULT;
>
> diff --git a/block/blk-map.c b/block/blk-map.c
> index 19940c978c73..7db52ad5b2d0 100644
> --- a/block/blk-map.c
> +++ b/block/blk-map.c
> @@ -267,7 +267,7 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
> {
> unsigned int max_sectors = queue_max_hw_sectors(rq->q);
> unsigned int nr_vecs = iov_iter_npages(iter, BIO_MAX_VECS);
> - unsigned int gup_flags = 0;
> + unsigned int extraction_flags = 0;
> struct bio *bio;
> int ret;
> int j;
> @@ -280,7 +280,7 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
> return -ENOMEM;
>
> if (blk_queue_pci_p2pdma(rq->q))
> - gup_flags |= FOLL_PCI_P2PDMA;
> + extraction_flags |= ITER_ALLOW_P2PDMA;
>
> while (iov_iter_count(iter)) {
> struct page **pages, *stack_pages[UIO_FASTIOV];
> @@ -291,10 +291,10 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
> if (nr_vecs <= ARRAY_SIZE(stack_pages)) {
> pages = stack_pages;
> bytes = iov_iter_get_pages(iter, pages, LONG_MAX,
> - nr_vecs, &offs, gup_flags);
> + nr_vecs, &offs, extraction_flags);
> } else {
> bytes = iov_iter_get_pages_alloc(iter, &pages,
> - LONG_MAX, &offs, gup_flags);
> + LONG_MAX, &offs, extraction_flags);
> }
> if (unlikely(bytes <= 0)) {
> ret = bytes ? bytes : -EFAULT;
> diff --git a/include/linux/uio.h b/include/linux/uio.h
> index 9f158238edba..58fda77f6847 100644
> --- a/include/linux/uio.h
> +++ b/include/linux/uio.h
> @@ -252,12 +252,12 @@ void iov_iter_xarray(struct iov_iter *i, unsigned int direction, struct xarray *
> loff_t start, size_t count);
> ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
> size_t maxsize, unsigned maxpages, size_t *start,
> - unsigned gup_flags);
> + unsigned extraction_flags);
> ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages,
> size_t maxsize, unsigned maxpages, size_t *start);
> ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
> struct page ***pages, size_t maxsize, size_t *start,
> - unsigned gup_flags);
> + unsigned extraction_flags);
> ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i, struct page ***pages,
> size_t maxsize, size_t *start);
> int iov_iter_npages(const struct iov_iter *i, int maxpages);
> @@ -360,4 +360,7 @@ static inline void iov_iter_ubuf(struct iov_iter *i, unsigned int direction,
> };
> }
>
> +/* Flags for iov_iter_get/extract_pages*() */
> +#define ITER_ALLOW_P2PDMA 0x01 /* Allow P2PDMA on the extracted pages */
> +
> #endif
> diff --git a/lib/iov_iter.c b/lib/iov_iter.c
> index f9a3ff37ecd1..da7db39075c6 100644
> --- a/lib/iov_iter.c
> +++ b/lib/iov_iter.c
> @@ -1432,9 +1432,9 @@ static struct page *first_bvec_segment(const struct iov_iter *i,
> static ssize_t __iov_iter_get_pages_alloc(struct iov_iter *i,
> struct page ***pages, size_t maxsize,
> unsigned int maxpages, size_t *start,
> - unsigned int gup_flags)
> + unsigned int extraction_flags)
> {
> - unsigned int n;
> + unsigned int n, gup_flags = 0;
>
> if (maxsize > i->count)
> maxsize = i->count;
> @@ -1442,6 +1442,8 @@ static ssize_t __iov_iter_get_pages_alloc(struct iov_iter *i,
> return 0;
> if (maxsize > MAX_RW_COUNT)
> maxsize = MAX_RW_COUNT;
> + if (extraction_flags & ITER_ALLOW_P2PDMA)
> + gup_flags |= FOLL_PCI_P2PDMA;
>
> if (likely(user_backed_iter(i))) {
> unsigned long addr;
> @@ -1495,14 +1497,14 @@ static ssize_t __iov_iter_get_pages_alloc(struct iov_iter *i,
>
> ssize_t iov_iter_get_pages(struct iov_iter *i,
> struct page **pages, size_t maxsize, unsigned maxpages,
> - size_t *start, unsigned gup_flags)
> + size_t *start, unsigned extraction_flags)
> {
> if (!maxpages)
> return 0;
> BUG_ON(!pages);
>
> return __iov_iter_get_pages_alloc(i, &pages, maxsize, maxpages,
> - start, gup_flags);
> + start, extraction_flags);
> }
> EXPORT_SYMBOL_GPL(iov_iter_get_pages);
>
> @@ -1515,14 +1517,14 @@ EXPORT_SYMBOL(iov_iter_get_pages2);
>
> ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
> struct page ***pages, size_t maxsize,
> - size_t *start, unsigned gup_flags)
> + size_t *start, unsigned extraction_flags)
> {
> ssize_t len;
>
> *pages = NULL;
>
> len = __iov_iter_get_pages_alloc(i, pages, maxsize, ~0U, start,
> - gup_flags);
> + extraction_flags);
> if (len <= 0) {
> kvfree(*pages);
> *pages = NULL;
>