Message-ID: <Y8ZXQArEsIRNO9k/@infradead.org>
Date: Tue, 17 Jan 2023 00:07:28 -0800
From: Christoph Hellwig <hch@...radead.org>
To: David Howells <dhowells@...hat.com>
Cc: Al Viro <viro@...iv.linux.org.uk>, Jens Axboe <axboe@...nel.dk>,
Jan Kara <jack@...e.cz>, Christoph Hellwig <hch@....de>,
Matthew Wilcox <willy@...radead.org>,
Logan Gunthorpe <logang@...tatee.com>,
linux-block@...r.kernel.org, Christoph Hellwig <hch@...radead.org>,
Jeff Layton <jlayton@...nel.org>,
linux-fsdevel@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH v6 11/34] iov_iter, block: Make bio structs pin pages rather than ref'ing if appropriate
> +	size = iov_iter_extract_pages(iter, &pages,
> +				      UINT_MAX - bio->bi_iter.bi_size,
> +				      nr_pages, gup_flags, &offset);
>  	if (unlikely(size <= 0))
> +	bio_set_cleanup_mode(bio, iter, gup_flags);
This should move out to bio_iov_iter_get_pages and only be called
once.
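
Roughly like the sketch below (untested, for illustration only: it assumes
bio_iov_iter_get_pages() keeps driving __bio_iov_iter_get_pages() in a loop as
in current mainline and that the FOLL_PCI_P2PDMA setup moves out along with the
cleanup-mode decision; only the helpers from the quoted patch are taken as given):

int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
{
	unsigned int gup_flags = 0;
	int ret = 0;

	if (iov_iter_is_bvec(iter)) {
		/* bvec-backed iters reuse the caller's pages, nothing to pin */
		bio_iov_bvec_set(bio, iter);
		iov_iter_advance(iter, bio->bi_iter.bi_size);
		return 0;
	}

	if (bio->bi_bdev && blk_queue_pci_p2pdma(bio->bi_bdev->bd_disk->queue))
		gup_flags |= FOLL_PCI_P2PDMA;

	/* decide ref vs pin once for the whole bio ... */
	bio_set_cleanup_mode(bio, iter, gup_flags);

	do {
		/* ... instead of in every __bio_iov_iter_get_pages() call */
		ret = __bio_iov_iter_get_pages(bio, iter);
	} while (iov_iter_count(iter) && !bio_full(bio, 0) && !ret);

	return bio->bi_vcnt ? 0 : ret;
}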
> +++ b/block/blk-map.c
> @@ -285,24 +285,24 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
>  		gup_flags |= FOLL_PCI_P2PDMA;
>
>  	while (iov_iter_count(iter)) {
> -		struct page **pages, *stack_pages[UIO_FASTIOV];
> +		struct page *stack_pages[UIO_FASTIOV];
> +		struct page **pages = stack_pages;
>  		ssize_t bytes;
>  		size_t offs;
>  		int npages;
>
> -		if (nr_vecs <= ARRAY_SIZE(stack_pages)) {
> -			pages = stack_pages;
> -			bytes = iov_iter_get_pages(iter, pages, LONG_MAX,
> -						   nr_vecs, &offs, gup_flags);
> -		} else {
> -			bytes = iov_iter_get_pages_alloc(iter, &pages,
> -						LONG_MAX, &offs, gup_flags);
> -		}
> +		if (nr_vecs > ARRAY_SIZE(stack_pages))
> +			pages = NULL;
> +
> +		bytes = iov_iter_extract_pages(iter, &pages, LONG_MAX,
> +					       nr_vecs, gup_flags, &offs);
>  		if (unlikely(bytes <= 0)) {
>  			ret = bytes ? bytes : -EFAULT;
>  			goto out_unmap;
>  		}
>
> +		bio_set_cleanup_mode(bio, iter, gup_flags);
Same here - one call outside of the loop.
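
I.e. something along these lines in bio_map_user_iov() (a sketch, not a tested
change; only the extraction part of the loop is shown, the trailing "..." stands
for the rest of the existing loop body, and the FOLL_PCI_P2PDMA setup is assumed
to stay just above the loop as it is today):

	if (blk_queue_pci_p2pdma(rq->q))
		gup_flags |= FOLL_PCI_P2PDMA;

	/*
	 * The cleanup mode only depends on the iter and gup_flags, so
	 * decide it once up front rather than on every loop iteration.
	 */
	bio_set_cleanup_mode(bio, iter, gup_flags);

	while (iov_iter_count(iter)) {
		struct page *stack_pages[UIO_FASTIOV];
		struct page **pages = stack_pages;
		ssize_t bytes;
		size_t offs;
		int npages;

		if (nr_vecs > ARRAY_SIZE(stack_pages))
			pages = NULL;

		bytes = iov_iter_extract_pages(iter, &pages, LONG_MAX,
					       nr_vecs, gup_flags, &offs);
		if (unlikely(bytes <= 0)) {
			ret = bytes ? bytes : -EFAULT;
			goto out_unmap;
		}
		/* no bio_set_cleanup_mode() in the loop body any more */
		...
	}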
> +static inline void bio_set_cleanup_mode(struct bio *bio, struct iov_iter *iter,
> +					unsigned int gup_flags)
> +{
> +	unsigned int cleanup_mode;
> +
> +	bio_clear_flag(bio, BIO_PAGE_REFFED);
.. and this should not be needed. Instead:
> +	cleanup_mode = iov_iter_extract_mode(iter, gup_flags);
> +	if (cleanup_mode & FOLL_GET)
> +		bio_set_flag(bio, BIO_PAGE_REFFED);
> +	if (cleanup_mode & FOLL_PIN)
> +		bio_set_flag(bio, BIO_PAGE_PINNED);
We could warn if a non-matching flag is set here if we really care.
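
One possible reading of that, as a sketch only (warn when the bio already
carries a BIO_PAGE_* flag that the extraction mode didn't ask for, instead of
clearing it; bio_flagged() and WARN_ON_ONCE() are the standard kernel helpers,
everything else comes from the quoted patch):

static inline void bio_set_cleanup_mode(struct bio *bio, struct iov_iter *iter,
					unsigned int gup_flags)
{
	unsigned int cleanup_mode = iov_iter_extract_mode(iter, gup_flags);

	if (cleanup_mode & FOLL_GET)
		bio_set_flag(bio, BIO_PAGE_REFFED);
	else
		WARN_ON_ONCE(bio_flagged(bio, BIO_PAGE_REFFED));

	if (cleanup_mode & FOLL_PIN)
		bio_set_flag(bio, BIO_PAGE_PINNED);
	else
		WARN_ON_ONCE(bio_flagged(bio, BIO_PAGE_PINNED));
}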