lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <c40b04ac-408c-5575-e028-6a60ccb3430a@ddn.com>
Date:   Fri, 31 Mar 2023 16:22:26 +0000
From:   Bernd Schubert <bschubert@....com>
To:     Ming Lei <ming.lei@...hat.com>, Jens Axboe <axboe@...nel.dk>,
        "io-uring@...r.kernel.org" <io-uring@...r.kernel.org>,
        "linux-block@...r.kernel.org" <linux-block@...r.kernel.org>
CC:     "linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
        Miklos Szeredi <mszeredi@...hat.com>,
        ZiyangZhang <ZiyangZhang@...ux.alibaba.com>,
        Xiaoguang Wang <xiaoguang.wang@...ux.alibaba.com>,
        Pavel Begunkov <asml.silence@...il.com>,
        Stefan Hajnoczi <stefanha@...hat.com>,
        Dan Williams <dan.j.williams@...el.com>
Subject: Re: [PATCH V6 12/17] block: ublk_drv: cleanup ublk_copy_user_pages

On 3/30/23 13:36, Ming Lei wrote:
> Clean up ublk_copy_user_pages() by using iov iter; the code
> gets simplified a lot and becomes much more readable than before.
> 
> Signed-off-by: Ming Lei <ming.lei@...hat.com>
> ---
>   drivers/block/ublk_drv.c | 112 +++++++++++++++++----------------------
>   1 file changed, 49 insertions(+), 63 deletions(-)
> 
> diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
> index fdccbf5fdaa1..cca0e95a89d8 100644
> --- a/drivers/block/ublk_drv.c
> +++ b/drivers/block/ublk_drv.c
> @@ -419,49 +419,39 @@ static const struct block_device_operations ub_fops = {
>   
>   #define UBLK_MAX_PIN_PAGES	32
>   
> -struct ublk_map_data {
> -	const struct request *rq;
> -	unsigned long	ubuf;
> -	unsigned int	len;
> -};
> -
>   struct ublk_io_iter {
>   	struct page *pages[UBLK_MAX_PIN_PAGES];
> -	unsigned pg_off;	/* offset in the 1st page in pages */
> -	int nr_pages;		/* how many page pointers in pages */
>   	struct bio *bio;
>   	struct bvec_iter iter;
>   };
>   
> -static inline unsigned ublk_copy_io_pages(struct ublk_io_iter *data,
> -		unsigned max_bytes, bool to_vm)
> +/* copy 'total' bytes between the pinned pages (starting at 'pg_off') and the request bio */
> +static void ublk_copy_io_pages(struct ublk_io_iter *data,
> +		size_t total, size_t pg_off, int dir)
>   {
> -	const unsigned total = min_t(unsigned, max_bytes,
> -			PAGE_SIZE - data->pg_off +
> -			((data->nr_pages - 1) << PAGE_SHIFT));
>   	unsigned done = 0;
>   	unsigned pg_idx = 0;
>   
>   	while (done < total) {
>   		struct bio_vec bv = bio_iter_iovec(data->bio, data->iter);
> -		const unsigned int bytes = min3(bv.bv_len, total - done,
> -				(unsigned)(PAGE_SIZE - data->pg_off));
> +		unsigned int bytes = min3(bv.bv_len, (unsigned)total - done,
> +				(unsigned)(PAGE_SIZE - pg_off));
>   		void *bv_buf = bvec_kmap_local(&bv);
>   		void *pg_buf = kmap_local_page(data->pages[pg_idx]);
>   
> -		if (to_vm)
> -			memcpy(pg_buf + data->pg_off, bv_buf, bytes);
> +		if (dir == ITER_DEST)
> +			memcpy(pg_buf + pg_off, bv_buf, bytes);
>   		else
> -			memcpy(bv_buf, pg_buf + data->pg_off, bytes);
> +			memcpy(bv_buf, pg_buf + pg_off, bytes);
>   
>   		kunmap_local(pg_buf);
>   		kunmap_local(bv_buf);
>   
>   		/* advance page array */
> -		data->pg_off += bytes;
> -		if (data->pg_off == PAGE_SIZE) {
> +		pg_off += bytes;
> +		if (pg_off == PAGE_SIZE) {
>   			pg_idx += 1;
> -			data->pg_off = 0;
> +			pg_off = 0;
>   		}
>   
>   		done += bytes;
> @@ -475,41 +465,40 @@ static inline unsigned ublk_copy_io_pages(struct ublk_io_iter *data,
>   			data->iter = data->bio->bi_iter;
>   		}
>   	}
> -
> -	return done;
>   }
>   
> -static int ublk_copy_user_pages(struct ublk_map_data *data, bool to_vm)
> +/*
> + * Copy data between the request's bio pages and the user pages
> + * pinned from 'uiter'; 'dir' selects the copy direction.
> + */
> +static size_t ublk_copy_user_pages(const struct request *req,
> +		struct iov_iter *uiter, int dir)
>   {
> -	const unsigned int gup_flags = to_vm ? FOLL_WRITE : 0;
> -	const unsigned long start_vm = data->ubuf;
> -	unsigned int done = 0;
>   	struct ublk_io_iter iter = {
> -		.pg_off	= start_vm & (PAGE_SIZE - 1),
> -		.bio	= data->rq->bio,
> -		.iter	= data->rq->bio->bi_iter,
> +		.bio	= req->bio,
> +		.iter	= req->bio->bi_iter,
>   	};
> -	const unsigned int nr_pages = round_up(data->len +
> -			(start_vm & (PAGE_SIZE - 1)), PAGE_SIZE) >> PAGE_SHIFT;
> -
> -	while (done < nr_pages) {
> -		const unsigned to_pin = min_t(unsigned, UBLK_MAX_PIN_PAGES,
> -				nr_pages - done);
> -		unsigned i, len;
> -
> -		iter.nr_pages = get_user_pages_fast(start_vm +
> -				(done << PAGE_SHIFT), to_pin, gup_flags,
> -				iter.pages);
> -		if (iter.nr_pages <= 0)
> -			return done == 0 ? iter.nr_pages : done;
> -		len = ublk_copy_io_pages(&iter, data->len, to_vm);
> -		for (i = 0; i < iter.nr_pages; i++) {
> -			if (to_vm)
> +	size_t done = 0;
> +
> +	while (iov_iter_count(uiter) && iter.bio) {
> +		unsigned nr_pages;
> +		size_t len, off;
> +		int i;
> +
> +		len = iov_iter_get_pages2(uiter, iter.pages,
> +				iov_iter_count(uiter),
> +				UBLK_MAX_PIN_PAGES, &off);
> +		if (len <= 0)
> +			return done;
> +
> +		ublk_copy_io_pages(&iter, len, off, dir);
> +		nr_pages = DIV_ROUND_UP(len + off, PAGE_SIZE);
> +		for (i = 0; i < nr_pages; i++) {
> +			if (dir == ITER_DEST)
>   				set_page_dirty(iter.pages[i]);
>   			put_page(iter.pages[i]);
>   		}
> -		data->len -= len;
> -		done += iter.nr_pages;
> +		done += len;
>   	}
>   
>   	return done;
> @@ -536,15 +525,14 @@ static int ublk_map_io(const struct ublk_queue *ubq, const struct request *req,
>   	 * context is pretty fast, see ublk_pin_user_pages
>   	 */
>   	if (ublk_need_map_req(req)) {
> -		struct ublk_map_data data = {
> -			.rq	=	req,
> -			.ubuf	=	io->addr,
> -			.len	=	rq_bytes,
> -		};
> +		struct iov_iter iter;
> +		struct iovec iov;
> +		const int dir = ITER_DEST;

Maybe a comment here that this means "copy to daemon"?

>   
> -		ublk_copy_user_pages(&data, true);
> +		import_single_range(dir, u64_to_user_ptr(io->addr), rq_bytes,
> +				&iov, &iter);
>   
> -		return rq_bytes - data.len;
> +		return ublk_copy_user_pages(req, &iter, dir);
>   	}
>   	return rq_bytes;
>   }
> @@ -556,17 +544,15 @@ static int ublk_unmap_io(const struct ublk_queue *ubq,
>   	const unsigned int rq_bytes = blk_rq_bytes(req);
>   
>   	if (ublk_need_unmap_req(req)) {
> -		struct ublk_map_data data = {
> -			.rq	=	req,
> -			.ubuf	=	io->addr,
> -			.len	=	io->res,
> -		};
> +		struct iov_iter iter;
> +		struct iovec iov;
> +		const int dir = ITER_SOURCE;

And here "from daemon"?
>   
>   		WARN_ON_ONCE(io->res > rq_bytes);
>   
> -		ublk_copy_user_pages(&data, false);
> -
> -		return io->res - data.len;
> +		import_single_range(dir, u64_to_user_ptr(io->addr), io->res,
> +				&iov, &iter);
> +		return ublk_copy_user_pages(req, &iter, dir);
>   	}
>   	return rq_bytes;
>   }

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ