lists.openwall.net | lists / announce owl-users owl-dev john-users john-dev passwdqc-users yescrypt popa3d-users / oss-security kernel-hardening musl sabotage tlsify passwords / crypt-dev xvendor / Bugtraq Full-Disclosure linux-kernel linux-netdev linux-ext4 linux-hardening linux-cve-announce PHC | |
Open Source and information security mailing list archives
| ||
|
Message-Id: <20220227093434.2889464-3-jhubbard@nvidia.com> Date: Sun, 27 Feb 2022 01:34:30 -0800 From: jhubbard.send.patches@...il.com To: Jens Axboe <axboe@...nel.dk>, Jan Kara <jack@...e.cz>, Christoph Hellwig <hch@...radead.org>, Dave Chinner <dchinner@...hat.com>, "Darrick J . Wong" <djwong@...nel.org>, Theodore Ts'o <tytso@....edu>, Alexander Viro <viro@...iv.linux.org.uk>, Miklos Szeredi <miklos@...redi.hu>, Andrew Morton <akpm@...ux-foundation.org>, Chaitanya Kulkarni <kch@...dia.com> Cc: linux-block@...r.kernel.org, linux-fsdevel@...r.kernel.org, linux-xfs@...r.kernel.org, linux-mm@...ck.org, LKML <linux-kernel@...r.kernel.org>, John Hubbard <jhubbard@...dia.com> Subject: [PATCH 2/6] iov_iter: new iov_iter_pin_pages*(), for FOLL_PIN pages From: John Hubbard <jhubbard@...dia.com> Provide two new specialized routines that only handle user space pages, and invoke pin_user_pages_fast() on them: iov_iter_pin_pages() and iov_iter_pin_pages_alloc(). This allows subsequent patches to convert various callers of iov_iter_get_pages*(), to the new calls, without having to attempt a mass conversion all at once. 
Signed-off-by: John Hubbard <jhubbard@...dia.com>
---
 include/linux/uio.h |  4 +++
 lib/iov_iter.c      | 78 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 82 insertions(+)

diff --git a/include/linux/uio.h b/include/linux/uio.h
index 739285fe5a2f..208020c2b75a 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -236,6 +236,10 @@ ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
 		size_t maxsize, unsigned maxpages, size_t *start);
 ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages,
 		size_t maxsize, size_t *start);
+/* FOLL_PIN (pin_user_pages_fast()) counterparts of the two calls above;
+ * user-space (ITER_IOVEC) iterators only — see lib/iov_iter.c. */
+ssize_t iov_iter_pin_pages(struct iov_iter *i, struct page **pages,
+		size_t maxsize, unsigned int maxpages, size_t *start);
+ssize_t iov_iter_pin_pages_alloc(struct iov_iter *i, struct page ***pages,
+		size_t maxsize, size_t *start);
 int iov_iter_npages(const struct iov_iter *i, int maxpages);
 void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state);
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 6dd5330f7a99..e64e8e4edd0c 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -1560,6 +1560,41 @@ ssize_t iov_iter_get_pages(struct iov_iter *i,
 }
 EXPORT_SYMBOL(iov_iter_get_pages);
 
+/*
+ * iov_iter_pin_pages - pin user pages backing an ITER_IOVEC iterator
+ *
+ * Pins (via pin_user_pages_fast(), i.e. FOLL_PIN) up to @maxpages pages
+ * covering the first @maxsize bytes of @i, filling @pages and storing the
+ * byte offset into the first page in *@start.
+ *
+ * Only user-space-backed (iovec) iterators are supported; any other iter
+ * type returns -EFAULT after a one-time warning.  Returns the number of
+ * bytes covered by the pinned pages (minus *@start), 0 for an empty
+ * request, or a negative errno from pin_user_pages_fast().
+ */
+ssize_t iov_iter_pin_pages(struct iov_iter *i,
+		struct page **pages, size_t maxsize, unsigned int maxpages,
+		size_t *start)
+{
+	size_t len;
+	int n, res;
+
+	/* Clamp the request to what the iterator actually holds. */
+	if (maxsize > i->count)
+		maxsize = i->count;
+	if (!maxsize)
+		return 0;
+
+	/* Callers must only pass user-backed iterators to this helper. */
+	WARN_ON_ONCE(!iter_is_iovec(i));
+
+	if (likely(iter_is_iovec(i))) {
+		unsigned int gup_flags = 0;
+		unsigned long addr;
+
+		/*
+		 * Same flag policy as iov_iter_get_pages(): unless @i is the
+		 * data *source* (WRITE), the pinned pages will be written to.
+		 */
+		if (iov_iter_rw(i) != WRITE)
+			gup_flags |= FOLL_WRITE;
+		/* Caller asked gup not to fault pages in (e.g. atomic ctx). */
+		if (i->nofault)
+			gup_flags |= FOLL_NOFAULT;
+
+		addr = first_iovec_segment(i, &len, start, maxsize, maxpages);
+		n = DIV_ROUND_UP(len, PAGE_SIZE);
+		res = pin_user_pages_fast(addr, n, gup_flags, pages);
+		if (unlikely(res <= 0))
+			return res;
+		/*
+		 * Bytes actually covered: full @len when every page was
+		 * pinned, otherwise whole pages only (partial pin when
+		 * res < n); *@start is the offset into the first page.
+		 */
+		return (res == n ? len : res * PAGE_SIZE) - *start;
+	}
+
+	return -EFAULT;
+}
+EXPORT_SYMBOL(iov_iter_pin_pages);
+
 static struct page **get_pages_array(size_t n)
 {
 	return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL);
@@ -1696,6 +1731,49 @@ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
 }
 EXPORT_SYMBOL(iov_iter_get_pages_alloc);
 
+/*
+ * iov_iter_pin_pages_alloc - like iov_iter_pin_pages(), but allocates the
+ * page-pointer array itself (get_pages_array(), i.e. kvmalloc) and returns
+ * it via *@pages.  On any failure the array is kvfree()d and *@pages is
+ * set to NULL.
+ *
+ * NOTE(review): on success the caller presumably owns *@pages and must
+ * kvfree() it, matching iov_iter_get_pages_alloc() — confirm at call sites.
+ */
+ssize_t iov_iter_pin_pages_alloc(struct iov_iter *i,
+		struct page ***pages, size_t maxsize,
+		size_t *start)
+{
+	struct page **p;
+	size_t len;
+	int n, res;
+
+	/* Clamp the request to what the iterator actually holds. */
+	if (maxsize > i->count)
+		maxsize = i->count;
+	if (!maxsize)
+		return 0;
+
+	/* Callers must only pass user-backed iterators to this helper. */
+	WARN_ON_ONCE(!iter_is_iovec(i));
+
+	if (likely(iter_is_iovec(i))) {
+		unsigned int gup_flags = 0;
+		unsigned long addr;
+
+		/* Same flag policy as the non-alloc variant above. */
+		if (iov_iter_rw(i) != WRITE)
+			gup_flags |= FOLL_WRITE;
+		if (i->nofault)
+			gup_flags |= FOLL_NOFAULT;
+
+		/* ~0U: no page-count cap — the array is sized to fit. */
+		addr = first_iovec_segment(i, &len, start, maxsize, ~0U);
+		n = DIV_ROUND_UP(len, PAGE_SIZE);
+		p = get_pages_array(n);
+		if (!p)
+			return -ENOMEM;
+		res = pin_user_pages_fast(addr, n, gup_flags, p);
+		if (unlikely(res <= 0)) {
+			/* Nothing pinned: release the array, report error. */
+			kvfree(p);
+			*pages = NULL;
+			return res;
+		}
+		*pages = p;
+		/* Same partial-pin accounting as iov_iter_pin_pages(). */
+		return (res == n ? len : res * PAGE_SIZE) - *start;
+	}
+
+	return -EFAULT;
+}
+EXPORT_SYMBOL(iov_iter_pin_pages_alloc);
+
 size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
 			       struct iov_iter *i)
 {
-- 
2.35.1
Powered by blists - more mailing lists