[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <003801d658ec$bd526c70$37f74550$@xen.org>
Date: Mon, 13 Jul 2020 09:08:04 +0100
From: Paul Durrant <xadimgnik@...il.com>
To: "'Souptick Joarder'" <jrdr.linux@...il.com>,
<boris.ostrovsky@...cle.com>, <jgross@...e.com>,
<sstabellini@...nel.org>
Cc: <xen-devel@...ts.xenproject.org>, <linux-kernel@...r.kernel.org>,
"'John Hubbard'" <jhubbard@...dia.com>,
"'Paul Durrant'" <xadimgnik@...il.com>
Subject: RE: [PATCH v3 1/3] xen/privcmd: Corrected error handling path
> -----Original Message-----
> From: Souptick Joarder <jrdr.linux@...il.com>
> Sent: 12 July 2020 04:40
> To: boris.ostrovsky@...cle.com; jgross@...e.com; sstabellini@...nel.org
> Cc: xen-devel@...ts.xenproject.org; linux-kernel@...r.kernel.org; Souptick Joarder
> <jrdr.linux@...il.com>; John Hubbard <jhubbard@...dia.com>; Paul Durrant <xadimgnik@...il.com>
> Subject: [PATCH v3 1/3] xen/privcmd: Corrected error handling path
>
> Previously, if lock_pages() ended up partially mapping pages, it used
> to return -ERRNO, due to which unlock_pages() had to go through
> each pages[i] till *nr_pages* to validate them. This can be avoided
> by passing the correct number of partially mapped pages & -ERRNO separately,
> while returning from lock_pages() due to error.
>
> With this fix unlock_pages() doesn't need to validate pages[i] till
> *nr_pages* for the error scenario, and a few condition checks can be removed.
>
> Signed-off-by: Souptick Joarder <jrdr.linux@...il.com>
> Reviewed-by: Juergen Gross <jgross@...e.com>
> Cc: John Hubbard <jhubbard@...dia.com>
> Cc: Boris Ostrovsky <boris.ostrovsky@...cle.com>
> Cc: Paul Durrant <xadimgnik@...il.com>
Reviewed-by: Paul Durrant <paul@....org>
> ---
> drivers/xen/privcmd.c | 31 +++++++++++++++----------------
> 1 file changed, 15 insertions(+), 16 deletions(-)
>
> diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
> index 5dfc59f..b001673 100644
> --- a/drivers/xen/privcmd.c
> +++ b/drivers/xen/privcmd.c
> @@ -579,13 +579,13 @@ static long privcmd_ioctl_mmap_batch(
>
> static int lock_pages(
> struct privcmd_dm_op_buf kbufs[], unsigned int num,
> - struct page *pages[], unsigned int nr_pages)
> + struct page *pages[], unsigned int nr_pages, unsigned int *pinned)
> {
> unsigned int i;
>
> for (i = 0; i < num; i++) {
> unsigned int requested;
> - int pinned;
> + int page_count;
>
> requested = DIV_ROUND_UP(
> offset_in_page(kbufs[i].uptr) + kbufs[i].size,
> @@ -593,14 +593,15 @@ static int lock_pages(
> if (requested > nr_pages)
> return -ENOSPC;
>
> - pinned = get_user_pages_fast(
> + page_count = get_user_pages_fast(
> (unsigned long) kbufs[i].uptr,
> requested, FOLL_WRITE, pages);
> - if (pinned < 0)
> - return pinned;
> + if (page_count < 0)
> + return page_count;
>
> - nr_pages -= pinned;
> - pages += pinned;
> + *pinned += page_count;
> + nr_pages -= page_count;
> + pages += page_count;
> }
>
> return 0;
> @@ -610,13 +611,8 @@ static void unlock_pages(struct page *pages[], unsigned int nr_pages)
> {
> unsigned int i;
>
> - if (!pages)
> - return;
> -
> - for (i = 0; i < nr_pages; i++) {
> - if (pages[i])
> - put_page(pages[i]);
> - }
> + for (i = 0; i < nr_pages; i++)
> + put_page(pages[i]);
> }
>
> static long privcmd_ioctl_dm_op(struct file *file, void __user *udata)
> @@ -629,6 +625,7 @@ static long privcmd_ioctl_dm_op(struct file *file, void __user *udata)
> struct xen_dm_op_buf *xbufs = NULL;
> unsigned int i;
> long rc;
> + unsigned int pinned = 0;
>
> if (copy_from_user(&kdata, udata, sizeof(kdata)))
> return -EFAULT;
> @@ -682,9 +679,11 @@ static long privcmd_ioctl_dm_op(struct file *file, void __user *udata)
> goto out;
> }
>
> - rc = lock_pages(kbufs, kdata.num, pages, nr_pages);
> - if (rc)
> + rc = lock_pages(kbufs, kdata.num, pages, nr_pages, &pinned);
> + if (rc < 0) {
> + nr_pages = pinned;
> goto out;
> + }
>
> for (i = 0; i < kdata.num; i++) {
> set_xen_guest_handle(xbufs[i].h, kbufs[i].uptr);
> --
> 1.9.1
Powered by blists - more mailing lists