Message-Id: <1593054160-12628-1-git-send-email-jrdr.linux@gmail.com>
Date: Thu, 25 Jun 2020 08:32:39 +0530
From: Souptick Joarder <jrdr.linux@...il.com>
To: boris.ostrovsky@...cle.com, jgross@...e.com, sstabellini@...nel.org
Cc: xen-devel@...ts.xenproject.org, linux-kernel@...r.kernel.org,
Souptick Joarder <jrdr.linux@...il.com>,
John Hubbard <jhubbard@...dia.com>,
Paul Durrant <xadimgnik@...il.com>
Subject: [PATCH 1/2] xen/privcmd: Correct error handling path and mark pages dirty
Previously, if lock_pages() ended up partially mapping pages, it
returned -ERRNO, and unlock_pages() then had to walk pages[i] all the
way up to *nr_pages* to validate each entry. This can be avoided by
returning the number of partially mapped pages through a separate
*pinned* parameter while lock_pages() returns -ERRNO on error.

With this fix, unlock_pages() no longer needs to validate pages[i] up
to *nr_pages* in the error path, and a few condition checks can be
dropped.

As discussed, pages need to be marked dirty before being unpinned in
unlock_pages(); missing that was an oversight.
Signed-off-by: Souptick Joarder <jrdr.linux@...il.com>
Cc: John Hubbard <jhubbard@...dia.com>
Cc: Boris Ostrovsky <boris.ostrovsky@...cle.com>
Cc: Paul Durrant <xadimgnik@...il.com>
---
Hi,
I've compile-tested this, but I was unable to run-time test it, so any
testing help is much appreciated.
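
For reviewers, here is a minimal, self-contained sketch of the pattern
this patch applies; the helpers demo_release_pages() and
demo_pin_and_use() are illustrative only and not part of the patch.
The idea is to record how many pages were actually pinned, so the
error path releases only those, and to mark each page dirty before
dropping its reference:

#include <linux/mm.h>
#include <linux/pagemap.h>

/*
 * Illustrative helper (not part of this patch): release @nr_pages
 * pages previously pinned with get_user_pages_fast(..., FOLL_WRITE).
 * Each page is marked dirty before its reference is dropped, as the
 * new unlock_pages() does, so writes made through the kernel mapping
 * are not lost.
 */
static void demo_release_pages(struct page *pages[], unsigned int nr_pages)
{
	unsigned int i;

	for (i = 0; i < nr_pages; i++) {
		if (!PageDirty(pages[i]))
			set_page_dirty_lock(pages[i]);
		put_page(pages[i]);
	}
}

/*
 * Illustrative caller: on error, only the pages that were actually
 * pinned are released, instead of scanning the whole array.
 */
static int demo_pin_and_use(void __user *uptr, struct page *pages[],
			    unsigned int nr_pages)
{
	int pinned;

	pinned = get_user_pages_fast((unsigned long)uptr, nr_pages,
				     FOLL_WRITE, pages);
	if (pinned < 0)
		return pinned;	/* nothing was pinned, nothing to undo */

	/* ... use the pages; writes may dirty them ... */

	demo_release_pages(pages, pinned);
	return 0;
}

set_page_dirty_lock() is used rather than set_page_dirty() because
the caller does not hold the page lock at this point.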
drivers/xen/privcmd.c | 34 +++++++++++++++++++---------------
1 file changed, 19 insertions(+), 15 deletions(-)
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index a250d11..0da417c 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -580,43 +580,44 @@ static long privcmd_ioctl_mmap_batch(
 
 static int lock_pages(
 	struct privcmd_dm_op_buf kbufs[], unsigned int num,
-	struct page *pages[], unsigned int nr_pages)
+	struct page *pages[], unsigned int nr_pages, int *pinned)
 {
 	unsigned int i;
+	int errno = 0, page_count = 0;
 
 	for (i = 0; i < num; i++) {
 		unsigned int requested;
-		int pinned;
 
+		*pinned += page_count;
 		requested = DIV_ROUND_UP(
 			offset_in_page(kbufs[i].uptr) + kbufs[i].size,
 			PAGE_SIZE);
 		if (requested > nr_pages)
 			return -ENOSPC;
 
-		pinned = get_user_pages_fast(
+		page_count = get_user_pages_fast(
 			(unsigned long) kbufs[i].uptr,
 			requested, FOLL_WRITE, pages);
-		if (pinned < 0)
-			return pinned;
+		if (page_count < 0) {
+			errno = page_count;
+			return errno;
+		}
 
-		nr_pages -= pinned;
-		pages += pinned;
+		nr_pages -= page_count;
+		pages += page_count;
 	}
 
-	return 0;
+	return errno;
 }
 
 static void unlock_pages(struct page *pages[], unsigned int nr_pages)
 {
 	unsigned int i;
 
-	if (!pages)
-		return;
-
 	for (i = 0; i < nr_pages; i++) {
-		if (pages[i])
-			put_page(pages[i]);
+		if (!PageDirty(pages[i]))
+			set_page_dirty_lock(pages[i]);
+		put_page(pages[i]);
 	}
 }
 
@@ -630,6 +631,7 @@ static long privcmd_ioctl_dm_op(struct file *file, void __user *udata)
 	struct xen_dm_op_buf *xbufs = NULL;
 	unsigned int i;
 	long rc;
+	int pinned = 0;
 
 	if (copy_from_user(&kdata, udata, sizeof(kdata)))
 		return -EFAULT;
@@ -683,9 +685,11 @@ static long privcmd_ioctl_dm_op(struct file *file, void __user *udata)
 		goto out;
 	}
 
-	rc = lock_pages(kbufs, kdata.num, pages, nr_pages);
-	if (rc)
+	rc = lock_pages(kbufs, kdata.num, pages, nr_pages, &pinned);
+	if (rc < 0) {
+		nr_pages = pinned;
 		goto out;
+	}
 
 	for (i = 0; i < kdata.num; i++) {
 		set_xen_guest_handle(xbufs[i].h, kbufs[i].uptr);
--
1.9.1