Message-ID: <CAFqt6zaB_wcrUzmOJ6kQWMqdcPrENyJO4FNc_UU5z-AQQh_e3w@mail.gmail.com>
Date: Sat, 5 Sep 2020 06:46:24 +0530
From: Souptick Joarder <jrdr.linux@...il.com>
To: Andrew Morton <akpm@...ux-foundation.org>
Cc: Jason Gunthorpe <jgg@...pe.ca>,
Dan Williams <dan.j.williams@...el.com>,
Greg KH <gregkh@...uxfoundation.org>, timur@...escale.com,
linux-kernel@...r.kernel.org,
Dan Carpenter <dan.carpenter@...cle.com>,
John Hubbard <jhubbard@...dia.com>
Subject: Re: [linux-next PATCH v4] drivers/virt/fsl_hypervisor: Fix error
handling path
Hi Andrew,
On Wed, Sep 2, 2020 at 3:00 AM John Hubbard <jhubbard@...dia.com> wrote:
>
> On 9/1/20 2:21 PM, Souptick Joarder wrote:
> > First, when the memory allocation for sg_list_unaligned fails, the
> > exit path wrongly calls put_page() even though no pages have been
> > pinned yet.
> >
> > Second, when get_user_pages_fast() pins fewer pages than requested,
> > only the num_pinned pages that were actually pinned should be
> > released.
> >
> > This patch addresses both issues.
> >
> > As part of these changes, the in-code comments are updated to refer
> > to get_user_pages_fast().
> >
> > Fixes: 6db7199407ca ("drivers/virt: introduce Freescale hypervisor management driver")
> > Signed-off-by: Souptick Joarder <jrdr.linux@...il.com>
> > Reviewed-by: Dan Carpenter <dan.carpenter@...cle.com>
> > Reviewed-by: John Hubbard <jhubbard@...dia.com>
> > ---
>
> This looks good to me.
Can you please take this patch through the mm tree?
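
For reference, the core of the change is to release only the pages that
were actually pinned, and to skip put_page() entirely when the failure
happens before any pinning. A minimal sketch of that pattern follows
(function and variable names here are hypothetical, not the driver's
actual code):

    #include <linux/mm.h>
    #include <linux/slab.h>

    static long demo_pin_copy(unsigned long uaddr, unsigned int nr_req)
    {
            struct page **pages;
            int num_pinned = 0;     /* return value from get_user_pages_fast() */
            unsigned int i;
            long ret = 0;

            pages = kcalloc(nr_req, sizeof(struct page *), GFP_KERNEL);
            if (!pages)
                    return -ENOMEM; /* nothing pinned yet: no put_page() needed */

            num_pinned = get_user_pages_fast(uaddr, nr_req, FOLL_WRITE, pages);
            if (num_pinned != nr_req) {
                    ret = (num_pinned < 0) ? num_pinned : -EFAULT;
                    goto exit;
            }

            /* ... use the pinned pages ... */

    exit:
            /* unpin only the pages that were actually pinned */
            if (num_pinned > 0)
                    for (i = 0; i < num_pinned; i++)
                            put_page(pages[i]);
            kfree(pages);
            return ret;
    }

The patch applies the same idea by adding a free_pages label for the
pre-pinning failure and bounding the put_page() loop by num_pinned.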
>
> thanks,
> --
> John Hubbard
> NVIDIA
>
> > v2:
> > Added review tag.
> >
> > v3:
> > Address review comment on v2 from John.
> > Added review tag.
> >
> > v4:
> > Address another set of review comments from John.
> >
> > drivers/virt/fsl_hypervisor.c | 17 ++++++++---------
> > 1 file changed, 8 insertions(+), 9 deletions(-)
> >
> > diff --git a/drivers/virt/fsl_hypervisor.c b/drivers/virt/fsl_hypervisor.c
> > index 1b0b11b..46ee0a0 100644
> > --- a/drivers/virt/fsl_hypervisor.c
> > +++ b/drivers/virt/fsl_hypervisor.c
> > @@ -157,7 +157,7 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
> >
> > unsigned int i;
> > long ret = 0;
> > - int num_pinned; /* return value from get_user_pages() */
> > + int num_pinned = 0; /* return value from get_user_pages_fast() */
> > phys_addr_t remote_paddr; /* The next address in the remote buffer */
> > uint32_t count; /* The number of bytes left to copy */
> >
> > @@ -174,7 +174,7 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
> > return -EINVAL;
> >
> > /*
> > - * The array of pages returned by get_user_pages() covers only
> > + * The array of pages returned by get_user_pages_fast() covers only
> > * page-aligned memory. Since the user buffer is probably not
> > * page-aligned, we need to handle the discrepancy.
> > *
> > @@ -224,7 +224,7 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
> >
> > /*
> > * 'pages' is an array of struct page pointers that's initialized by
> > - * get_user_pages().
> > + * get_user_pages_fast().
> > */
> > pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
> > if (!pages) {
> > @@ -241,7 +241,7 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
> > if (!sg_list_unaligned) {
> > pr_debug("fsl-hv: could not allocate S/G list\n");
> > ret = -ENOMEM;
> > - goto exit;
> > + goto free_pages;
> > }
> > sg_list = PTR_ALIGN(sg_list_unaligned, sizeof(struct fh_sg_list));
> >
> > @@ -250,7 +250,6 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
> > num_pages, param.source != -1 ? FOLL_WRITE : 0, pages);
> >
> > if (num_pinned != num_pages) {
> > - /* get_user_pages() failed */
> > pr_debug("fsl-hv: could not lock source buffer\n");
> > ret = (num_pinned < 0) ? num_pinned : -EFAULT;
> > goto exit;
> > @@ -292,13 +291,13 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
> > virt_to_phys(sg_list), num_pages);
> >
> > exit:
> > - if (pages) {
> > - for (i = 0; i < num_pages; i++)
> > - if (pages[i])
> > - put_page(pages[i]);
> > + if (pages && (num_pinned > 0)) {
> > + for (i = 0; i < num_pinned; i++)
> > + put_page(pages[i]);
> > }
> >
> > kfree(sg_list_unaligned);
> > +free_pages:
> > kfree(pages);
> >
> > if (!ret)
> >
>