[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <alpine.DEB.2.22.394.2210131724440.3690179@ubuntu-linux-20-04-desktop>
Date: Thu, 13 Oct 2022 17:24:53 -0700 (PDT)
From: Stefano Stabellini <sstabellini@...nel.org>
To: Oleksandr Tyshchenko <olekstysh@...il.com>
cc: xen-devel@...ts.xenproject.org, linux-kernel@...r.kernel.org,
Oleksandr Tyshchenko <oleksandr_tyshchenko@...m.com>,
Stefano Stabellini <sstabellini@...nel.org>,
Juergen Gross <jgross@...e.com>,
Xenia Ragiadakou <burzalodowa@...il.com>
Subject: Re: [PATCH V2 2/2] xen/virtio: Convert PAGE_SIZE/PAGE_SHIFT/PFN_UP
to Xen counterparts
On Sat, 8 Oct 2022, Oleksandr Tyshchenko wrote:
> From: Oleksandr Tyshchenko <oleksandr_tyshchenko@...m.com>
>
> Currently, a grant ref is always based on the Xen page granularity
> (4KB), and the guest commonly uses the same page granularity.
> But the guest may use a different page granularity (i.e. 64KB).
> So adapt the code to be able to deal with it.
>
> Signed-off-by: Oleksandr Tyshchenko <oleksandr_tyshchenko@...m.com>
Acked-by: Stefano Stabellini <sstabellini@...nel.org>
> ---
> Cc: Juergen Gross <jgross@...e.com>
> Cc: Xenia Ragiadakou <burzalodowa@...il.com>
>
> Changes V1 -> V2:
> - update commit description
> - rebase
> - use xen_offset_in_page() in xen_grant_dma_map(unmap)_page()
>
> Previous discussion is at:
> https://lore.kernel.org/xen-devel/20221006120912.1948459-1-olekstysh@gmail.com/
> ---
> drivers/xen/grant-dma-ops.c | 24 ++++++++++++------------
> 1 file changed, 12 insertions(+), 12 deletions(-)
>
> diff --git a/drivers/xen/grant-dma-ops.c b/drivers/xen/grant-dma-ops.c
> index 1d018e3a68a0..aff0f95ed954 100644
> --- a/drivers/xen/grant-dma-ops.c
> +++ b/drivers/xen/grant-dma-ops.c
> @@ -31,12 +31,12 @@ static DEFINE_XARRAY_FLAGS(xen_grant_dma_devices, XA_FLAGS_LOCK_IRQ);
>
> static inline dma_addr_t grant_to_dma(grant_ref_t grant)
> {
> - return XEN_GRANT_DMA_ADDR_OFF | ((dma_addr_t)grant << PAGE_SHIFT);
> + return XEN_GRANT_DMA_ADDR_OFF | ((dma_addr_t)grant << XEN_PAGE_SHIFT);
> }
>
> static inline grant_ref_t dma_to_grant(dma_addr_t dma)
> {
> - return (grant_ref_t)((dma & ~XEN_GRANT_DMA_ADDR_OFF) >> PAGE_SHIFT);
> + return (grant_ref_t)((dma & ~XEN_GRANT_DMA_ADDR_OFF) >> XEN_PAGE_SHIFT);
> }
>
> static struct xen_grant_dma_data *find_xen_grant_dma_data(struct device *dev)
> @@ -79,7 +79,7 @@ static void *xen_grant_dma_alloc(struct device *dev, size_t size,
> unsigned long attrs)
> {
> struct xen_grant_dma_data *data;
> - unsigned int i, n_pages = PFN_UP(size);
> + unsigned int i, n_pages = XEN_PFN_UP(size);
> unsigned long pfn;
> grant_ref_t grant;
> void *ret;
> @@ -91,14 +91,14 @@ static void *xen_grant_dma_alloc(struct device *dev, size_t size,
> if (unlikely(data->broken))
> return NULL;
>
> - ret = alloc_pages_exact(n_pages * PAGE_SIZE, gfp);
> + ret = alloc_pages_exact(n_pages * XEN_PAGE_SIZE, gfp);
> if (!ret)
> return NULL;
>
> pfn = virt_to_pfn(ret);
>
> if (gnttab_alloc_grant_reference_seq(n_pages, &grant)) {
> - free_pages_exact(ret, n_pages * PAGE_SIZE);
> + free_pages_exact(ret, n_pages * XEN_PAGE_SIZE);
> return NULL;
> }
>
> @@ -116,7 +116,7 @@ static void xen_grant_dma_free(struct device *dev, size_t size, void *vaddr,
> dma_addr_t dma_handle, unsigned long attrs)
> {
> struct xen_grant_dma_data *data;
> - unsigned int i, n_pages = PFN_UP(size);
> + unsigned int i, n_pages = XEN_PFN_UP(size);
> grant_ref_t grant;
>
> data = find_xen_grant_dma_data(dev);
> @@ -138,7 +138,7 @@ static void xen_grant_dma_free(struct device *dev, size_t size, void *vaddr,
>
> gnttab_free_grant_reference_seq(grant, n_pages);
>
> - free_pages_exact(vaddr, n_pages * PAGE_SIZE);
> + free_pages_exact(vaddr, n_pages * XEN_PAGE_SIZE);
> }
>
> static struct page *xen_grant_dma_alloc_pages(struct device *dev, size_t size,
> @@ -168,9 +168,9 @@ static dma_addr_t xen_grant_dma_map_page(struct device *dev, struct page *page,
> unsigned long attrs)
> {
> struct xen_grant_dma_data *data;
> - unsigned long dma_offset = offset_in_page(offset),
> - pfn_offset = PFN_DOWN(offset);
> - unsigned int i, n_pages = PFN_UP(dma_offset + size);
> + unsigned long dma_offset = xen_offset_in_page(offset),
> + pfn_offset = XEN_PFN_DOWN(offset);
> + unsigned int i, n_pages = XEN_PFN_UP(dma_offset + size);
> grant_ref_t grant;
> dma_addr_t dma_handle;
>
> @@ -203,8 +203,8 @@ static void xen_grant_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
> unsigned long attrs)
> {
> struct xen_grant_dma_data *data;
> - unsigned long offset = dma_handle & (PAGE_SIZE - 1);
> - unsigned int i, n_pages = PFN_UP(offset + size);
> + unsigned long dma_offset = xen_offset_in_page(dma_handle);
> + unsigned int i, n_pages = XEN_PFN_UP(dma_offset + size);
> grant_ref_t grant;
>
> if (WARN_ON(dir == DMA_NONE))
> --
> 2.25.1
>
Powered by blists - more mailing lists