Message-Id: <20200603222247.11681-11-sstabellini@kernel.org>
Date: Wed, 3 Jun 2020 15:22:47 -0700
From: Stefano Stabellini <sstabellini@...nel.org>
To: jgross@...e.com, boris.ostrovsky@...cle.com, konrad.wilk@...cle.com
Cc: sstabellini@...nel.org, xen-devel@...ts.xenproject.org,
linux-kernel@...r.kernel.org, tamas@...engyel.com,
roman@...eda.com,
Stefano Stabellini <stefano.stabellini@...inx.com>
Subject: [PATCH v2 11/11] xen/arm: call dma_to_phys on the dma_addr_t parameter of dma_cache_maint
From: Stefano Stabellini <stefano.stabellini@...inx.com>
dma_cache_maint is called with a dma address, which can be different
from the corresponding physical address.

Add a struct device* parameter to dma_cache_maint.

Translate the dma_addr_t parameter of dma_cache_maint by calling
dma_to_phys. Do it for the first page and for all the following pages,
in the case of multi-page handling.
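
For illustration only (the addresses and the 0xc0000000 bus offset below
are made-up values, not taken from the patch; they sketch a setup such as
a board where device-visible dma addresses are shifted from physical
addresses), the per-page translation works out like this:

    /* hypothetical example, not part of the patch */
    dma_addr_t handle = 0xc0080400;                 /* dma address as seen by the device */
    phys_addr_t paddr = dma_to_phys(dev, handle);   /* 0x00080400 with a 0xc0000000 offset */

    /* dma_cache_maint() masks the in-page offset, then loops per page:   */
    /*   page 0: dev_bus_addr = dma_to_phys(dev, 0xc0080000) = 0x00080000 */
    /*   page 1: dev_bus_addr = dma_to_phys(dev, 0xc0081000) = 0x00081000 */

Advancing handle (rather than dev_bus_addr) between iterations and
re-translating it is what keeps the physical address passed to the
hypercall correct for every page.
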
Signed-off-by: Stefano Stabellini <stefano.stabellini@...inx.com>
Tested-by: Corey Minyard <cminyard@...sta.com>
Tested-by: Roman Shaposhnik <roman@...eda.com>
---
Changes in v2:
- improve commit message
---
arch/arm/xen/mm.c | 15 +++++++++------
1 file changed, 9 insertions(+), 6 deletions(-)
diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
index bbad712a890d..1dc20f4bdc33 100644
--- a/arch/arm/xen/mm.c
+++ b/arch/arm/xen/mm.c
@@ -43,15 +43,18 @@ unsigned long xen_get_swiotlb_free_pages(unsigned int order)
 static bool hypercall_cflush = false;
 
 /* buffers in highmem or foreign pages cannot cross page boundaries */
-static void dma_cache_maint(dma_addr_t handle, size_t size, u32 op)
+static void dma_cache_maint(struct device *dev, dma_addr_t handle,
+			    size_t size, u32 op)
 {
 	struct gnttab_cache_flush cflush;
 
-	cflush.a.dev_bus_addr = handle & XEN_PAGE_MASK;
 	cflush.offset = xen_offset_in_page(handle);
 	cflush.op = op;
+	handle &= XEN_PAGE_MASK;
 
 	do {
+		cflush.a.dev_bus_addr = dma_to_phys(dev, handle);
+
 		if (size + cflush.offset > XEN_PAGE_SIZE)
 			cflush.length = XEN_PAGE_SIZE - cflush.offset;
 		else
@@ -60,7 +63,7 @@ static void dma_cache_maint(dma_addr_t handle, size_t size, u32 op)
 		HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1);
 
 		cflush.offset = 0;
-		cflush.a.dev_bus_addr += cflush.length;
+		handle += cflush.length;
 		size -= cflush.length;
 	} while (size);
 }
@@ -79,7 +82,7 @@ void xen_dma_sync_for_cpu(struct device *dev, dma_addr_t handle,
 	if (pfn_valid(PFN_DOWN(dma_to_phys(dev, handle))))
 		arch_sync_dma_for_cpu(paddr, size, dir);
 	else if (dir != DMA_TO_DEVICE)
-		dma_cache_maint(handle, size, GNTTAB_CACHE_INVAL);
+		dma_cache_maint(dev, handle, size, GNTTAB_CACHE_INVAL);
 }
 
 void xen_dma_sync_for_device(struct device *dev, dma_addr_t handle,
@@ -89,9 +92,9 @@ void xen_dma_sync_for_device(struct device *dev, dma_addr_t handle,
 	if (pfn_valid(PFN_DOWN(dma_to_phys(dev, handle))))
 		arch_sync_dma_for_device(paddr, size, dir);
 	else if (dir == DMA_FROM_DEVICE)
-		dma_cache_maint(handle, size, GNTTAB_CACHE_INVAL);
+		dma_cache_maint(dev, handle, size, GNTTAB_CACHE_INVAL);
 	else
-		dma_cache_maint(handle, size, GNTTAB_CACHE_CLEAN);
+		dma_cache_maint(dev, handle, size, GNTTAB_CACHE_CLEAN);
 }
 
 bool xen_arch_need_swiotlb(struct device *dev,
--
2.17.1