Message-ID: <87sghz2ibh.fsf@linux.ibm.com>
Date: Mon, 23 Mar 2020 21:07:38 +0530
From: "Aneesh Kumar K.V" <aneesh.kumar@...ux.ibm.com>
To: Christoph Hellwig <hch@....de>,
Alexey Kardashevskiy <aik@...abs.ru>
Cc: Christoph Hellwig <hch@....de>, iommu@...ts.linux-foundation.org,
linuxppc-dev@...ts.ozlabs.org, Lu Baolu <baolu.lu@...ux.intel.com>,
Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
Joerg Roedel <joro@...tes.org>,
Robin Murphy <robin.murphy@....com>,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH 1/2] dma-mapping: add a dma_ops_bypass flag to struct
device

Christoph Hellwig <hch@....de> writes:
> On Mon, Mar 23, 2020 at 09:37:05AM +0100, Christoph Hellwig wrote:
>> > > + /*
>> > > + * Allows IOMMU drivers to bypass dynamic translations if the DMA mask
>> > > + * is large enough.
>> > > + */
>> > > + if (dev->dma_ops_bypass) {
>> > > + if (min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit) >=
>> > > + dma_direct_get_required_mask(dev))
>> > > + return true;
>> > > + }
>> >
>> >
>> > Why not do this in dma_map_direct() as well?
>>
>> Mostly because it is a relatively expensive operation, including a
>> fls64.
>
> Which I guess isn't too bad compared to a dynamic IOMMU mapping. Can
> you just send a draft patch for what you'd like to see for ppc?
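
For context, the expensive part is computing the mask a device needs to
address all of system RAM; roughly the following, simplified from
kernel/dma/direct.c (details approximate):

static u64 required_mask_sketch(struct device *dev)
{
	/*
	 * Bus address of the last page of RAM, rounded up to a full
	 * power-of-two mask; the fls64() is the cost mentioned above.
	 */
	phys_addr_t top = (phys_addr_t)(max_pfn - 1) << PAGE_SHIFT;
	u64 max_dma = phys_to_dma(dev, top);

	return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
}
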
This is what I was trying. Since I am new to the DMA subsystem, I am
not sure I got all the details correct. The idea is to look at the CPU
address and check whether it can be mapped through the direct ops (is
bus_dma_limit the right restriction here?); if not, fall back to a
dynamic IOMMU mapping.
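
In other words, the map side should end up behaving roughly like the
sketch below (illustrative only; it just inlines the helpers from the
diff that follows):

/*
 * If the whole bus address range of the page fits under both the
 * device DMA mask and bus_dma_limit, use the direct ops 1:1;
 * otherwise fall back to a dynamic TCE mapping.
 */
static dma_addr_t map_page_sketch(struct device *dev, struct page *page,
				  unsigned long offset, size_t size,
				  enum dma_data_direction dir,
				  unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	dma_addr_t end = phys_to_dma(dev, phys) + size - 1;

	if (dma_iommu_map_bypass(dev, attrs) && dev->dma_mask &&
	    end <= min_not_zero(*dev->dma_mask, dev->bus_dma_limit))
		return dma_direct_map_page(dev, page, offset, size,
					   dir, attrs);

	return iommu_map_page(dev, get_iommu_table_base(dev), page,
			      offset, size, dma_get_mask(dev), dir, attrs);
}
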
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
index e486d1d78de2..bc7e6a8b2caa 100644
--- a/arch/powerpc/kernel/dma-iommu.c
+++ b/arch/powerpc/kernel/dma-iommu.c
@@ -31,6 +31,87 @@ static inline bool dma_iommu_map_bypass(struct device *dev,
(!iommu_fixed_is_weak || (attrs & DMA_ATTR_WEAK_ORDERING));
}
+static inline bool __dma_direct_map_capable(struct device *dev, struct page *page,
+ unsigned long offset, size_t size)
+{
+ phys_addr_t phys = page_to_phys(page) + offset;
+ dma_addr_t dma_addr = phys_to_dma(dev, phys);
+ dma_addr_t end = dma_addr + size - 1;
+
+ return end <= min_not_zero(*dev->dma_mask, dev->bus_dma_limit);
+}
+
+static inline bool dma_direct_map_capable(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ unsigned long attrs)
+{
+ if (!dma_iommu_map_bypass(dev, attrs))
+ return false;
+
+ if (!dev->dma_mask)
+ return false;
+
+ return __dma_direct_map_capable(dev, page, offset, size);
+}
+
+
+static inline bool dma_direct_unmap_capable(struct device *dev, dma_addr_t addr, size_t size,
+ unsigned long attrs)
+{
+ dma_addr_t end = addr + size - 1;
+
+ if (!dma_iommu_map_bypass(dev, attrs))
+ return false;
+
+ if (!dev->dma_mask)
+ return false;
+
+ return end <= min_not_zero(*dev->dma_mask, dev->bus_dma_limit);
+}
+
+static inline bool dma_direct_sg_map_capable(struct device *dev, struct scatterlist *sglist,
+ int nelems, unsigned long attrs)
+{
+ int i;
+ struct scatterlist *sg;
+
+ if (!dma_iommu_map_bypass(dev, attrs))
+ return false;
+
+ if (!dev->dma_mask)
+ return false;
+
+ for_each_sg(sglist, sg, nelems, i) {
+ if (!__dma_direct_map_capable(dev, sg_page(sg),
+ sg->offset, sg->length))
+ return false;
+ }
+ return true;
+}
+
+static inline bool dma_direct_sg_unmap_capable(struct device *dev, struct scatterlist *sglist,
+ int nelems, unsigned long attrs)
+{
+ int i;
+ dma_addr_t end;
+ struct scatterlist *sg;
+
+ if (!dma_iommu_map_bypass(dev, attrs))
+ return false;
+
+ if (!dev->dma_mask)
+ return false;
+
+ for_each_sg(sglist, sg, nelems, i) {
+ end = sg->dma_address + sg_dma_len(sg) - 1;
+
+ if (end > min_not_zero(*dev->dma_mask, dev->bus_dma_limit))
+ return false;
+ }
+ return true;
+}
+
+
/* Allocates a contiguous real buffer and creates mappings over it.
* Returns the virtual address of the buffer and sets dma_handle
* to the dma address (mapping) of the first page.
@@ -67,7 +148,7 @@ static dma_addr_t dma_iommu_map_page(struct device *dev, struct page *page,
enum dma_data_direction direction,
unsigned long attrs)
{
- if (dma_iommu_map_bypass(dev, attrs))
+ if (dma_direct_map_capable(dev, page, offset, size, attrs))
return dma_direct_map_page(dev, page, offset, size, direction,
attrs);
return iommu_map_page(dev, get_iommu_table_base(dev), page, offset,
@@ -79,7 +160,7 @@ static void dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction direction,
unsigned long attrs)
{
- if (!dma_iommu_map_bypass(dev, attrs))
+ if (!dma_direct_unmap_capable(dev, dma_handle, size, attrs))
iommu_unmap_page(get_iommu_table_base(dev), dma_handle, size,
direction, attrs);
else
@@ -91,7 +172,7 @@ static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction,
unsigned long attrs)
{
- if (dma_iommu_map_bypass(dev, attrs))
+ if (dma_direct_sg_map_capable(dev, sglist, nelems, attrs))
return dma_direct_map_sg(dev, sglist, nelems, direction, attrs);
return ppc_iommu_map_sg(dev, get_iommu_table_base(dev), sglist, nelems,
dma_get_mask(dev), direction, attrs);
@@ -101,7 +182,7 @@ static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction,
unsigned long attrs)
{
- if (!dma_iommu_map_bypass(dev, attrs))
+ if (!dma_direct_sg_unmap_capable(dev, sglist, nelems, attrs))
ppc_iommu_unmap_sg(get_iommu_table_base(dev), sglist, nelems,
direction, attrs);
else
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 99f72162dd85..702a680f5766 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -1119,6 +1119,7 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
spin_unlock(&direct_window_list_lock);
dma_addr = be64_to_cpu(ddwprop->dma_base);
+ dev->dev.bus_dma_limit = dma_addr + query.largest_available_block;
goto out_unlock;
out_free_window:
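
The last hunk is what makes the helpers above meaningful on pseries:
once the dynamic DMA window is created, bus_dma_limit records how far
the direct-mapped window extends, and every mapping is then tested
against it. One detail I am not sure about is the unit of
query.largest_available_block: if it counts TCEs rather than bytes, the
hunk would presumably need to scale it, something like

	/* hypothetical, only needed if largest_available_block is a TCE count */
	dev->dev.bus_dma_limit = dma_addr +
			(query.largest_available_block << page_shift);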