Message-Id: <20211019163737.46269-3-sven@svenpeter.dev>
Date: Tue, 19 Oct 2021 18:37:33 +0200
From: Sven Peter <sven@...npeter.dev>
To: iommu@...ts.linux-foundation.org,
Robin Murphy <robin.murphy@....com>
Cc: Sven Peter <sven@...npeter.dev>, Joerg Roedel <joro@...tes.org>,
Will Deacon <will@...nel.org>, Arnd Bergmann <arnd@...nel.org>,
Marc Zyngier <maz@...nel.org>,
Mohamed Mediouni <mohamed.mediouni@...amail.com>,
Alexander Graf <graf@...zon.com>,
Hector Martin <marcan@...can.st>,
Alyssa Rosenzweig <alyssa@...enzweig.io>,
linux-kernel@...r.kernel.org
Subject: [PATCH v3 2/6] iommu/dma: Support granule > PAGE_SIZE in dma_map_sg

Add support to iommu_dma_map_sg's impedance matching to also align
sg_lists correctly when the IOMMU granule is larger than PAGE_SIZE.

Co-developed-by: Robin Murphy <robin.murphy@....com>
Signed-off-by: Robin Murphy <robin.murphy@....com>
Signed-off-by: Sven Peter <sven@...npeter.dev>
---
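To illustrate the impedance matching touched by this patch, here is a
small userspace sketch (illustration only, not kernel code: the 16K
granule, the addresses and the iova_offset()/iova_align() stand-ins are
made up for the example, and PAGE_SIZE is assumed to be 4K). It walks
one segment through the map-side encoding done by the new sg_set_page()
call in iommu_dma_map_sg() and the restore done by __finalise_sg(), and
checks that the original physical address round-trips:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define GRANULE		(4 * PAGE_SIZE)	/* assumed 16K IOMMU granule */

/* userspace stand-ins for iova_offset()/iova_align() with that granule */
static unsigned long iova_offset(unsigned long addr)
{
	return addr & (GRANULE - 1);
}

static unsigned long iova_align(unsigned long size)
{
	return (size + GRANULE - 1) & ~(GRANULE - 1);
}

int main(void)
{
	/* hypothetical segment starting 0x3a00 bytes into a granule */
	unsigned long s_phys = 0x80003a00UL, s_length = 0x2200UL;
	unsigned long s_offset = s_phys & ~PAGE_MASK;	/* sg in-page offset */

	/* map side: what sg_set_page() in iommu_dma_map_sg() encodes */
	unsigned long s_iova_off = iova_offset(s_phys);
	unsigned long aligned_base = s_phys - s_iova_off; /* granule-aligned */
	unsigned long aligned_len = iova_align(s_length + s_iova_off);
	unsigned long page_base = aligned_base & PAGE_MASK;
	unsigned long new_off = s_offset & ~s_iova_off;	/* 0 for granule > PAGE_SIZE */

	/* finalise side: __finalise_sg() adds s_iova_off back on top */
	unsigned long restored = page_base + new_off + s_iova_off;

	printf("s_iova_off=%#lx aligned_base=%#lx aligned_len=%#lx\n",
	       s_iova_off, aligned_base, aligned_len);
	printf("restored=%#lx (expected %#lx)\n", restored, s_phys);
	return 0;
}

With these numbers s_iova_off is 0x3a00, so the segment is mapped as
0x8000 bytes starting at the granule-aligned base 0x80000000, and the
restore yields 0x80003a00 again. The "s_offset & ~s_iova_off" trick
works because the low PAGE_SIZE bits of s_iova_off equal s_offset
whenever the granule is larger than a page, so the new in-page offset
collapses to zero.
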
 drivers/iommu/dma-iommu.c | 25 ++++++++++++++++---------
 1 file changed, 16 insertions(+), 9 deletions(-)

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 17f25632a0d6..ea799e70fc98 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -19,6 +19,7 @@
 #include <linux/irq.h>
 #include <linux/mm.h>
 #include <linux/mutex.h>
+#include <linux/pfn.h>
 #include <linux/pci.h>
 #include <linux/swiotlb.h>
 #include <linux/scatterlist.h>
@@ -878,8 +879,9 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
 		unsigned int s_length = sg_dma_len(s);
 		unsigned int s_iova_len = s->length;
 
-		s->offset += s_iova_off;
-		s->length = s_length;
+		sg_set_page(s,
+			    pfn_to_page(PHYS_PFN(sg_phys(s) + s_iova_off)),
+			    s_length, s_iova_off & ~PAGE_MASK);
 		sg_dma_address(s) = DMA_MAPPING_ERROR;
 		sg_dma_len(s) = 0;
 
@@ -920,13 +922,17 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
 static void __invalidate_sg(struct scatterlist *sg, int nents)
 {
 	struct scatterlist *s;
+	phys_addr_t orig_paddr;
 	int i;
 
 	for_each_sg(sg, s, nents, i) {
-		if (sg_dma_address(s) != DMA_MAPPING_ERROR)
-			s->offset += sg_dma_address(s);
-		if (sg_dma_len(s))
-			s->length = sg_dma_len(s);
+		if (sg_dma_len(s)) {
+			orig_paddr = sg_phys(s) + sg_dma_address(s);
+			sg_set_page(s,
+				    pfn_to_page(PHYS_PFN(orig_paddr)),
+				    sg_dma_len(s),
+				    sg_dma_address(s) & ~PAGE_MASK);
+		}
 		sg_dma_address(s) = DMA_MAPPING_ERROR;
 		sg_dma_len(s) = 0;
 	}
@@ -1003,15 +1009,16 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 	 * stashing the unaligned parts in the as-yet-unused DMA fields.
 	 */
 	for_each_sg(sg, s, nents, i) {
-		size_t s_iova_off = iova_offset(iovad, s->offset);
+		phys_addr_t s_phys = sg_phys(s);
+		size_t s_iova_off = iova_offset(iovad, s_phys);
 		size_t s_length = s->length;
 		size_t pad_len = (mask - iova_len + 1) & mask;
 
 		sg_dma_address(s) = s_iova_off;
 		sg_dma_len(s) = s_length;
-		s->offset -= s_iova_off;
 		s_length = iova_align(iovad, s_length + s_iova_off);
-		s->length = s_length;
+		sg_set_page(s, pfn_to_page(PHYS_PFN(s_phys - s_iova_off)),
+			    s_length, s->offset & ~s_iova_off);
 
 		/*
 		 * Due to the alignment of our single IOVA allocation, we can
--
2.25.1