Message-Id: <20210715164544.6827-5-logang@deltatee.com>
Date: Thu, 15 Jul 2021 10:45:32 -0600
From: Logan Gunthorpe <logang@...tatee.com>
To: linux-kernel@...r.kernel.org, linux-alpha@...r.kernel.org,
linux-arm-kernel@...ts.infradead.org, linux-ia64@...r.kernel.org,
linux-mips@...r.kernel.org, linuxppc-dev@...ts.ozlabs.org,
linux-s390@...r.kernel.org, sparclinux@...r.kernel.org,
iommu@...ts.linux-foundation.org, linux-parisc@...r.kernel.org,
xen-devel@...ts.xenproject.org
Cc: Christoph Hellwig <hch@....de>,
Marek Szyprowski <m.szyprowski@...sung.com>,
Robin Murphy <robin.murphy@....com>,
Stephen Bates <sbates@...thlin.com>,
Martin Oliveira <martin.oliveira@...eticom.com>,
Logan Gunthorpe <logang@...tatee.com>,
Joerg Roedel <joro@...tes.org>, Will Deacon <will@...nel.org>
Subject: [PATCH v1 04/16] dma-iommu: Return error code from iommu_dma_map_sg()

Pass through appropriate error codes from iommu_dma_map_sg() now that
the error code will be passed through dma_map_sgtable().

Signed-off-by: Logan Gunthorpe <logang@...tatee.com>
Cc: Joerg Roedel <joro@...tes.org>
Cc: Will Deacon <will@...nel.org>
---
drivers/iommu/dma-iommu.c | 20 +++++++++++++-------
1 file changed, 13 insertions(+), 7 deletions(-)
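
For context only (not part of the patch): a minimal sketch of how a
caller could act on the error now propagated through dma_map_sgtable()
once iommu_dma_map_sg() returns a real errno (e.g. -ENOMEM when IOVA
allocation fails, -EINVAL for an unmappable segment) instead of a bare
failure. The driver function name below is hypothetical.

    /* Hypothetical caller, illustrating use of the propagated errno. */
    #include <linux/device.h>
    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>

    static int example_map_buffer(struct device *dev, struct sg_table *sgt)
    {
            int ret;

            ret = dma_map_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
            if (ret) {
                    /* ret is now a meaningful negative errno, not just "failed" */
                    dev_err(dev, "DMA mapping failed: %d\n", ret);
                    return ret;
            }

            /* ... program the hardware using sgt->sgl ... */

            dma_unmap_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
            return 0;
    }
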
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 98ba927aee1a..9d35e9994306 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -972,7 +972,7 @@ static int iommu_dma_map_sg_swiotlb(struct device *dev, struct scatterlist *sg,
 
 out_unmap:
 	iommu_dma_unmap_sg_swiotlb(dev, sg, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
-	return 0;
+	return -EINVAL;
 }
 
 /*
@@ -993,11 +993,14 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 	dma_addr_t iova;
 	size_t iova_len = 0;
 	unsigned long mask = dma_get_seg_boundary(dev);
+	ssize_t ret;
 	int i;
 
-	if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
-	    iommu_deferred_attach(dev, domain))
-		return 0;
+	if (static_branch_unlikely(&iommu_deferred_attach_enabled)) {
+		ret = iommu_deferred_attach(dev, domain);
+		if (ret)
+			return ret;
+	}
 
 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
 		iommu_dma_sync_sg_for_device(dev, sg, nents, dir);
@@ -1045,14 +1048,17 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 	}
 
 	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
-	if (!iova)
+	if (!iova) {
+		ret = -ENOMEM;
 		goto out_restore_sg;
+	}
 
 	/*
 	 * We'll leave any physical concatenation to the IOMMU driver's
 	 * implementation - it knows better than we do.
 	 */
-	if (iommu_map_sg_atomic(domain, iova, sg, nents, prot) < iova_len)
+	ret = iommu_map_sg_atomic(domain, iova, sg, nents, prot);
+	if (ret < iova_len)
 		goto out_free_iova;
 
 	return __finalise_sg(dev, sg, nents, iova);
@@ -1061,7 +1067,7 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 	iommu_dma_free_iova(cookie, iova, iova_len, NULL);
 out_restore_sg:
 	__invalidate_sg(sg, nents);
-	return 0;
+	return ret;
 }
 
 static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
--
2.20.1