Message-Id: <20190430002952.18909-4-tmurphy@arista.com>
Date: Tue, 30 Apr 2019 01:29:50 +0100
From: Tom Murphy <tmurphy@...sta.com>
To: iommu@...ts.linux-foundation.org
Cc: murphyt7@....ie, Tom Murphy <tmurphy@...sta.com>,
	Joerg Roedel <joro@...tes.org>, Will Deacon <will.deacon@....com>,
	Robin Murphy <robin.murphy@....com>,
	Marek Szyprowski <m.szyprowski@...sung.com>,
	Kukjin Kim <kgene@...nel.org>, Krzysztof Kozlowski <krzk@...nel.org>,
	David Woodhouse <dwmw2@...radead.org>,
	Andy Gross <andy.gross@...aro.org>,
	David Brown <david.brown@...aro.org>,
	Matthias Brugger <matthias.bgg@...il.com>,
	Rob Clark <robdclark@...il.com>, Heiko Stuebner <heiko@...ech.de>,
	Gerald Schaefer <gerald.schaefer@...ibm.com>,
	Thierry Reding <thierry.reding@...il.com>,
	Jonathan Hunter <jonathanh@...dia.com>,
	linux-kernel@...r.kernel.org, linux-arm-kernel@...ts.infradead.org,
	linux-samsung-soc@...r.kernel.org, linux-arm-msm@...r.kernel.org,
	linux-mediatek@...ts.infradead.org, linux-rockchip@...ts.infradead.org,
	linux-s390@...r.kernel.org, linux-tegra@...r.kernel.org
Subject: [PATCH v2 3/4] iommu/dma-iommu: Use the dev->coherent_dma_mask

Use the dev->coherent_dma_mask when allocating in the dma-iommu ops api.

Signed-off-by: Tom Murphy <tmurphy@...sta.com>
---
 drivers/iommu/dma-iommu.c | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index c18f74ad1e8b..df03104978d7 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -436,7 +436,8 @@ static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr,
 }
 
 static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
-		size_t size, int prot, struct iommu_domain *domain)
+		size_t size, int prot, struct iommu_domain *domain,
+		dma_addr_t dma_limit)
 {
 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
 	size_t iova_off = 0;
@@ -447,7 +448,7 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
 		size = iova_align(&cookie->iovad, size + iova_off);
 	}
 
-	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
+	iova = iommu_dma_alloc_iova(domain, size, dma_limit, dev);
 	if (!iova)
 		return DMA_MAPPING_ERROR;
 
@@ -490,7 +491,7 @@ static void *iommu_dma_alloc_contiguous(struct device *dev, size_t size,
 		return NULL;
 
 	*dma_handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot,
-			iommu_get_dma_domain(dev));
+			iommu_get_dma_domain(dev), dev->coherent_dma_mask);
 	if (*dma_handle == DMA_MAPPING_ERROR) {
 		if (!dma_release_from_contiguous(dev, page, count))
 			__free_pages(page, page_order);
@@ -760,7 +761,7 @@ static void *iommu_dma_alloc_pool(struct device *dev, size_t size,
 
 	*dma_handle = __iommu_dma_map(dev, page_to_phys(page), size,
 			dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs),
-			iommu_get_domain_for_dev(dev));
+			iommu_get_domain_for_dev(dev), dev->coherent_dma_mask);
 	if (*dma_handle == DMA_MAPPING_ERROR) {
 		dma_free_from_pool(vaddr, PAGE_ALIGN(size));
 		return NULL;
@@ -850,7 +851,7 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
 
 	dma_handle =__iommu_dma_map(dev, phys, size,
 			dma_info_to_prot(dir, coherent, attrs),
-			iommu_get_dma_domain(dev));
+			iommu_get_dma_domain(dev), dma_get_mask(dev));
 	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
 	    dma_handle != DMA_MAPPING_ERROR)
 		arch_sync_dma_for_device(dev, phys, size, dir);
@@ -1065,7 +1066,7 @@ static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
 
 	return __iommu_dma_map(dev, phys, size,
 			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
-			iommu_get_dma_domain(dev));
+			iommu_get_dma_domain(dev), dma_get_mask(dev));
 }
 
 static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
@@ -1250,7 +1251,8 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
 	if (!msi_page)
 		return NULL;
 
-	iova = __iommu_dma_map(dev, msi_addr, size, prot, domain);
+	iova = __iommu_dma_map(dev, msi_addr, size, prot, domain,
+			dma_get_mask(dev));
 	if (iova == DMA_MAPPING_ERROR)
 		goto out_free_page;
 
-- 
2.17.1
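
For context on the mask distinction the patch relies on: a device carries two DMA masks, the streaming mask returned by dma_get_mask(dev) and the separate dev->coherent_dma_mask, and drivers may legitimately set them to different widths. The sketch below is a hypothetical driver probe (the foo_probe name and the chosen mask widths are illustrative assumptions, not taken from this patch); it uses only the standard kernel DMA API (dma_set_mask(), dma_set_coherent_mask(), dma_alloc_coherent()) and shows the case the patch fixes: a coherent allocation on a device whose coherent mask is narrower than its streaming mask.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/sizes.h>

/* Hypothetical probe routine for illustration; not part of the patch. */
static int foo_probe(struct device *dev)
{
	dma_addr_t handle;
	void *cpu_addr;
	int ret;

	/* Streaming DMA can address the full 64-bit space... */
	ret = dma_set_mask(dev, DMA_BIT_MASK(64));
	if (ret)
		return ret;

	/* ...but coherent buffers must land below 4 GiB on this device. */
	ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	/*
	 * On the dma-iommu path, the IOVA for this buffer is bounded by
	 * the dma_limit passed to __iommu_dma_map(). With the patch that
	 * limit is dev->coherent_dma_mask (32 bits here), so the returned
	 * handle honours the coherent mask; before the patch the limit
	 * was dma_get_mask(dev) (64 bits here), which could yield an IOVA
	 * the device cannot use for coherent accesses.
	 */
	cpu_addr = dma_alloc_coherent(dev, SZ_4K, &handle, GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;

	dma_free_coherent(dev, SZ_4K, cpu_addr, handle);
	return 0;
}

Note that the streaming paths (iommu_dma_map_page(), iommu_dma_map_resource(), the MSI page) keep dma_get_mask(dev) as their limit in the patch, consistent with this split: only the coherent allocation paths switch to dev->coherent_dma_mask.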