Message-ID: <8c6d5e7db2d1a01888cc7b9b9850b05e19c75c64.1709631413.git.leon@kernel.org>
Date: Tue, 5 Mar 2024 12:22:11 +0200
From: Leon Romanovsky <leon@...nel.org>
To: Christoph Hellwig <hch@....de>,
Robin Murphy <robin.murphy@....com>,
Marek Szyprowski <m.szyprowski@...sung.com>,
Joerg Roedel <joro@...tes.org>,
Will Deacon <will@...nel.org>,
Jason Gunthorpe <jgg@...pe.ca>,
Chaitanya Kulkarni <chaitanyak@...dia.com>
Cc: Leon Romanovsky <leonro@...dia.com>,
Jonathan Corbet <corbet@....net>,
Jens Axboe <axboe@...nel.dk>,
Keith Busch <kbusch@...nel.org>,
Sagi Grimberg <sagi@...mberg.me>,
Yishai Hadas <yishaih@...dia.com>,
Shameer Kolothum <shameerali.kolothum.thodi@...wei.com>,
Kevin Tian <kevin.tian@...el.com>,
Alex Williamson <alex.williamson@...hat.com>,
Jérôme Glisse <jglisse@...hat.com>,
Andrew Morton <akpm@...ux-foundation.org>,
linux-doc@...r.kernel.org,
linux-kernel@...r.kernel.org,
linux-block@...r.kernel.org,
linux-rdma@...r.kernel.org,
iommu@...ts.linux.dev,
linux-nvme@...ts.infradead.org,
kvm@...r.kernel.org,
linux-mm@...ck.org,
Bart Van Assche <bvanassche@....org>,
Damien Le Moal <damien.lemoal@...nsource.wdc.com>,
Amir Goldstein <amir73il@...il.com>,
"josef@...icpanda.com" <josef@...icpanda.com>,
"Martin K. Petersen" <martin.petersen@...cle.com>,
"daniel@...earbox.net" <daniel@...earbox.net>,
Dan Williams <dan.j.williams@...el.com>,
"jack@...e.com" <jack@...e.com>,
Zhu Yanjun <zyjzyj2000@...il.com>
Subject: [RFC 10/16] RDMA/umem: Prevent UMEM ODP creation with SWIOTLB
From: Leon Romanovsky <leonro@...dia.com>
RDMA UMEM has never supported DMA addresses returned from SWIOTLB, as
these addresses are programmed directly into hardware that is not aware
it is writing to bounce buffers rather than the real ones.
Instead of silently leaving a broken system for users who are unaware
of this, be explicit and return an error to them.
Signed-off-by: Leon Romanovsky <leonro@...dia.com>
---
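Notes: the user-visible effect is that ODP MR registration now fails up
front on systems where SWIOTLB bouncing is forced (e.g. booting with
"swiotlb=force"), instead of leaving a silently broken mapping behind.
A hedged userspace sketch with libibverbs, for illustration only:

	/* ODP registration is now refused instead of silently broken */
	struct ibv_mr *mr;

	mr = ibv_reg_mr(pd, buf, len,
			IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_ON_DEMAND);
	if (!mr)
		fprintf(stderr, "ODP reg_mr failed: %s\n", strerror(errno));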
Documentation/core-api/dma-attributes.rst | 7 +++
drivers/infiniband/core/umem_odp.c | 77 +++++++++++------------
include/linux/dma-mapping.h | 6 ++
kernel/dma/direct.h | 4 +-
kernel/dma/mapping.c | 4 ++
5 files changed, 58 insertions(+), 40 deletions(-)
diff --git a/Documentation/core-api/dma-attributes.rst b/Documentation/core-api/dma-attributes.rst
index 1887d92e8e92..b337ec65d506 100644
--- a/Documentation/core-api/dma-attributes.rst
+++ b/Documentation/core-api/dma-attributes.rst
@@ -130,3 +130,10 @@ accesses to DMA buffers in both privileged "supervisor" and unprivileged
subsystem that the buffer is fully accessible at the elevated privilege
level (and ideally inaccessible or at least read-only at the
lesser-privileged levels).
+
+DMA_ATTR_NO_TRANSLATION
+-----------------------
+
+This attribute indicates to the DMA-mapping subsystem that the buffer
+must not be subject to any address translation. It is intended for devices
+that cannot work with bounced buffers or otherwise fixed-up DMA addresses.
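(Illustrative sketch, not part of the patch: a driver that cannot
tolerate bounce buffering could pass the new attribute through the
existing *_attrs mapping helpers and handle the refusal, e.g.:

	dma_addr_t dma;

	dma = dma_map_page_attrs(dev, page, 0, PAGE_SIZE,
				 DMA_BIDIRECTIONAL,
				 DMA_ATTR_NO_TRANSLATION);
	if (dma_mapping_error(dev, dma))
		/* SWIOTLB would have bounced this buffer */
		return -EOPNOTSUPP;

dma_map_page_attrs() is the existing in-tree helper; the error path
matches the dma_direct_map_page() change below.)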
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 1301009a6b78..57c56000f60e 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -50,51 +50,50 @@
static inline int ib_init_umem_odp(struct ib_umem_odp *umem_odp,
const struct mmu_interval_notifier_ops *ops)
{
+ size_t page_size = 1UL << umem_odp->page_shift;
struct ib_device *dev = umem_odp->umem.ibdev;
+ size_t ndmas, npfns;
+ unsigned long start;
+ unsigned long end;
int ret;
umem_odp->umem.is_odp = 1;
mutex_init(&umem_odp->umem_mutex);
- if (!umem_odp->is_implicit_odp) {
- size_t page_size = 1UL << umem_odp->page_shift;
- unsigned long start;
- unsigned long end;
- size_t ndmas, npfns;
-
- start = ALIGN_DOWN(umem_odp->umem.address, page_size);
- if (check_add_overflow(umem_odp->umem.address,
- (unsigned long)umem_odp->umem.length,
- &end))
- return -EOVERFLOW;
- end = ALIGN(end, page_size);
- if (unlikely(end < page_size))
- return -EOVERFLOW;
-
- ndmas = (end - start) >> umem_odp->page_shift;
- if (!ndmas)
- return -EINVAL;
-
- npfns = (end - start) >> PAGE_SHIFT;
- umem_odp->pfn_list = kvcalloc(
- npfns, sizeof(*umem_odp->pfn_list), GFP_KERNEL);
- if (!umem_odp->pfn_list)
- return -ENOMEM;
-
-
- umem_odp->iova.dev = dev->dma_device;
- umem_odp->iova.size = end - start;
- umem_odp->iova.dir = DMA_BIDIRECTIONAL;
- ret = ib_dma_alloc_iova(dev, &umem_odp->iova);
- if (ret)
- goto out_pfn_list;
-
- ret = mmu_interval_notifier_insert(&umem_odp->notifier,
- umem_odp->umem.owning_mm,
- start, end - start, ops);
- if (ret)
- goto out_free_iova;
- }
+ if (umem_odp->is_implicit_odp)
+ return 0;
+
+ start = ALIGN_DOWN(umem_odp->umem.address, page_size);
+ if (check_add_overflow(umem_odp->umem.address,
+ (unsigned long)umem_odp->umem.length, &end))
+ return -EOVERFLOW;
+ end = ALIGN(end, page_size);
+ if (unlikely(end < page_size))
+ return -EOVERFLOW;
+
+ ndmas = (end - start) >> umem_odp->page_shift;
+ if (!ndmas)
+ return -EINVAL;
+
+ npfns = (end - start) >> PAGE_SHIFT;
+ umem_odp->pfn_list =
+ kvcalloc(npfns, sizeof(*umem_odp->pfn_list), GFP_KERNEL);
+ if (!umem_odp->pfn_list)
+ return -ENOMEM;
+
+ umem_odp->iova.dev = dev->dma_device;
+ umem_odp->iova.size = end - start;
+ umem_odp->iova.dir = DMA_BIDIRECTIONAL;
+ umem_odp->iova.attrs = DMA_ATTR_NO_TRANSLATION;
+ ret = ib_dma_alloc_iova(dev, &umem_odp->iova);
+ if (ret)
+ goto out_pfn_list;
+
+ ret = mmu_interval_notifier_insert(&umem_odp->notifier,
+ umem_odp->umem.owning_mm, start,
+ end - start, ops);
+ if (ret)
+ goto out_free_iova;
return 0;
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 91cc084adb53..89945e707a9b 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -62,6 +62,12 @@
*/
#define DMA_ATTR_PRIVILEGED (1UL << 9)
+/*
+ * DMA_ATTR_NO_TRANSLATION: indicate that the buffer must not be mapped
+ * through any form of address translation (e.g. SWIOTLB bouncing).
+ */
+#define DMA_ATTR_NO_TRANSLATION (1UL << 10)
+
/*
* A dma_addr_t can hold any valid DMA or bus address for the platform. It can
* be given to a device to use as a DMA source or target. It is specific to a
diff --git a/kernel/dma/direct.h b/kernel/dma/direct.h
index 1c30e1cd607a..1c9ec204c999 100644
--- a/kernel/dma/direct.h
+++ b/kernel/dma/direct.h
@@ -92,6 +92,8 @@ static inline dma_addr_t dma_direct_map_page(struct device *dev,
if (is_swiotlb_force_bounce(dev)) {
if (is_pci_p2pdma_page(page))
return DMA_MAPPING_ERROR;
+ if (attrs & DMA_ATTR_NO_TRANSLATION)
+ return DMA_MAPPING_ERROR;
return swiotlb_map(dev, phys, size, dir, attrs);
}
@@ -99,7 +101,7 @@ static inline dma_addr_t dma_direct_map_page(struct device *dev,
dma_kmalloc_needs_bounce(dev, size, dir)) {
if (is_pci_p2pdma_page(page))
return DMA_MAPPING_ERROR;
- if (is_swiotlb_active(dev))
+ if (is_swiotlb_active(dev) && !(attrs & DMA_ATTR_NO_TRANSLATION))
return swiotlb_map(dev, phys, size, dir, attrs);
dev_WARN_ONCE(dev, 1,
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index f989c64622c2..49b1fde510c5 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -188,6 +188,10 @@ int dma_alloc_iova(struct dma_iova_attrs *iova)
struct device *dev = iova->dev;
const struct dma_map_ops *ops = get_dma_ops(dev);
+ if (dma_map_direct(dev, ops) && is_swiotlb_force_bounce(dev) &&
+ iova->attrs & DMA_ATTR_NO_TRANSLATION)
+ return -EOPNOTSUPP;
+
if (dma_map_direct(dev, ops) || !ops->alloc_iova) {
iova->addr = 0;
return 0;
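A hedged sketch of the caller-visible contract after this change
(struct dma_iova_attrs and dma_alloc_iova() come from earlier patches
in this series; field names as used above):

	struct dma_iova_attrs iova = {
		.dev = dev,
		.size = size,
		.dir = DMA_BIDIRECTIONAL,
		.attrs = DMA_ATTR_NO_TRANSLATION,
	};
	int ret;

	ret = dma_alloc_iova(&iova);
	if (ret)
		return ret;	/* -EOPNOTSUPP when SWIOTLB bouncing is forced */
	/* iova.addr == 0 on the direct-mapping path, IOVA base otherwise */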
--
2.44.0