Message-Id: <20200803133620.13840-1-ajaykumar.rs@samsung.com>
Date: Mon, 3 Aug 2020 19:06:20 +0530
From: Ajay Kumar <ajaykumar.rs@...sung.com>
To: iommu@...ts.linux-foundation.org, linux-kernel@...r.kernel.org,
linux-arm-kernel@...ts.infradead.org, robh+dt@...nel.org,
mark.rutland@....com, will@...nel.org
Cc: joro@...tes.org, nleeder@...eaurora.org, robin.murphy@....com,
Ajay Kumar <ajaykumar.rs@...sung.com>
Subject: [RFC PATCH] dma-iommu: allow devices to set IOVA range dynamically
Currently, there is no way to change the lower limit of the IOVA
range for a device other than calling init_iova_domain(), but that
function re-inits the whole domain and loses track of the IOVAs
that were allocated before the re-init.

There are cases where a device might not support a contiguous range
of addresses, and can also have dependencies among buffers allocated
for firmware, image manipulation, etc., with all of the address
requests passing through the IOMMU. In such cases, we can allocate
buffers stage by stage by setting an address limit, and also keep
track of the same.

A bit of background can be found here:
IOVA allocation dependency between firmware buffer and remaining buffers
https://www.spinics.net/lists/iommu/msg43586.html

This patch allows devices to limit the IOVA space they allocate from
at any given point in time. We allow this only if the device owns the
corresponding iommu_domain, that is, the device is the only master
attached to the domain.

The lower limit of the IOVA space is marked by start_pfn, and the
upper limit is marked by dma_mask; this patch honors both. Since
changing dma_mask can extend the addressable region beyond the
current cached node, we also reset the cached nodes.
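
As a rough sketch of the intended usage, a driver that is the sole
master in its group could stage its allocations as below (hypothetical
consumer code with made-up sizes, not part of this patch):

	static int stage_allocations(struct device *dev)
	{
		dma_addr_t fw_dma, img_dma;
		void *fw_buf, *img_buf;
		int ret;

		/* Stage 1: confine IOVAs to [0, 256M) for the firmware buffer */
		ret = iommu_set_iova_range(dev, 0, SZ_256M);
		if (ret)
			return ret;

		fw_buf = dma_alloc_coherent(dev, SZ_4M, &fw_dma, GFP_KERNEL);
		if (!fw_buf)
			return -ENOMEM;

		/* Stage 2: move the window to [256M, 1G) for the image buffers */
		ret = iommu_set_iova_range(dev, SZ_256M, SZ_1G - SZ_256M);
		if (ret)
			return ret;

		img_buf = dma_alloc_coherent(dev, SZ_16M, &img_dma, GFP_KERNEL);
		if (!img_buf)
			return -ENOMEM;

		return 0;
	}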
Signed-off-by: Ajay Kumar <ajaykumar.rs@...sung.com>
---
 drivers/iommu/dma-iommu.c | 73 +++++++++++++++++++++++++++++++++++++++
 drivers/iommu/iommu.c     | 16 +++++++++
 include/linux/iommu.h     |  6 ++++
 include/linux/iova.h      |  6 ++++
 4 files changed, 101 insertions(+)
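
For reference, assuming a 4K IOVA granule, the bounds math for a call
like iommu_set_iova_range(dev, 0x80000000, SZ_1G) works out as follows
(illustrative numbers only):

	shift        = iova_shift(iovad)      = 12
	base_pfn     = 0x80000000 >> shift    = 0x80000
	new_dma_mask = base + size - 1        = 0xbfffffff

so subsequent allocations are confined to [0x80000000, 0xbfffffff]
until the range is changed again.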
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 4959f5df21bd..2fe3f57ab648 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -167,6 +167,79 @@ void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);

+/**
+ * iommu_set_iova_range - Limit the IOVA region for a specific device
+ * @dev: Device to set IOVA range for
+ * @base: Base address or the lower limit of the IOVA range
+ * @size: Size of the address range from the lower limit to the upper limit
+ *
+ * Allow a master device to dynamically control the range of IOVA addresses
+ * it allocates from, iff the master device is the only device attached to
+ * the corresponding iommu_domain.
+ * IOVAs that were allocated prior to this call are not affected, even if
+ * they fall outside the new range.
+ */
+int iommu_set_iova_range(struct device *dev, dma_addr_t base, u64 size)
+{
+	struct iommu_domain *domain;
+	struct iommu_dma_cookie *cookie;
+	struct iova_domain *iovad;
+	unsigned long shift, base_pfn;
+	u64 new_dma_mask;
+
+	/*
+	 * Check if the IOMMU master device is the sole entry in the group.
+	 * If the group has more than one master device using the same IOMMU,
+	 * we shouldn't allow that device to change the IOVA limit.
+	 */
+	if (iommu_group_device_count_from_dev(dev) != 1)
+		return -EINVAL;
+
+	domain = iommu_get_domain_for_dev(dev);
+	if (!domain)
+		return -ENODEV;
+
+	if (domain->type != IOMMU_DOMAIN_DMA)
+		return -EINVAL;
+
+	cookie = domain->iova_cookie;
+	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
+		return -ENODEV;
+
+	iovad = &cookie->iovad;
+
+	shift = iova_shift(iovad);
+	base_pfn = base >> shift;
+
+	base_pfn = max_t(unsigned long, 1, base_pfn);
+
+	/* base cannot be outside the aperture */
+	if (domain->geometry.force_aperture) {
+		if (base > domain->geometry.aperture_end ||
+		    base + size <= domain->geometry.aperture_start) {
+			pr_warn("specified DMA range outside IOMMU capability\n");
+			return -EFAULT;
+		}
+		/* ...then finally give it a kicking to make sure it fits */
+		base_pfn = max_t(unsigned long, base_pfn,
+				 domain->geometry.aperture_start >> shift);
+	}
+	/* Set the page-aligned lower limit of the IOVA range via start_pfn */
+	iovad->start_pfn = base_pfn;
+
+	/* Set the upper limit of the IOVA range via dma_mask */
+	new_dma_mask = (u64)base + size - 1;
+	dma_set_mask_and_coherent(dev, new_dma_mask);
+
+	/* Reset cached nodes to start the IOVA search from the anchor node */
+	iovad->cached_node = &iovad->anchor.node;
+	iovad->cached32_node = &iovad->anchor.node;
+	iovad->max32_alloc_size = iovad->dma_32bit_pfn;
+
+	return 0;
+}
+EXPORT_SYMBOL(iommu_set_iova_range);
+
static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
		phys_addr_t start, phys_addr_t end)
{
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 609bd25bf154..30b2d4e5487d 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -919,6 +919,22 @@ static int iommu_group_device_count(struct iommu_group *group)
	return ret;
}

+int iommu_group_device_count_from_dev(struct device *dev)
+{
+	struct iommu_group *group;
+	int group_device_count;
+
+	group = iommu_group_get(dev);
+	if (!group)
+		return 0;
+
+	group_device_count = iommu_group_device_count(group);
+	iommu_group_put(group);
+
+	return group_device_count;
+}
+EXPORT_SYMBOL_GPL(iommu_group_device_count_from_dev);
+
/**
* iommu_group_for_each_dev - iterate over each device in the group
* @group: the group
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index fee209efb756..4dbd4dab91c0 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -477,6 +477,7 @@ extern int iommu_group_set_name(struct iommu_group *group, const char *name);
extern int iommu_group_add_device(struct iommu_group *group,
				  struct device *dev);
extern void iommu_group_remove_device(struct device *dev);
+extern int iommu_group_device_count_from_dev(struct device *dev);
extern int iommu_group_for_each_dev(struct iommu_group *group, void *data,
				    int (*fn)(struct device *, void *));
extern struct iommu_group *iommu_group_get(struct device *dev);
@@ -828,6 +829,11 @@ static inline void iommu_group_remove_device(struct device *dev)
{
}

+static inline int iommu_group_device_count_from_dev(struct device *dev)
+{
+	return 0;
+}
+
static inline int iommu_group_for_each_dev(struct iommu_group *group,
					   void *data,
					   int (*fn)(struct device *, void *))
diff --git a/include/linux/iova.h b/include/linux/iova.h
index a0637abffee8..1e7d7b23ff66 100644
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -163,6 +163,7 @@ void put_iova_domain(struct iova_domain *iovad);
struct iova *split_and_remove_iova(struct iova_domain *iovad,
	struct iova *iova, unsigned long pfn_lo, unsigned long pfn_hi);
void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
+int iommu_set_iova_range(struct device *dev, dma_addr_t base, u64 size);
#else
static inline int iova_cache_get(void)
{
@@ -270,6 +271,11 @@ static inline void free_cpu_cached_iovas(unsigned int cpu,
					 struct iova_domain *iovad)
{
}
+
+static inline int iommu_set_iova_range(struct device *dev, dma_addr_t base, u64 size)
+{
+	return 0;
+}
#endif

#endif
--
2.17.1