Message-ID: <ebe0ce98-4a02-1e94-d21b-ccb010abfd2d@huawei.com>
Date: Thu, 23 Jun 2022 09:38:05 +0100
From: John Garry <john.garry@...wei.com>
To: <joro@...tes.org>, <will@...nel.org>, <robin.murphy@....com>
CC: <linux-doc@...r.kernel.org>, <linux-kernel@...r.kernel.org>,
<linux-ide@...r.kernel.org>, <iommu@...ts.linux-foundation.org>,
<linux-scsi@...r.kernel.org>, <liyihang6@...ilicon.com>,
<chenxiang66@...ilicon.com>, <thunder.leizhen@...wei.com>,
<damien.lemoal@...nsource.wdc.com>, <m.szyprowski@...sung.com>,
<martin.petersen@...cle.com>, <jejb@...ux.ibm.com>, <hch@....de>
Subject: Re: [PATCH v3 2/4] dma-iommu: Add iommu_dma_opt_mapping_size()
On 14/06/2022 14:12, John Garry wrote:
> On 06/06/2022 10:30, John Garry wrote:
>> Add the IOMMU callback for the DMA mapping API's dma_opt_mapping_size(),
>> which allows drivers to discover the optimal mapping limit and thus cap
>> their requested IOVA lengths.
>>
>> This value is based on the IOVA rcache range limit, as IOVAs allocated
>> above this limit must always be newly allocated, which may be quite slow.
>>
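
For anyone coming to this fresh: the consumer side is a driver capping its
transfer sizes at the optimal mapping limit. A rough sketch of that usage
follows; the function name here is made up, and the SCSI host plumbing
belongs to patch 3/4, not this patch:

#include <linux/blkdev.h>	/* SECTOR_SHIFT */
#include <linux/dma-mapping.h>	/* dma_opt_mapping_size(), from patch 1/4 */
#include <linux/minmax.h>
#include <scsi/scsi_host.h>

/*
 * Illustrative only: clamp a SCSI host's maximum transfer length so
 * that requests stay within what the IOMMU can map from its IOVA
 * rcaches.
 */
static void example_cap_max_sectors(struct Scsi_Host *shost, struct device *dev)
{
	/*
	 * dma_opt_mapping_size() returns bytes; max_sectors is in
	 * 512-byte sectors, hence the SECTOR_SHIFT conversion.
	 */
	shost->max_sectors = min_t(unsigned int, shost->max_sectors,
				   dma_opt_mapping_size(dev) >> SECTOR_SHIFT);
}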
>
> Can I please get some sort of ack from the IOMMU people on this one?
>
Another request for an ack, please.

Thanks,
John
>
>> Signed-off-by: John Garry <john.garry@...wei.com>
>> Reviewed-by: Damien Le Moal <damien.lemoal@...nsource.wdc.com>
>> ---
>>  drivers/iommu/dma-iommu.c | 6 ++++++
>>  drivers/iommu/iova.c      | 5 +++++
>>  include/linux/iova.h      | 2 ++
>>  3 files changed, 13 insertions(+)
>>
>> diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
>> index f90251572a5d..9e1586447ee8 100644
>> --- a/drivers/iommu/dma-iommu.c
>> +++ b/drivers/iommu/dma-iommu.c
>> @@ -1459,6 +1459,11 @@ static unsigned long iommu_dma_get_merge_boundary(struct device *dev)
>>  	return (1UL << __ffs(domain->pgsize_bitmap)) - 1;
>>  }
>>  
>> +static size_t iommu_dma_opt_mapping_size(void)
>> +{
>> +	return iova_rcache_range();
>> +}
>> +
>>  static const struct dma_map_ops iommu_dma_ops = {
>>  	.alloc			= iommu_dma_alloc,
>>  	.free			= iommu_dma_free,
>> @@ -1479,6 +1484,7 @@ static const struct dma_map_ops iommu_dma_ops = {
>>  	.map_resource		= iommu_dma_map_resource,
>>  	.unmap_resource		= iommu_dma_unmap_resource,
>>  	.get_merge_boundary	= iommu_dma_get_merge_boundary,
>> +	.opt_mapping_size	= iommu_dma_opt_mapping_size,
>>  };
>>  
>>  /*
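
To make the plumbing explicit: this .opt_mapping_size callback is only
reached through the dma_opt_mapping_size() helper that patch 1/4 adds to
the DMA API. Paraphrasing that helper here (see patch 1/4 for the real
body, so treat this as an approximation):

#include <linux/dma-map-ops.h>	/* get_dma_ops(), struct dma_map_ops */
#include <linux/dma-mapping.h>	/* dma_max_mapping_size() */
#include <linux/minmax.h>

size_t dma_opt_mapping_size(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	size_t size = SIZE_MAX;

	/* Ask the ops for an optimal size if the hook is wired up... */
	if (ops && ops->opt_mapping_size)
		size = ops->opt_mapping_size();

	/* ...but never report more than the device's hard maximum. */
	return min(dma_max_mapping_size(dev), size);
}

So with iommu_dma_ops installed, the value a driver sees is exactly
iova_rcache_range() below.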
>> diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
>> index db77aa675145..9f00b58d546e 100644
>> --- a/drivers/iommu/iova.c
>> +++ b/drivers/iommu/iova.c
>> @@ -26,6 +26,11 @@ static unsigned long iova_rcache_get(struct iova_domain *iovad,
>>  static void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
>>  static void free_iova_rcaches(struct iova_domain *iovad);
>>  
>> +unsigned long iova_rcache_range(void)
>> +{
>> +	return PAGE_SIZE << (IOVA_RANGE_CACHE_MAX_SIZE - 1);
>> +}
>> +
>>  static int iova_cpuhp_dead(unsigned int cpu, struct hlist_node *node)
>>  {
>>  	struct iova_domain *iovad;
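
As a sanity check on the number this produces (assuming 4K pages, and
IOVA_RANGE_CACHE_MAX_SIZE == 6 as currently defined in iova.c):

/*
 * iova_rcache_range() = PAGE_SIZE << (IOVA_RANGE_CACHE_MAX_SIZE - 1)
 *                     = 4096 << 5
 *                     = 128K
 *
 * i.e. IOVA ranges up to 128K can be recycled via the rcaches, while
 * anything larger always falls back to a fresh rbtree allocation,
 * which is why 128K is reported as the optimal mapping size.
 */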
>> diff --git a/include/linux/iova.h b/include/linux/iova.h
>> index 320a70e40233..c6ba6d95d79c 100644
>> --- a/include/linux/iova.h
>> +++ b/include/linux/iova.h
>> @@ -79,6 +79,8 @@ static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova)
>>  int iova_cache_get(void);
>>  void iova_cache_put(void);
>>  
>> +unsigned long iova_rcache_range(void);
>> +
>>  void free_iova(struct iova_domain *iovad, unsigned long pfn);
>>  void __free_iova(struct iova_domain *iovad, struct iova *iova);
>>  struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size,
>