Message-ID: <7da1ab0b-e78a-2ed1-0263-e9174c3af256@linux.ibm.com>
Date: Fri, 28 Oct 2022 12:03:51 -0400
From: Matthew Rosato <mjrosato@...ux.ibm.com>
To: Niklas Schnelle <schnelle@...ux.ibm.com>, iommu@...ts.linux.dev,
Joerg Roedel <joro@...tes.org>, Will Deacon <will@...nel.org>,
Robin Murphy <robin.murphy@....com>,
Jason Gunthorpe <jgg@...dia.com>
Cc: Gerd Bayer <gbayer@...ux.ibm.com>,
Pierre Morel <pmorel@...ux.ibm.com>,
linux-s390@...r.kernel.org, borntraeger@...ux.ibm.com,
hca@...ux.ibm.com, gor@...ux.ibm.com,
gerald.schaefer@...ux.ibm.com, agordeev@...ux.ibm.com,
svens@...ux.ibm.com, linux-kernel@...r.kernel.org
Subject: Re: [PATCH 2/5] iommu/s390: Add I/O TLB ops
On 10/18/22 10:51 AM, Niklas Schnelle wrote:
> Currently s390-iommu does an I/O TLB flush (RPCIT) explicitly for every
> update of the I/O translation table. For one, this is wasteful since
> RPCIT can be skipped after a mapping operation if zdev->tlb_refresh is
> unset. Moreover, we can do a single RPCIT for a range of pages, including
> when doing lazy unmapping.
>
> Thankfully both of these optimizations can be achieved by implementing
> the IOMMU operations that common code provides for the different types
> of I/O TLB flush:
>
> * flush_iotlb_all: Flushes the I/O TLB for the entire IOVA space
> * iotlb_sync: Flushes the I/O TLB for a range of pages that can be
> gathered up, for example to implement lazy unmapping.
> * iotlb_sync_map: Flushes the I/O TLB after a mapping operation
>
> Signed-off-by: Niklas Schnelle <schnelle@...ux.ibm.com>
> ---
> drivers/iommu/s390-iommu.c | 76 ++++++++++++++++++++++++++++++++------
> 1 file changed, 65 insertions(+), 11 deletions(-)
>
> diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
> index ee88e717254b..a4c2e9bc6d83 100644
> --- a/drivers/iommu/s390-iommu.c
> +++ b/drivers/iommu/s390-iommu.c
> @@ -199,14 +199,72 @@ static void s390_iommu_release_device(struct device *dev)
> __s390_iommu_detach_device(zdev);
> }
>
> +static void s390_iommu_flush_iotlb_all(struct iommu_domain *domain)
> +{
> + struct s390_domain *s390_domain = to_s390_domain(domain);
> + struct zpci_dev *zdev;
> + unsigned long flags;
> + int rc;
> +
> + spin_lock_irqsave(&s390_domain->list_lock, flags);
> + list_for_each_entry(zdev, &s390_domain->devices, iommu_list) {
> + rc = zpci_refresh_trans((u64)zdev->fh << 32, zdev->start_dma,
> + zdev->end_dma - zdev->start_dma + 1);
> + if (rc)
> + break;
> + }
> + spin_unlock_irqrestore(&s390_domain->list_lock, flags);
> +}
> +
> +static void s390_iommu_iotlb_sync(struct iommu_domain *domain,
> + struct iommu_iotlb_gather *gather)
> +{
> + struct s390_domain *s390_domain = to_s390_domain(domain);
> + size_t size = gather->end - gather->start + 1;
> + struct zpci_dev *zdev;
> + unsigned long flags;
> + int rc;
> +
> + /* If gather was never added to, there is nothing to flush */
> + if (gather->start == ULONG_MAX)
> + return;
Hmm, this seems a little awkward in that it depends on the init value in iommu_iotlb_gather_init never changing. I don't see any other iommu drivers doing this -- Is there no other way to tell there's nothing to flush?
If we really need to do this, maybe add a shared #define in iommu.h that is used in both iommu_iotlb_gather_init and here?
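Something like the below, perhaps (untested sketch, and IOMMU_GATHER_START_INVALID is just a placeholder name I made up):

/* include/linux/iommu.h */
#define IOMMU_GATHER_START_INVALID	ULONG_MAX

static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
{
	*gather = (struct iommu_iotlb_gather) {
		.start	  = IOMMU_GATHER_START_INVALID,
		.freelist = LIST_HEAD_INIT(gather->freelist),
	};
}

/* drivers/iommu/s390-iommu.c */
	/* Nothing was gathered, so nothing to flush */
	if (gather->start == IOMMU_GATHER_START_INVALID)
		return;

That way the check here can't silently rot if the init value in iommu_iotlb_gather_init ever changes.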
> +
> + spin_lock_irqsave(&s390_domain->list_lock, flags);
> + list_for_each_entry(zdev, &s390_domain->devices, iommu_list) {
> + rc = zpci_refresh_trans((u64)zdev->fh << 32, gather->start,
> + size);
> + if (rc)
> + break;
> + }
> + spin_unlock_irqrestore(&s390_domain->list_lock, flags);
> +}
> +
> +static void s390_iommu_iotlb_sync_map(struct iommu_domain *domain,
> + unsigned long iova, size_t size)
> +{
> + struct s390_domain *s390_domain = to_s390_domain(domain);
> + struct zpci_dev *zdev;
> + unsigned long flags;
> + int rc;
> +
> + spin_lock_irqsave(&s390_domain->list_lock, flags);
> + list_for_each_entry(zdev, &s390_domain->devices, iommu_list) {
> + if (!zdev->tlb_refresh)
> + continue;
> + rc = zpci_refresh_trans((u64)zdev->fh << 32,
> + iova, size);
> + if (rc)
> + break;
> + }
> + spin_unlock_irqrestore(&s390_domain->list_lock, flags);
> +}
> +
> static int s390_iommu_update_trans(struct s390_domain *s390_domain,
> phys_addr_t pa, dma_addr_t dma_addr,
> unsigned long nr_pages, int flags)
> {
> phys_addr_t page_addr = pa & PAGE_MASK;
> - dma_addr_t start_dma_addr = dma_addr;
> unsigned long irq_flags, i;
> - struct zpci_dev *zdev;
> unsigned long *entry;
> int rc = 0;
>
> @@ -225,15 +283,6 @@ static int s390_iommu_update_trans(struct s390_domain *s390_domain,
> dma_addr += PAGE_SIZE;
> }
>
> - spin_lock(&s390_domain->list_lock);
> - list_for_each_entry(zdev, &s390_domain->devices, iommu_list) {
> - rc = zpci_refresh_trans((u64)zdev->fh << 32,
> - start_dma_addr, nr_pages * PAGE_SIZE);
> - if (rc)
> - break;
> - }
> - spin_unlock(&s390_domain->list_lock);
> -
> undo_cpu_trans:
> if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)) {
> flags = ZPCI_PTE_INVALID;
> @@ -340,6 +389,8 @@ static size_t s390_iommu_unmap_pages(struct iommu_domain *domain,
> if (rc)
> return 0;
>
> + iommu_iotlb_gather_add_range(gather, iova, size);
> +
> return size;
> }
>
> @@ -384,6 +435,9 @@ static const struct iommu_ops s390_iommu_ops = {
> .detach_dev = s390_iommu_detach_device,
> .map_pages = s390_iommu_map_pages,
> .unmap_pages = s390_iommu_unmap_pages,
> + .flush_iotlb_all = s390_iommu_flush_iotlb_all,
> + .iotlb_sync = s390_iommu_iotlb_sync,
> + .iotlb_sync_map = s390_iommu_iotlb_sync_map,
> .iova_to_phys = s390_iommu_iova_to_phys,
> .free = s390_domain_free,
> }
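FWIW, the reason an empty gather can reach iotlb_sync at all is that the common unmap path always syncs after the unmap attempt. From memory it looks roughly like this (treat it as a sketch of drivers/iommu/iommu.c, not a quote):

size_t iommu_unmap(struct iommu_domain *domain,
		   unsigned long iova, size_t size)
{
	struct iommu_iotlb_gather iotlb_gather;
	size_t ret;

	iommu_iotlb_gather_init(&iotlb_gather);
	ret = __iommu_unmap(domain, iova, size, &iotlb_gather);
	/* Runs even when __iommu_unmap() unmapped nothing */
	iommu_iotlb_sync(domain, &iotlb_gather);

	return ret;
}

So if unmap_pages bails out before iommu_iotlb_gather_add_range() is called, the gather still holds its init values when s390_iommu_iotlb_sync() runs, which I assume is what the ULONG_MAX check is guarding against.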