Date:   Wed, 19 Oct 2022 16:44:35 +0200
From:   Niklas Schnelle <schnelle@...ux.ibm.com>
To:     Matthew Rosato <mjrosato@...ux.ibm.com>, iommu@...ts.linux.dev,
        Joerg Roedel <joro@...tes.org>, Will Deacon <will@...nel.org>,
        Robin Murphy <robin.murphy@....com>,
        Jason Gunthorpe <jgg@...dia.com>
Cc:     Gerd Bayer <gbayer@...ux.ibm.com>,
        Pierre Morel <pmorel@...ux.ibm.com>,
        linux-s390@...r.kernel.org, borntraeger@...ux.ibm.com,
        hca@...ux.ibm.com, gor@...ux.ibm.com,
        gerald.schaefer@...ux.ibm.com, agordeev@...ux.ibm.com,
        svens@...ux.ibm.com, linux-kernel@...r.kernel.org,
        Wenjia Zhang <wenjia@...ux.ibm.com>,
        Julian Ruess <julianr@...ux.ibm.com>
Subject: [RFC 6/6] iommu/s390: flush queued IOVAs on RPCIT out of resource indication

When RPCIT indicates that the underlying hypervisor has run out of
resources, this often means that its IOVA space is exhausted and IOVAs
need to be freed before new ones can be created. By triggering a flush
of the IOVA queue we not only free the queued IOVAs but also establish
the new mapping as part of the resulting global flush.
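
For illustration, a minimal sketch of the pattern applied below in all
three IOTLB paths (the helper name zpci_refresh_or_flush is hypothetical
and not part of this patch):

static int zpci_refresh_or_flush(struct iommu_domain *domain,
				 struct zpci_dev *zdev, u64 iova, size_t size)
{
	int rc;

	rc = zpci_refresh_trans((u64)zdev->fh << 32, iova, size);
	if (rc == -ENOMEM) {
		/*
		 * Hypervisor ran out of IOVA resources: drain the IOVA
		 * flush queue. The global flush this triggers also
		 * establishes the new mapping, so treat the error as
		 * handled.
		 */
		iommu_dma_flush_fq(domain->iova_cookie);
		rc = 0;
	}
	return rc;
}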

Signed-off-by: Niklas Schnelle <schnelle@...ux.ibm.com>
---
 drivers/iommu/dma-iommu.c  |  2 +-
 drivers/iommu/dma-iommu.h  |  1 +
 drivers/iommu/s390-iommu.c | 12 ++++++++++++
 3 files changed, 14 insertions(+), 1 deletion(-)

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 427fb84f50c3..4853f98f3305 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -287,7 +287,7 @@ static void flush_percpu(struct iommu_dma_cookie *cookie)
 	}
 }
 
-static void iommu_dma_flush_fq(struct iommu_dma_cookie *cookie)
+void iommu_dma_flush_fq(struct iommu_dma_cookie *cookie)
 {
 	if (!cookie->fq_domain)
 		return;
diff --git a/drivers/iommu/dma-iommu.h b/drivers/iommu/dma-iommu.h
index 942790009292..cac06030aa26 100644
--- a/drivers/iommu/dma-iommu.h
+++ b/drivers/iommu/dma-iommu.h
@@ -13,6 +13,7 @@ int iommu_get_dma_cookie(struct iommu_domain *domain);
 void iommu_put_dma_cookie(struct iommu_domain *domain);
 
 int iommu_dma_init_fq(struct iommu_domain *domain);
+void iommu_dma_flush_fq(struct iommu_dma_cookie *cookie);
 
 void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list);
 
diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
index 506f8b92931f..270662584f96 100644
--- a/drivers/iommu/s390-iommu.c
+++ b/drivers/iommu/s390-iommu.c
@@ -502,6 +502,10 @@ static void s390_iommu_flush_iotlb_all(struct iommu_domain *domain)
 		atomic64_inc(&s390_domain->ctrs.global_rpcits);
 		rc = zpci_refresh_trans((u64)zdev->fh << 32, zdev->start_dma,
 					zdev->end_dma - zdev->start_dma + 1);
+		if (rc == -ENOMEM) {
+			iommu_dma_flush_fq(domain->iova_cookie);
+			rc = 0;
+		}
 		if (rc)
 			break;
 	}
@@ -525,6 +529,10 @@ static void s390_iommu_iotlb_sync(struct iommu_domain *domain,
 		atomic64_inc(&s390_domain->ctrs.sync_rpcits);
 		rc = zpci_refresh_trans((u64)zdev->fh << 32, gather->start,
 					size);
+		if (rc == -ENOMEM) {
+			iommu_dma_flush_fq(domain->iova_cookie);
+			rc = 0;
+		}
 		if (rc)
 			break;
 	}
@@ -545,6 +553,10 @@ static void s390_iommu_iotlb_sync_map(struct iommu_domain *domain,
 		atomic64_inc(&s390_domain->ctrs.sync_map_rpcits);
 		rc = zpci_refresh_trans((u64)zdev->fh << 32,
 					iova, size);
+		if (rc == -ENOMEM) {
+			iommu_dma_flush_fq(domain->iova_cookie);
+			rc = 0;
+		}
 		if (rc)
 			break;
 	}
-- 
2.34.1
