Message-Id: <5af5e77102ca52576cb96816f0abcf6398820055.1571245656.git.robin.murphy@arm.com>
Date: Wed, 16 Oct 2019 18:07:36 +0100
From: Robin Murphy <robin.murphy@....com>
To: joro@...tes.org
Cc: iommu@...ts.linux-foundation.org, maz@...nel.org,
julien.grall@....com, linux-kernel@...r.kernel.org,
Qian Cai <cai@....pw>
Subject: [PATCH] iommu/dma: Relax locking in iommu_dma_prepare_msi()

Since commit ece6e6f0218b ("iommu/dma-iommu: Split iommu_dma_map_msi_msg()
in two parts"), iommu_dma_prepare_msi() should no longer have to worry
about preempting itself, nor being called in atomic context at all. Thus
we can downgrade the IRQ-safe locking to a simple mutex to avoid angering
the new might_sleep() check in iommu_map().
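
For illustration, the change boils down to the pattern below; this is a
minimal sketch with made-up names (example_lock/example_mutex), not the
actual driver code. Once the critical section is allowed to sleep, a
plain mutex suffices and allocations inside it can use GFP_KERNEL
instead of GFP_ATOMIC:

  #include <linux/mutex.h>
  #include <linux/spinlock.h>

  /* Before: IRQ-safe lock, so the critical section must not sleep */
  static DEFINE_SPINLOCK(example_lock);

  static void example_before(void)
  {
          unsigned long flags;

          spin_lock_irqsave(&example_lock, flags);
          /* ... GFP_ATOMIC only; a sleeping iommu_map() now warns ... */
          spin_unlock_irqrestore(&example_lock, flags);
  }

  /* After: plain mutex, so the critical section may sleep */
  static DEFINE_MUTEX(example_mutex);

  static void example_after(void)
  {
          mutex_lock(&example_mutex);
          /* ... GFP_KERNEL and a sleeping iommu_map() are both fine ... */
          mutex_unlock(&example_mutex);
  }
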
Reported-by: Qian Cai <cai@....pw>
Signed-off-by: Robin Murphy <robin.murphy@....com>
---
drivers/iommu/dma-iommu.c | 16 +++++++---------
1 file changed, 7 insertions(+), 9 deletions(-)

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index f321279baf9e..4ba3c49de017 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -19,6 +19,7 @@
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/mm.h>
+#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
@@ -43,7 +44,6 @@ struct iommu_dma_cookie {
dma_addr_t msi_iova;
};
struct list_head msi_page_list;
- spinlock_t msi_lock;

/* Domain for flush queue callback; NULL if flush queue not in use */
struct iommu_domain *fq_domain;
@@ -62,7 +62,6 @@ static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)

cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
if (cookie) {
- spin_lock_init(&cookie->msi_lock);
INIT_LIST_HEAD(&cookie->msi_page_list);
cookie->type = type;
}
@@ -1150,7 +1149,7 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
if (msi_page->phys == msi_addr)
return msi_page;

- msi_page = kzalloc(sizeof(*msi_page), GFP_ATOMIC);
+ msi_page = kzalloc(sizeof(*msi_page), GFP_KERNEL);
if (!msi_page)
return NULL;

@@ -1180,7 +1179,7 @@ int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
struct iommu_dma_cookie *cookie;
struct iommu_dma_msi_page *msi_page;
- unsigned long flags;
+ static DEFINE_MUTEX(msi_prepare_lock);

if (!domain || !domain->iova_cookie) {
desc->iommu_cookie = NULL;
@@ -1190,13 +1189,12 @@ int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
cookie = domain->iova_cookie;

/*
- * We disable IRQs to rule out a possible inversion against
- * irq_desc_lock if, say, someone tries to retarget the affinity
- * of an MSI from within an IPI handler.
+ * In fact we should already be serialised by irq_domain_mutex
+ * here, but that's way subtle, so belt and braces...
*/
- spin_lock_irqsave(&cookie->msi_lock, flags);
+ mutex_lock(&msi_prepare_lock);
msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
- spin_unlock_irqrestore(&cookie->msi_lock, flags);
+ mutex_unlock(&msi_prepare_lock);

msi_desc_set_iommu_cookie(desc, msi_page);

--
2.21.0.dirty