Message-Id: <20200929045247.15596-3-nicoleotsuka@gmail.com>
Date: Mon, 28 Sep 2020 21:52:47 -0700
From: Nicolin Chen <nicoleotsuka@...il.com>
To: thierry.reding@...il.com, joro@...tes.org
Cc: vdumpa@...dia.com, jonathanh@...dia.com, digetx@...il.com,
linux-tegra@...r.kernel.org, iommu@...ts.linux-foundation.org,
linux-kernel@...r.kernel.org
Subject: [PATCH v3 2/2] iommu/tegra-smmu: Expand mutex protection range
This is used to protect the potential race condition on use_count,
since probes of client drivers, which call attach_dev(), may run
concurrently.
Signed-off-by: Nicolin Chen <nicoleotsuka@...il.com>
---
Changelog
v2->v3:
* Renamed label "err_unlock" to "unlock"
v1->v2:
* N/A
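
Side note (not part of the patch): below is a minimal userspace sketch of
the race this change guards against. Two client-driver probes attach
concurrently, both observe use_count == 0, and both run the one-time setup.
The function and variable names are illustrative only and are not the
driver's actual API; the real fix is the smmu->lock usage in the diff.

#include <pthread.h>
#include <stdio.h>

static unsigned int use_count;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int setup_calls;

static void prepare(void)
{
	pthread_mutex_lock(&lock);	/* analogous to mutex_lock(&smmu->lock) */
	if (use_count > 0) {
		use_count++;
		goto unlock;
	}
	setup_calls++;			/* one-time setup must run exactly once */
	use_count++;
unlock:
	pthread_mutex_unlock(&lock);	/* analogous to mutex_unlock(&smmu->lock) */
}

static void *probe(void *arg)
{
	(void)arg;
	prepare();			/* models attach_dev() from a driver probe */
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, probe, NULL);
	pthread_create(&b, NULL, probe, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	/*
	 * With the lock: use_count == 2 and setup_calls == 1.
	 * Without it, both threads can see use_count == 0 and
	 * setup_calls can end up as 2 (double initialization).
	 */
	printf("use_count=%u setup_calls=%d\n", use_count, setup_calls);
	return 0;
}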
drivers/iommu/tegra-smmu.c | 34 +++++++++++++++++++++-------------
1 file changed, 21 insertions(+), 13 deletions(-)
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index ec4c9dafff95..6a3ecc334481 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -256,26 +256,19 @@ static int tegra_smmu_alloc_asid(struct tegra_smmu *smmu, unsigned int *idp)
{
unsigned long id;
- mutex_lock(&smmu->lock);
-
id = find_first_zero_bit(smmu->asids, smmu->soc->num_asids);
- if (id >= smmu->soc->num_asids) {
- mutex_unlock(&smmu->lock);
+ if (id >= smmu->soc->num_asids)
return -ENOSPC;
- }
set_bit(id, smmu->asids);
*idp = id;
- mutex_unlock(&smmu->lock);
return 0;
}
static void tegra_smmu_free_asid(struct tegra_smmu *smmu, unsigned int id)
{
- mutex_lock(&smmu->lock);
clear_bit(id, smmu->asids);
- mutex_unlock(&smmu->lock);
}
static bool tegra_smmu_capable(enum iommu_cap cap)
@@ -420,17 +413,21 @@ static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
struct tegra_smmu_as *as)
{
u32 value;
- int err;
+ int err = 0;
+
+ mutex_lock(&smmu->lock);
if (as->use_count > 0) {
as->use_count++;
- return 0;
+ goto unlock;
}
as->pd_dma = dma_map_page(smmu->dev, as->pd, 0, SMMU_SIZE_PD,
DMA_TO_DEVICE);
- if (dma_mapping_error(smmu->dev, as->pd_dma))
- return -ENOMEM;
+ if (dma_mapping_error(smmu->dev, as->pd_dma)) {
+ err = -ENOMEM;
+ goto unlock;
+ }
/* We can't handle 64-bit DMA addresses */
if (!smmu_dma_addr_valid(smmu, as->pd_dma)) {
@@ -453,24 +450,35 @@ static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
as->smmu = smmu;
as->use_count++;
+ mutex_unlock(&smmu->lock);
+
return 0;
err_unmap:
dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
+unlock:
+ mutex_unlock(&smmu->lock);
+
return err;
}
static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
struct tegra_smmu_as *as)
{
- if (--as->use_count > 0)
+ mutex_lock(&smmu->lock);
+
+ if (--as->use_count > 0) {
+ mutex_unlock(&smmu->lock);
return;
+ }
tegra_smmu_free_asid(smmu, as->id);
dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
as->smmu = NULL;
+
+ mutex_unlock(&smmu->lock);
}
static int tegra_smmu_attach_dev(struct iommu_domain *domain,
--
2.17.1