Message-Id: <20221103195154.21495-6-semen.protsenko@linaro.org>
Date:   Thu,  3 Nov 2022 20:51:53 +0100
From:   Sam Protsenko <semen.protsenko@...aro.org>
To:     Marek Szyprowski <m.szyprowski@...sung.com>,
        Krzysztof Kozlowski <krzysztof.kozlowski@...aro.org>
Cc:     Joerg Roedel <joro@...tes.org>, Will Deacon <will@...nel.org>,
        Robin Murphy <robin.murphy@....com>,
        Sumit Semwal <sumit.semwal@...aro.org>,
        Alim Akhtar <alim.akhtar@...sung.com>,
        Janghyuck Kim <janghyuck.kim@...sung.com>,
        Cho KyongHo <pullip.cho@...sung.com>,
        Daniel Mentz <danielmentz@...gle.com>,
        David Virag <virag.david003@...il.com>, iommu@...ts.linux.dev,
        linux-kernel@...r.kernel.org, linux-arm-kernel@...ts.infradead.org,
        linux-samsung-soc@...r.kernel.org
Subject: [PATCH v2 5/6] iommu/exynos: Rearrange the platform driver code

Move the platform_driver code to the bottom of the driver, as that is the
canonical layout for platform drivers. This also removes the need for the
forward declaration of exynos_iommu_ops. No functional change.

Signed-off-by: Sam Protsenko <semen.protsenko@...aro.org>
---
Changes in v2:
  - Addressed a checkpatch style suggestion reported with the "--strict" flag
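
For context, a minimal sketch of the layout convention this patch moves
towards (the names below are purely illustrative, not taken from
exynos-iommu.c): callbacks and match table come first, and the
platform_driver glue sits at the bottom of the file, after everything it
references, so no forward declarations are needed:

  #include <linux/module.h>
  #include <linux/of.h>
  #include <linux/platform_device.h>

  /* Core callbacks come first... */
  static int foo_probe(struct platform_device *pdev)
  {
  	/* resource/clock/irq setup would go here */
  	return 0;
  }

  static const struct of_device_id foo_of_match[] = {
  	{ .compatible = "vendor,foo" },
  	{ /* sentinel */ },
  };
  MODULE_DEVICE_TABLE(of, foo_of_match);

  /* ...and the platform_driver registration closes the file. */
  static struct platform_driver foo_driver = {
  	.probe	= foo_probe,
  	.driver	= {
  		.name		= "foo",
  		.of_match_table	= foo_of_match,
  	},
  };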

 drivers/iommu/exynos-iommu.c | 361 +++++++++++++++++------------------
 1 file changed, 180 insertions(+), 181 deletions(-)

diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index cd3f74e638f0..c995cf8294cf 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -641,187 +641,6 @@ static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
 	spin_unlock_irqrestore(&data->lock, flags);
 }
 
-static const struct iommu_ops exynos_iommu_ops;
-
-static int exynos_sysmmu_probe(struct platform_device *pdev)
-{
-	int irq, ret;
-	struct device *dev = &pdev->dev;
-	struct sysmmu_drvdata *data;
-	struct resource *res;
-
-	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
-	if (!data)
-		return -ENOMEM;
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	data->sfrbase = devm_ioremap_resource(dev, res);
-	if (IS_ERR(data->sfrbase))
-		return PTR_ERR(data->sfrbase);
-
-	irq = platform_get_irq(pdev, 0);
-	if (irq <= 0)
-		return irq;
-
-	ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0,
-				dev_name(dev), data);
-	if (ret) {
-		dev_err(dev, "Unabled to register handler of irq %d\n", irq);
-		return ret;
-	}
-
-	data->clk = devm_clk_get(dev, "sysmmu");
-	if (PTR_ERR(data->clk) == -ENOENT)
-		data->clk = NULL;
-	else if (IS_ERR(data->clk))
-		return PTR_ERR(data->clk);
-
-	data->aclk = devm_clk_get(dev, "aclk");
-	if (PTR_ERR(data->aclk) == -ENOENT)
-		data->aclk = NULL;
-	else if (IS_ERR(data->aclk))
-		return PTR_ERR(data->aclk);
-
-	data->pclk = devm_clk_get(dev, "pclk");
-	if (PTR_ERR(data->pclk) == -ENOENT)
-		data->pclk = NULL;
-	else if (IS_ERR(data->pclk))
-		return PTR_ERR(data->pclk);
-
-	if (!data->clk && (!data->aclk || !data->pclk)) {
-		dev_err(dev, "Failed to get device clock(s)!\n");
-		return -ENOENT;
-	}
-
-	data->clk_master = devm_clk_get(dev, "master");
-	if (PTR_ERR(data->clk_master) == -ENOENT)
-		data->clk_master = NULL;
-	else if (IS_ERR(data->clk_master))
-		return PTR_ERR(data->clk_master);
-
-	data->sysmmu = dev;
-	spin_lock_init(&data->lock);
-
-	__sysmmu_get_version(data);
-
-	ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL,
-				     dev_name(data->sysmmu));
-	if (ret)
-		return ret;
-
-	ret = iommu_device_register(&data->iommu, &exynos_iommu_ops, dev);
-	if (ret)
-		goto err_iommu_register;
-
-	platform_set_drvdata(pdev, data);
-
-	if (PG_ENT_SHIFT < 0) {
-		if (MMU_MAJ_VER(data->version) < 5) {
-			PG_ENT_SHIFT = SYSMMU_PG_ENT_SHIFT;
-			LV1_PROT = SYSMMU_LV1_PROT;
-			LV2_PROT = SYSMMU_LV2_PROT;
-		} else {
-			PG_ENT_SHIFT = SYSMMU_V5_PG_ENT_SHIFT;
-			LV1_PROT = SYSMMU_V5_LV1_PROT;
-			LV2_PROT = SYSMMU_V5_LV2_PROT;
-		}
-	}
-
-	if (MMU_MAJ_VER(data->version) >= 5) {
-		ret = dma_set_mask(dev, DMA_BIT_MASK(36));
-		if (ret) {
-			dev_err(dev, "Unable to set DMA mask: %d\n", ret);
-			goto err_dma_set_mask;
-		}
-	}
-
-	/*
-	 * use the first registered sysmmu device for performing
-	 * dma mapping operations on iommu page tables (cpu cache flush)
-	 */
-	if (!dma_dev)
-		dma_dev = &pdev->dev;
-
-	pm_runtime_enable(dev);
-
-	return 0;
-
-err_dma_set_mask:
-	iommu_device_unregister(&data->iommu);
-err_iommu_register:
-	iommu_device_sysfs_remove(&data->iommu);
-	return ret;
-}
-
-static void exynos_sysmmu_shutdown(struct platform_device *pdev)
-{
-	struct sysmmu_drvdata *data = platform_get_drvdata(pdev);
-	struct device *dev = &pdev->dev;
-	int irq = platform_get_irq(pdev, 0);
-
-	devm_free_irq(dev, irq, data);
-	pm_runtime_force_suspend(dev);
-}
-
-static int __maybe_unused exynos_sysmmu_suspend(struct device *dev)
-{
-	struct sysmmu_drvdata *data = dev_get_drvdata(dev);
-	struct device *master = data->master;
-
-	if (master) {
-		struct exynos_iommu_owner *owner = dev_iommu_priv_get(master);
-
-		mutex_lock(&owner->rpm_lock);
-		if (data->domain) {
-			dev_dbg(data->sysmmu, "saving state\n");
-			__sysmmu_disable(data);
-		}
-		mutex_unlock(&owner->rpm_lock);
-	}
-	return 0;
-}
-
-static int __maybe_unused exynos_sysmmu_resume(struct device *dev)
-{
-	struct sysmmu_drvdata *data = dev_get_drvdata(dev);
-	struct device *master = data->master;
-
-	if (master) {
-		struct exynos_iommu_owner *owner = dev_iommu_priv_get(master);
-
-		mutex_lock(&owner->rpm_lock);
-		if (data->domain) {
-			dev_dbg(data->sysmmu, "restoring state\n");
-			__sysmmu_enable(data);
-		}
-		mutex_unlock(&owner->rpm_lock);
-	}
-	return 0;
-}
-
-static const struct dev_pm_ops sysmmu_pm_ops = {
-	SET_RUNTIME_PM_OPS(exynos_sysmmu_suspend, exynos_sysmmu_resume, NULL)
-	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
-				pm_runtime_force_resume)
-};
-
-static const struct of_device_id sysmmu_of_match[] = {
-	{ .compatible	= "samsung,exynos-sysmmu", },
-	{ },
-};
-MODULE_DEVICE_TABLE(of, sysmmu_of_match);
-
-static struct platform_driver exynos_sysmmu_driver = {
-	.probe	= exynos_sysmmu_probe,
-	.shutdown = exynos_sysmmu_shutdown,
-	.driver	= {
-		.name		= "exynos-sysmmu",
-		.of_match_table	= sysmmu_of_match,
-		.pm		= &sysmmu_pm_ops,
-		.suppress_bind_attrs = true,
-	}
-};
-
 static inline void exynos_iommu_set_pte(sysmmu_pte_t *ent, sysmmu_pte_t val)
 {
 	dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent), sizeof(*ent),
@@ -1428,6 +1247,185 @@ static const struct iommu_ops exynos_iommu_ops = {
 	}
 };
 
+static int exynos_sysmmu_probe(struct platform_device *pdev)
+{
+	int irq, ret;
+	struct device *dev = &pdev->dev;
+	struct sysmmu_drvdata *data;
+	struct resource *res;
+
+	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	data->sfrbase = devm_ioremap_resource(dev, res);
+	if (IS_ERR(data->sfrbase))
+		return PTR_ERR(data->sfrbase);
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq <= 0)
+		return irq;
+
+	ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0, dev_name(dev),
+			       data);
+	if (ret) {
+		dev_err(dev, "Unabled to register handler of irq %d\n", irq);
+		return ret;
+	}
+
+	data->clk = devm_clk_get(dev, "sysmmu");
+	if (PTR_ERR(data->clk) == -ENOENT)
+		data->clk = NULL;
+	else if (IS_ERR(data->clk))
+		return PTR_ERR(data->clk);
+
+	data->aclk = devm_clk_get(dev, "aclk");
+	if (PTR_ERR(data->aclk) == -ENOENT)
+		data->aclk = NULL;
+	else if (IS_ERR(data->aclk))
+		return PTR_ERR(data->aclk);
+
+	data->pclk = devm_clk_get(dev, "pclk");
+	if (PTR_ERR(data->pclk) == -ENOENT)
+		data->pclk = NULL;
+	else if (IS_ERR(data->pclk))
+		return PTR_ERR(data->pclk);
+
+	if (!data->clk && (!data->aclk || !data->pclk)) {
+		dev_err(dev, "Failed to get device clock(s)!\n");
+		return -ENOENT;
+	}
+
+	data->clk_master = devm_clk_get(dev, "master");
+	if (PTR_ERR(data->clk_master) == -ENOENT)
+		data->clk_master = NULL;
+	else if (IS_ERR(data->clk_master))
+		return PTR_ERR(data->clk_master);
+
+	data->sysmmu = dev;
+	spin_lock_init(&data->lock);
+
+	__sysmmu_get_version(data);
+
+	ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL,
+				     dev_name(data->sysmmu));
+	if (ret)
+		return ret;
+
+	ret = iommu_device_register(&data->iommu, &exynos_iommu_ops, dev);
+	if (ret)
+		goto err_iommu_register;
+
+	platform_set_drvdata(pdev, data);
+
+	if (PG_ENT_SHIFT < 0) {
+		if (MMU_MAJ_VER(data->version) < 5) {
+			PG_ENT_SHIFT = SYSMMU_PG_ENT_SHIFT;
+			LV1_PROT = SYSMMU_LV1_PROT;
+			LV2_PROT = SYSMMU_LV2_PROT;
+		} else {
+			PG_ENT_SHIFT = SYSMMU_V5_PG_ENT_SHIFT;
+			LV1_PROT = SYSMMU_V5_LV1_PROT;
+			LV2_PROT = SYSMMU_V5_LV2_PROT;
+		}
+	}
+
+	if (MMU_MAJ_VER(data->version) >= 5) {
+		ret = dma_set_mask(dev, DMA_BIT_MASK(36));
+		if (ret) {
+			dev_err(dev, "Unable to set DMA mask: %d\n", ret);
+			goto err_dma_set_mask;
+		}
+	}
+
+	/*
+	 * use the first registered sysmmu device for performing
+	 * dma mapping operations on iommu page tables (cpu cache flush)
+	 */
+	if (!dma_dev)
+		dma_dev = &pdev->dev;
+
+	pm_runtime_enable(dev);
+
+	return 0;
+
+err_dma_set_mask:
+	iommu_device_unregister(&data->iommu);
+err_iommu_register:
+	iommu_device_sysfs_remove(&data->iommu);
+	return ret;
+}
+
+static void exynos_sysmmu_shutdown(struct platform_device *pdev)
+{
+	struct sysmmu_drvdata *data = platform_get_drvdata(pdev);
+	struct device *dev = &pdev->dev;
+	int irq = platform_get_irq(pdev, 0);
+
+	devm_free_irq(dev, irq, data);
+	pm_runtime_force_suspend(dev);
+}
+
+static int __maybe_unused exynos_sysmmu_suspend(struct device *dev)
+{
+	struct sysmmu_drvdata *data = dev_get_drvdata(dev);
+	struct device *master = data->master;
+
+	if (master) {
+		struct exynos_iommu_owner *owner = dev_iommu_priv_get(master);
+
+		mutex_lock(&owner->rpm_lock);
+		if (data->domain) {
+			dev_dbg(data->sysmmu, "saving state\n");
+			__sysmmu_disable(data);
+		}
+		mutex_unlock(&owner->rpm_lock);
+	}
+	return 0;
+}
+
+static int __maybe_unused exynos_sysmmu_resume(struct device *dev)
+{
+	struct sysmmu_drvdata *data = dev_get_drvdata(dev);
+	struct device *master = data->master;
+
+	if (master) {
+		struct exynos_iommu_owner *owner = dev_iommu_priv_get(master);
+
+		mutex_lock(&owner->rpm_lock);
+		if (data->domain) {
+			dev_dbg(data->sysmmu, "restoring state\n");
+			__sysmmu_enable(data);
+		}
+		mutex_unlock(&owner->rpm_lock);
+	}
+	return 0;
+}
+
+static const struct dev_pm_ops sysmmu_pm_ops = {
+	SET_RUNTIME_PM_OPS(exynos_sysmmu_suspend, exynos_sysmmu_resume, NULL)
+	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+				pm_runtime_force_resume)
+};
+
+static const struct of_device_id sysmmu_of_match[] = {
+	{ .compatible	= "samsung,exynos-sysmmu", },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, sysmmu_of_match);
+
+static struct platform_driver exynos_sysmmu_driver = {
+	.probe	= exynos_sysmmu_probe,
+	.shutdown = exynos_sysmmu_shutdown,
+	.driver	= {
+		.name		= "exynos-sysmmu",
+		.of_match_table	= sysmmu_of_match,
+		.pm		= &sysmmu_pm_ops,
+		.suppress_bind_attrs = true,
+	}
+};
+
 static int __init exynos_iommu_init(void)
 {
 	struct device_node *np;
@@ -1461,6 +1459,7 @@ static int __init exynos_iommu_init(void)
 	}
 
 	return 0;
+
 err_zero_lv2:
 	platform_driver_unregister(&exynos_sysmmu_driver);
 err_reg_driver:
-- 
2.35.1
