Message-ID: <1384853593-32202-9-git-send-email-hdoyu@nvidia.com>
Date:	Tue, 19 Nov 2013 11:33:12 +0200
From:	Hiroshi Doyu <hdoyu@...dia.com>
To:	<swarren@...dia.com>, <will.deacon@....com>,
	<grant.likely@...aro.org>, <thierry.reding@...il.com>,
	<swarren@...dotorg.org>, <galak@...eaurora.org>
CC:	Hiroshi Doyu <hdoyu@...dia.com>, <mark.rutland@....com>,
	<devicetree@...r.kernel.org>, <iommu@...ts.linux-foundation.org>,
	<linux-tegra@...r.kernel.org>,
	<linux-arm-kernel@...ts.infradead.org>,
	<lorenzo.pieralisi@....com>, <linux-kernel@...r.kernel.org>
Subject: [PATCHv5 8/9] iommu/tegra: smmu: Rename hwgrp -> swgroups

Use the correct term for SWGROUP-related variables and macros.

A "swgroup" is a collection of "memory clients", where a "memory
client" usually represents a hardware accelerator (HWA) such as a
GPU. A struct device can belong to multiple swgroups at once, which
is why the plural "swgroups" is used here; "swgroups" is also the
term used in the Tegra TRM. Rename to match the TRM.
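
For reference, the renamed SWGROUPS_ASID_REG() macro below maps a
swgroup ID to its own u32 ASID register at a fixed stride from
SMMU_ASID_BASE. A minimal standalone sketch of that mapping (the
SMMU_ASID_BASE value here is illustrative only, not the driver's
real register base):

	#include <stdio.h>
	#include <stdint.h>

	#define SMMU_ASID_BASE 0x238	/* illustrative value only */
	#define SWGROUPS_ASID_REG(x) ((x) * sizeof(uint32_t) + SMMU_ASID_BASE)

	int main(void)
	{
		/* Each swgroup ID selects its own u32 ASID register. */
		for (unsigned int i = 0; i < 4; i++)
			printf("swgroup %u -> ASID reg offset 0x%zx\n",
			       i, (size_t)SWGROUPS_ASID_REG(i));
		return 0;
	}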

Signed-off-by: Hiroshi Doyu <hdoyu@...dia.com>
---
v4:
New for v4
---
 drivers/iommu/tegra-smmu.c | 36 ++++++++++++++++++------------------
 1 file changed, 18 insertions(+), 18 deletions(-)

diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index c2ed075..003a491 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -179,12 +179,12 @@ enum {
 
 #define NUM_SMMU_REG_BANKS	3
 
-#define smmu_client_enable_hwgrp(c, m)	smmu_client_set_hwgrp(c, m, 1)
-#define smmu_client_disable_hwgrp(c)	smmu_client_set_hwgrp(c, 0, 0)
-#define __smmu_client_enable_hwgrp(c, m) __smmu_client_set_hwgrp(c, m, 1)
-#define __smmu_client_disable_hwgrp(c)	__smmu_client_set_hwgrp(c, 0, 0)
+#define smmu_client_enable_swgroups(c, m) smmu_client_set_swgroups(c, m, 1)
+#define smmu_client_disable_swgroups(c) smmu_client_set_swgroups(c, 0, 0)
+#define __smmu_client_enable_swgroups(c, m) __smmu_client_set_swgroups(c, m, 1)
+#define __smmu_client_disable_swgroups(c) __smmu_client_set_swgroups(c, 0, 0)
 
-#define HWGRP_ASID_REG(x) ((x) * sizeof(u32) + SMMU_ASID_BASE)
+#define SWGROUPS_ASID_REG(x) ((x) * sizeof(u32) + SMMU_ASID_BASE)
 
 /*
  * Per client for address space
@@ -195,7 +195,7 @@ struct smmu_client {
 	struct device		*dev;
 	struct list_head	list;
 	struct smmu_as		*as;
-	unsigned long		hwgrp[2];
+	unsigned long		swgroups[2];
 };
 
 /*
@@ -377,7 +377,7 @@ static int register_smmu_client(struct smmu_device *smmu,
 
 	client->dev = dev;
 	client->of_node = dev->of_node;
-	memcpy(client->hwgrp, swgroups, sizeof(u64));
+	memcpy(client->swgroups, swgroups, sizeof(u64));
 	return insert_smmu_client(smmu, client);
 }
 
@@ -403,7 +403,7 @@ static int smmu_of_get_swgroups(struct device *dev, unsigned long *swgroups)
 	return -ENODEV;
 }
 
-static int __smmu_client_set_hwgrp(struct smmu_client *c,
+static int __smmu_client_set_swgroups(struct smmu_client *c,
 				   unsigned long *map, int on)
 {
 	int i;
@@ -412,10 +412,10 @@ static int __smmu_client_set_hwgrp(struct smmu_client *c,
 	struct smmu_device *smmu = as->smmu;
 
 	if (!on)
-		map = c->hwgrp;
+		map = c->swgroups;
 
 	for_each_set_bit(i, map, TEGRA_SWGROUP_MAX) {
-		offs = HWGRP_ASID_REG(i);
+		offs = SWGROUPS_ASID_REG(i);
 		val = smmu_read(smmu, offs);
 		if (on) {
 			if (val) {
@@ -425,7 +425,7 @@ static int __smmu_client_set_hwgrp(struct smmu_client *c,
 			}
 
 			val = mask;
-			memcpy(c->hwgrp, map, sizeof(u64));
+			memcpy(c->swgroups, map, sizeof(u64));
 		} else {
 			WARN_ON((val & mask) == mask);
 			val &= ~mask;
@@ -438,7 +438,7 @@ skip:
 	return 0;
 }
 
-static int smmu_client_set_hwgrp(struct smmu_client *c,
+static int smmu_client_set_swgroups(struct smmu_client *c,
 				 unsigned long *map, int on)
 {
 	int err;
@@ -447,7 +447,7 @@ static int smmu_client_set_hwgrp(struct smmu_client *c,
 	struct smmu_device *smmu = as->smmu;
 
 	spin_lock_irqsave(&smmu->lock, flags);
-	err = __smmu_client_set_hwgrp(c, map, on);
+	err = __smmu_client_set_swgroups(c, map, on);
 	spin_unlock_irqrestore(&smmu->lock, flags);
 	return err;
 }
@@ -487,7 +487,7 @@ static int smmu_setup_regs(struct smmu_device *smmu)
 		smmu_write(smmu, val, SMMU_PTB_DATA);
 
 		list_for_each_entry(c, &as->client, list)
-			__smmu_client_set_hwgrp(c, c->hwgrp, 1);
+			__smmu_client_set_swgroups(c, c->swgroups, 1);
 	}
 
 	smmu_write(smmu, smmu->translation_enable_0, SMMU_TRANSLATION_ENABLE_0);
@@ -815,7 +815,7 @@ static int smmu_iommu_attach_dev(struct iommu_domain *domain,
 		return -ENOMEM;
 
 	client->as = as;
-	err = smmu_client_enable_hwgrp(client, client->hwgrp);
+	err = smmu_client_enable_swgroups(client, client->swgroups);
 	if (err)
 		return -EINVAL;
 
@@ -835,7 +835,7 @@ static int smmu_iommu_attach_dev(struct iommu_domain *domain,
 	 * Reserve "page zero" for AVP vectors using a common dummy
 	 * page.
 	 */
-	if (test_bit(TEGRA_SWGROUP_AVPC, client->hwgrp)) {
+	if (test_bit(TEGRA_SWGROUP_AVPC, client->swgroups)) {
 		struct page *page;
 
 		page = as->smmu->avp_vector_page;
@@ -848,7 +848,7 @@ static int smmu_iommu_attach_dev(struct iommu_domain *domain,
 	return 0;
 
 err_client:
-	smmu_client_disable_hwgrp(client);
+	smmu_client_disable_swgroups(client);
 	spin_unlock(&as->client_lock);
 	return err;
 }
@@ -864,7 +864,7 @@ static void smmu_iommu_detach_dev(struct iommu_domain *domain,
 
 	list_for_each_entry(c, &as->client, list) {
 		if (c->dev == dev) {
-			smmu_client_disable_hwgrp(c);
+			smmu_client_disable_swgroups(c);
 			list_del(&c->list);
 			c->as = NULL;
 			dev_dbg(smmu->dev,
-- 
1.8.1.5

