Message-Id: <20190711114959.15675-9-eric.auger@redhat.com>
Date: Thu, 11 Jul 2019 13:49:53 +0200
From: Eric Auger <eric.auger@...hat.com>
To: eric.auger.pro@...il.com, eric.auger@...hat.com,
iommu@...ts.linux-foundation.org, linux-kernel@...r.kernel.org,
kvm@...r.kernel.org, kvmarm@...ts.cs.columbia.edu, joro@...tes.org,
alex.williamson@...hat.com, jacob.jun.pan@...ux.intel.com,
yi.l.liu@...el.com, jean-philippe.brucker@....com,
will.deacon@....com, robin.murphy@....com
Cc: kevin.tian@...el.com, ashok.raj@...el.com, marc.zyngier@....com,
peter.maydell@...aro.org, vincent.stehle@....com,
zhangfei.gao@...il.com
Subject: [PATCH v9 08/14] iommu/smmuv3: Introduce __arm_smmu_tlb_inv_asid/s1_range_nosync

Introduce helpers to invalidate a given asid/vmid or to invalidate
address ranges associated with a given asid/vmid.

The S1 helpers will be used to invalidate stage 1 caches upon
userspace request, in nested mode.

Signed-off-by: Eric Auger <eric.auger@...hat.com>
---
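Note: below is a minimal sketch of how a caller might drive the new S1
helper from a nested-mode stage 1 invalidation path. It only uses the
helpers and fields visible in this patch; the caller name
example_inv_s1_range and the struct example_inv_info (with its asid,
iova, size, granule and leaf fields) are hypothetical, for illustration
only:

	/*
	 * Hypothetical caller: invalidate a stage 1 range on behalf of
	 * userspace for a nested domain. "inv" stands for a
	 * guest-provided invalidation descriptor (hypothetical type).
	 * size is assumed to be a nonzero multiple of granule, as
	 * required by the do/while loop in the helper.
	 */
	static void example_inv_s1_range(struct arm_smmu_domain *smmu_domain,
					 struct example_inv_info *inv)
	{
		/* guest stage 1 entries are tagged with the stage 2 VMID */
		__arm_smmu_tlb_inv_s1_range_nosync(smmu_domain,
						   smmu_domain->s2_cfg->vmid,
						   inv->asid, inv->iova,
						   inv->size, inv->granule,
						   inv->leaf);
		/* the _nosync variant requires an explicit CMD_SYNC */
		arm_smmu_cmdq_issue_sync(smmu_domain->smmu);
	}
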
drivers/iommu/arm-smmu-v3.c | 98 ++++++++++++++++++++++++++++---------
1 file changed, 74 insertions(+), 24 deletions(-)
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 8785f26e669c..8b3c35ea58b2 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -1611,20 +1611,15 @@ static void arm_smmu_tlb_sync(void *cookie)
 	arm_smmu_cmdq_issue_sync(smmu_domain->smmu);
 }
 
-static void arm_smmu_tlb_inv_context(void *cookie)
+static void __arm_smmu_tlb_inv_asid(struct arm_smmu_domain *smmu_domain,
+				    u16 vmid, u16 asid)
 {
-	struct arm_smmu_domain *smmu_domain = cookie;
 	struct arm_smmu_device *smmu = smmu_domain->smmu;
-	struct arm_smmu_cmdq_ent cmd;
+	struct arm_smmu_cmdq_ent cmd = {};
 
-	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
-		cmd.opcode	= CMDQ_OP_TLBI_NH_ASID;
-		cmd.tlbi.asid	= smmu_domain->s1_cfg->cd.asid;
-		cmd.tlbi.vmid	= 0;
-	} else {
-		cmd.opcode	= CMDQ_OP_TLBI_S12_VMALL;
-		cmd.tlbi.vmid	= smmu_domain->s2_cfg->vmid;
-	}
+	cmd.opcode = CMDQ_OP_TLBI_NH_ASID;
+	cmd.tlbi.vmid = vmid;
+	cmd.tlbi.asid = asid;
 
 	/*
 	 * NOTE: when io-pgtable is in non-strict mode, we may get here with
@@ -1636,32 +1631,87 @@ static void arm_smmu_tlb_inv_context(void *cookie)
 	arm_smmu_cmdq_issue_sync(smmu);
 }
 
-static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
-					  size_t granule, bool leaf, void *cookie)
+static void __arm_smmu_tlb_inv_vmid(struct arm_smmu_domain *smmu_domain,
+				    u16 vmid)
+{
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
+	struct arm_smmu_cmdq_ent cmd = {};
+
+	cmd.opcode = CMDQ_OP_TLBI_S12_VMALL;
+	cmd.tlbi.vmid = vmid;
+
+	/* See DSB related comment in __arm_smmu_tlb_inv_asid */
+	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
+	arm_smmu_cmdq_issue_sync(smmu);
+}
+
+static void arm_smmu_tlb_inv_context(void *cookie)
 {
 	struct arm_smmu_domain *smmu_domain = cookie;
-	struct arm_smmu_device *smmu = smmu_domain->smmu;
-	struct arm_smmu_cmdq_ent cmd = {
-		.tlbi = {
-			.leaf	= leaf,
-			.addr	= iova,
-		},
-	};
 
 	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
-		cmd.opcode	= CMDQ_OP_TLBI_NH_VA;
-		cmd.tlbi.asid	= smmu_domain->s1_cfg->cd.asid;
+		__arm_smmu_tlb_inv_asid(smmu_domain, 0,
+					smmu_domain->s1_cfg->cd.asid);
 	} else {
-		cmd.opcode	= CMDQ_OP_TLBI_S2_IPA;
-		cmd.tlbi.vmid	= smmu_domain->s2_cfg->vmid;
+		__arm_smmu_tlb_inv_vmid(smmu_domain,
+					smmu_domain->s2_cfg->vmid);
 	}
+}
+static void
+__arm_smmu_tlb_inv_s1_range_nosync(struct arm_smmu_domain *smmu_domain,
+				   u16 vmid, u16 asid, unsigned long iova,
+				   size_t size, size_t granule, bool leaf)
+{
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
+	struct arm_smmu_cmdq_ent cmd = {};
+
+	cmd.opcode = CMDQ_OP_TLBI_NH_VA;
+	cmd.tlbi.vmid = vmid;
+	cmd.tlbi.asid = asid;
+	cmd.tlbi.addr = iova;
+	cmd.tlbi.leaf = leaf;
 
 	do {
 		arm_smmu_cmdq_issue_cmd(smmu, &cmd);
 		cmd.tlbi.addr += granule;
 	} while (size -= granule);
 }
 
+static void
+__arm_smmu_tlb_inv_s2_range_nosync(struct arm_smmu_domain *smmu_domain,
+				   u16 vmid, unsigned long iova, size_t size,
+				   size_t granule, bool leaf)
+{
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
+	struct arm_smmu_cmdq_ent cmd = {};
+
+	cmd.opcode = CMDQ_OP_TLBI_S2_IPA;
+	cmd.tlbi.vmid = vmid;
+	cmd.tlbi.addr = iova;
+	cmd.tlbi.leaf = leaf;
+	do {
+		arm_smmu_cmdq_issue_cmd(smmu, &cmd);
+		cmd.tlbi.addr += granule;
+	} while (size -= granule);
+}
+
+static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
+					  size_t granule, bool leaf,
+					  void *cookie)
+{
+	struct arm_smmu_domain *smmu_domain = cookie;
+
+	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
+		__arm_smmu_tlb_inv_s1_range_nosync(smmu_domain, 0,
+						   smmu_domain->s1_cfg->cd.asid,
+						   iova, size, granule, leaf);
+	} else {
+		__arm_smmu_tlb_inv_s2_range_nosync(smmu_domain,
+						   smmu_domain->s2_cfg->vmid,
+						   iova, size, granule, leaf);
+	}
+}
+
 static const struct iommu_gather_ops arm_smmu_gather_ops = {
 	.tlb_flush_all	= arm_smmu_tlb_inv_context,
 	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
--
2.20.1