Message-Id: <1502459130-6234-5-git-send-email-eric.auger@redhat.com>
Date: Fri, 11 Aug 2017 15:45:30 +0200
From: Eric Auger <eric.auger@...hat.com>
To: eric.auger.pro@...il.com, eric.auger@...hat.com,
iommu@...ts.linux-foundation.org, linux-kernel@...r.kernel.org,
Will.Deacon@....com, robin.murphy@....com,
Jean-Philippe.Brucker@....com
Cc: christoffer.dall@...aro.org, Marc.Zyngier@....com,
alex.williamson@...hat.com, peterx@...hat.com, mst@...hat.com,
tn@...ihalf.com, bharat.bhushan@....com
Subject: [RFC v2 4/4] iommu/arm-smmu-v3: add CMD_TLBI_NH_VA_AM command for iova range invalidation
When using a virtual SMMU and running the driver in TLBI_ON_MAP
mode, we need to invalidate large IOVA ranges. This typically happens
in the DPDK use case, where hugepages are used. Invalidating such
ranges page by page is really inefficient; we need to invalidate by
IOVA range instead. Unfortunately no such command is specified in the
SMMUv3 architecture spec, so let's add a new implementation-defined
command that takes an address mask.
The CMD_TLBI_NH_VA_AM command format is inherited from
CMD_TLBI_NH_VA's, with the currently unused VMID field replaced by
the AM (address mask) field.
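For illustration: with the 4K granule assumed by the size >> 12
conversion in this patch, a 2MB hugepage maps to an AM value of
0x200. A minimal sketch of the encoding (the helper name is
hypothetical, not part of this patch):

    /* Hypothetical helper mirroring the encoding done in
     * arm_smmu_tlb_inv_range_nosync() below: the 16-bit AM field
     * carries the size of the invalidated range in 4K pages, so
     * a range larger than 2^16 pages (256MB) would not fit.
     */
    static inline u16 tlbi_nh_va_am_encode(size_t size)
    {
            return size >> 12;      /* e.g. 2MB hugepage -> 0x200 */
    }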
Signed-off-by: Eric Auger <eric.auger@...hat.com>
---
drivers/iommu/arm-smmu-v3.c | 22 ++++++++++++++++++++--
1 file changed, 20 insertions(+), 2 deletions(-)
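Note: a virtual SMMU implementation consuming this command could
decode it symmetrically. The sketch below is only illustrative,
assuming the CMD_TLBI_NH_VA field layout inherited here (ASID in
cmd[0] bits [63:48], AM reusing the VMID field at bits [47:32], VA
in cmd[1]); the function and parameter names are hypothetical:

    /* Hypothetical vSMMU-side decoder (not part of this patch):
     * recover the ASID and IOVA range from a CMD_TLBI_NH_VA_AM
     * command read off the emulated command queue.
     */
    static void vsmmu_decode_tlbi_nh_va_am(u64 *cmd, u16 *asid,
                                           u64 *iova, u64 *size)
    {
            *asid = cmd[0] >> 48;                    /* ASID field */
            *size = ((cmd[0] >> 32) & 0xffff) << 12; /* AM: 4K pages */
            *iova = cmd[1] & ~0xfffULL;              /* VA, 4K aligned */
    }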
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index a1c10af..9da2785 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -492,9 +492,15 @@ struct arm_smmu_cmdq_ent {
#define CMDQ_OP_TLBI_S12_VMALL 0x28
#define CMDQ_OP_TLBI_S2_IPA 0x2a
#define CMDQ_OP_TLBI_NSNH_ALL 0x30
+
+ /* vIOMMU ASID/IOVA Range Invalidation */
+ #define CMDQ_OP_TLBI_NH_VA_AM 0x8F
struct {
u16 asid;
- u16 vmid;
+ union {
+ u16 vmid;
+ u16 am; /* address mask */
+ };
bool leaf;
u64 addr;
} tlbi;
@@ -853,6 +859,12 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
cmd[1] |= ent->tlbi.leaf ? CMDQ_TLBI_1_LEAF : 0;
cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_VA_MASK;
break;
+ case CMDQ_OP_TLBI_NH_VA_AM:
+ cmd[0] |= (u64)ent->tlbi.asid << CMDQ_TLBI_0_ASID_SHIFT;
+ cmd[0] |= (u64)ent->tlbi.am << CMDQ_TLBI_0_VMID_SHIFT;
+ cmd[1] |= ent->tlbi.leaf ? CMDQ_TLBI_1_LEAF : 0;
+ cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_VA_MASK;
+ break;
case CMDQ_OP_TLBI_S2_IPA:
cmd[0] |= (u64)ent->tlbi.vmid << CMDQ_TLBI_0_VMID_SHIFT;
cmd[1] |= ent->tlbi.leaf ? CMDQ_TLBI_1_LEAF : 0;
@@ -1402,8 +1414,14 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
};
if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
- cmd.opcode = CMDQ_OP_TLBI_NH_VA;
cmd.tlbi.asid = smmu_domain->s1_cfg.cd.asid;
+ if (smmu->options & ARM_SMMU_OPT_TLBI_ON_MAP) {
+ cmd.opcode = CMDQ_OP_TLBI_NH_VA_AM;
+ cmd.tlbi.am = size >> 12;
+ granule = size;
+ } else {
+ cmd.opcode = CMDQ_OP_TLBI_NH_VA;
+ }
} else {
cmd.opcode = CMDQ_OP_TLBI_S2_IPA;
cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid;
--
2.5.5