Message-Id: <20210223205634.604221-4-eric.auger@redhat.com>
Date: Tue, 23 Feb 2021 21:56:24 +0100
From: Eric Auger <eric.auger@...hat.com>
To: eric.auger.pro@...il.com, eric.auger@...hat.com,
iommu@...ts.linux-foundation.org, linux-kernel@...r.kernel.org,
kvm@...r.kernel.org, kvmarm@...ts.cs.columbia.edu, will@...nel.org,
maz@...nel.org, robin.murphy@....com, joro@...tes.org,
alex.williamson@...hat.com, tn@...ihalf.com, zhukeqian1@...wei.com
Cc: jacob.jun.pan@...ux.intel.com, yi.l.liu@...el.com,
wangxingang5@...wei.com, jiangkunkun@...wei.com,
jean-philippe@...aro.org, zhangfei.gao@...aro.org,
zhangfei.gao@...il.com, vivek.gautam@....com,
shameerali.kolothum.thodi@...wei.com, yuzenghui@...wei.com,
nicoleotsuka@...il.com, lushenming@...wei.com, vsethi@...dia.com
Subject: [PATCH v14 03/13] iommu/smmuv3: Allow s1 and s2 configs to coexist
In true nested mode, both s1_cfg and s2_cfg will coexist.
Let's remove the union and add a "set" field in each
config structure, indicating whether the config is valid
and needs to be applied when writing the STE. In legacy
nested mode, only the second stage is used. In true nested
mode, both stages are used and the S1 config is "set" when
the guest passes its PASID table.
No functional change intended.
Signed-off-by: Eric Auger <eric.auger@...hat.com>
---
v13 -> v14:
- slight reword of the commit message
v12 -> v13:
- do not dynamically allocate s1_cfg and s2_cfg anymore. Add
the set field
---
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 43 +++++++++++++--------
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h | 8 ++--
2 files changed, 31 insertions(+), 20 deletions(-)
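
Note for reviewers: below is a minimal, standalone userspace sketch (not
kernel code) of the layout this patch moves to. The struct and enum names
loosely mirror the kernel ones but are simplified stand-ins, and
select_stages() only mirrors the switch added to arm_smmu_write_strtab_ent()
in the hunk below; it is an illustration, not part of the patch.

#include <stdbool.h>
#include <stdio.h>

enum smmu_domain_stage {
	DOMAIN_S1,
	DOMAIN_S2,
	DOMAIN_NESTED,
};

struct s1_cfg {
	bool set;	/* stage 1 config must be applied to the STE */
};

struct s2_cfg {
	bool set;	/* stage 2 config must be applied to the STE */
};

struct smmu_domain {
	enum smmu_domain_stage stage;
	struct s1_cfg s1_cfg;	/* both configs now coexist: no union */
	struct s2_cfg s2_cfg;
};

/* Mirrors the switch added to arm_smmu_write_strtab_ent() below. */
static void select_stages(struct smmu_domain *d)
{
	switch (d->stage) {
	case DOMAIN_S1:
		d->s1_cfg.set = true;
		d->s2_cfg.set = false;
		break;
	case DOMAIN_S2:
		d->s1_cfg.set = false;
		d->s2_cfg.set = true;
		break;
	case DOMAIN_NESTED:
		/*
		 * Stage 2 is always used. In true nested mode a later
		 * patch in the series sets s1_cfg.set when the guest
		 * passes its PASID table; legacy nested mode keeps
		 * using the second stage only.
		 */
		d->s2_cfg.set = true;
		break;
	}
}

int main(void)
{
	struct smmu_domain d = { .stage = DOMAIN_NESTED };

	select_stages(&d);
	printf("nested: s1 set=%d, s2 set=%d\n", d.s1_cfg.set, d.s2_cfg.set);
	return 0;
}
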
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 0f1264a86eec..14e5666c25dc 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -1239,8 +1239,8 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
u64 val = le64_to_cpu(dst[0]);
bool ste_live = false;
struct arm_smmu_device *smmu = NULL;
- struct arm_smmu_s1_cfg *s1_cfg = NULL;
- struct arm_smmu_s2_cfg *s2_cfg = NULL;
+ struct arm_smmu_s1_cfg *s1_cfg;
+ struct arm_smmu_s2_cfg *s2_cfg;
struct arm_smmu_domain *smmu_domain = NULL;
struct arm_smmu_cmdq_ent prefetch_cmd = {
.opcode = CMDQ_OP_PREFETCH_CFG,
@@ -1255,13 +1255,24 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
}
if (smmu_domain) {
+ s1_cfg = &smmu_domain->s1_cfg;
+ s2_cfg = &smmu_domain->s2_cfg;
+
switch (smmu_domain->stage) {
case ARM_SMMU_DOMAIN_S1:
- s1_cfg = &smmu_domain->s1_cfg;
+ s1_cfg->set = true;
+ s2_cfg->set = false;
break;
case ARM_SMMU_DOMAIN_S2:
+ s1_cfg->set = false;
+ s2_cfg->set = true;
+ break;
case ARM_SMMU_DOMAIN_NESTED:
- s2_cfg = &smmu_domain->s2_cfg;
+	/*
+	 * Actual usage of stage 1 depends on nested mode:
+	 * legacy (2nd stage only) or true nested mode
+	 */
+ s2_cfg->set = true;
break;
default:
break;
@@ -1288,7 +1299,7 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
val = STRTAB_STE_0_V;
/* Bypass/fault */
- if (!smmu_domain || !(s1_cfg || s2_cfg)) {
+ if (!smmu_domain || !(s1_cfg->set || s2_cfg->set)) {
if (!smmu_domain && disable_bypass)
val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_ABORT);
else
@@ -1307,7 +1318,7 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
return;
}
- if (s1_cfg) {
+ if (s1_cfg->set) {
u64 strw = smmu->features & ARM_SMMU_FEAT_E2H ?
STRTAB_STE_1_STRW_EL2 : STRTAB_STE_1_STRW_NSEL1;
@@ -1329,7 +1340,7 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
FIELD_PREP(STRTAB_STE_0_S1FMT, s1_cfg->s1fmt);
}
- if (s2_cfg) {
+ if (s2_cfg->set) {
BUG_ON(ste_live);
dst[2] = cpu_to_le64(
FIELD_PREP(STRTAB_STE_2_S2VMID, s2_cfg->vmid) |
@@ -2016,24 +2027,24 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
{
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_device *smmu = smmu_domain->smmu;
+ struct arm_smmu_s1_cfg *s1_cfg = &smmu_domain->s1_cfg;
+ struct arm_smmu_s2_cfg *s2_cfg = &smmu_domain->s2_cfg;
iommu_put_dma_cookie(domain);
free_io_pgtable_ops(smmu_domain->pgtbl_ops);
/* Free the CD and ASID, if we allocated them */
- if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
- struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;
-
+ if (s1_cfg->set) {
/* Prevent SVA from touching the CD while we're freeing it */
mutex_lock(&arm_smmu_asid_lock);
- if (cfg->cdcfg.cdtab)
+ if (s1_cfg->cdcfg.cdtab)
arm_smmu_free_cd_tables(smmu_domain);
- arm_smmu_free_asid(&cfg->cd);
+ arm_smmu_free_asid(&s1_cfg->cd);
mutex_unlock(&arm_smmu_asid_lock);
- } else {
- struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
- if (cfg->vmid)
- arm_smmu_bitmap_free(smmu->vmid_map, cfg->vmid);
+ }
+ if (s2_cfg->set) {
+ if (s2_cfg->vmid)
+ arm_smmu_bitmap_free(smmu->vmid_map, s2_cfg->vmid);
}
kfree(smmu_domain);
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
index 59af0bbd2f7b..ec2b77596b6a 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -598,12 +598,14 @@ struct arm_smmu_s1_cfg {
struct arm_smmu_ctx_desc cd;
u8 s1fmt;
u8 s1cdmax;
+ bool set;
};
struct arm_smmu_s2_cfg {
u16 vmid;
u64 vttbr;
u64 vtcr;
+ bool set;
};
struct arm_smmu_strtab_cfg {
@@ -718,10 +720,8 @@ struct arm_smmu_domain {
atomic_t nr_ats_masters;
enum arm_smmu_domain_stage stage;
- union {
- struct arm_smmu_s1_cfg s1_cfg;
- struct arm_smmu_s2_cfg s2_cfg;
- };
+ struct arm_smmu_s1_cfg s1_cfg;
+ struct arm_smmu_s2_cfg s2_cfg;
struct iommu_domain domain;
--
2.26.2