[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <01cf1f27-43dc-fb4d-6755-c34c8cac8ec2@huawei.com>
Date: Mon, 1 Feb 2021 20:35:28 +0800
From: Keqian Zhu <zhukeqian1@...wei.com>
To: Eric Auger <eric.auger@...hat.com>, <eric.auger.pro@...il.com>,
<iommu@...ts.linux-foundation.org>, <linux-kernel@...r.kernel.org>,
<kvm@...r.kernel.org>, <kvmarm@...ts.cs.columbia.edu>,
<will@...nel.org>, <joro@...tes.org>, <maz@...nel.org>,
<robin.murphy@....com>, <alex.williamson@...hat.com>
CC: <jean-philippe@...aro.org>, <jacob.jun.pan@...ux.intel.com>,
<nicoleotsuka@...il.com>, <vivek.gautam@....com>,
<yi.l.liu@...el.com>, <zhangfei.gao@...aro.org>
Subject: Re: [PATCH v13 04/15] iommu/smmuv3: Allow s1 and s2 configs to
coexist
Hi Eric,
On 2020/11/18 19:21, Eric Auger wrote:
> In true nested mode, both s1_cfg and s2_cfg will coexist.
> Let's remove the union and add a "set" field in each
> config structure telling whether the config is set and needs
> to be applied when writing the STE. In legacy nested mode,
> only the 2d stage is used. In true nested mode, the "set" field
nit: s/2d/2nd
> will be set when the guest passes the pasid table.
nit: ... the "set" field of s1_cfg and s2_cfg will be set ...
>
> Signed-off-by: Eric Auger <eric.auger@...hat.com>
>
> ---
> v12 -> v13:
> - does not dynamically allocate s1-cfg and s2_cfg anymore. Add
> the set field
> ---
> drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 43 +++++++++++++--------
> drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h | 8 ++--
> 2 files changed, 31 insertions(+), 20 deletions(-)
>
> diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
> index 1e4acc7f3d3c..18ac5af1b284 100644
> --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
> +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
> @@ -1195,8 +1195,8 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
> u64 val = le64_to_cpu(dst[0]);
> bool ste_live = false;
> struct arm_smmu_device *smmu = NULL;
> - struct arm_smmu_s1_cfg *s1_cfg = NULL;
> - struct arm_smmu_s2_cfg *s2_cfg = NULL;
> + struct arm_smmu_s1_cfg *s1_cfg;
> + struct arm_smmu_s2_cfg *s2_cfg;
> struct arm_smmu_domain *smmu_domain = NULL;
> struct arm_smmu_cmdq_ent prefetch_cmd = {
> .opcode = CMDQ_OP_PREFETCH_CFG,
> @@ -1211,13 +1211,24 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
> }
>
> if (smmu_domain) {
> + s1_cfg = &smmu_domain->s1_cfg;
> + s2_cfg = &smmu_domain->s2_cfg;
> +
> switch (smmu_domain->stage) {
> case ARM_SMMU_DOMAIN_S1:
> - s1_cfg = &smmu_domain->s1_cfg;
> + s1_cfg->set = true;
> + s2_cfg->set = false;
> break;
> case ARM_SMMU_DOMAIN_S2:
> + s1_cfg->set = false;
> + s2_cfg->set = true;
> + break;
> case ARM_SMMU_DOMAIN_NESTED:
> - s2_cfg = &smmu_domain->s2_cfg;
> + /*
> + * Actual usage of stage 1 depends on nested mode:
> + * legacy (2d stage only) or true nested mode
> + */
> + s2_cfg->set = true;
> break;
> default:
> break;
> @@ -1244,7 +1255,7 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
> val = STRTAB_STE_0_V;
>
> /* Bypass/fault */
> - if (!smmu_domain || !(s1_cfg || s2_cfg)) {
> + if (!smmu_domain || !(s1_cfg->set || s2_cfg->set)) {
> if (!smmu_domain && disable_bypass)
> val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_ABORT);
> else
> @@ -1263,7 +1274,7 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
> return;
> }
>
> - if (s1_cfg) {
> + if (s1_cfg->set) {
> BUG_ON(ste_live);
> dst[1] = cpu_to_le64(
> FIELD_PREP(STRTAB_STE_1_S1DSS, STRTAB_STE_1_S1DSS_SSID0) |
> @@ -1282,7 +1293,7 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
> FIELD_PREP(STRTAB_STE_0_S1FMT, s1_cfg->s1fmt);
> }
>
> - if (s2_cfg) {
> + if (s2_cfg->set) {
> BUG_ON(ste_live);
> dst[2] = cpu_to_le64(
> FIELD_PREP(STRTAB_STE_2_S2VMID, s2_cfg->vmid) |
> @@ -1846,24 +1857,24 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
> {
> struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
> struct arm_smmu_device *smmu = smmu_domain->smmu;
> + struct arm_smmu_s1_cfg *s1_cfg = &smmu_domain->s1_cfg;
> + struct arm_smmu_s2_cfg *s2_cfg = &smmu_domain->s2_cfg;
>
> iommu_put_dma_cookie(domain);
> free_io_pgtable_ops(smmu_domain->pgtbl_ops);
>
> /* Free the CD and ASID, if we allocated them */
> - if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
> - struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;
> -
> + if (s1_cfg->set) {
> /* Prevent SVA from touching the CD while we're freeing it */
> mutex_lock(&arm_smmu_asid_lock);
> - if (cfg->cdcfg.cdtab)
> + if (s1_cfg->cdcfg.cdtab)
> arm_smmu_free_cd_tables(smmu_domain);
> - arm_smmu_free_asid(&cfg->cd);
> + arm_smmu_free_asid(&s1_cfg->cd);
> mutex_unlock(&arm_smmu_asid_lock);
> - } else {
> - struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
> - if (cfg->vmid)
> - arm_smmu_bitmap_free(smmu->vmid_map, cfg->vmid);
> + }
> + if (s2_cfg->set) {
> + if (s2_cfg->vmid)
> + arm_smmu_bitmap_free(smmu->vmid_map, s2_cfg->vmid);
> }
>
> kfree(smmu_domain);
> diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
> index 19196eea7c1d..07f59252dd21 100644
> --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
> +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
> @@ -562,12 +562,14 @@ struct arm_smmu_s1_cfg {
> struct arm_smmu_ctx_desc cd;
> u8 s1fmt;
> u8 s1cdmax;
> + bool set;
> };
>
> struct arm_smmu_s2_cfg {
> u16 vmid;
> u64 vttbr;
> u64 vtcr;
> + bool set;
> };
>
> struct arm_smmu_strtab_cfg {
> @@ -678,10 +680,8 @@ struct arm_smmu_domain {
> atomic_t nr_ats_masters;
>
> enum arm_smmu_domain_stage stage;
> - union {
> - struct arm_smmu_s1_cfg s1_cfg;
> - struct arm_smmu_s2_cfg s2_cfg;
> - };
> + struct arm_smmu_s1_cfg s1_cfg;
> + struct arm_smmu_s2_cfg s2_cfg;
>
> struct iommu_domain domain;
>
Otherwise this looks good to me. ;-)
>
Thanks,
Keqian
Powered by blists - more mailing lists