Message-ID: <CAF6AEGuF_fC4=vBKr24HogE-d3KkXUQivOpVde9iqf+RvRzNtA@mail.gmail.com>
Date: Sun, 26 Jul 2020 10:03:07 -0700
From: Rob Clark <robdclark@...il.com>
To: Jordan Crouse <jcrouse@...eaurora.org>
Cc: linux-arm-msm <linux-arm-msm@...r.kernel.org>,
Sai Prakash Ranjan <saiprakash.ranjan@...eaurora.org>,
Linux Kernel Mailing List <linux-kernel@...r.kernel.org>,
freedreno <freedreno@...ts.freedesktop.org>,
Joerg Roedel <joro@...tes.org>,
Robin Murphy <robin.murphy@....com>,
Bjorn Andersson <bjorn.andersson@...aro.org>,
"list@....net:IOMMU DRIVERS <iommu@...ts.linux-foundation.org>, Joerg
Roedel <joro@...tes.org>," <iommu@...ts.linux-foundation.org>,
Will Deacon <will@...nel.org>,
"moderated list:ARM/FREESCALE IMX / MXC ARM ARCHITECTURE"
<linux-arm-kernel@...ts.infradead.org>
Subject: Re: [Freedreno] [PATCH v10 06/13] iommu/arm-smmu-qcom: Get and set
the pagetable config for split pagetables
On Mon, Jul 20, 2020 at 8:41 AM Jordan Crouse <jcrouse@...eaurora.org> wrote:
>
> The Adreno GPU has the capability to manage its own pagetables and switch
> them dynamically in hardware. To do this, the GPU uses TTBR1 for "global"
> GPU memory and creates local pagetables for each context, which it then
> switches between dynamically.
>
> Use DOMAIN_ATTR_PGTABLE_CFG to get the current configuration for the
> TTBR1 pagetable from the smmu driver so the leaf driver can create
> compatible pagetables for use with TTBR0.
>
> Because TTBR0 is disabled by default when TTBR1 is enabled, the GPU
> driver can pass the configuration of one of the newly created pagetables
> back through DOMAIN_ATTR_PGTABLE_CFG as a trigger to enable translation
> on TTBR0.
>
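
For context, a rough sketch of how the GPU driver side might use this
attribute (illustrative only; the helper name, cookie handling, and error
paths below are not from this series):

#include <linux/iommu.h>
#include <linux/io-pgtable.h>

/* Hypothetical helper, not part of the patch */
static int example_enable_ttbr0(struct iommu_domain *domain, void *cookie)
{
        struct io_pgtable_cfg ttbr1_cfg, ttbr0_cfg;
        struct io_pgtable_ops *ops;
        int ret;

        /* Get the TTBR1 ("global") pagetable configuration from the SMMU */
        ret = iommu_domain_get_attr(domain, DOMAIN_ATTR_PGTABLE_CFG, &ttbr1_cfg);
        if (ret)
                return ret;

        /*
         * Create a compatible per-context pagetable for TTBR0.  In practice
         * the GPU driver would also install its own TLB maintenance ops in
         * ttbr0_cfg before this call.
         */
        ttbr0_cfg = ttbr1_cfg;
        ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &ttbr0_cfg, cookie);
        if (!ops)
                return -ENOMEM;

        /* Pass the new config back to turn on TTBR0 translation */
        ret = iommu_domain_set_attr(domain, DOMAIN_ATTR_PGTABLE_CFG, &ttbr0_cfg);
        if (ret)
                free_io_pgtable_ops(ops);

        return ret;
}

/*
 * ...and later, passing NULL disables TTBR0 again:
 *      iommu_domain_set_attr(domain, DOMAIN_ATTR_PGTABLE_CFG, NULL);
 */
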
> Signed-off-by: Jordan Crouse <jcrouse@...eaurora.org>
> ---
>
> drivers/iommu/arm-smmu-qcom.c | 47 +++++++++++++++++++++++++++++++++++
> drivers/iommu/arm-smmu.c | 32 ++++++++++++++++++------
> drivers/iommu/arm-smmu.h | 10 ++++++++
> 3 files changed, 81 insertions(+), 8 deletions(-)
>
> diff --git a/drivers/iommu/arm-smmu-qcom.c b/drivers/iommu/arm-smmu-qcom.c
> index b9a5c5369e86..9a0c64ca9cb6 100644
> --- a/drivers/iommu/arm-smmu-qcom.c
> +++ b/drivers/iommu/arm-smmu-qcom.c
> @@ -34,6 +34,52 @@ static bool qcom_adreno_smmu_is_gpu_device(struct device *dev)
> return false;
> }
>
> +/*
> + * Local implementation to configure TTBR0 with the specified pagetable config.
> + * The GPU driver will call this to enable TTBR0 when per-instance pagetables
> + * are active.
> + */
> +static int qcom_adreno_smmu_set_pgtable_cfg(struct arm_smmu_domain *smmu_domain,
> + struct io_pgtable_cfg *pgtbl_cfg)
> +{
> + struct io_pgtable *pgtable = io_pgtable_ops_to_pgtable(smmu_domain->pgtbl_ops);
> + struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
> + struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx];
> +
> + /* The domain must have split pagetables already enabled */
> + if (cb->tcr[0] & ARM_SMMU_TCR_EPD1)
> + return -EINVAL;
> +
> + /* If the pagetable config is NULL, disable TTBR0 */
> + if (!pgtbl_cfg) {
> + /* Return an error if it is already disabled */
> + if ((cb->tcr[0] & ARM_SMMU_TCR_EPD0))
> + return -EINVAL;
> +
> + /* Set TCR to the original configuration */
> + cb->tcr[0] = arm_smmu_lpae_tcr(&pgtable->cfg);
> + cb->ttbr[0] = FIELD_PREP(ARM_SMMU_TTBRn_ASID, cb->cfg->asid);
> + } else {
> + u32 tcr = cb->tcr[0];
> +
> + /* FIXME: What sort of validation do we need to do here? */
> +
> + /* Don't call this again if TTBR0 is already enabled */
> + if (!(cb->tcr[0] & ARM_SMMU_TCR_EPD0))
> + return -EINVAL;
> +
> + tcr |= arm_smmu_lpae_tcr(pgtbl_cfg);
> + tcr &= ~(ARM_SMMU_TCR_EPD0 | ARM_SMMU_TCR_EPD1);
> +
> + cb->tcr[0] = tcr;
> + cb->ttbr[0] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
> + cb->ttbr[0] |= FIELD_PREP(ARM_SMMU_TTBRn_ASID, cb->cfg->asid);
> + }
> +
> + arm_smmu_write_context_bank(smmu_domain->smmu, cb->cfg->cbndx);
> + return 0;
> +}
> +
> static int qcom_adreno_smmu_alloc_context_bank(struct arm_smmu_domain *smmu_domain,
> struct device *dev, int start, int count)
> {
> @@ -131,6 +177,7 @@ static const struct arm_smmu_impl qcom_adreno_smmu_impl = {
> .def_domain_type = qcom_smmu_def_domain_type,
> .reset = qcom_smmu500_reset,
> .alloc_context_bank = qcom_adreno_smmu_alloc_context_bank,
> + .set_pgtable_cfg = qcom_adreno_smmu_set_pgtable_cfg,
> };
>
> static struct arm_smmu_device *qcom_smmu_create(struct arm_smmu_device *smmu,
> diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
> index fff536a44faa..e1036ae54a8d 100644
> --- a/drivers/iommu/arm-smmu.c
> +++ b/drivers/iommu/arm-smmu.c
> @@ -86,13 +86,6 @@ struct arm_smmu_smr {
> bool valid;
> };
>
> -struct arm_smmu_cb {
> - u64 ttbr[2];
> - u32 tcr[2];
> - u32 mair[2];
> - struct arm_smmu_cfg *cfg;
> -};
> -
> static bool using_legacy_binding, using_generic_binding;
>
> static inline int arm_smmu_rpm_get(struct arm_smmu_device *smmu)
> @@ -558,7 +551,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
> }
> }
>
> -static void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx)
> +void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx)
> {
> u32 reg;
> bool stage1;
> @@ -1515,6 +1508,18 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
> case DOMAIN_ATTR_NESTING:
> *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
> return 0;
> + case DOMAIN_ATTR_PGTABLE_CFG: {
> + struct io_pgtable *pgtable;
> + struct io_pgtable_cfg *dest = data;
> +
> + if (!smmu_domain->pgtbl_ops)
> + return -ENODEV;
> +
> + pgtable = io_pgtable_ops_to_pgtable(smmu_domain->pgtbl_ops);
> +
> + memcpy(dest, &pgtable->cfg, sizeof(*dest));
> + return 0;
> + }

Hmm, maybe it would make sense to have impl hooks for get/set_attr, so
we could handle DOMAIN_ATTR_PGTABLE_CFG inside the adreno_smmu_impl?

Having impl-specific domain attrs would be useful for what I have in
mind to enable stall/resume support, so we can hook devcoredump into
iova faults (which would be a huge improvement for debuggability; right
now iova faults are harder to debug than they need to be). My rough
idea was to add DOMAIN_ATTR_RESUME, which could be used with set_attr()
to (1) enable STALL and let drm/msm know whether the iommu supports it,
and (2) resume translation from wq context after the devcoredump
snapshot is collected.
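
A rough sketch of what such impl-level attr hooks could look like (the
hook names here are made up, completely untested):

struct arm_smmu_impl {
        /* ... existing hooks ... */
        int (*domain_get_attr)(struct arm_smmu_domain *smmu_domain,
                               enum iommu_attr attr, void *data);
        int (*domain_set_attr)(struct arm_smmu_domain *smmu_domain,
                               enum iommu_attr attr, void *data);
};

with arm_smmu_domain_get/set_attr() falling back to the impl for any
attribute they don't handle themselves, e.g.:

        default:
                ret = -ENODEV;
                if (smmu_domain->smmu && smmu_domain->smmu->impl &&
                    smmu_domain->smmu->impl->domain_set_attr)
                        ret = smmu_domain->smmu->impl->domain_set_attr(
                                smmu_domain, attr, data);
                break;

Then DOMAIN_ATTR_PGTABLE_CFG (and later something like DOMAIN_ATTR_RESUME)
could live entirely in arm-smmu-qcom.c.
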
BR,
-R
> default:
> return -ENODEV;
> }
> @@ -1555,6 +1560,17 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
> else
> smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
> break;
> + case DOMAIN_ATTR_PGTABLE_CFG: {
> + struct arm_smmu_device *smmu = smmu_domain->smmu;
> +
> + ret = -EPERM;
> +
> + if (smmu)
> + if (smmu->impl && smmu->impl->set_pgtable_cfg)
> + ret = smmu->impl->set_pgtable_cfg(smmu_domain,
> + data);
> + }
> + break;
> default:
> ret = -ENODEV;
> }
> diff --git a/drivers/iommu/arm-smmu.h b/drivers/iommu/arm-smmu.h
> index 9f81c1fffe1e..9325fc28d24a 100644
> --- a/drivers/iommu/arm-smmu.h
> +++ b/drivers/iommu/arm-smmu.h
> @@ -328,6 +328,13 @@ struct arm_smmu_cfg {
> };
> #define ARM_SMMU_INVALID_IRPTNDX 0xff
>
> +struct arm_smmu_cb {
> + u64 ttbr[2];
> + u32 tcr[2];
> + u32 mair[2];
> + struct arm_smmu_cfg *cfg;
> +};
> +
> enum arm_smmu_domain_stage {
> ARM_SMMU_DOMAIN_S1 = 0,
> ARM_SMMU_DOMAIN_S2,
> @@ -408,6 +415,8 @@ struct arm_smmu_impl {
> int (*def_domain_type)(struct device *dev);
> int (*alloc_context_bank)(struct arm_smmu_domain *smmu_domain,
> struct device *dev, int start, int max);
> + int (*set_pgtable_cfg)(struct arm_smmu_domain *smmu_domain,
> + struct io_pgtable_cfg *cfg);
> };
>
> #define INVALID_SMENDX -1
> @@ -493,6 +502,7 @@ struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu);
> struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu);
> struct arm_smmu_device *qcom_adreno_smmu_impl_init(struct arm_smmu_device *smmu);
>
> +void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx);
> int arm_mmu500_reset(struct arm_smmu_device *smmu);
>
> #endif /* _ARM_SMMU_H */
> --
> 2.25.1
>
> _______________________________________________
> Freedreno mailing list
> Freedreno@...ts.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/freedreno