Message-ID: <CA+G9fYtj1RBYcPhXZRm-qm5ygtdLj1jD8vFZSqQvwi_DNJLBwQ@mail.gmail.com>
Date: Tue, 21 Jul 2020 00:45:17 +0530
From: Naresh Kamboju <naresh.kamboju@...aro.org>
To: Rob Clark <robdclark@...il.com>
Cc: iommu@...ts.linux-foundation.org,
linux-arm-msm <linux-arm-msm@...r.kernel.org>,
Robin Murphy <robin.murphy@....com>,
Rob Clark <robdclark@...omium.org>,
Andy Gross <agross@...nel.org>,
Bjorn Andersson <bjorn.andersson@...aro.org>,
Joerg Roedel <joro@...tes.org>,
open list <linux-kernel@...r.kernel.org>,
Arnd Bergmann <arnd@...db.de>, lkft-triage@...ts.linaro.org
Subject: Re: [PATCH] iommu/qcom: Use domain rather than dev as tlb cookie
On Mon, 20 Jul 2020 at 21:21, Rob Clark <robdclark@...il.com> wrote:
>
> From: Rob Clark <robdclark@...omium.org>
>
> The device may be torn down, but the domain should still be valid. Let's
> use that as the tlb flush ops cookie.
>
> Fixes a problem reported in [1]
This proposed fix applied cleanly on top of Linux mainline master,
and the boot test passed on db410c.
The reported problem is fixed.
>
> [1] https://lkml.org/lkml/2020/7/20/104
>
> Signed-off-by: Rob Clark <robdclark@...omium.org>
Reported-by: Naresh Kamboju <naresh.kamboju@...aro.org>
Tested-by: Naresh Kamboju <naresh.kamboju@...aro.org>
> ---
> Note I don't have a good setup to test this atm, but I think it should
> work.
>
> drivers/iommu/qcom_iommu.c | 37 +++++++++++++++++--------------------
> 1 file changed, 17 insertions(+), 20 deletions(-)
>
> diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c
> index c3e1fbd1988c..d176df569af8 100644
> --- a/drivers/iommu/qcom_iommu.c
> +++ b/drivers/iommu/qcom_iommu.c
> @@ -65,6 +65,7 @@ struct qcom_iommu_domain {
> struct mutex init_mutex; /* Protects iommu pointer */
> struct iommu_domain domain;
> struct qcom_iommu_dev *iommu;
> + struct iommu_fwspec *fwspec;
> };
>
> static struct qcom_iommu_domain *to_qcom_iommu_domain(struct iommu_domain *dom)
> @@ -84,9 +85,9 @@ static struct qcom_iommu_dev * to_iommu(struct device *dev)
> return dev_iommu_priv_get(dev);
> }
>
> -static struct qcom_iommu_ctx * to_ctx(struct device *dev, unsigned asid)
> +static struct qcom_iommu_ctx * to_ctx(struct qcom_iommu_domain *d, unsigned asid)
> {
> - struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
> + struct qcom_iommu_dev *qcom_iommu = d->iommu;
> if (!qcom_iommu)
> return NULL;
> return qcom_iommu->ctxs[asid - 1];
> @@ -118,14 +119,12 @@ iommu_readq(struct qcom_iommu_ctx *ctx, unsigned reg)
>
> static void qcom_iommu_tlb_sync(void *cookie)
> {
> - struct iommu_fwspec *fwspec;
> - struct device *dev = cookie;
> + struct qcom_iommu_domain *qcom_domain = cookie;
> + struct iommu_fwspec *fwspec = qcom_domain->fwspec;
> unsigned i;
>
> - fwspec = dev_iommu_fwspec_get(dev);
> -
> for (i = 0; i < fwspec->num_ids; i++) {
> - struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
> + struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
> unsigned int val, ret;
>
> iommu_writel(ctx, ARM_SMMU_CB_TLBSYNC, 0);
> @@ -139,14 +138,12 @@ static void qcom_iommu_tlb_sync(void *cookie)
>
> static void qcom_iommu_tlb_inv_context(void *cookie)
> {
> - struct device *dev = cookie;
> - struct iommu_fwspec *fwspec;
> + struct qcom_iommu_domain *qcom_domain = cookie;
> + struct iommu_fwspec *fwspec = qcom_domain->fwspec;
> unsigned i;
>
> - fwspec = dev_iommu_fwspec_get(dev);
> -
> for (i = 0; i < fwspec->num_ids; i++) {
> - struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
> + struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
> iommu_writel(ctx, ARM_SMMU_CB_S1_TLBIASID, ctx->asid);
> }
>
> @@ -156,16 +153,14 @@ static void qcom_iommu_tlb_inv_context(void *cookie)
> static void qcom_iommu_tlb_inv_range_nosync(unsigned long iova, size_t size,
> size_t granule, bool leaf, void *cookie)
> {
> - struct device *dev = cookie;
> - struct iommu_fwspec *fwspec;
> + struct qcom_iommu_domain *qcom_domain = cookie;
> + struct iommu_fwspec *fwspec = qcom_domain->fwspec;
> unsigned i, reg;
>
> reg = leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
>
> - fwspec = dev_iommu_fwspec_get(dev);
> -
> for (i = 0; i < fwspec->num_ids; i++) {
> - struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
> + struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
> size_t s = size;
>
> iova = (iova >> 12) << 12;
> @@ -256,7 +251,9 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
> };
>
> qcom_domain->iommu = qcom_iommu;
> - pgtbl_ops = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &pgtbl_cfg, dev);
> + qcom_domain->fwspec = fwspec;
> +
> + pgtbl_ops = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &pgtbl_cfg, qcom_domain);
> if (!pgtbl_ops) {
> dev_err(qcom_iommu->dev, "failed to allocate pagetable ops\n");
> ret = -ENOMEM;
> @@ -269,7 +266,7 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
> domain->geometry.force_aperture = true;
>
> for (i = 0; i < fwspec->num_ids; i++) {
> - struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
> + struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
>
> if (!ctx->secure_init) {
> ret = qcom_scm_restore_sec_cfg(qcom_iommu->sec_id, ctx->asid);
> @@ -419,7 +416,7 @@ static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *de
>
> pm_runtime_get_sync(qcom_iommu->dev);
> for (i = 0; i < fwspec->num_ids; i++) {
> - struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
> + struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
>
> /* Disable the context bank: */
> iommu_writel(ctx, ARM_SMMU_CB_SCTLR, 0);
--
Linaro LKFT
https://lkft.linaro.org