Message-Id: <20220817012024.3251276-8-baolu.lu@linux.intel.com>
Date: Wed, 17 Aug 2022 09:20:18 +0800
From: Lu Baolu <baolu.lu@...ux.intel.com>
To: Joerg Roedel <joro@...tes.org>, Jason Gunthorpe <jgg@...dia.com>,
Christoph Hellwig <hch@...radead.org>,
Bjorn Helgaas <bhelgaas@...gle.com>,
Kevin Tian <kevin.tian@...el.com>,
Ashok Raj <ashok.raj@...el.com>, Will Deacon <will@...nel.org>,
Robin Murphy <robin.murphy@....com>,
Jean-Philippe Brucker <jean-philippe@...aro.com>,
Dave Jiang <dave.jiang@...el.com>,
Vinod Koul <vkoul@...nel.org>
Cc: Eric Auger <eric.auger@...hat.com>, Liu Yi L <yi.l.liu@...el.com>,
Jacob jun Pan <jacob.jun.pan@...el.com>,
Zhangfei Gao <zhangfei.gao@...aro.org>,
Zhu Tony <tony.zhu@...el.com>, iommu@...ts.linux.dev,
linux-pci@...r.kernel.org, linux-kernel@...r.kernel.org,
Lu Baolu <baolu.lu@...ux.intel.com>
Subject: [PATCH v11 07/13] iommu/vt-d: Add SVA domain support

Add support for SVA domain allocation and provide an SVA-specific
iommu_domain_ops. This implementation is based on the existing SVA
code; further cleanup and refactoring are left for later incremental
changes.

Signed-off-by: Lu Baolu <baolu.lu@...ux.intel.com>
Reviewed-by: Kevin Tian <kevin.tian@...el.com>
Reviewed-by: Jason Gunthorpe <jgg@...dia.com>
Reviewed-by: Yi Liu <yi.l.liu@...el.com>
---
drivers/iommu/intel/iommu.h | 5 ++++
drivers/iommu/intel/iommu.c | 2 ++
drivers/iommu/intel/svm.c | 50 +++++++++++++++++++++++++++++++++++++
3 files changed, 57 insertions(+)
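
For context, the new hooks are reached from the IOMMU core roughly as in
the simplified sketch below. The overall flow and the domain->mm field
follow the companion core patches earlier in this series; the function
names here are made up for illustration and are not part of this patch.

/*
 * Simplified sketch (not the exact core implementation) of how the
 * IOMMU core is expected to reach the driver hooks added below.
 */
static struct iommu_domain *example_sva_domain_alloc(struct device *dev,
						     struct mm_struct *mm)
{
	const struct iommu_ops *ops = dev_iommu_ops(dev);
	struct iommu_domain *domain;

	/* Ends up in intel_iommu_domain_alloc(IOMMU_DOMAIN_SVA) below. */
	domain = ops->domain_alloc(IOMMU_DOMAIN_SVA);
	if (!domain)
		return NULL;

	domain->type = IOMMU_DOMAIN_SVA;
	mmgrab(mm);
	domain->mm = mm;

	return domain;
}

/* Attaching the domain to a PASID then invokes the new set_dev_pasid op. */
static int example_attach_dev_pasid(struct iommu_domain *domain,
				    struct device *dev, ioasid_t pasid)
{
	return domain->ops->set_dev_pasid(domain, dev, pasid);
}
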
diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
index a9b8367c9361..4875c9974abd 100644
--- a/drivers/iommu/intel/iommu.h
+++ b/drivers/iommu/intel/iommu.h
@@ -747,6 +747,7 @@ void intel_svm_unbind(struct iommu_sva *handle);
u32 intel_svm_get_pasid(struct iommu_sva *handle);
int intel_svm_page_response(struct device *dev, struct iommu_fault_event *evt,
struct iommu_page_response *msg);
+struct iommu_domain *intel_svm_domain_alloc(void);
struct intel_svm_dev {
struct list_head list;
@@ -772,6 +773,10 @@ struct intel_svm {
};
#else
static inline void intel_svm_check(struct intel_iommu *iommu) {}
+static inline struct iommu_domain *intel_svm_domain_alloc(void)
+{
+ return NULL;
+}
#endif
#ifdef CONFIG_INTEL_IOMMU_DEBUGFS
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 7cca030a508e..27c9fd6139a8 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -4149,6 +4149,8 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
return domain;
case IOMMU_DOMAIN_IDENTITY:
return &si_domain->domain;
+ case IOMMU_DOMAIN_SVA:
+ return intel_svm_domain_alloc();
default:
return NULL;
}
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index 2420fa5c2360..16a4d413fce4 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -928,3 +928,53 @@ int intel_svm_page_response(struct device *dev,
mutex_unlock(&pasid_mutex);
return ret;
}
+
+static int intel_svm_set_dev_pasid(struct iommu_domain *domain,
+ struct device *dev, ioasid_t pasid)
+{
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
+ struct intel_iommu *iommu = info->iommu;
+ struct iommu_sva *sva;
+ int ret = 0;
+
+ mutex_lock(&pasid_mutex);
+ /*
+ * Detach the domain if a blocking domain is set. Check the
+ * right domain type once the IOMMU driver supports a real
+ * blocking domain.
+ */
+ if (!domain || domain->type == IOMMU_DOMAIN_UNMANAGED) {
+ intel_svm_unbind_mm(dev, pasid);
+ } else {
+ struct mm_struct *mm = domain->mm;
+
+ sva = intel_svm_bind_mm(iommu, dev, mm);
+ if (IS_ERR(sva))
+ ret = PTR_ERR(sva);
+ }
+ mutex_unlock(&pasid_mutex);
+
+ return ret;
+}
+
+static void intel_svm_domain_free(struct iommu_domain *domain)
+{
+ kfree(to_dmar_domain(domain));
+}
+
+static const struct iommu_domain_ops intel_svm_domain_ops = {
+ .set_dev_pasid = intel_svm_set_dev_pasid,
+ .free = intel_svm_domain_free,
+};
+
+struct iommu_domain *intel_svm_domain_alloc(void)
+{
+ struct dmar_domain *domain;
+
+ domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+ if (!domain)
+ return NULL;
+ domain->domain.ops = &intel_svm_domain_ops;
+
+ return &domain->domain;
+}
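
For reference, a device driver is expected to reach the code above
through the generic SVA API rather than by calling these ops directly.
A rough, hypothetical consumer could look like the sketch below; the
two-argument iommu_sva_bind_device() form assumed here follows the
refactored SVA API elsewhere in this series and may differ in other
trees.

/* Hypothetical SVA consumer, e.g. a DMA engine driver. */
static int example_bind_current_mm(struct device *dev)
{
	struct iommu_sva *handle;
	u32 pasid;

	/* Allocates/attaches an SVA domain for current->mm under the hood. */
	handle = iommu_sva_bind_device(dev, current->mm);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	pasid = iommu_sva_get_pasid(handle);
	/* Program @pasid into the device's descriptors or work queues here. */

	iommu_sva_unbind_device(handle);
	return 0;
}
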
--
2.25.1