Message-Id: <20230712163355.3177511-6-jacob.jun.pan@linux.intel.com>
Date: Wed, 12 Jul 2023 09:33:53 -0700
From: Jacob Pan <jacob.jun.pan@...ux.intel.com>
To: LKML <linux-kernel@...r.kernel.org>, iommu@...ts.linux.dev,
"Lu Baolu" <baolu.lu@...ux.intel.com>,
Joerg Roedel <joro@...tes.org>,
Jean-Philippe Brucker <jean-philippe@...aro.com>,
"Robin Murphy" <robin.murphy@....com>
Cc: Jason Gunthorpe <jgg@...dia.com>, "Will Deacon" <will@...nel.org>,
"Tian, Kevin" <kevin.tian@...el.com>, Yi Liu <yi.l.liu@...el.com>,
"Yu, Fenghua" <fenghua.yu@...el.com>,
Tony Luck <tony.luck@...el.com>,
Jacob Pan <jacob.jun.pan@...ux.intel.com>
Subject: [PATCH v10 5/7] iommu/vt-d: Make prq draining code generic
From: Lu Baolu <baolu.lu@...ux.intel.com>
Currently, draining page requests and responses for a pasid is part of the
SVA implementation, because the driver only supports attaching an SVA
domain to a device pasid. As we are about to support attaching other types
of domains to a device pasid, make the prq draining code generic.
Reviewed-by: Jacob Pan <jacob.jun.pan@...ux.intel.com>
Signed-off-by: Lu Baolu <baolu.lu@...ux.intel.com>
---
drivers/iommu/intel/iommu.c | 30 +++++++++++++++++++-----------
drivers/iommu/intel/iommu.h | 2 ++
drivers/iommu/intel/svm.c | 8 ++------
3 files changed, 23 insertions(+), 17 deletions(-)
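
For context, the resulting intel_iommu_remove_dev_pasid() flow looks roughly
like the sketch below. This is a simplified rendering of the hunk that
follows, shown here only to make the new call order (tear down the pasid
entry, then drain the prq) easier to see; refer to the diff for the actual
code:

	static void intel_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid)
	{
		struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
		struct iommu_domain *domain;

		domain = iommu_get_domain_for_dev_pasid(dev, pasid, 0);
		if (!domain)
			goto out_tear_down;

		/* SVA keeps its own cleanup (e.g. mm notifications) for now. */
		if (domain->type == IOMMU_DOMAIN_SVA) {
			intel_svm_remove_dev_pasid(dev, pasid);
			goto out_tear_down;
		}

		/* Only SVA domains can be attached to a pasid so far. */
		WARN_ON(1);

	out_tear_down:
		/* Tear down the pasid entry, then drain pending page requests. */
		intel_pasid_tear_down_entry(iommu, dev, pasid, false);
		intel_drain_pasid_prq(dev, pasid);
	}
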
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 40685cbfaf0e..4b54a56831b4 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -4733,21 +4733,29 @@ static void intel_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid)
struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
struct iommu_domain *domain;
- /* Domain type specific cleanup: */
domain = iommu_get_domain_for_dev_pasid(dev, pasid, 0);
- if (domain) {
- switch (domain->type) {
- case IOMMU_DOMAIN_SVA:
- intel_svm_remove_dev_pasid(dev, pasid);
- break;
- default:
- /* should never reach here */
- WARN_ON(1);
- break;
- }
+ if (!domain)
+ goto out_tear_down;
+
+ /*
+ * The SVA implementation needs to handle its own stuff, such as mm
+ * notifications. Until that code is consolidated into the iommu core,
+ * let the Intel SVA code handle it.
+ */
+ if (domain->type == IOMMU_DOMAIN_SVA) {
+ intel_svm_remove_dev_pasid(dev, pasid);
+ goto out_tear_down;
}
+ /*
+ * Should never reach here until we add support for attaching a
+ * non-SVA domain to a pasid.
+ */
+ WARN_ON(1);
+
+out_tear_down:
intel_pasid_tear_down_entry(iommu, dev, pasid, false);
+ intel_drain_pasid_prq(dev, pasid);
}
const struct iommu_ops intel_iommu_ops = {
diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
index 1c5e1d88862b..6d94a29f5d52 100644
--- a/drivers/iommu/intel/iommu.h
+++ b/drivers/iommu/intel/iommu.h
@@ -844,6 +844,7 @@ int intel_svm_page_response(struct device *dev, struct iommu_fault_event *evt,
struct iommu_page_response *msg);
struct iommu_domain *intel_svm_domain_alloc(void);
void intel_svm_remove_dev_pasid(struct device *dev, ioasid_t pasid);
+void intel_drain_pasid_prq(struct device *dev, u32 pasid);
struct intel_svm_dev {
struct list_head list;
@@ -862,6 +863,7 @@ struct intel_svm {
};
#else
static inline void intel_svm_check(struct intel_iommu *iommu) {}
+static inline void intel_drain_pasid_prq(struct device *dev, u32 pasid) {}
static inline struct iommu_domain *intel_svm_domain_alloc(void)
{
return NULL;
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index 2a82864e9d57..588367a9e9b5 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -26,8 +26,6 @@
#include "trace.h"
static irqreturn_t prq_event_thread(int irq, void *d);
-static void intel_svm_drain_prq(struct device *dev, u32 pasid);
-#define to_intel_svm_dev(handle) container_of(handle, struct intel_svm_dev, sva)
static DEFINE_XARRAY_ALLOC(pasid_private_array);
static int pasid_private_add(ioasid_t pasid, void *priv)
@@ -391,8 +389,6 @@ void intel_svm_remove_dev_pasid(struct device *dev, u32 pasid)
* large and has to be physically contiguous. So it's
* hard to be as defensive as we might like.
*/
- intel_pasid_tear_down_entry(iommu, dev, svm->pasid, false);
- intel_svm_drain_prq(dev, svm->pasid);
kfree_rcu(sdev, rcu);
if (list_empty(&svm->devs)) {
@@ -449,7 +445,7 @@ static bool is_canonical_address(u64 addr)
}
/**
- * intel_svm_drain_prq - Drain page requests and responses for a pasid
+ * intel_drain_pasid_prq - Drain page requests and responses for a pasid
* @dev: target device
* @pasid: pasid for draining
*
@@ -463,7 +459,7 @@ static bool is_canonical_address(u64 addr)
* described in VT-d spec CH7.10 to drain all page requests and page
* responses pending in the hardware.
*/
-static void intel_svm_drain_prq(struct device *dev, u32 pasid)
+void intel_drain_pasid_prq(struct device *dev, u32 pasid)
{
struct device_domain_info *info;
struct dmar_domain *domain;
--
2.25.1