Message-Id: <20220329053800.3049561-11-baolu.lu@linux.intel.com>
Date: Tue, 29 Mar 2022 13:37:59 +0800
From: Lu Baolu <baolu.lu@...ux.intel.com>
To: Joerg Roedel <joro@...tes.org>, Jason Gunthorpe <jgg@...dia.com>,
Christoph Hellwig <hch@...radead.org>,
Kevin Tian <kevin.tian@...el.com>,
Ashok Raj <ashok.raj@...el.com>, Will Deacon <will@...nel.org>,
Robin Murphy <robin.murphy@....com>,
Jean-Philippe Brucker <jean-philippe@...aro.com>
Cc: Eric Auger <eric.auger@...hat.com>, Liu Yi L <yi.l.liu@...el.com>,
Jacob jun Pan <jacob.jun.pan@...el.com>,
iommu@...ts.linux-foundation.org, linux-kernel@...r.kernel.org,
Lu Baolu <baolu.lu@...ux.intel.com>
Subject: [PATCH RFC v2 10/11] iommu: Per-domain I/O page fault handling
Tweak the I/O page fault handling framework to route page faults to
the domain and call the page fault handler retrieved from the domain.
This allows the framework to serve any usage scenario that has an
IOMMU domain and installs a page fault handler in it. The SVA
implementation is also refactored to become the first consumer of the
per-domain page fault handling model.
Signed-off-by: Lu Baolu <baolu.lu@...ux.intel.com>
---
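For reviewers, here is a minimal sketch of how a non-SVA consumer could
plug into the new per-domain hooks. Only the handler typedef and the
fault_handler/fault_data fields come from this series; my_iopf_handler,
my_fault_ctx and my_map_missing_page are hypothetical names used purely
for illustration.

/*
 * Hypothetical consumer-side handler, matching the
 * iommu_domain_iopf_handler_t signature added by this patch.
 */
static enum iommu_page_response_code
my_iopf_handler(struct iommu_fault *fault, void *data)
{
	struct my_fault_ctx *ctx = data;

	/* Resolve the faulting address in the consumer's own mappings. */
	if (my_map_missing_page(ctx, fault->prm.addr, fault->prm.perm))
		return IOMMU_PAGE_RESP_INVALID;

	return IOMMU_PAGE_RESP_SUCCESS;
}

/* At domain setup, before enabling I/O page faults on the device: */
domain->fault_handler = my_iopf_handler;
domain->fault_data = ctx;

The io-pgfault path then looks up the domain with
iommu_get_domain_for_dev_pasid() and invokes this handler, as shown in
the iopf_handle_group() hunk below.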
include/linux/iommu.h | 4 ++
drivers/iommu/iommu-sva-lib.h | 1 -
drivers/iommu/io-pgfault.c | 70 +++++++---------------------------
drivers/iommu/iommu-sva-lib.c | 71 +++++++++++++++++++++++++++--------
4 files changed, 72 insertions(+), 74 deletions(-)
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 7e30b88d7bef..729d602fcba5 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -51,6 +51,8 @@ struct iommu_sva_cookie;
typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
struct device *, unsigned long, int, void *);
typedef int (*iommu_dev_fault_handler_t)(struct iommu_fault *, void *);
+typedef enum iommu_page_response_code (*iommu_domain_iopf_handler_t)
+ (struct iommu_fault *, void *);
struct iommu_domain_geometry {
dma_addr_t aperture_start; /* First address that can be mapped */
@@ -102,6 +104,8 @@ struct iommu_domain {
struct iommu_domain_geometry geometry;
struct iommu_dma_cookie *iova_cookie;
struct iommu_sva_cookie *sva_cookie;
+ iommu_domain_iopf_handler_t fault_handler;
+ void *fault_data;
};
static inline bool iommu_is_dma_domain(struct iommu_domain *domain)
diff --git a/drivers/iommu/iommu-sva-lib.h b/drivers/iommu/iommu-sva-lib.h
index 95d51f748dff..abef83ac1b2d 100644
--- a/drivers/iommu/iommu-sva-lib.h
+++ b/drivers/iommu/iommu-sva-lib.h
@@ -8,7 +8,6 @@
#include <linux/ioasid.h>
#include <linux/mm_types.h>
-struct mm_struct *iommu_sva_find(ioasid_t pasid);
struct mm_struct *iommu_sva_domain_mm(struct iommu_domain *domain);
/* I/O Page fault */
diff --git a/drivers/iommu/io-pgfault.c b/drivers/iommu/io-pgfault.c
index 1df8c1dcae77..159e4f107fe3 100644
--- a/drivers/iommu/io-pgfault.c
+++ b/drivers/iommu/io-pgfault.c
@@ -69,62 +69,6 @@ static int iopf_complete_group(struct device *dev, struct iopf_fault *iopf,
return iommu_page_response(dev, &resp);
}
-static enum iommu_page_response_code
-iopf_handle_single(struct iopf_fault *iopf)
-{
- vm_fault_t ret;
- struct mm_struct *mm;
- struct vm_area_struct *vma;
- unsigned int access_flags = 0;
- unsigned int fault_flags = FAULT_FLAG_REMOTE;
- struct iommu_fault_page_request *prm = &iopf->fault.prm;
- enum iommu_page_response_code status = IOMMU_PAGE_RESP_INVALID;
-
- if (!(prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID))
- return status;
-
- mm = iommu_sva_find(prm->pasid);
- if (IS_ERR_OR_NULL(mm))
- return status;
-
- mmap_read_lock(mm);
-
- vma = find_extend_vma(mm, prm->addr);
- if (!vma)
- /* Unmapped area */
- goto out_put_mm;
-
- if (prm->perm & IOMMU_FAULT_PERM_READ)
- access_flags |= VM_READ;
-
- if (prm->perm & IOMMU_FAULT_PERM_WRITE) {
- access_flags |= VM_WRITE;
- fault_flags |= FAULT_FLAG_WRITE;
- }
-
- if (prm->perm & IOMMU_FAULT_PERM_EXEC) {
- access_flags |= VM_EXEC;
- fault_flags |= FAULT_FLAG_INSTRUCTION;
- }
-
- if (!(prm->perm & IOMMU_FAULT_PERM_PRIV))
- fault_flags |= FAULT_FLAG_USER;
-
- if (access_flags & ~vma->vm_flags)
- /* Access fault */
- goto out_put_mm;
-
- ret = handle_mm_fault(vma, prm->addr, fault_flags, NULL);
- status = ret & VM_FAULT_ERROR ? IOMMU_PAGE_RESP_INVALID :
- IOMMU_PAGE_RESP_SUCCESS;
-
-out_put_mm:
- mmap_read_unlock(mm);
- mmput(mm);
-
- return status;
-}
-
static void iopf_handle_group(struct work_struct *work)
{
struct iopf_group *group;
@@ -134,12 +78,24 @@ static void iopf_handle_group(struct work_struct *work)
group = container_of(work, struct iopf_group, work);
list_for_each_entry_safe(iopf, next, &group->faults, list) {
+ struct iommu_domain *domain;
+
+ domain = iommu_get_domain_for_dev_pasid(group->dev,
+ iopf->fault.prm.pasid);
+
+ if (!domain || !domain->fault_handler)
+ status = IOMMU_PAGE_RESP_INVALID;
+
/*
* For the moment, errors are sticky: don't handle subsequent
* faults in the group if there is an error.
*/
if (status == IOMMU_PAGE_RESP_SUCCESS)
- status = iopf_handle_single(iopf);
+ status = domain->fault_handler(&iopf->fault,
+ domain->fault_data);
+
+ if (domain)
+ iommu_put_domain_for_dev_pasid(domain);
if (!(iopf->fault.prm.flags &
IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE))
diff --git a/drivers/iommu/iommu-sva-lib.c b/drivers/iommu/iommu-sva-lib.c
index 96e967e58aa9..a53574f9559a 100644
--- a/drivers/iommu/iommu-sva-lib.c
+++ b/drivers/iommu/iommu-sva-lib.c
@@ -60,25 +60,62 @@ static int iommu_sva_alloc_pasid(struct mm_struct *mm,
return ret;
}
-/* ioasid_find getter() requires a void * argument */
-static bool __mmget_not_zero(void *mm)
+static enum iommu_page_response_code
+iommu_sva_handle_iopf(struct iommu_fault *fault, void *data)
{
- return mmget_not_zero(mm);
-}
+ vm_fault_t ret;
+ struct mm_struct *mm;
+ struct vm_area_struct *vma;
+ unsigned int access_flags = 0;
+ struct iommu_domain *domain = data;
+ unsigned int fault_flags = FAULT_FLAG_REMOTE;
+ struct iommu_fault_page_request *prm = &fault->prm;
+ enum iommu_page_response_code status = IOMMU_PAGE_RESP_INVALID;
-/**
- * iommu_sva_find() - Find mm associated to the given PASID
- * @pasid: Process Address Space ID assigned to the mm
- *
- * On success a reference to the mm is taken, and must be released with mmput().
- *
- * Returns the mm corresponding to this PASID, or an error if not found.
- */
-struct mm_struct *iommu_sva_find(ioasid_t pasid)
-{
- return ioasid_find(&iommu_sva_pasid, pasid, __mmget_not_zero);
+ if (!(prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID))
+ return status;
+
+ mm = iommu_sva_domain_mm(domain);
+ if (IS_ERR_OR_NULL(mm) || !mmget_not_zero(mm))
+ return status;
+
+ mmap_read_lock(mm);
+
+ vma = find_extend_vma(mm, prm->addr);
+ if (!vma)
+ /* Unmapped area */
+ goto out_put_mm;
+
+ if (prm->perm & IOMMU_FAULT_PERM_READ)
+ access_flags |= VM_READ;
+
+ if (prm->perm & IOMMU_FAULT_PERM_WRITE) {
+ access_flags |= VM_WRITE;
+ fault_flags |= FAULT_FLAG_WRITE;
+ }
+
+ if (prm->perm & IOMMU_FAULT_PERM_EXEC) {
+ access_flags |= VM_EXEC;
+ fault_flags |= FAULT_FLAG_INSTRUCTION;
+ }
+
+ if (!(prm->perm & IOMMU_FAULT_PERM_PRIV))
+ fault_flags |= FAULT_FLAG_USER;
+
+ if (access_flags & ~vma->vm_flags)
+ /* Access fault */
+ goto out_put_mm;
+
+ ret = handle_mm_fault(vma, prm->addr, fault_flags, NULL);
+ status = ret & VM_FAULT_ERROR ? IOMMU_PAGE_RESP_INVALID :
+ IOMMU_PAGE_RESP_SUCCESS;
+
+out_put_mm:
+ mmap_read_unlock(mm);
+ mmput(mm);
+
+ return status;
}
-EXPORT_SYMBOL_GPL(iommu_sva_find);
static struct iommu_domain *
iommu_sva_alloc_domain(struct device *dev, struct mm_struct *mm)
@@ -105,6 +142,8 @@ iommu_sva_alloc_domain(struct device *dev, struct mm_struct *mm)
INIT_LIST_HEAD(&cookie->bonds);
domain->type = IOMMU_DOMAIN_SVA;
domain->sva_cookie = cookie;
+ domain->fault_handler = iommu_sva_handle_iopf;
+ domain->fault_data = domain;
curr = xa_store(&sva_domain_array, mm->pasid, domain, GFP_KERNEL);
if (xa_err(curr))
goto err_xa_store;
--
2.25.1