Message-Id: <20210121014505.1659166-3-baolu.lu@linux.intel.com>
Date: Thu, 21 Jan 2021 09:45:04 +0800
From: Lu Baolu <baolu.lu@...ux.intel.com>
To: Joerg Roedel <joro@...tes.org>, Will Deacon <will@...nel.org>
Cc: Ashok Raj <ashok.raj@...el.com>,
Jacob Pan <jacob.jun.pan@...ux.intel.com>,
Kevin Tian <kevin.tian@...el.com>,
Liu Yi L <yi.l.liu@...el.com>,
iommu@...ts.linux-foundation.org, linux-kernel@...r.kernel.org,
Lu Baolu <baolu.lu@...ux.intel.com>
Subject: [PATCH 2/3] iommu/vt-d: Allow devices to have more than 32 outstanding PRQ
The minimum per-IOMMU PRQ queue size is one 4K page, which holds more
entries than the hardcoded limit of 32 in the current VT-d code. Some
devices can support up to 512 outstanding PRQs but are underutilized by
this limit of 32. While 32 gives some rough fairness when multiple devices
share the same IOMMU PRQ queue, it is far from optimal for customized use
cases.
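To spell out the arithmetic behind the new macros (a stand-alone sketch for
illustration only, not kernel code; the defines are copied from the
include/linux/intel-svm.h hunk below, and the 32-byte descriptor size is the
one implied by the ring mask):

#include <stdio.h>

/* Copied from the include/linux/intel-svm.h hunk for illustration. */
#define PRQ_ORDER	0
#define PRQ_RING_MASK	((0x1000 << PRQ_ORDER) - 0x20)
#define PRQ_DEPTH	((0x1000 << PRQ_ORDER) >> 5)

int main(void)
{
	/* One 4K page at PRQ_ORDER 0; each page request descriptor
	 * occupies 32 bytes, so the minimum queue already holds 128
	 * entries, four times the old hardcoded limit of 32. */
	printf("queue size : %d bytes\n", 0x1000 << PRQ_ORDER);	/* 4096  */
	printf("ring mask  : 0x%x\n", PRQ_RING_MASK);		/* 0xfe0 */
	printf("queue depth: %d entries\n", PRQ_DEPTH);		/* 128   */
	printf("old limit  : %d entries\n", 32);
	return 0;
}

With this change a device that can support up to 512 outstanding PRQs is
granted the full 128 entries of a 4K queue instead of only 32.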
Signed-off-by: Jacob Pan <jacob.jun.pan@...ux.intel.com>
Signed-off-by: Lu Baolu <baolu.lu@...ux.intel.com>
---
 drivers/iommu/intel/iommu.c | 3 ++-
 drivers/iommu/intel/svm.c   | 4 ----
 include/linux/intel-svm.h   | 5 +++++
 3 files changed, 7 insertions(+), 5 deletions(-)
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index be85af612bc1..7dee1cb9d6c1 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -33,6 +33,7 @@
 #include <linux/iommu.h>
 #include <linux/dma-iommu.h>
 #include <linux/intel-iommu.h>
+#include <linux/intel-svm.h>
 #include <linux/syscore_ops.h>
 #include <linux/tboot.h>
 #include <linux/dmi.h>
@@ -1527,7 +1528,7 @@ static void iommu_enable_dev_iotlb(struct device_domain_info *info)
 
 	if (info->pri_supported &&
 	    (info->pasid_enabled ? pci_prg_resp_pasid_required(pdev) : 1) &&
-	    !pci_reset_pri(pdev) && !pci_enable_pri(pdev, 32))
+	    !pci_reset_pri(pdev) && !pci_enable_pri(pdev, PRQ_DEPTH))
 		info->pri_enabled = 1;
 #endif
 	if (info->ats_supported && pci_ats_page_aligned(pdev) &&
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index f49fe715477b..77509a0a863e 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -26,8 +26,6 @@
 static irqreturn_t prq_event_thread(int irq, void *d);
 static void intel_svm_drain_prq(struct device *dev, u32 pasid);
 
-#define PRQ_ORDER 0
-
 int intel_svm_enable_prq(struct intel_iommu *iommu)
 {
 	struct page *pages;
@@ -722,8 +720,6 @@ struct page_req_dsc {
 	u64 priv_data[2];
 };
 
-#define PRQ_RING_MASK ((0x1000 << PRQ_ORDER) - 0x20)
-
 static bool access_error(struct vm_area_struct *vma, struct page_req_dsc *req)
 {
 	unsigned long requested = 0;
diff --git a/include/linux/intel-svm.h b/include/linux/intel-svm.h
index 39d368a810b8..5f7b5abefdec 100644
--- a/include/linux/intel-svm.h
+++ b/include/linux/intel-svm.h
@@ -21,6 +21,11 @@ struct svm_dev_ops {
 #define SVM_REQ_EXEC (1<<1)
 #define SVM_REQ_PRIV (1<<0)
 
+/* Page Request Queue depth */
+#define PRQ_ORDER 0
+#define PRQ_RING_MASK ((0x1000 << PRQ_ORDER) - 0x20)
+#define PRQ_DEPTH ((0x1000 << PRQ_ORDER) >> 5)
+
 /*
  * The SVM_FLAG_PRIVATE_PASID flag requests a PASID which is *not* the "main"
  * PASID for the current process. Even if a PASID already exists, a new one
--
2.25.1