Message-Id: <1632256181-36071-6-git-send-email-jacob.jun.pan@linux.intel.com>
Date: Tue, 21 Sep 2021 13:29:39 -0700
From: Jacob Pan <jacob.jun.pan@...ux.intel.com>
To: iommu@...ts.linux-foundation.org,
LKML <linux-kernel@...r.kernel.org>,
Joerg Roedel <joro@...tes.org>,
Jason Gunthorpe <jgg@...dia.com>,
"Christoph Hellwig" <hch@...radead.org>
Cc: "Lu Baolu" <baolu.lu@...ux.intel.com>,
Raj Ashok <ashok.raj@...el.com>,
"Kumar, Sanjay K" <sanjay.k.kumar@...el.com>,
Dave Jiang <dave.jiang@...el.com>,
Tony Luck <tony.luck@...el.com>, mike.campin@...el.com,
Yi Liu <yi.l.liu@...el.com>,
"Tian, Kevin" <kevin.tian@...el.com>
Subject: [RFC 5/7] iommu/vt-d: Add support for KVA PASID mode
To support KVA fast mode, the VT-d driver must support allocation of
domains of the IOMMU_DOMAIN_KVA type. Since all devices in fast KVA mode
share the same kernel mapping, a single KVA domain is sufficient. This
global KVA domain contains the kernel mapping, i.e. init_mm.pgd.
The programming of the KVA domain follows the existing flow of auxiliary
domain attachment.
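As a rough illustration (not part of this patch), a driver could exercise
KVA fast mode through the existing aux domain interfaces along the lines
of the sketch below. The example_alloc_kva_domain() helper is hypothetical
and only stands in for however a driver would obtain the global KVA
domain; the rest uses the aux domain API as it exists today.

#include <linux/device.h>
#include <linux/iommu.h>

static int example_enable_kva_dma(struct device *dev)
{
	struct iommu_domain *dom;
	int ret;

	/* Device must support PASID/aux domains for this mode */
	ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_AUX);
	if (ret)
		return ret;

	/* Hypothetical helper returning the global KVA domain */
	dom = example_alloc_kva_domain(dev);
	if (!dom)
		return -ENODEV;

	/*
	 * Aux attach programs a first-level PASID entry that points at
	 * init_mm.pgd, so the device can DMA with kernel virtual
	 * addresses under the domain's default PASID.
	 */
	ret = iommu_aux_attach_device(dom, dev);
	if (ret)
		return ret;

	/* The PASID to program into the device's DMA descriptors */
	return iommu_aux_get_pasid(dom, dev);
}

Error unwinding is elided; the point is only that attach goes through the
aux domain path, which is what the hunks below hook into.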
Signed-off-by: Jacob Pan <jacob.jun.pan@...ux.intel.com>
---
drivers/iommu/intel/iommu.c | 59 ++++++++++++++++++++++++++++++++++---
1 file changed, 55 insertions(+), 4 deletions(-)
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index cbcfd178c16f..0dabd5f75acf 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -293,6 +293,9 @@ static inline void context_clear_entry(struct context_entry *context)
* 3. Each iommu maps to this domain if successful.
*/
static struct dmar_domain *si_domain;
+
+/* This domain is used for shared virtual addressing with CPU kernel mapping */
+static struct dmar_domain *kva_domain;
static int hw_pass_through = 1;
#define for_each_domain_iommu(idx, domain) \
@@ -1989,6 +1992,10 @@ static void domain_exit(struct dmar_domain *domain)
/* Remove associated devices and clear attached or cached domains */
domain_remove_dev_info(domain);
+ /* There is no IOMMU page table for KVA */
+ if (domain->pgd == (struct dma_pte *)init_mm.pgd)
+ return;
+
/* destroy iovas */
if (domain->domain.type == IOMMU_DOMAIN_DMA)
iommu_put_dma_cookie(&domain->domain);
@@ -2533,6 +2540,10 @@ static int domain_setup_first_level(struct intel_iommu *iommu,
int agaw, level;
int flags = 0;
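+ /*
+  * The KVA domain shares the CPU kernel page table (init_mm.pgd), so
+  * the AGAW adjustment below does not apply; program a privileged
+  * (supervisor) first-level PASID entry instead.
+  */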
+ if (domain->domain.type == IOMMU_DOMAIN_KVA) {
+ flags |= PASID_FLAG_SUPERVISOR_MODE;
+ goto do_setup;
+ }
/*
* Skip top levels of page tables for iommu which has
* less agaw than default. Unnecessary for PT mode.
@@ -2554,7 +2565,7 @@ static int domain_setup_first_level(struct intel_iommu *iommu,
if (domain->domain.type == IOMMU_DOMAIN_UNMANAGED)
flags |= PASID_FLAG_PAGE_SNOOP;
-
+do_setup:
return intel_pasid_setup_first_level(iommu, dev, (pgd_t *)pgd, pasid,
domain->iommu_did[iommu->seq_id],
flags);
@@ -2713,7 +2724,28 @@ static int iommu_domain_identity_map(struct dmar_domain *domain,
}
static int md_domain_init(struct dmar_domain *domain, int guest_width);
+#ifdef CONFIG_INTEL_IOMMU_SVM
+static int __init kva_domain_init(void)
+{
+ struct iommu_domain *domain;
+ kva_domain = alloc_domain(0);
+ if (!kva_domain) {
+ pr_err("Can't allocate KVA domain\n");
+ return -ENOMEM;
+ }
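+ /* Share the CPU kernel page table directly; no IOMMU page table is allocated */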
+ kva_domain->pgd = (struct dma_pte *)init_mm.pgd;
+ domain = &kva_domain->domain;
+ domain->type = IOMMU_DOMAIN_KVA;
+ /* REVISIT: may not need this other than as a sanity check */
+ domain->geometry.aperture_start = 0;
+ domain->geometry.aperture_end = __DOMAIN_MAX_ADDR(kva_domain->gaw);
+ domain->geometry.force_aperture = true;
+ return 0;
+}
+#endif
static int __init si_domain_init(int hw)
{
struct dmar_rmrr_unit *rmrr;
@@ -3363,6 +3395,11 @@ static int __init init_dmars(void)
down_write(&dmar_global_lock);
if (ret)
goto free_iommu;
+ /* Set up the global KVA domain for in-kernel DMA with PASID */
+ ret = kva_domain_init();
+ if (ret)
+ goto free_iommu;
+
}
#endif
ret = dmar_set_interrupt(iommu);
@@ -4558,6 +4595,9 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
domain->geometry.force_aperture = true;
return domain;
+ case IOMMU_DOMAIN_KVA:
+ /* Use a global domain for shared KVA mapping */
+ return &kva_domain->domain;
case IOMMU_DOMAIN_IDENTITY:
return &si_domain->domain;
default:
@@ -4583,7 +4623,8 @@ is_aux_domain(struct device *dev, struct iommu_domain *domain)
struct device_domain_info *info = get_domain_info(dev);
return info && info->auxd_enabled &&
- domain->type == IOMMU_DOMAIN_UNMANAGED;
+ (domain->type == IOMMU_DOMAIN_UNMANAGED ||
+ domain->type == IOMMU_DOMAIN_KVA);
}
static inline struct subdev_domain_info *
@@ -4693,8 +4734,8 @@ static int aux_domain_add_dev(struct dmar_domain *domain,
if (ret)
goto attach_failed;
- /* Setup the PASID entry for mediated devices: */
- if (domain_use_first_level(domain))
+ /* Set up the PASID entry for devices doing DMA with the default PASID */
+ if (domain_use_first_level(domain) || domain->domain.type == IOMMU_DOMAIN_KVA)
ret = domain_setup_first_level(iommu, domain, dev,
domain->default_pasid);
else
@@ -4761,6 +4802,10 @@ static int prepare_domain_attach_device(struct iommu_domain *domain,
if (!iommu)
return -ENODEV;
+ if (domain->type == IOMMU_DOMAIN_KVA) {
+ pr_info("TODO: KVA domain: check if the device can do full 64-bit DMA\n");
+ return 0;
+ }
/* check if this iommu agaw is sufficient for max mapped address */
addr_width = agaw_to_width(iommu->agaw);
if (addr_width > cap_mgaw(iommu->cap))
@@ -5588,6 +5633,12 @@ static int intel_enable_pasid_dma(struct device *dev, u32 pasid, int mode)
ret = domain_setup_first_level(info->iommu, info->domain, dev,
pasid);
break;
+ case IOMMU_DMA_PASID_KVA:
+ /*
+  * KVA mode should be handled in the aux domain attach path, where the
+  * default PASID of the aux domain is used to set up the first-level
+  * PASID entry.
+  */
+ fallthrough;
default:
dev_err(dev, "Invalid PASID DMA mode %d", mode);
ret = -EINVAL;
--
2.25.1