Message-Id: <20231106071226.9656-5-tina.zhang@intel.com>
Date: Mon, 6 Nov 2023 02:12:25 -0500
From: Tina Zhang <tina.zhang@...el.com>
To: Jean-Philippe Brucker <jean-philippe@...aro.org>,
Kevin Tian <kevin.tian@...el.com>,
Lu Baolu <baolu.lu@...ux.intel.com>, joro@...tes.org,
will@...nel.org, Yi Liu <yi.l.liu@...el.com>
Cc: virtualization@...ts.linux-foundation.org, iommu@...ts.linux.dev,
linux-kernel@...r.kernel.org, Tina Zhang <tina.zhang@...el.com>
Subject: [RFC PATCH 4/5] iommu/vt-d: Adapt alloc_pgtable interface to be used by others

The generic IO page table framework provides a set of interfaces for
invoking IO page table operations. Other entities (e.g., the
virtio-iommu driver) can use these interfaces to ask the VT-d driver to
generate a VT-d format IO page table, as the usage sketch below
illustrates. This patch adds that support.
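
A rough usage sketch (not part of this patch): a caller such as the
virtio-iommu driver could request a VT-d format page table through the
generic framework as below. The vtd_cfg fields follow this series; the
INTEL_IOMMU format enum name is assumed from the earlier patches, and
the example_attach() helper and capability values are illustrative:

	#include <linux/io-pgtable.h>
	#include <linux/sizes.h>

	static int example_attach(u64 cap_reg, u64 ecap_reg, void *cookie)
	{
		struct io_pgtable_ops *ops;
		struct io_pgtable_cfg cfg = {
			.pgsize_bitmap	= SZ_4K,
			.ias		= 48,
			.oas		= 48,
		};

		/* Host-reported VT-d capability registers. */
		cfg.vtd_cfg.cap_reg  = cap_reg;
		cfg.vtd_cfg.ecap_reg = ecap_reg;

		/* Ends up in alloc_pgtable() below via the init_fns. */
		ops = alloc_io_pgtable_ops(INTEL_IOMMU, &cfg, cookie);
		if (!ops)
			return -ENOMEM;

		/* cfg.virt.pgd now holds the physical address of the pgd;
		 * ops->map_pages()/unmap_pages() are usable. */
		return 0;
	}
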
Signed-off-by: Tina Zhang <tina.zhang@...el.com>
---
 drivers/iommu/intel/iommu.c | 69 +++++++++++++++++++++++++++++++++++--
 1 file changed, 66 insertions(+), 3 deletions(-)

diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 80bd1993861c..d714e780a031 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -5248,17 +5248,80 @@ static phys_addr_t pgtable_iova_to_phys(struct io_pgtable_ops *ops,
 	return intel_iommu_iova_to_phys(&dmar_domain->domain, iova);
 }
 
+static void __iommu_calculate_cfg(struct io_pgtable_cfg *cfg)
+{
+	unsigned long fl_sagaw, sl_sagaw, sagaw;
+	int agaw, addr_width;
+
+	fl_sagaw = BIT(2) | (cap_fl5lp_support(cfg->vtd_cfg.cap_reg) ? BIT(3) : 0);
+	sl_sagaw = cap_sagaw(cfg->vtd_cfg.cap_reg);
+	sagaw = fl_sagaw & sl_sagaw;
+
+	for (agaw = width_to_agaw(DEFAULT_DOMAIN_ADDRESS_WIDTH); agaw >= 0; agaw--) {
+		if (test_bit(agaw, &sagaw))
+			break;
+	}
+
+	addr_width = agaw_to_width(agaw);
+	if (cfg->ias > addr_width)
+		cfg->ias = addr_width;
+	if (cfg->oas != addr_width)
+		cfg->oas = addr_width;
+}
+
 static struct io_pgtable *alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
 {
-	struct dmar_io_pgtable *pgtable = io_pgtable_cfg_to_dmar_pgtable(cfg);
+	struct dmar_io_pgtable *pgtable;
+	struct dmar_domain *domain;
+	int adjust_width;
+
+	/* Platform must have nested translation support */
+	if (!ecap_nest(cfg->vtd_cfg.ecap_reg))
+		return NULL;
+
+	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+	if (!domain)
+		return NULL;
+
+	domain->nid = NUMA_NO_NODE;
+	domain->use_first_level = true;
+	domain->has_iotlb_device = false;
+	INIT_LIST_HEAD(&domain->devices);
+	spin_lock_init(&domain->lock);
+	xa_init(&domain->iommu_array);
+
+	/* calculate AGAW */
+	__iommu_calculate_cfg(cfg);
+	domain->gaw = cfg->ias;
+	adjust_width = guestwidth_to_adjustwidth(domain->gaw);
+	domain->agaw = width_to_agaw(adjust_width);
+
+	domain->iommu_coherency = ecap_smpwc(cfg->vtd_cfg.ecap_reg);
+	domain->force_snooping = true;
+	domain->iommu_superpage = cap_fl1gp_support(cfg->vtd_cfg.cap_reg) ? 2 : 1;
+	domain->max_addr = 0;
+
+	cfg->coherent_walk = domain->iommu_coherency;
+
+	pgtable = &domain->dmar_iop;
+	/* always allocate the top pgd */
+	domain->pgd = alloc_pgtable_page(domain->nid, GFP_KERNEL);
+	if (!domain->pgd)
+		goto out_free_domain;
+	domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
+
+	cfg->virt.pgd = virt_to_phys(domain->pgd);
+	cfg->tlb = &flush_ops;
 	pgtable->iop.ops.map_pages = pgtable_map_pages;
 	pgtable->iop.ops.unmap_pages = pgtable_unmap_pages;
 	pgtable->iop.ops.iova_to_phys = pgtable_iova_to_phys;
-	cfg->tlb = &flush_ops;
-
 	return &pgtable->iop;
+
+out_free_domain:
+	kfree(domain);
+	return NULL;
 }
 
 struct io_pgtable_init_fns io_pgtable_intel_iommu_init_fns = {
--
2.39.3
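
For reference, a worked trace of the width clamping done by
__iommu_calculate_cfg(), assuming the host advertises 4-level tables
only (SAGAW bit 2 set, no 5-level first-stage support) and the caller
asked for 57-bit input addresses; the register values are illustrative:

	fl_sagaw   = BIT(2);		/* no cap_fl5lp_support()	*/
	sl_sagaw   = BIT(2);		/* from cap_sagaw()		*/
	sagaw      = BIT(2);
	/* loop starts at width_to_agaw(57) == 3, stops at agaw == 2 */
	addr_width = agaw_to_width(2);	/* == 48			*/

so cfg->ias is clamped from 57 to 48 and cfg->oas is forced to 48. With
cap_fl5lp_support() and SAGAW bit 3 also set, agaw would stay at 3 and
the 57-bit request would be honored.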