Message-ID: <20250714045028.958850-8-baolu.lu@linux.intel.com>
Date: Mon, 14 Jul 2025 12:50:24 +0800
From: Lu Baolu <baolu.lu@...ux.intel.com>
To: Joerg Roedel <joro@...tes.org>
Cc: iommu@...ts.linux.dev,
linux-kernel@...r.kernel.org
Subject: [PATCH 07/11] iommu/vt-d: Create unique domain ops for each stage
From: Jason Gunthorpe <jgg@...dia.com>

Use the domain ops pointer to tell what kind of domain it is instead of
the internal use_first_level indication. This also protects against
wrongly using an SVA/nested/IDENTITY/BLOCKED domain type in places where
it should not be used.
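
For illustration only (not part of the patch): the check is plain
pointer identity on the per-stage ops table, as in this minimal
standalone C sketch with made-up names; the real helpers are added to
iommu.h in the diff below.

  #include <stdbool.h>
  #include <stdio.h>

  struct domain_ops { const char *name; };

  static const struct domain_ops fs_ops = { .name = "first-stage"  };
  static const struct domain_ops ss_ops = { .name = "second-stage" };

  /* The domain's kind is encoded by which ops table it points at,
   * so no separate use_first_level-style flag is needed. */
  struct domain { const struct domain_ops *ops; };

  static bool domain_is_fs(const struct domain *d)
  {
          return d->ops == &fs_ops;
  }

  int main(void)
  {
          struct domain a = { .ops = &fs_ops };
          struct domain b = { .ops = &ss_ops };

          printf("a first-stage? %d, b first-stage? %d\n",
                 domain_is_fs(&a), domain_is_fs(&b));
          return 0;
  }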

The only remaining uses of use_first_level outside the paging domain are
in paging_domain_compatible() and intel_iommu_enforce_cache_coherency().
Thus, remove the now-useless assignments to use_first_level in
intel_svm_domain_alloc() and intel_iommu_domain_alloc_nested(). None of
the unique ops for these domain types ever references it anywhere in its
call chain.

Add a WARN_ON() check in domain_context_mapping_one(), as it only works
with the second stage.
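
Concretely, the added guard (quoted from the
domain_context_mapping_one() hunk below) is:

  if (WARN_ON(!intel_domain_is_ss_paging(domain)))
          return -EINVAL;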

This is preparation for iommupt, which will have different ops for each
of the stages.
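
For reference, the attach path now dispatches on the ops-derived domain
kind and fails loudly for anything unexpected (quoted from the
dmar_domain_attach_device() hunk below):

  if (!sm_supported(iommu))
          ret = domain_context_mapping(domain, dev);
  else if (intel_domain_is_fs_paging(domain))
          ret = domain_setup_first_level(iommu, domain, dev,
                                         IOMMU_NO_PASID, NULL);
  else if (intel_domain_is_ss_paging(domain))
          ret = domain_setup_second_level(iommu, domain, dev,
                                          IOMMU_NO_PASID, NULL);
  else if (WARN_ON(true))
          ret = -EINVAL;
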
Reviewed-by: Kevin Tian <kevin.tian@...el.com>
Signed-off-by: Jason Gunthorpe <jgg@...dia.com>
Link: https://lore.kernel.org/r/5-v3-dbbe6f7e7ae3+124ffe-vtd_prep_jgg@nvidia.com
Signed-off-by: Lu Baolu <baolu.lu@...ux.intel.com>
---
drivers/iommu/intel/cache.c | 5 +--
drivers/iommu/intel/iommu.c | 60 +++++++++++++++++++++++++-----------
drivers/iommu/intel/iommu.h | 12 ++++++++
drivers/iommu/intel/nested.c | 4 +--
drivers/iommu/intel/svm.c | 1 -
5 files changed, 58 insertions(+), 24 deletions(-)
diff --git a/drivers/iommu/intel/cache.c b/drivers/iommu/intel/cache.c
index 47692cbfaabd..876630e10849 100644
--- a/drivers/iommu/intel/cache.c
+++ b/drivers/iommu/intel/cache.c
@@ -370,7 +370,7 @@ static void cache_tag_flush_iotlb(struct dmar_domain *domain, struct cache_tag *
struct intel_iommu *iommu = tag->iommu;
u64 type = DMA_TLB_PSI_FLUSH;
- if (domain->use_first_level) {
+ if (intel_domain_is_fs_paging(domain)) {
qi_batch_add_piotlb(iommu, tag->domain_id, tag->pasid, addr,
pages, ih, domain->qi_batch);
return;
@@ -545,7 +545,8 @@ void cache_tag_flush_range_np(struct dmar_domain *domain, unsigned long start,
qi_batch_flush_descs(iommu, domain->qi_batch);
iommu = tag->iommu;
- if (!cap_caching_mode(iommu->cap) || domain->use_first_level) {
+ if (!cap_caching_mode(iommu->cap) ||
+ intel_domain_is_fs_paging(domain)) {
iommu_flush_write_buffer(iommu);
continue;
}
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 0ac3c3a6d9e7..b7b1a3d2cbfc 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -1462,6 +1462,9 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
struct context_entry *context;
int ret;
+ if (WARN_ON(!intel_domain_is_ss_paging(domain)))
+ return -EINVAL;
+
pr_debug("Set context mapping for %02x:%02x.%d\n",
bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
@@ -1780,7 +1783,7 @@ static int domain_setup_first_level(struct intel_iommu *iommu,
static bool domain_need_iotlb_sync_map(struct dmar_domain *domain,
struct intel_iommu *iommu)
{
- if (cap_caching_mode(iommu->cap) && !domain->use_first_level)
+ if (cap_caching_mode(iommu->cap) && intel_domain_is_ss_paging(domain))
return true;
if (rwbf_quirk || cap_rwbf(iommu->cap))
@@ -1812,12 +1815,14 @@ static int dmar_domain_attach_device(struct dmar_domain *domain,
if (!sm_supported(iommu))
ret = domain_context_mapping(domain, dev);
- else if (domain->use_first_level)
+ else if (intel_domain_is_fs_paging(domain))
ret = domain_setup_first_level(iommu, domain, dev,
IOMMU_NO_PASID, NULL);
- else
+ else if (intel_domain_is_ss_paging(domain))
ret = domain_setup_second_level(iommu, domain, dev,
IOMMU_NO_PASID, NULL);
+ else if (WARN_ON(true))
+ ret = -EINVAL;
if (ret)
goto out_block_translation;
@@ -3288,7 +3293,6 @@ static struct dmar_domain *paging_domain_alloc(struct device *dev, bool first_st
domain->use_first_level = first_stage;
domain->domain.type = IOMMU_DOMAIN_UNMANAGED;
- domain->domain.ops = intel_iommu_ops.default_domain_ops;
/* calculate the address width */
addr_width = agaw_to_width(iommu->agaw);
@@ -3346,6 +3350,8 @@ intel_iommu_domain_alloc_first_stage(struct device *dev,
dmar_domain = paging_domain_alloc(dev, true);
if (IS_ERR(dmar_domain))
return ERR_CAST(dmar_domain);
+
+ dmar_domain->domain.ops = &intel_fs_paging_domain_ops;
return &dmar_domain->domain;
}
@@ -3374,6 +3380,7 @@ intel_iommu_domain_alloc_second_stage(struct device *dev,
if (IS_ERR(dmar_domain))
return ERR_CAST(dmar_domain);
+ dmar_domain->domain.ops = &intel_ss_paging_domain_ops;
dmar_domain->nested_parent = flags & IOMMU_HWPT_ALLOC_NEST_PARENT;
if (flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING)
@@ -4107,12 +4114,15 @@ static int intel_iommu_set_dev_pasid(struct iommu_domain *domain,
if (ret)
goto out_remove_dev_pasid;
- if (dmar_domain->use_first_level)
+ if (intel_domain_is_fs_paging(dmar_domain))
ret = domain_setup_first_level(iommu, dmar_domain,
dev, pasid, old);
- else
+ else if (intel_domain_is_ss_paging(dmar_domain))
ret = domain_setup_second_level(iommu, dmar_domain,
dev, pasid, old);
+ else if (WARN_ON(true))
+ ret = -EINVAL;
+
if (ret)
goto out_unwind_iopf;
@@ -4387,6 +4397,32 @@ static struct iommu_domain identity_domain = {
},
};
+const struct iommu_domain_ops intel_fs_paging_domain_ops = {
+ .attach_dev = intel_iommu_attach_device,
+ .set_dev_pasid = intel_iommu_set_dev_pasid,
+ .map_pages = intel_iommu_map_pages,
+ .unmap_pages = intel_iommu_unmap_pages,
+ .iotlb_sync_map = intel_iommu_iotlb_sync_map,
+ .flush_iotlb_all = intel_flush_iotlb_all,
+ .iotlb_sync = intel_iommu_tlb_sync,
+ .iova_to_phys = intel_iommu_iova_to_phys,
+ .free = intel_iommu_domain_free,
+ .enforce_cache_coherency = intel_iommu_enforce_cache_coherency,
+};
+
+const struct iommu_domain_ops intel_ss_paging_domain_ops = {
+ .attach_dev = intel_iommu_attach_device,
+ .set_dev_pasid = intel_iommu_set_dev_pasid,
+ .map_pages = intel_iommu_map_pages,
+ .unmap_pages = intel_iommu_unmap_pages,
+ .iotlb_sync_map = intel_iommu_iotlb_sync_map,
+ .flush_iotlb_all = intel_flush_iotlb_all,
+ .iotlb_sync = intel_iommu_tlb_sync,
+ .iova_to_phys = intel_iommu_iova_to_phys,
+ .free = intel_iommu_domain_free,
+ .enforce_cache_coherency = intel_iommu_enforce_cache_coherency,
+};
+
const struct iommu_ops intel_iommu_ops = {
.blocked_domain = &blocking_domain,
.release_domain = &blocking_domain,
@@ -4405,18 +4441,6 @@ const struct iommu_ops intel_iommu_ops = {
.def_domain_type = device_def_domain_type,
.pgsize_bitmap = SZ_4K,
.page_response = intel_iommu_page_response,
- .default_domain_ops = &(const struct iommu_domain_ops) {
- .attach_dev = intel_iommu_attach_device,
- .set_dev_pasid = intel_iommu_set_dev_pasid,
- .map_pages = intel_iommu_map_pages,
- .unmap_pages = intel_iommu_unmap_pages,
- .iotlb_sync_map = intel_iommu_iotlb_sync_map,
- .flush_iotlb_all = intel_flush_iotlb_all,
- .iotlb_sync = intel_iommu_tlb_sync,
- .iova_to_phys = intel_iommu_iova_to_phys,
- .free = intel_iommu_domain_free,
- .enforce_cache_coherency = intel_iommu_enforce_cache_coherency,
- }
};
static void quirk_iommu_igfx(struct pci_dev *dev)
diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
index 50d69cc88a1f..d09b92871659 100644
--- a/drivers/iommu/intel/iommu.h
+++ b/drivers/iommu/intel/iommu.h
@@ -1380,6 +1380,18 @@ struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
u8 devfn, int alloc);
extern const struct iommu_ops intel_iommu_ops;
+extern const struct iommu_domain_ops intel_fs_paging_domain_ops;
+extern const struct iommu_domain_ops intel_ss_paging_domain_ops;
+
+static inline bool intel_domain_is_fs_paging(struct dmar_domain *domain)
+{
+ return domain->domain.ops == &intel_fs_paging_domain_ops;
+}
+
+static inline bool intel_domain_is_ss_paging(struct dmar_domain *domain)
+{
+ return domain->domain.ops == &intel_ss_paging_domain_ops;
+}
#ifdef CONFIG_INTEL_IOMMU
extern int intel_iommu_sm;
diff --git a/drivers/iommu/intel/nested.c b/drivers/iommu/intel/nested.c
index fc312f649f9e..1b6ad9c900a5 100644
--- a/drivers/iommu/intel/nested.c
+++ b/drivers/iommu/intel/nested.c
@@ -216,8 +216,7 @@ intel_iommu_domain_alloc_nested(struct device *dev, struct iommu_domain *parent,
/* Must be nested domain */
if (user_data->type != IOMMU_HWPT_DATA_VTD_S1)
return ERR_PTR(-EOPNOTSUPP);
- if (parent->ops != intel_iommu_ops.default_domain_ops ||
- !s2_domain->nested_parent)
+ if (!intel_domain_is_ss_paging(s2_domain) || !s2_domain->nested_parent)
return ERR_PTR(-EINVAL);
ret = iommu_copy_struct_from_user(&vtd, user_data,
@@ -229,7 +228,6 @@ intel_iommu_domain_alloc_nested(struct device *dev, struct iommu_domain *parent,
if (!domain)
return ERR_PTR(-ENOMEM);
- domain->use_first_level = true;
domain->s2_domain = s2_domain;
domain->s1_cfg = vtd;
domain->domain.ops = &intel_nested_domain_ops;
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index 8c0bed36c587..e147f71f91b7 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -214,7 +214,6 @@ struct iommu_domain *intel_svm_domain_alloc(struct device *dev,
return ERR_PTR(-ENOMEM);
domain->domain.ops = &intel_svm_domain_ops;
- domain->use_first_level = true;
INIT_LIST_HEAD(&domain->dev_pasids);
INIT_LIST_HEAD(&domain->cache_tags);
spin_lock_init(&domain->cache_lock);
--
2.43.0