Message-Id: <1595525140-23899-4-git-send-email-jacob.jun.pan@linux.intel.com>
Date: Thu, 23 Jul 2020 10:25:37 -0700
From: Jacob Pan <jacob.jun.pan@...ux.intel.com>
To: iommu@...ts.linux-foundation.org,
LKML <linux-kernel@...r.kernel.org>,
Joerg Roedel <joro@...tes.org>,
Alex Williamson <alex.williamson@...hat.com>
Cc: "Lu Baolu" <baolu.lu@...ux.intel.com>,
David Woodhouse <dwmw2@...radead.org>,
Yi Liu <yi.l.liu@...el.com>,
"Tian, Kevin" <kevin.tian@...el.com>,
Raj Ashok <ashok.raj@...el.com>,
"Christoph Hellwig" <hch@...radead.org>,
Jean-Philippe Brucker <jean-philippe@...aro.com>,
Eric Auger <eric.auger@...hat.com>,
Jonathan Corbet <corbet@....net>,
Jacob Pan <jacob.jun.pan@...ux.intel.com>
Subject: [PATCH v6 3/6] iommu/uapi: Use named union for user data
The IOMMU UAPI data size is filled in by user space and must be validated
by the kernel. To ensure backward compatibility, user data can only be
extended by either re-purposing padding bytes or extending the variable
sized union at the end. No size change is allowed before the union, so
the minimum valid size is the offset of the union.

To use offsetof() on the union, we must make it a named member.
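
For illustration only (not part of this patch), a minimal sketch of the
size check this enables; the helper name inv_info_size_ok() is
hypothetical, but struct iommu_cache_invalidate_info and the named
union member 'granu' are as defined in the patch below:

	#include <stddef.h>
	#include <linux/iommu.h>

	/* Hypothetical helper: accept a user-supplied struct size only if
	 * it covers everything up to the trailing variable-sized union.
	 * offsetof() requires a named member; an anonymous union has none.
	 */
	static inline int inv_info_size_ok(size_t user_size)
	{
		return user_size >=
			offsetof(struct iommu_cache_invalidate_info, granu);
	}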
Link: https://lore.kernel.org/linux-iommu/20200611145518.0c2817d6@x1.home/
Signed-off-by: Jacob Pan <jacob.jun.pan@...ux.intel.com>
Reviewed-by: Lu Baolu <baolu.lu@...ux.intel.com>
Reviewed-by: Eric Auger <eric.auger@...hat.com>
---
drivers/iommu/intel/iommu.c | 22 +++++++++++-----------
drivers/iommu/intel/svm.c | 2 +-
include/uapi/linux/iommu.h | 4 ++--
3 files changed, 14 insertions(+), 14 deletions(-)
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 92c7ad66e64c..021f62078f52 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -5417,8 +5417,8 @@ intel_iommu_sva_invalidate(struct iommu_domain *domain, struct device *dev,
/* Size is only valid in address selective invalidation */
if (inv_info->granularity == IOMMU_INV_GRANU_ADDR)
- size = to_vtd_size(inv_info->addr_info.granule_size,
- inv_info->addr_info.nb_granules);
+ size = to_vtd_size(inv_info->granu.addr_info.granule_size,
+ inv_info->granu.addr_info.nb_granules);
for_each_set_bit(cache_type,
(unsigned long *)&inv_info->cache,
@@ -5439,20 +5439,20 @@ intel_iommu_sva_invalidate(struct iommu_domain *domain, struct device *dev,
* granularity.
*/
if (inv_info->granularity == IOMMU_INV_GRANU_PASID &&
- (inv_info->pasid_info.flags & IOMMU_INV_PASID_FLAGS_PASID))
- pasid = inv_info->pasid_info.pasid;
+ (inv_info->granu.pasid_info.flags & IOMMU_INV_PASID_FLAGS_PASID))
+ pasid = inv_info->granu.pasid_info.pasid;
else if (inv_info->granularity == IOMMU_INV_GRANU_ADDR &&
- (inv_info->addr_info.flags & IOMMU_INV_ADDR_FLAGS_PASID))
- pasid = inv_info->addr_info.pasid;
+ (inv_info->granu.addr_info.flags & IOMMU_INV_ADDR_FLAGS_PASID))
+ pasid = inv_info->granu.addr_info.pasid;
switch (BIT(cache_type)) {
case IOMMU_CACHE_INV_TYPE_IOTLB:
/* HW will ignore LSB bits based on address mask */
if (inv_info->granularity == IOMMU_INV_GRANU_ADDR &&
size &&
- (inv_info->addr_info.addr & ((BIT(VTD_PAGE_SHIFT + size)) - 1))) {
+ (inv_info->granu.addr_info.addr & ((BIT(VTD_PAGE_SHIFT + size)) - 1))) {
pr_err_ratelimited("User address not aligned, 0x%llx, size order %llu\n",
- inv_info->addr_info.addr, size);
+ inv_info->granu.addr_info.addr, size);
}
/*
@@ -5460,9 +5460,9 @@ intel_iommu_sva_invalidate(struct iommu_domain *domain, struct device *dev,
* We use npages = -1 to indicate that.
*/
qi_flush_piotlb(iommu, did, pasid,
- mm_to_dma_pfn(inv_info->addr_info.addr),
+ mm_to_dma_pfn(inv_info->granu.addr_info.addr),
(granu == QI_GRAN_NONG_PASID) ? -1 : 1 << size,
- inv_info->addr_info.flags & IOMMU_INV_ADDR_FLAGS_LEAF);
+ inv_info->granu.addr_info.flags & IOMMU_INV_ADDR_FLAGS_LEAF);
if (!info->ats_enabled)
break;
@@ -5485,7 +5485,7 @@ intel_iommu_sva_invalidate(struct iommu_domain *domain, struct device *dev,
size = 64 - VTD_PAGE_SHIFT;
addr = 0;
} else if (inv_info->granularity == IOMMU_INV_GRANU_ADDR)
- addr = inv_info->addr_info.addr;
+ addr = inv_info->granu.addr_info.addr;
if (info->ats_enabled)
qi_flush_dev_iotlb_pasid(iommu, sid,
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index d386853121a2..713b3a218483 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -338,7 +338,7 @@ int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
spin_lock(&iommu->lock);
ret = intel_pasid_setup_nested(iommu, dev,
(pgd_t *)(uintptr_t)data->gpgd,
- data->hpasid, &data->vtd, dmar_domain,
+ data->hpasid, &data->vendor.vtd, dmar_domain,
data->addr_width);
spin_unlock(&iommu->lock);
if (ret) {
diff --git a/include/uapi/linux/iommu.h b/include/uapi/linux/iommu.h
index d5e9014f690e..7c8e075c2b29 100644
--- a/include/uapi/linux/iommu.h
+++ b/include/uapi/linux/iommu.h
@@ -263,7 +263,7 @@ struct iommu_cache_invalidate_info {
union {
struct iommu_inv_pasid_info pasid_info;
struct iommu_inv_addr_info addr_info;
- };
+ } granu;
};
/**
@@ -329,7 +329,7 @@ struct iommu_gpasid_bind_data {
/* Vendor specific data */
union {
struct iommu_gpasid_bind_data_vtd vtd;
- };
+ } vendor;
};
#endif /* _UAPI_IOMMU_H */
--
2.7.4