Message-Id: <20190516084720.10498-3-eric.auger@redhat.com>
Date: Thu, 16 May 2019 10:47:15 +0200
From: Eric Auger <eric.auger@...hat.com>
To: eric.auger.pro@...il.com, eric.auger@...hat.com, joro@...tes.org,
iommu@...ts.linux-foundation.org, linux-kernel@...r.kernel.org,
dwmw2@...radead.org, lorenzo.pieralisi@....com,
robin.murphy@....com, will.deacon@....com, hanjun.guo@...aro.org,
sudeep.holla@....com
Cc: alex.williamson@...hat.com, shameerali.kolothum.thodi@...wei.com
Subject: [PATCH v2 2/7] iommu/vt-d: Duplicate iommu_resv_region objects per device list

intel_iommu_get_resv_regions() aims to return the list of
reserved regions accessible by a given @device. However, several
devices can access the same reserved memory region, so when
building the list it is not safe to use a single iommu_resv_region
object, whose container is the RMRR. The iommu_resv_region must
be duplicated for each device's reserved region list.

Let's remove the struct iommu_resv_region from the RMRR unit
and allocate the iommu_resv_region directly in
intel_iommu_get_resv_regions().

Fixes: 0659b8dc45a6 ("iommu/vt-d: Implement reserved region get/put callbacks")
Signed-off-by: Eric Auger <eric.auger@...hat.com>
---
drivers/iommu/intel-iommu.c | 31 ++++++++++++++++---------------
1 file changed, 16 insertions(+), 15 deletions(-)
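
For illustration only (not part of the patch): a minimal, hypothetical
userspace sketch of why the single iommu_resv_region embedded in the RMRR
unit cannot sit on more than one per-device list. The helpers below merely
mimic the kernel's <linux/list.h>; the second list_add_tail() re-links the
embedded node and silently corrupts the first device's list, which is what
the per-device duplication in this patch avoids.

#include <stdio.h>

/* Minimal stand-ins for the kernel's <linux/list.h> helpers. */
struct list_head {
	struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *head)
{
	head->next = head;
	head->prev = head;
}

static void list_add_tail(struct list_head *entry, struct list_head *head)
{
	entry->prev = head->prev;
	entry->next = head;
	head->prev->next = entry;
	head->prev = entry;
}

/* Hypothetical stand-in for struct iommu_resv_region. */
struct resv_region {
	unsigned long start;
	struct list_head list;	/* embedded node: can be on ONE list only */
};

int main(void)
{
	struct list_head dev_a_regions, dev_b_regions;
	struct resv_region shared = { .start = 0x1000 };

	INIT_LIST_HEAD(&dev_a_regions);
	INIT_LIST_HEAD(&dev_b_regions);

	/* What sharing rmrr->resv between two devices amounted to: */
	list_add_tail(&shared.list, &dev_a_regions);
	list_add_tail(&shared.list, &dev_b_regions);	/* re-links the node */

	/* Walking device A's list no longer gets back to its head. */
	printf("dev_a list intact: %s\n",
	       dev_a_regions.next->next == &dev_a_regions ? "yes" : "no (corrupted)");
	return 0;
}
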
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 2be36dff189a..590a0e78d11d 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -322,7 +322,6 @@ struct dmar_rmrr_unit {
u64 end_address; /* reserved end address */
struct dmar_dev_scope *devices; /* target devices */
int devices_cnt; /* target device count */
- struct iommu_resv_region *resv; /* reserved region handle */
};
struct dmar_atsr_unit {
@@ -4205,7 +4204,6 @@ static inline void init_iommu_pm_ops(void) {}
int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
{
struct acpi_dmar_reserved_memory *rmrr;
- int prot = DMA_PTE_READ|DMA_PTE_WRITE;
struct dmar_rmrr_unit *rmrru;
size_t length;
@@ -4219,22 +4217,16 @@ int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
rmrru->end_address = rmrr->end_address;
length = rmrr->end_address - rmrr->base_address + 1;
- rmrru->resv = iommu_alloc_resv_region(rmrr->base_address, length, prot,
- IOMMU_RESV_DIRECT, GFP_KERNEL);
- if (!rmrru->resv)
- goto free_rmrru;
rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
((void *)rmrr) + rmrr->header.length,
&rmrru->devices_cnt);
if (rmrru->devices_cnt && rmrru->devices == NULL)
- goto free_all;
+ goto free_rmrru;
list_add(&rmrru->list, &dmar_rmrr_units);
return 0;
-free_all:
- kfree(rmrru->resv);
free_rmrru:
kfree(rmrru);
out:
@@ -4452,7 +4444,6 @@ static void intel_iommu_free_dmars(void)
list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
list_del(&rmrru->list);
dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
- kfree(rmrru->resv);
kfree(rmrru);
}
@@ -5470,6 +5461,7 @@ static void intel_iommu_remove_device(struct device *dev)
static void intel_iommu_get_resv_regions(struct device *device,
struct list_head *head)
{
+ int prot = DMA_PTE_READ|DMA_PTE_WRITE;
struct iommu_resv_region *reg;
struct dmar_rmrr_unit *rmrr;
struct device *i_dev;
@@ -5479,10 +5471,21 @@ static void intel_iommu_get_resv_regions(struct device *device,
for_each_rmrr_units(rmrr) {
for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
i, i_dev) {
+ struct iommu_resv_region *resv;
+ size_t length;
+
if (i_dev != device)
continue;
- list_add_tail(&rmrr->resv->list, head);
+ length = rmrr->end_address - rmrr->base_address + 1;
+ resv = iommu_alloc_resv_region(rmrr->base_address,
+ length, prot,
+ IOMMU_RESV_DIRECT,
+ GFP_ATOMIC);
+ if (!resv)
+ break;
+
+ list_add_tail(&resv->list, head);
}
}
rcu_read_unlock();
@@ -5500,10 +5503,8 @@ static void intel_iommu_put_resv_regions(struct device *dev,
{
struct iommu_resv_region *entry, *next;
- list_for_each_entry_safe(entry, next, head, list) {
- if (entry->type == IOMMU_RESV_MSI)
- kfree(entry);
- }
+ list_for_each_entry_safe(entry, next, head, list)
+ kfree(entry);
}
int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
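
Since every entry on the head list is now allocated in
intel_iommu_get_resv_regions() itself, the put callback can free all
entries unconditionally instead of only the MSI one. A hedged sketch of
the usual caller-side pairing, using the generic iommu_get_resv_regions()
and iommu_put_resv_regions() wrappers (the dump_resv_regions() helper and
its loop body are hypothetical):

#include <linux/iommu.h>
#include <linux/list.h>

/* Hypothetical caller: collect, inspect and release a device's regions. */
static void dump_resv_regions(struct device *dev)
{
	struct iommu_resv_region *region;
	LIST_HEAD(resv_regions);

	iommu_get_resv_regions(dev, &resv_regions);
	list_for_each_entry(region, &resv_regions, list)
		pr_info("resv region [%pa, +%zx]\n",
			&region->start, region->length);
	/* With this patch the Intel callback now kfree()s every entry. */
	iommu_put_resv_regions(dev, &resv_regions);
}
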
--
2.20.1