[<prev] [next>] [day] [month] [year] [list]
Message-ID: <20190117114714.126410-1-Suravee.Suthikulpanit@amd.com>
Date: Thu, 17 Jan 2019 11:47:35 +0000
From: "Suthikulpanit, Suravee" <Suravee.Suthikulpanit@....com>
To: "linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
"iommu@...ts.linux-foundation.org" <iommu@...ts.linux-foundation.org>
CC: "joro@...tes.org" <joro@...tes.org>,
"Suthikulpanit, Suravee" <Suravee.Suthikulpanit@....com>,
Boris Ostrovsky <boris.ostrovsky@...cle.com>,
"Singh, Brijesh" <brijesh.singh@....com>
Subject: [PATCH v2] iommu/amd: Fix IOMMU page flush when detach all devices
from a domain
From: Suravee Suthikulpanit <suravee.suthikulpanit@....com>
When a VM is terminated, the VFIO driver detaches all pass-through
devices from VFIO domain by clearing domain id and page table root
pointer from each device table entry (DTE), and then invalidates
the DTE. Then, the VFIO driver unmaps the pages and invalidates the IOMMU pages.
Currently, the IOMMU driver keeps track of which IOMMU and how many
devices are attached to the domain. When invalidating IOMMU pages,
the driver checks if the IOMMU is still attached to the domain before
issuing the invalidate page command.
However, since VFIO has already detached all devices from the domain,
the subsequent INVALIDATE_IOMMU_PAGES commands are being skipped as
there is no IOMMU attached to the domain. This results in data
corruption and could leave the PCI device in an indeterminate
state.
Fix this by introducing three IOMMU domain states: attached, detached,
and free. The IOMMU page invalidation command is then issued only when
the domain is in the attached or detached state.
Cc: Boris Ostrovsky <boris.ostrovsky@...cle.com>
Signed-off-by: Brijesh Singh <brijesh.singh@....com>
Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@....com>
---
drivers/iommu/amd_iommu.c | 25 ++++++++++++++++++++-----
drivers/iommu/amd_iommu_types.h | 8 +++++++-
2 files changed, 27 insertions(+), 6 deletions(-)
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 525659b88ade..1c64a26c50fb 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -1248,7 +1248,14 @@ static void __domain_flush_pages(struct protection_domain *domain,
build_inv_iommu_pages(&cmd, address, size, domain->id, pde);
for (i = 0; i < amd_iommu_get_num_iommus(); ++i) {
- if (!domain->dev_iommu[i])
+ /*
+ * We issue the command only when the domain is
+ * in ATTACHED and DETACHED state. This is required
+ * since VFIO detaches all devices from the group
+ * before invalidating IOMMU pages. Please see
+ * vfio_iommu_type1_detach_group().
+ */
+ if (domain->dev_iommu[i] == IOMMU_DOMAIN_STATE_FREE)
continue;
/*
@@ -1292,7 +1299,8 @@ static void domain_flush_complete(struct protection_domain *domain)
int i;
for (i = 0; i < amd_iommu_get_num_iommus(); ++i) {
- if (domain && !domain->dev_iommu[i])
+ if (domain &&
+ domain->dev_iommu[i] < IOMMU_DOMAIN_STATE_ATTACHED)
continue;
/*
@@ -1978,8 +1986,11 @@ static void do_attach(struct iommu_dev_data *dev_data,
list_add(&dev_data->list, &domain->dev_list);
/* Do reference counting */
- domain->dev_iommu[iommu->index] += 1;
- domain->dev_cnt += 1;
+ if (domain->dev_iommu[iommu->index] < IOMMU_DOMAIN_STATE_ATTACHED)
+ domain->dev_iommu[iommu->index] = IOMMU_DOMAIN_STATE_ATTACHED;
+ else
+ domain->dev_iommu[iommu->index] += 1;
+ domain->dev_cnt += 1;
/* Update device table */
set_dte_entry(dev_data->devid, domain, ats, dev_data->iommu_v2);
@@ -2904,6 +2915,7 @@ static int protection_domain_init(struct protection_domain *domain)
static struct protection_domain *protection_domain_alloc(void)
{
+ int i;
struct protection_domain *domain;
domain = kzalloc(sizeof(*domain), GFP_KERNEL);
@@ -2915,6 +2927,9 @@ static struct protection_domain *protection_domain_alloc(void)
add_domain_to_list(domain);
+ for (i = 0; i < MAX_IOMMUS; i++)
+ domain->dev_iommu[i] = IOMMU_DOMAIN_STATE_FREE;
+
return domain;
out_err:
@@ -3364,7 +3379,7 @@ static int __flush_pasid(struct protection_domain *domain, int pasid,
* prevent device TLB refill from IOMMU TLB
*/
for (i = 0; i < amd_iommu_get_num_iommus(); ++i) {
- if (domain->dev_iommu[i] == 0)
+ if (domain->dev_iommu[i] < IOMMU_DOMAIN_STATE_ATTACHED)
continue;
ret = iommu_queue_command(amd_iommus[i], &cmd);
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index eae0741f72dc..6d17cdcfa306 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -461,6 +461,12 @@ struct amd_irte_ops;
#define AMD_IOMMU_FLAG_TRANS_PRE_ENABLED (1 << 0)
+enum IOMMU_DOMAIN_STATES {
+ IOMMU_DOMAIN_STATE_FREE = -1,
+ IOMMU_DOMAIN_STATE_DETTACHED,
+ IOMMU_DOMAIN_STATE_ATTACHED
+};
+
/*
* This structure contains generic data for IOMMU protection domains
* independent of their use.
@@ -480,7 +486,7 @@ struct protection_domain {
unsigned long flags; /* flags to find out type of domain */
bool updated; /* complete domain flush required */
unsigned dev_cnt; /* devices assigned to this domain */
- unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */
+ int dev_iommu[MAX_IOMMUS]; /* per-IOMMU domain state and reference count */
};
/*
--
2.17.1
Powered by blists - more mailing lists