Message-Id: <24304722005bc6f144e2a1fdd865d1465722fc2e.1639753638.git.robin.murphy@arm.com>
Date: Fri, 17 Dec 2021 15:31:03 +0000
From: Robin Murphy <robin.murphy@....com>
To: joro@...tes.org, will@...nel.org
Cc: iommu@...ts.linux-foundation.org, suravee.suthikulpanit@....com,
baolu.lu@...ux.intel.com, willy@...radead.org,
linux-kernel@...r.kernel.org, john.garry@...wei.com,
linux-mm@...ck.org, hch@....de
Subject: [PATCH v3 9/9] iommu: Move flush queue data into iommu_dma_cookie

Complete the move into iommu-dma by refactoring the flush queues
themselves to belong to the DMA cookie rather than the IOVA domain.

The refactoring may as well extend to some minor cosmetic aspects
too, to help us stay one step ahead of the style police.

Signed-off-by: Robin Murphy <robin.murphy@....com>
---
 drivers/iommu/dma-iommu.c | 171 +++++++++++++++++++++-----------------
 drivers/iommu/iova.c      |   2 -
 include/linux/iova.h      |  44 +---------
 3 files changed, 95 insertions(+), 122 deletions(-)
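
For reference, the unmap path that ends up in the relocated queue_iova()
looks roughly like this after the change. This is a condensed sketch of
the __iommu_dma_unmap() -> iommu_dma_free_iova() -> queue_iova() flow in
drivers/iommu/dma-iommu.c; the alignment fixups, WARN_ON and MSI cookie
case are elided and example_dma_unmap() is just a stand-in name, so treat
it as illustration rather than literal code:

static void example_dma_unmap(struct iommu_domain *domain,
			      dma_addr_t dma_addr, size_t size)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_iotlb_gather gather;

	iommu_iotlb_gather_init(&gather);
	/* Non-NULL once iommu_dma_init_fq() has published the flush queue */
	gather.queued = READ_ONCE(cookie->fq_domain);

	iommu_unmap_fast(domain, dma_addr, size, &gather);

	if (gather.queued) {
		/*
		 * Deferred path: queue_iova()'s smp_mb() pairs with the
		 * smp_wmb() in iommu_dma_init_fq().
		 */
		queue_iova(cookie, iova_pfn(iovad, dma_addr),
			   size >> iova_shift(iovad), &gather.freelist);
	} else {
		iommu_iotlb_sync(domain, &gather);
		free_iova_fast(iovad, iova_pfn(iovad, dma_addr),
			       size >> iova_shift(iovad));
	}
}
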
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 54be57fb08f0..b0568fa848e9 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -9,9 +9,12 @@
*/
#include <linux/acpi_iort.h>
+#include <linux/atomic.h>
+#include <linux/crash_dump.h>
#include <linux/device.h>
-#include <linux/dma-map-ops.h>
+#include <linux/dma-direct.h>
#include <linux/dma-iommu.h>
+#include <linux/dma-map-ops.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
@@ -20,11 +23,10 @@
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pci.h>
-#include <linux/swiotlb.h>
#include <linux/scatterlist.h>
+#include <linux/spinlock.h>
+#include <linux/swiotlb.h>
#include <linux/vmalloc.h>
-#include <linux/crash_dump.h>
-#include <linux/dma-direct.h>
struct iommu_dma_msi_page {
struct list_head list;
@@ -41,7 +43,19 @@ struct iommu_dma_cookie {
enum iommu_dma_cookie_type type;
union {
/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
- struct iova_domain iovad;
+ struct {
+ struct iova_domain iovad;
+
+ struct iova_fq __percpu *fq; /* Flush queue */
+ /* Number of TLB flushes that have been started */
+ atomic64_t fq_flush_start_cnt;
+ /* Number of TLB flushes that have been finished */
+ atomic64_t fq_flush_finish_cnt;
+ /* Timer to regularly empty the flush queues */
+ struct timer_list fq_timer;
+ /* 1 when timer is active, 0 when not */
+ atomic_t fq_timer_on;
+ };
/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
dma_addr_t msi_iova;
};
@@ -64,6 +78,27 @@ static int __init iommu_dma_forcedac_setup(char *str)
}
early_param("iommu.forcedac", iommu_dma_forcedac_setup);
+/* Number of entries per flush queue */
+#define IOVA_FQ_SIZE 256
+
+/* Timeout (in ms) after which entries are flushed from the queue */
+#define IOVA_FQ_TIMEOUT 10
+
+/* Flush queue entry for deferred flushing */
+struct iova_fq_entry {
+ unsigned long iova_pfn;
+ unsigned long pages;
+ struct list_head freelist;
+ u64 counter; /* Flush counter when this entry was added */
+};
+
+/* Per-CPU flush queue structure */
+struct iova_fq {
+ struct iova_fq_entry entries[IOVA_FQ_SIZE];
+ unsigned int head, tail;
+ spinlock_t lock;
+};
+
#define fq_ring_for_each(i, fq) \
for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) % IOVA_FQ_SIZE)
@@ -73,9 +108,9 @@ static inline bool fq_full(struct iova_fq *fq)
return (((fq->tail + 1) % IOVA_FQ_SIZE) == fq->head);
}
-static inline unsigned fq_ring_add(struct iova_fq *fq)
+static inline unsigned int fq_ring_add(struct iova_fq *fq)
{
- unsigned idx = fq->tail;
+ unsigned int idx = fq->tail;
assert_spin_locked(&fq->lock);
@@ -84,10 +119,10 @@ static inline unsigned fq_ring_add(struct iova_fq *fq)
return idx;
}
-static void fq_ring_free(struct iova_domain *iovad, struct iova_fq *fq)
+static void fq_ring_free(struct iommu_dma_cookie *cookie, struct iova_fq *fq)
{
- u64 counter = atomic64_read(&iovad->fq_flush_finish_cnt);
- unsigned idx;
+ u64 counter = atomic64_read(&cookie->fq_flush_finish_cnt);
+ unsigned int idx;
assert_spin_locked(&fq->lock);
@@ -97,7 +132,7 @@ static void fq_ring_free(struct iova_domain *iovad, struct iova_fq *fq)
break;
put_pages_list(&fq->entries[idx].freelist);
- free_iova_fast(iovad,
+ free_iova_fast(&cookie->iovad,
fq->entries[idx].iova_pfn,
fq->entries[idx].pages);
@@ -105,50 +140,50 @@ static void fq_ring_free(struct iova_domain *iovad, struct iova_fq *fq)
}
}
-static void iova_domain_flush(struct iova_domain *iovad)
+static void fq_flush_iotlb(struct iommu_dma_cookie *cookie)
{
- atomic64_inc(&iovad->fq_flush_start_cnt);
- iovad->fq_domain->ops->flush_iotlb_all(iovad->fq_domain);
- atomic64_inc(&iovad->fq_flush_finish_cnt);
+ atomic64_inc(&cookie->fq_flush_start_cnt);
+ cookie->fq_domain->ops->flush_iotlb_all(cookie->fq_domain);
+ atomic64_inc(&cookie->fq_flush_finish_cnt);
}
static void fq_flush_timeout(struct timer_list *t)
{
- struct iova_domain *iovad = from_timer(iovad, t, fq_timer);
+ struct iommu_dma_cookie *cookie = from_timer(cookie, t, fq_timer);
int cpu;
- atomic_set(&iovad->fq_timer_on, 0);
- iova_domain_flush(iovad);
+ atomic_set(&cookie->fq_timer_on, 0);
+ fq_flush_iotlb(cookie);
for_each_possible_cpu(cpu) {
unsigned long flags;
struct iova_fq *fq;
- fq = per_cpu_ptr(iovad->fq, cpu);
+ fq = per_cpu_ptr(cookie->fq, cpu);
spin_lock_irqsave(&fq->lock, flags);
- fq_ring_free(iovad, fq);
+ fq_ring_free(cookie, fq);
spin_unlock_irqrestore(&fq->lock, flags);
}
}
-void queue_iova(struct iova_domain *iovad,
+static void queue_iova(struct iommu_dma_cookie *cookie,
unsigned long pfn, unsigned long pages,
struct list_head *freelist)
{
struct iova_fq *fq;
unsigned long flags;
- unsigned idx;
+ unsigned int idx;
/*
* Order against the IOMMU driver's pagetable update from unmapping
- * @pte, to guarantee that iova_domain_flush() observes that if called
+ * @pte, to guarantee that fq_flush_iotlb() observes that if called
* from a different CPU before we release the lock below. Full barrier
* so it also pairs with iommu_dma_init_fq() to avoid seeing partially
* written fq state here.
*/
smp_mb();
- fq = raw_cpu_ptr(iovad->fq);
+ fq = raw_cpu_ptr(cookie->fq);
spin_lock_irqsave(&fq->lock, flags);
/*
@@ -156,65 +191,66 @@ void queue_iova(struct iova_domain *iovad,
* flushed out on another CPU. This makes the fq_full() check below less
* likely to be true.
*/
- fq_ring_free(iovad, fq);
+ fq_ring_free(cookie, fq);
if (fq_full(fq)) {
- iova_domain_flush(iovad);
- fq_ring_free(iovad, fq);
+ fq_flush_iotlb(cookie);
+ fq_ring_free(cookie, fq);
}
idx = fq_ring_add(fq);
fq->entries[idx].iova_pfn = pfn;
fq->entries[idx].pages = pages;
- fq->entries[idx].counter = atomic64_read(&iovad->fq_flush_start_cnt);
+ fq->entries[idx].counter = atomic64_read(&cookie->fq_flush_start_cnt);
list_splice(freelist, &fq->entries[idx].freelist);
spin_unlock_irqrestore(&fq->lock, flags);
/* Avoid false sharing as much as possible. */
- if (!atomic_read(&iovad->fq_timer_on) &&
- !atomic_xchg(&iovad->fq_timer_on, 1))
- mod_timer(&iovad->fq_timer,
+ if (!atomic_read(&cookie->fq_timer_on) &&
+ !atomic_xchg(&cookie->fq_timer_on, 1))
+ mod_timer(&cookie->fq_timer,
jiffies + msecs_to_jiffies(IOVA_FQ_TIMEOUT));
}
-static void free_iova_flush_queue(struct iova_domain *iovad)
+static void iommu_dma_free_fq(struct iommu_dma_cookie *cookie)
{
int cpu, idx;
- if (!iovad->fq)
+ if (!cookie->fq)
return;
- del_timer_sync(&iovad->fq_timer);
- /*
- * This code runs when the iova_domain is being detroyed, so don't
- * bother to free iovas, just free any remaining pagetable pages.
- */
+ del_timer_sync(&cookie->fq_timer);
+ /* The IOVAs will be torn down separately, so just free our queued pages */
for_each_possible_cpu(cpu) {
- struct iova_fq *fq = per_cpu_ptr(iovad->fq, cpu);
+ struct iova_fq *fq = per_cpu_ptr(cookie->fq, cpu);
fq_ring_for_each(idx, fq)
put_pages_list(&fq->entries[idx].freelist);
}
- free_percpu(iovad->fq);
-
- iovad->fq = NULL;
- iovad->fq_domain = NULL;
+ free_percpu(cookie->fq);
}
-int init_iova_flush_queue(struct iova_domain *iovad, struct iommu_domain *fq_domain)
+/* sysfs updates are serialised by the mutex of the group owning @domain */
+int iommu_dma_init_fq(struct iommu_domain *domain)
{
+ struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iova_fq __percpu *queue;
int i, cpu;
- atomic64_set(&iovad->fq_flush_start_cnt, 0);
- atomic64_set(&iovad->fq_flush_finish_cnt, 0);
+ if (cookie->fq_domain)
+ return 0;
+
+ atomic64_set(&cookie->fq_flush_start_cnt, 0);
+ atomic64_set(&cookie->fq_flush_finish_cnt, 0);
queue = alloc_percpu(struct iova_fq);
- if (!queue)
+ if (!queue) {
+ pr_warn("iova flush queue initialization failed\n");
return -ENOMEM;
+ }
for_each_possible_cpu(cpu) {
struct iova_fq *fq = per_cpu_ptr(queue, cpu);
@@ -228,12 +264,16 @@ int init_iova_flush_queue(struct iova_domain *iovad, struct iommu_domain *fq_dom
INIT_LIST_HEAD(&fq->entries[i].freelist);
}
- iovad->fq_domain = fq_domain;
- iovad->fq = queue;
-
- timer_setup(&iovad->fq_timer, fq_flush_timeout, 0);
- atomic_set(&iovad->fq_timer_on, 0);
+ cookie->fq = queue;
+ timer_setup(&cookie->fq_timer, fq_flush_timeout, 0);
+ atomic_set(&cookie->fq_timer_on, 0);
+ /*
+ * Prevent incomplete fq state being observable. Pairs with path from
+ * __iommu_dma_unmap() through iommu_dma_free_iova() to queue_iova()
+ */
+ smp_wmb();
+ WRITE_ONCE(cookie->fq_domain, domain);
return 0;
}
@@ -318,7 +358,7 @@ void iommu_put_dma_cookie(struct iommu_domain *domain)
return;
if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule) {
- free_iova_flush_queue(&cookie->iovad);
+ iommu_dma_free_fq(cookie);
put_iova_domain(&cookie->iovad);
}
@@ -467,29 +507,6 @@ static bool dev_use_swiotlb(struct device *dev)
return IS_ENABLED(CONFIG_SWIOTLB) && dev_is_untrusted(dev);
}
-/* sysfs updates are serialised by the mutex of the group owning @domain */
-int iommu_dma_init_fq(struct iommu_domain *domain)
-{
- struct iommu_dma_cookie *cookie = domain->iova_cookie;
- int ret;
-
- if (cookie->fq_domain)
- return 0;
-
- ret = init_iova_flush_queue(&cookie->iovad, domain);
- if (ret) {
- pr_warn("iova flush queue initialization failed\n");
- return ret;
- }
- /*
- * Prevent incomplete iovad->fq being observable. Pairs with path from
- * __iommu_dma_unmap() through iommu_dma_free_iova() to queue_iova()
- */
- smp_wmb();
- WRITE_ONCE(cookie->fq_domain, domain);
- return 0;
-}
-
/**
* iommu_dma_init_domain - Initialise a DMA mapping domain
* @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
@@ -628,7 +645,7 @@ static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
if (cookie->type == IOMMU_DMA_MSI_COOKIE)
cookie->msi_iova -= size;
else if (gather && gather->queued)
- queue_iova(iovad, iova_pfn(iovad, iova),
+ queue_iova(cookie, iova_pfn(iovad, iova),
size >> iova_shift(iovad),
&gather->freelist);
else
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 6673dfa8e7c5..72ac25831584 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -61,8 +61,6 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
iovad->start_pfn = start_pfn;
iovad->dma_32bit_pfn = 1UL << (32 - iova_shift(iovad));
iovad->max32_alloc_size = iovad->dma_32bit_pfn;
- iovad->fq_domain = NULL;
- iovad->fq = NULL;
iovad->anchor.pfn_lo = iovad->anchor.pfn_hi = IOVA_ANCHOR;
rb_link_node(&iovad->anchor.node, NULL, &iovad->rbroot.rb_node);
rb_insert_color(&iovad->anchor.node, &iovad->rbroot);
diff --git a/include/linux/iova.h b/include/linux/iova.h
index 072a09c06e8a..0abd48c5e622 100644
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -12,9 +12,6 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/rbtree.h>
-#include <linux/atomic.h>
-#include <linux/dma-mapping.h>
-#include <linux/iommu.h>
/* iova structure */
struct iova {
@@ -36,27 +33,6 @@ struct iova_rcache {
struct iova_cpu_rcache __percpu *cpu_rcaches;
};
-/* Number of entries per Flush Queue */
-#define IOVA_FQ_SIZE 256
-
-/* Timeout (in ms) after which entries are flushed from the Flush-Queue */
-#define IOVA_FQ_TIMEOUT 10
-
-/* Flush Queue entry for defered flushing */
-struct iova_fq_entry {
- unsigned long iova_pfn;
- unsigned long pages;
- struct list_head freelist;
- u64 counter; /* Flush counter when this entrie was added */
-};
-
-/* Per-CPU Flush Queue structure */
-struct iova_fq {
- struct iova_fq_entry entries[IOVA_FQ_SIZE];
- unsigned head, tail;
- spinlock_t lock;
-};
-
/* holds all the iova translations for a domain */
struct iova_domain {
spinlock_t iova_rbtree_lock; /* Lock to protect update of rbtree */
@@ -67,23 +43,9 @@ struct iova_domain {
unsigned long start_pfn; /* Lower limit for this domain */
unsigned long dma_32bit_pfn;
unsigned long max32_alloc_size; /* Size of last failed allocation */
- struct iova_fq __percpu *fq; /* Flush Queue */
-
- atomic64_t fq_flush_start_cnt; /* Number of TLB flushes that
- have been started */
-
- atomic64_t fq_flush_finish_cnt; /* Number of TLB flushes that
- have been finished */
-
struct iova anchor; /* rbtree lookup anchor */
+
struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE]; /* IOVA range caches */
-
- struct iommu_domain *fq_domain;
-
- struct timer_list fq_timer; /* Timer to regularily empty the
- flush-queues */
- atomic_t fq_timer_on; /* 1 when timer is active, 0
- when not */
struct hlist_node cpuhp_dead;
};
@@ -133,16 +95,12 @@ struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size,
bool size_aligned);
void free_iova_fast(struct iova_domain *iovad, unsigned long pfn,
unsigned long size);
-void queue_iova(struct iova_domain *iovad,
- unsigned long pfn, unsigned long pages,
- struct list_head *freelist);
unsigned long alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
unsigned long limit_pfn, bool flush_rcache);
struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
unsigned long pfn_hi);
void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
unsigned long start_pfn);
-int init_iova_flush_queue(struct iova_domain *iovad, struct iommu_domain *fq_domain);
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
void put_iova_domain(struct iova_domain *iovad);
#else
--
2.28.0.dirty