Message-Id: <1467978311-28322-15-git-send-email-joro@8bytes.org>
Date:	Fri,  8 Jul 2016 13:45:05 +0200
From:	Joerg Roedel <joro@...tes.org>
To:	iommu@...ts.linux-foundation.org
Cc:	linux-kernel@...r.kernel.org,
	Suravee Suthikulpanit <Suravee.Suthikulpanit@....com>,
	Vincent.Wan@....com, Joerg Roedel <jroedel@...e.de>
Subject: [PATCH 14/20] iommu/amd: Implement timeout to flush unmap queues

From: Joerg Roedel <jroedel@...e.de>

If the queue does not fill up, flush the TLB at least 10ms
after the unmap happened, to make sure that stale TLB
entries get cleaned up.  Without the timeout, entries in a
queue that never fills would never be flushed.

Signed-off-by: Joerg Roedel <jroedel@...e.de>
---
 drivers/iommu/amd_iommu.c | 28 ++++++++++++++++++++++++++++
 1 file changed, 28 insertions(+)
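
For reference, a minimal stand-alone sketch of the arm-once timer
idiom this patch uses (hypothetical demo_* names, not part of the
patch): a 0 -> 1 transition of an atomic flag arms the timer once
per idle period, and the handler clears the flag before draining
so that entries queued during the drain re-arm it.

#include <linux/atomic.h>
#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/init.h>

static atomic_t demo_timer_on;
static struct timer_list demo_timer;

static void demo_timeout(unsigned long unused)
{
	/*
	 * Clear the flag before draining, so a concurrent
	 * demo_add() re-arms the timer for entries queued
	 * while we drain.
	 */
	atomic_set(&demo_timer_on, 0);

	/* ... drain the per-cpu queues here ... */
}

static void demo_add(void)
{
	/* ... queue an entry under the queue lock ... */

	/*
	 * Only the 0 -> 1 transition arms the timer, so
	 * concurrent callers issue at most one mod_timer()
	 * per idle period.
	 */
	if (atomic_cmpxchg(&demo_timer_on, 0, 1) == 0)
		mod_timer(&demo_timer,
			  jiffies + msecs_to_jiffies(10));
}

static int __init demo_init(void)
{
	setup_timer(&demo_timer, demo_timeout, 0);
	return 0;
}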

diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index a3e09cd..1bb003e 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -104,6 +104,9 @@ struct flush_queue {
 
 DEFINE_PER_CPU(struct flush_queue, flush_queue);
 
+static atomic_t queue_timer_on;
+static struct timer_list queue_timer;
+
 /*
  * Domain for untranslated devices - only allocated
  * if iommu=pt passed on kernel cmd line.
@@ -2159,6 +2162,24 @@ static void __queue_flush(struct flush_queue *queue)
 	queue->next = 0;
 }
 
+static void queue_flush_timeout(unsigned long unused)
+{
+	int cpu;
+
+	atomic_set(&queue_timer_on, 0);
+
+	for_each_possible_cpu(cpu) {
+		struct flush_queue *queue;
+		unsigned long flags;
+
+		queue = per_cpu_ptr(&flush_queue, cpu);
+		spin_lock_irqsave(&queue->lock, flags);
+		if (queue->next > 0)
+			__queue_flush(queue);
+		spin_unlock_irqrestore(&queue->lock, flags);
+	}
+}
+
 static void queue_add(struct dma_ops_domain *dma_dom,
 		      unsigned long address, unsigned long pages)
 {
@@ -2184,6 +2205,10 @@ static void queue_add(struct dma_ops_domain *dma_dom,
 	entry->dma_dom  = dma_dom;
 
 	spin_unlock_irqrestore(&queue->lock, flags);
+
+	if (atomic_cmpxchg(&queue_timer_on, 0, 1) == 0)
+		mod_timer(&queue_timer, jiffies + msecs_to_jiffies(10));
+
 	put_cpu_ptr(&flush_queue);
 }
 
@@ -2639,6 +2664,9 @@ out_put_iova:
 
 int __init amd_iommu_init_dma_ops(void)
 {
+	setup_timer(&queue_timer, queue_flush_timeout, 0);
+	atomic_set(&queue_timer_on, 0);
+
 	swiotlb        = iommu_pass_through ? 1 : 0;
 	iommu_detected = 1;
 
-- 
1.9.1
