Message-Id: <1457469802-11850-26-git-send-email-jglisse@redhat.com>
Date: Tue, 8 Mar 2016 15:43:18 -0500
From: Jérôme Glisse <jglisse@...hat.com>
To: akpm@...ux-foundation.org, <linux-kernel@...r.kernel.org>,
linux-mm@...ck.org
Cc: Linus Torvalds <torvalds@...ux-foundation.org>, <joro@...tes.org>,
Mel Gorman <mgorman@...e.de>, "H. Peter Anvin" <hpa@...or.com>,
Peter Zijlstra <peterz@...radead.org>,
Andrea Arcangeli <aarcange@...hat.com>,
Johannes Weiner <jweiner@...hat.com>,
Larry Woodman <lwoodman@...hat.com>,
Rik van Riel <riel@...hat.com>,
Dave Airlie <airlied@...hat.com>,
Brendan Conoboy <blc@...hat.com>,
Joe Donohue <jdonohue@...hat.com>,
Christophe Harle <charle@...dia.com>,
Duncan Poole <dpoole@...dia.com>,
Sherry Cheung <SCheung@...dia.com>,
Subhash Gutti <sgutti@...dia.com>,
John Hubbard <jhubbard@...dia.com>,
Mark Hairgrove <mhairgrove@...dia.com>,
Lucien Dunning <ldunning@...dia.com>,
Cameron Buschardt <cabuschardt@...dia.com>,
Arvind Gopalakrishnan <arvindg@...dia.com>,
Haggai Eran <haggaie@...lanox.com>,
Shachar Raindel <raindel@...lanox.com>,
Liran Liss <liranl@...lanox.com>,
Roland Dreier <roland@...estorage.com>,
Ben Sander <ben.sander@....com>,
Greg Stoner <Greg.Stoner@....com>,
John Bridgman <John.Bridgman@....com>,
Michael Mantor <Michael.Mantor@....com>,
Paul Blinzer <Paul.Blinzer@....com>,
Leonid Shamis <Leonid.Shamis@....com>,
Laurent Morichetti <Laurent.Morichetti@....com>,
Alexander Deucher <Alexander.Deucher@....com>,
Jérôme Glisse <jglisse@...hat.com>
Subject: [PATCH v12 25/29] HMM: split DMA mapping function in two.

To be able to reuse the DMA mapping logic, split it into two functions.
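The page table walk stays in hmm_mirror_dma_map(), while the per-page
mapping loop moves into a new helper, hmm_mirror_dma_map_range(), which
works on any array of HMM page table entries. The helper takes the
directory lock as an explicit parameter and skips locking when it is
NULL. As a rough sketch of the intended reuse (this caller is
hypothetical, not part of this patch):

        /* Map npages entries with no directory locking needed. */
        ret = hmm_mirror_dma_map_range(mirror, hmm_pte, NULL, npages);
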
Signed-off-by: Jérôme Glisse <jglisse@...hat.com>
---
 mm/hmm.c | 120 ++++++++++++++++++++++++++++++++++-----------------------------
 1 file changed, 65 insertions(+), 55 deletions(-)

diff --git a/mm/hmm.c b/mm/hmm.c
index d26abe4..07f1ab6 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -910,76 +910,86 @@ static int hmm_mirror_fault_hugetlb_entry(pte_t *ptep,
         return 0;
 }
 
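+/*
+ * hmm_mirror_dma_map_range() - DMA map an array of HMM page table entries.
+ * @mirror: The mirror the entries belong to.
+ * @hmm_pte: Array of HMM page table entries to map.
+ * @lock: Lock protecting the entries against concurrent updates, or NULL
+ *        when no locking is needed.
+ * @npages: Number of entries in the array.
+ *
+ * Entries that are not valid pfn entries, or are not selected, are skipped.
+ * Returns 0 on success, -ENOMEM if dma_map_page() fails.
+ */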
+static int hmm_mirror_dma_map_range(struct hmm_mirror *mirror,
+                                    dma_addr_t *hmm_pte,
+                                    spinlock_t *lock,
+                                    unsigned long npages)
+{
+        struct device *dev = mirror->device->dev;
+        unsigned long i;
+        int ret = 0;
+
+        for (i = 0; i < npages; i++) {
+                dma_addr_t dma_addr, pte;
+                struct page *page;
+
+again:
+                pte = ACCESS_ONCE(hmm_pte[i]);
+                if (!hmm_pte_test_valid_pfn(&pte) || !hmm_pte_test_select(&pte))
+                        continue;
+
+                page = pfn_to_page(hmm_pte_pfn(pte));
+                VM_BUG_ON(!page);
+                dma_addr = dma_map_page(dev, page, 0, PAGE_SIZE,
+                                        DMA_BIDIRECTIONAL);
+                if (dma_mapping_error(dev, dma_addr)) {
+                        ret = -ENOMEM;
+                        break;
+                }
+
+                /*
+                 * Make sure we transfer the dirty bit. Note that there
+                 * might still be a window for another thread to set
+                 * the dirty bit before we check for pte equality. This
+                 * will just lead to a useless retry so it is not the
+                 * end of the world here.
+                 */
+                if (lock)
+                        spin_lock(lock);
+                if (hmm_pte_test_dirty(&hmm_pte[i]))
+                        hmm_pte_set_dirty(&pte);
+                if (ACCESS_ONCE(hmm_pte[i]) != pte) {
+                        if (lock)
+                                spin_unlock(lock);
+                        dma_unmap_page(dev, dma_addr, PAGE_SIZE,
+                                       DMA_BIDIRECTIONAL);
+                        if (hmm_pte_test_valid_pfn(&hmm_pte[i]))
+                                goto again;
+                        continue;
+                }
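+                /*
+                 * The entry did not change under us: publish the DMA
+                 * address, carrying over the write and dirty bits.
+                 */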
+                hmm_pte[i] = hmm_pte_from_dma_addr(dma_addr);
+                if (hmm_pte_test_write(&pte))
+                        hmm_pte_set_write(&hmm_pte[i]);
+                if (hmm_pte_test_dirty(&pte))
+                        hmm_pte_set_dirty(&hmm_pte[i]);
+                if (lock)
+                        spin_unlock(lock);
+        }
+
+        return ret;
+}
+
 static int hmm_mirror_dma_map(struct hmm_mirror *mirror,
                               struct hmm_pt_iter *iter,
                               unsigned long start,
                               unsigned long end)
 {
-        struct device *dev = mirror->device->dev;
         unsigned long addr;
         int ret;
 
         for (ret = 0, addr = start; !ret && addr < end;) {
-                unsigned long i = 0, next = end;
+                unsigned long next = end, npages;
                 dma_addr_t *hmm_pte;
+                spinlock_t *lock;
 
                 hmm_pte = hmm_pt_iter_populate(iter, addr, &next);
                 if (!hmm_pte)
                         return -ENOENT;
 
-                do {
-                        dma_addr_t dma_addr, pte;
-                        struct page *page;
-
-again:
-                        pte = ACCESS_ONCE(hmm_pte[i]);
-                        if (!hmm_pte_test_valid_pfn(&pte) ||
-                            !hmm_pte_test_select(&pte)) {
-                                if (!hmm_pte_test_valid_dma(&pte)) {
-                                        ret = -ENOENT;
-                                        break;
-                                }
-                                continue;
-                        }
-
-                        page = pfn_to_page(hmm_pte_pfn(pte));
-                        VM_BUG_ON(!page);
-                        dma_addr = dma_map_page(dev, page, 0, PAGE_SIZE,
-                                                DMA_BIDIRECTIONAL);
-                        if (dma_mapping_error(dev, dma_addr)) {
-                                ret = -ENOMEM;
-                                break;
-                        }
-
-                        hmm_pt_iter_directory_lock(iter);
-                        /*
-                         * Make sure we transfer the dirty bit. Note that there
-                         * might still be a window for another thread to set
-                         * the dirty bit before we check for pte equality. This
-                         * will just lead to a useless retry so it is not the
-                         * end of the world here.
-                         */
-                        if (hmm_pte_test_dirty(&hmm_pte[i]))
-                                hmm_pte_set_dirty(&pte);
-                        if (ACCESS_ONCE(hmm_pte[i]) != pte) {
-                                hmm_pt_iter_directory_unlock(iter);
-                                dma_unmap_page(dev, dma_addr, PAGE_SIZE,
-                                               DMA_BIDIRECTIONAL);
-                                if (hmm_pte_test_valid_pfn(&pte))
-                                        goto again;
-                                if (!hmm_pte_test_valid_dma(&pte)) {
-                                        ret = -ENOENT;
-                                        break;
-                                }
-                        } else {
-                                hmm_pte[i] = hmm_pte_from_dma_addr(dma_addr);
-                                if (hmm_pte_test_write(&pte))
-                                        hmm_pte_set_write(&hmm_pte[i]);
-                                if (hmm_pte_test_dirty(&pte))
-                                        hmm_pte_set_dirty(&hmm_pte[i]);
-                                hmm_pt_iter_directory_unlock(iter);
-                        }
-                } while (addr += PAGE_SIZE, i++, addr != next && !ret);
+                npages = (next - addr) >> PAGE_SHIFT;
+                lock = hmm_pt_iter_directory_lock_ptr(iter);
+                ret = hmm_mirror_dma_map_range(mirror, hmm_pte, lock, npages);
+                addr = next;
         }
 
         return ret;
--
2.4.3