[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <1432239792-5002-5-git-send-email-jglisse@redhat.com>
Date: Thu, 21 May 2015 16:23:00 -0400
From: jglisse@...hat.com
To: akpm@...ux-foundation.org
Cc: <linux-kernel@...r.kernel.org>, linux-mm@...ck.org,
Linus Torvalds <torvalds@...ux-foundation.org>,
<joro@...tes.org>, Mel Gorman <mgorman@...e.de>,
"H. Peter Anvin" <hpa@...or.com>,
Peter Zijlstra <peterz@...radead.org>,
Andrea Arcangeli <aarcange@...hat.com>,
Johannes Weiner <jweiner@...hat.com>,
Larry Woodman <lwoodman@...hat.com>,
Rik van Riel <riel@...hat.com>,
Dave Airlie <airlied@...hat.com>,
Brendan Conoboy <blc@...hat.com>,
Joe Donohue <jdonohue@...hat.com>,
Duncan Poole <dpoole@...dia.com>,
Sherry Cheung <SCheung@...dia.com>,
Subhash Gutti <sgutti@...dia.com>,
John Hubbard <jhubbard@...dia.com>,
Mark Hairgrove <mhairgrove@...dia.com>,
Lucien Dunning <ldunning@...dia.com>,
Cameron Buschardt <cabuschardt@...dia.com>,
Arvind Gopalakrishnan <arvindg@...dia.com>,
Haggai Eran <haggaie@...lanox.com>,
Shachar Raindel <raindel@...lanox.com>,
Liran Liss <liranl@...lanox.com>,
Roland Dreier <roland@...estorage.com>,
Ben Sander <ben.sander@....com>,
Greg Stoner <Greg.Stoner@....com>,
John Bridgman <John.Bridgman@....com>,
Michael Mantor <Michael.Mantor@....com>,
Paul Blinzer <Paul.Blinzer@....com>,
Laurent Morichetti <Laurent.Morichetti@....com>,
Alexander Deucher <Alexander.Deucher@....com>,
Oded Gabbay <Oded.Gabbay@....com>,
Jérôme Glisse <jglisse@...hat.com>
Subject: [PATCH 24/36] HMM: split DMA mapping function in two.
From: Jérôme Glisse <jglisse@...hat.com>
To be able to reuse the DMA mapping logic, split it into two functions.
Signed-off-by: Jérôme Glisse <jglisse@...hat.com>
---
mm/hmm.c | 125 +++++++++++++++++++++++++++++++++------------------------------
1 file changed, 66 insertions(+), 59 deletions(-)
diff --git a/mm/hmm.c b/mm/hmm.c
index 9dbb1e43..b8807b2 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -853,82 +853,89 @@ static int hmm_mirror_fault_pmd(pmd_t *pmdp,
return ret;
}
+/*
+ * hmm_mirror_dma_map_range() - DMA map a run of HMM page table entries.
+ * @mirror: mirror whose device the pages are being mapped for.
+ * @hmm_pte: first entry of the live page table range to process.
+ * @lock: directory lock protecting updates to @hmm_pte, or NULL when the
+ *        caller guarantees exclusive access by other means.
+ * @npages: number of consecutive entries to process.
+ *
+ * Entries that are not valid-pfn or not selected are skipped. Each
+ * selected page is mapped with dma_map_page() and the entry is rewritten
+ * as a DMA entry, preserving the write and dirty bits. If the entry
+ * changed under us the mapping is undone and the entry is retried (when
+ * it became a valid pfn again) or skipped.
+ *
+ * Return: 0 on success, -ENOMEM if dma_map_page() failed; stops at the
+ * first failure, leaving earlier entries mapped.
+ *
+ * NOTE(review): unlike the pre-split code this path removes, an entry
+ * that is neither valid-pfn nor valid-dma no longer yields -ENOENT; it
+ * is silently skipped — confirm this behavior change is intended.
+ */
+static int hmm_mirror_dma_map_range(struct hmm_mirror *mirror,
+ dma_addr_t *hmm_pte,
+ spinlock_t *lock,
+ unsigned long npages)
+{
+ struct device *dev = mirror->device->dev;
+ unsigned long i;
+ int ret = 0;
+
+ for (i = 0; i < npages; i++) {
+ dma_addr_t dma_addr, pte;
+ struct page *page;
+
+again:
+ /* Snapshot the entry; it may change concurrently until locked. */
+ pte = ACCESS_ONCE(hmm_pte[i]);
+ if (!hmm_pte_test_valid_pfn(&pte) || !hmm_pte_test_select(&pte))
+ continue;
+
+ page = pfn_to_page(hmm_pte_pfn(pte));
+ VM_BUG_ON(!page);
+ dma_addr = dma_map_page(dev, page, 0, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, dma_addr)) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ /*
+ * Make sure we transfer the dirty bit. Note that there
+ * might still be a window for another thread to set
+ * the dirty bit before we check for pte equality. This
+ * will just lead to a useless retry so it is not the
+ * end of the world here.
+ */
+ if (lock)
+ spin_lock(lock);
+ if (hmm_pte_test_dirty(&hmm_pte[i]))
+ hmm_pte_set_dirty(&pte);
+ if (ACCESS_ONCE(hmm_pte[i]) != pte) {
+ /* Entry changed under us: undo the mapping. */
+ if (lock)
+ spin_unlock(lock);
+ dma_unmap_page(dev, dma_addr, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ /*
+ * NOTE(review): the removed code retried based on the
+ * snapshot (pte); this tests the live entry hmm_pte[i]
+ * instead — confirm the change is intentional.
+ */
+ if (hmm_pte_test_valid_pfn(&hmm_pte[i]))
+ goto again;
+ continue;
+ }
+ /* Commit the DMA entry, carrying over write and dirty bits. */
+ hmm_pte[i] = hmm_pte_from_dma_addr(dma_addr);
+ if (hmm_pte_test_write(&pte))
+ hmm_pte_set_write(&hmm_pte[i]);
+ if (hmm_pte_test_dirty(&pte))
+ hmm_pte_set_dirty(&hmm_pte[i]);
+ if (lock)
+ spin_unlock(lock);
+ }
+
+ return ret;
+}
+/*
+ * hmm_mirror_dma_map() - DMA map all selected entries in [start, end).
+ * Walks the mirror page table one last-level directory at a time and
+ * delegates the per-entry work to hmm_mirror_dma_map_range().
+ *
+ * Return: 0 on success, -ENOENT if a directory is missing, or the first
+ * error reported by hmm_mirror_dma_map_range().
+ */
 static int hmm_mirror_dma_map(struct hmm_mirror *mirror,
 struct hmm_pt_iter *iter,
 unsigned long start,
 unsigned long end)
 {
- struct device *dev = mirror->device->dev;
 unsigned long addr;
 int ret;
 for (ret = 0, addr = start; !ret && addr < end;) {
- unsigned long i = 0, hmm_end, next;
+ unsigned long next, npages;
 dma_addr_t *hmm_pte;
+ spinlock_t *lock;
 hmm_pte = hmm_pt_iter_fault(iter, &mirror->pt, addr);
 if (!hmm_pte)
 return -ENOENT;
- hmm_end = hmm_pt_level_next(&mirror->pt, addr, end,
- mirror->pt.llevel - 1);
- do {
- dma_addr_t dma_addr, pte;
- struct page *page;
-
- next = hmm_pt_level_next(&mirror->pt, addr, hmm_end,
- mirror->pt.llevel);
-
-again:
- pte = ACCESS_ONCE(hmm_pte[i]);
- if (!hmm_pte_test_valid_pfn(&pte) ||
- !hmm_pte_test_select(&pte)) {
- if (!hmm_pte_test_valid_dma(&pte)) {
- ret = -ENOENT;
- break;
- }
- continue;
- }
-
- page = pfn_to_page(hmm_pte_pfn(pte));
- VM_BUG_ON(!page);
- dma_addr = dma_map_page(dev, page, 0, PAGE_SIZE,
- DMA_BIDIRECTIONAL);
- if (dma_mapping_error(dev, dma_addr)) {
- ret = -ENOMEM;
- break;
- }
+ next = hmm_pt_level_next(&mirror->pt, addr, end,
+ mirror->pt.llevel - 1);
- hmm_pt_iter_directory_lock(iter, &mirror->pt);
- /*
- * Make sure we transfer the dirty bit. Note that there
- * might still be a window for another thread to set
- * the dirty bit before we check for pte equality. This
- * will just lead to a useless retry so it is not the
- * end of the world here.
- */
- if (hmm_pte_test_dirty(&hmm_pte[i]))
- hmm_pte_set_dirty(&pte);
- if (ACCESS_ONCE(hmm_pte[i]) != pte) {
- hmm_pt_iter_directory_unlock(iter,&mirror->pt);
- dma_unmap_page(dev, dma_addr, PAGE_SIZE,
- DMA_BIDIRECTIONAL);
- if (hmm_pte_test_valid_pfn(&pte))
- goto again;
- if (!hmm_pte_test_valid_dma(&pte)) {
- ret = -ENOENT;
- break;
- }
- } else {
- hmm_pte[i] = hmm_pte_from_dma_addr(dma_addr);
- if (hmm_pte_test_write(&pte))
- hmm_pte_set_write(&hmm_pte[i]);
- if (hmm_pte_test_dirty(&pte))
- hmm_pte_set_dirty(&hmm_pte[i]);
- hmm_pt_iter_directory_unlock(iter, &mirror->pt);
- }
- } while (addr = next, i++, addr != hmm_end && !ret);
+ /*
+ * NOTE(review): assumes [addr, next) maps 1:1 onto npages
+ * consecutive entries starting at hmm_pte — i.e. addr is
+ * aligned with the entry hmm_pt_iter_fault() returned;
+ * confirm this holds when start is mid-directory.
+ */
+ npages = (next - addr) >> PAGE_SHIFT;
+ lock = hmm_pt_iter_directory_lock_ptr(iter, &mirror->pt);
+ ret = hmm_mirror_dma_map_range(mirror, hmm_pte, lock, npages);
+ addr = next;
 }
 return ret;
--
1.9.3
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/
Powered by blists - more mailing lists