Message-ID: <20200706222347.32290-6-rcampbell@nvidia.com>
Date: Mon, 6 Jul 2020 15:23:47 -0700
From: Ralph Campbell <rcampbell@...dia.com>
To: <linux-rdma@...r.kernel.org>, <linux-mm@...ck.org>,
<nouveau@...ts.freedesktop.org>, <kvm-ppc@...r.kernel.org>,
<linux-kselftest@...r.kernel.org>, <linux-kernel@...r.kernel.org>
CC: Jerome Glisse <jglisse@...hat.com>,
John Hubbard <jhubbard@...dia.com>,
Christoph Hellwig <hch@....de>,
Jason Gunthorpe <jgg@...lanox.com>,
"Andrew Morton" <akpm@...ux-foundation.org>,
Shuah Khan <shuah@...nel.org>,
"Ben Skeggs" <bskeggs@...hat.com>,
Bharata B Rao <bharata@...ux.ibm.com>,
"Ralph Campbell" <rcampbell@...dia.com>
Subject: [PATCH 5/5] mm/hmm/test: use the new migration invalidation
Use the new MMU_NOTIFY_MIGRATE event to skip MMU invalidations of device
private memory and handle the invalidation in the driver as part of
migrating device private memory.
Signed-off-by: Ralph Campbell <rcampbell@...dia.com>
---
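[Not part of the patch: a minimal sketch of the invalidation-filtering pattern this change applies. An mmu_interval_notifier invalidate() callback can return early for MMU_NOTIFY_MIGRATE events whose owner matches the driver's own device, because the migration path updates the device page tables itself. The my_device/my_mirror types below are hypothetical stand-ins for driver state, and the range->event/range->data fields are assumed to be the ones added earlier in this series, as used in the diff below.]

/* Illustrative sketch only -- mirrors the pattern used by test_hmm.c. */
#include <linux/kernel.h>
#include <linux/mmu_notifier.h>
#include <linux/mutex.h>

struct my_device;                       /* hypothetical per-device state */

struct my_mirror {                      /* hypothetical per-process mirror */
        struct mmu_interval_notifier notifier;
        struct mutex mutex;
        struct my_device *mdevice;
};

static bool my_interval_invalidate(struct mmu_interval_notifier *mni,
                                   const struct mmu_notifier_range *range,
                                   unsigned long cur_seq)
{
        struct my_mirror *mirror =
                container_of(mni, struct my_mirror, notifier);

        /*
         * A migration of this device's own private memory: skip the
         * invalidation here since the driver's migration code updates
         * the device page tables as part of the migration itself.
         */
        if (range->event == MMU_NOTIFY_MIGRATE &&
            range->data == mirror->mdevice)
                return true;

        if (mmu_notifier_range_blockable(range))
                mutex_lock(&mirror->mutex);
        else if (!mutex_trylock(&mirror->mutex))
                return false;

        mmu_interval_set_seq(mni, cur_seq);
        /* ... drop device mappings for [range->start, range->end) ... */
        mutex_unlock(&mirror->mutex);
        return true;
}

static const struct mmu_interval_notifier_ops my_min_ops = {
        .invalidate = my_interval_invalidate,
};

[A driver would register such ops with mmu_interval_notifier_insert() for the mirrored address range, much as test_hmm.c does for its dmirror_min_ops.]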
lib/test_hmm.c | 31 ++++++++++++++++++-------------
1 file changed, 18 insertions(+), 13 deletions(-)
diff --git a/lib/test_hmm.c b/lib/test_hmm.c
index 1bd60cfb5a25..a170e5f7fb2e 100644
--- a/lib/test_hmm.c
+++ b/lib/test_hmm.c
@@ -214,6 +214,14 @@ static bool dmirror_interval_invalidate(struct mmu_interval_notifier *mni,
{
struct dmirror *dmirror = container_of(mni, struct dmirror, notifier);
+ /*
+ * Ignore invalidation callbacks for device private pages since
+ * the invalidation is handled as part of the migration process.
+ */
+ if (range->event == MMU_NOTIFY_MIGRATE &&
+ range->data == dmirror->mdevice)
+ return true;
+
if (mmu_notifier_range_blockable(range))
mutex_lock(&dmirror->mutex);
else if (!mutex_trylock(&dmirror->mutex))
@@ -702,7 +710,7 @@ static int dmirror_migrate(struct dmirror *dmirror,
args.dst = dst_pfns;
args.start = addr;
args.end = next;
- args.src_owner = NULL;
+ args.src_owner = dmirror->mdevice;
args.dir = MIGRATE_VMA_FROM_SYSTEM;
ret = migrate_vma_setup(&args);
if (ret)
@@ -992,7 +1000,7 @@ static void dmirror_devmem_free(struct page *page)
}
static vm_fault_t dmirror_devmem_fault_alloc_and_copy(struct migrate_vma *args,
- struct dmirror_device *mdevice)
+ struct dmirror *dmirror)
{
const unsigned long *src = args->src;
unsigned long *dst = args->dst;
@@ -1014,6 +1022,7 @@ static vm_fault_t dmirror_devmem_fault_alloc_and_copy(struct migrate_vma *args,
continue;
lock_page(dpage);
+ xa_erase(&dmirror->pt, addr >> PAGE_SHIFT);
copy_highpage(dpage, spage);
*dst = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
if (*src & MIGRATE_PFN_WRITE)
@@ -1022,15 +1031,6 @@ static vm_fault_t dmirror_devmem_fault_alloc_and_copy(struct migrate_vma *args,
return 0;
}
-static void dmirror_devmem_fault_finalize_and_map(struct migrate_vma *args,
- struct dmirror *dmirror)
-{
- /* Invalidate the device's page table mapping. */
- mutex_lock(&dmirror->mutex);
- dmirror_do_update(dmirror, args->start, args->end);
- mutex_unlock(&dmirror->mutex);
-}
-
static vm_fault_t dmirror_devmem_fault(struct vm_fault *vmf)
{
struct migrate_vma args;
@@ -1060,11 +1060,16 @@ static vm_fault_t dmirror_devmem_fault(struct vm_fault *vmf)
if (migrate_vma_setup(&args))
return VM_FAULT_SIGBUS;
- ret = dmirror_devmem_fault_alloc_and_copy(&args, dmirror->mdevice);
+ ret = dmirror_devmem_fault_alloc_and_copy(&args, dmirror);
if (ret)
return ret;
migrate_vma_pages(&args);
- dmirror_devmem_fault_finalize_and_map(&args, dmirror);
+ /*
+ * No device finalize step is needed since
+ * dmirror_devmem_fault_alloc_and_copy() will have already
+ * invalidated the device page table. We could reinstate device MMU
+ * entries for pages that didn't migrate but that should be rare.
+ */
migrate_vma_finalize(&args);
return 0;
}
--
2.20.1