Message-ID: <20210209010722.13839-4-apopple@nvidia.com>
Date:   Tue, 9 Feb 2021 12:07:16 +1100
From:   Alistair Popple <apopple@...dia.com>
To:     <linux-mm@...ck.org>, <nouveau@...ts.freedesktop.org>,
        <bskeggs@...hat.com>, <akpm@...ux-foundation.org>
CC:     <linux-doc@...r.kernel.org>, <linux-kernel@...r.kernel.org>,
        <kvm-ppc@...r.kernel.org>, <dri-devel@...ts.freedesktop.org>,
        <jhubbard@...dia.com>, <rcampbell@...dia.com>,
        <jglisse@...hat.com>, "Alistair Popple" <apopple@...dia.com>
Subject: [PATCH 3/9] mm/migrate: Add an unmap and pin migration mode

Some drivers need to ensure that a device has access to a particular
user page whilst preventing userspace access to that page. For example,
this is required to allow a driver to implement atomic access to a page
when the device hardware does not support atomic access to system
memory.

This could be implemented by migrating the data to the device; however,
this is not always optimal and may fail in some circumstances. In these
cases it is advantageous to remap the page for device access without
actually migrating the data.

To allow this kind of access, introduce unmap and pin flags called
MIGRATE_PFN_PIN/UNPIN for migration pfns. These will cause the original
page to be remapped to the provided device private page as normal, but
instead of returning or freeing the original CPU page the migration code
will pin it and leave it isolated from the LRU.
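
As a rough illustration of the intended calling convention (this sketch
is not part of the patch, and example_pin_range() is a hypothetical
helper): the driver pre-loads src[] with MIGRATE_PFN_PIN before calling
migrate_vma_setup(), which is why the collection code below now ORs into
src[] rather than overwriting it, and no longer zeroes dst[] on the
caller's behalf:

/*
 * Hypothetical driver-side sketch, not part of this patch: request that
 * each CPU page in the range be unmapped and pinned instead of copied.
 */
static int example_pin_range(struct vm_area_struct *vma,
			     unsigned long start, unsigned long end,
			     unsigned long *src, unsigned long *dst)
{
	unsigned long i, npages = (end - start) >> PAGE_SHIFT;
	struct migrate_vma args = {
		.vma	= vma,
		.start	= start,
		.end	= end,
		.src	= src,
		.dst	= dst,
		.flags	= MIGRATE_VMA_SELECT_SYSTEM,
	};
	int ret;

	for (i = 0; i < npages; i++) {
		src[i] = MIGRATE_PFN_PIN;	/* ask for unmap and pin */
		dst[i] = 0;	/* dst[] is no longer zeroed during collection */
	}

	ret = migrate_vma_setup(&args);
	if (ret)
		return ret;

	/* ... driver fills dst[] with its device private pages here ... */

	migrate_vma_pages(&args);
	migrate_vma_finalize(&args);
	return 0;
}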

This ensures the page remains pinned so that a device may access it
exclusively. Any userspace CPU accesses will fault and trigger the
normal device private migrate_to_ram() callback, which must migrate the
mapping back to the original page, after which the device will no longer
have exclusive access to the page.
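
For the fault side, a hedged sketch of how a driver's migrate_to_ram()
callback might hand the original, still-pinned page back, using
MIGRATE_PFN_UNPIN on the destination entry so that
migrate_vma_finalize() drops the GUP_PIN_COUNTING_BIAS reference taken
when the page was pinned (example_lookup_pinned_page() is a hypothetical
driver helper, not part of this series):

/*
 * Hypothetical sketch of a driver's dev_pagemap_ops->migrate_to_ram()
 * callback, not part of this patch. On a CPU fault the device private
 * page is migrated back to the original pinned CPU page.
 */
static vm_fault_t example_migrate_to_ram(struct vm_fault *vmf)
{
	struct page *dpage = vmf->page;	/* the device private page */
	struct page *cpage = example_lookup_pinned_page(dpage);
	unsigned long src = 0, dst = 0;
	struct migrate_vma args = {
		.vma		= vmf->vma,
		.start		= vmf->address & PAGE_MASK,
		.end		= (vmf->address & PAGE_MASK) + PAGE_SIZE,
		.src		= &src,
		.dst		= &dst,
		.pgmap_owner	= dpage->pgmap->owner,
		.flags		= MIGRATE_VMA_SELECT_DEVICE_PRIVATE,
	};

	if (migrate_vma_setup(&args))
		return VM_FAULT_SIGBUS;

	if (src & MIGRATE_PFN_MIGRATE) {
		lock_page(cpage);
		/* The data never left cpage, so no copy is required. */
		dst = migrate_pfn(page_to_pfn(cpage)) |
		      MIGRATE_PFN_LOCKED | MIGRATE_PFN_UNPIN;
		migrate_vma_pages(&args);
		migrate_vma_finalize(&args);
	}
	return 0;
}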

As the original page does not get freed, it is safe to allow the unmap
and pin operation to proceed in cases where there are extra page
references present.

Signed-off-by: Alistair Popple <apopple@...dia.com>
---
 include/linux/migrate.h      |  2 +
 include/linux/migrate_mode.h |  1 +
 mm/migrate.c                 | 74 +++++++++++++++++++++++++-----------
 3 files changed, 54 insertions(+), 23 deletions(-)

diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 4594838a0f7c..449fc61f9a99 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -144,6 +144,8 @@ static inline int migrate_misplaced_transhuge_page(struct mm_struct *mm,
 #define MIGRATE_PFN_MIGRATE	(1UL << 1)
 #define MIGRATE_PFN_LOCKED	(1UL << 2)
 #define MIGRATE_PFN_WRITE	(1UL << 3)
+#define MIGRATE_PFN_PIN		(1UL << 4)
+#define MIGRATE_PFN_UNPIN	(1UL << 5)
 #define MIGRATE_PFN_SHIFT	6
 
 static inline struct page *migrate_pfn_to_page(unsigned long mpfn)
diff --git a/include/linux/migrate_mode.h b/include/linux/migrate_mode.h
index 883c99249033..823497eda927 100644
--- a/include/linux/migrate_mode.h
+++ b/include/linux/migrate_mode.h
@@ -17,6 +17,7 @@ enum migrate_mode {
 	MIGRATE_SYNC_LIGHT,
 	MIGRATE_SYNC,
 	MIGRATE_SYNC_NO_COPY,
+	MIGRATE_REFERENCED,
 };
 
 #endif		/* MIGRATE_MODE_H_INCLUDED */
diff --git a/mm/migrate.c b/mm/migrate.c
index fe8bb322e2e3..71edc2679c8e 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -410,7 +410,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
 		 * never have extra references except during migration, but it
 		 * is safe to ignore these.
 		 */
-		if (!is_device_private_page(page) &&
+		if (!is_device_private_page(page) && extra_count >= 0 &&
 			page_count(page) != expected_count)
 			return -EAGAIN;
 
@@ -421,6 +421,8 @@ int migrate_page_move_mapping(struct address_space *mapping,
 			__SetPageSwapBacked(newpage);
 
 		return MIGRATEPAGE_SUCCESS;
+	} else if (extra_count < 0) {
+		return -EINVAL;
 	}
 
 	oldzone = page_zone(page);
@@ -704,12 +706,15 @@ int migrate_page(struct address_space *mapping,
 
 	BUG_ON(PageWriteback(page));	/* Writeback must be complete */
 
-	rc = migrate_page_move_mapping(mapping, newpage, page, 0);
+	if (mode == MIGRATE_REFERENCED)
+		rc = migrate_page_move_mapping(mapping, newpage, page, -1);
+	else
+		rc = migrate_page_move_mapping(mapping, newpage, page, 0);
 
 	if (rc != MIGRATEPAGE_SUCCESS)
 		return rc;
 
-	if (mode != MIGRATE_SYNC_NO_COPY)
+	if (mode != MIGRATE_SYNC_NO_COPY && mode != MIGRATE_REFERENCED)
 		migrate_page_copy(newpage, page);
 	else
 		migrate_page_states(newpage, page);
@@ -2327,15 +2332,15 @@ static int migrate_vma_collect_hole(unsigned long start,
 	if (!vma_is_anonymous(walk->vma)) {
 		for (addr = start; addr < end; addr += PAGE_SIZE) {
 			migrate->src[migrate->npages] = 0;
-			migrate->dst[migrate->npages] = 0;
 			migrate->npages++;
 		}
 		return 0;
 	}
 
 	for (addr = start; addr < end; addr += PAGE_SIZE) {
-		migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE;
-		migrate->dst[migrate->npages] = 0;
+		if (vma_is_anonymous(walk->vma) &&
+		    !(migrate->src[migrate->npages] & MIGRATE_PFN_PIN))
+			migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE;
 		migrate->npages++;
 		migrate->cpages++;
 	}
@@ -2425,7 +2430,8 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
 		pte = *ptep;
 
 		if (pte_none(pte)) {
-			if (vma_is_anonymous(vma)) {
+			if (vma_is_anonymous(vma) &&
+				!(migrate->src[migrate->npages] & MIGRATE_PFN_PIN)) {
 				mpfn = MIGRATE_PFN_MIGRATE;
 				migrate->cpages++;
 			}
@@ -2525,8 +2531,7 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
 		}
 
 next:
-		migrate->dst[migrate->npages] = 0;
-		migrate->src[migrate->npages++] = mpfn;
+		migrate->src[migrate->npages++] |= mpfn;
 	}
 	arch_leave_lazy_mmu_mode();
 	pte_unmap_unlock(ptep - 1, ptl);
@@ -2695,7 +2700,13 @@ static void migrate_vma_prepare(struct migrate_vma *migrate)
 			put_page(page);
 		}
 
-		if (!migrate_vma_check_page(page)) {
+		/*
+		 * If the page is being unmapped and pinned it isn't actually
+		 * going to migrate, so it's safe to continue the operation with
+		 * an elevated refcount.
+		 */
+		if (!migrate_vma_check_page(page) &&
+			!(migrate->src[i] & MIGRATE_PFN_PIN)) {
 			if (remap) {
 				migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
 				migrate->cpages--;
@@ -2757,25 +2768,34 @@ static void migrate_vma_unmap(struct migrate_vma *migrate)
 		if (!page || !(migrate->src[i] & MIGRATE_PFN_MIGRATE))
 			continue;
 
-		if (page_mapped(page)) {
+		if (page_mapped(page))
 			try_to_unmap(page, flags);
-			if (page_mapped(page))
-				goto restore;
+
+		if (page_mapped(page))
+			migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
+
+		if (!migrate_vma_check_page(page) &&
+		    !(migrate->src[i] & MIGRATE_PFN_PIN))
+			migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
+
+		if (migrate->src[i] & MIGRATE_PFN_PIN) {
+			if (page_maybe_dma_pinned(page))
+				migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
+			else
+				page_ref_add(page, GUP_PIN_COUNTING_BIAS);
 		}
 
-		if (migrate_vma_check_page(page))
+		if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE)) {
+			migrate->cpages--;
+			restore++;
 			continue;
-
-restore:
-		migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
-		migrate->cpages--;
-		restore++;
+		}
 	}
 
 	for (addr = start, i = 0; i < npages && restore; addr += PAGE_SIZE, i++) {
 		struct page *page = migrate_pfn_to_page(migrate->src[i]);
 
-		if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
+		if (!page || (migrate->src[i] &	MIGRATE_PFN_MIGRATE))
 			continue;
 
 		remove_migration_ptes(page, page, false);
@@ -3092,7 +3112,11 @@ void migrate_vma_pages(struct migrate_vma *migrate)
 			}
 		}
 
-		r = migrate_page(mapping, newpage, page, MIGRATE_SYNC_NO_COPY);
+		if (migrate->src[i] & MIGRATE_PFN_PIN)
+			r = migrate_page(mapping, newpage, page, MIGRATE_REFERENCED);
+		else
+			r = migrate_page(mapping, newpage, page, MIGRATE_SYNC_NO_COPY);
+
 		if (r != MIGRATEPAGE_SUCCESS)
 			migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
 	}
@@ -3148,15 +3172,19 @@ void migrate_vma_finalize(struct migrate_vma *migrate)
 
 		if (is_zone_device_page(page))
 			put_page(page);
-		else
+		else if (!(migrate->src[i] & MIGRATE_PFN_PIN))
 			putback_lru_page(page);
 
 		if (newpage != page) {
 			unlock_page(newpage);
 			if (is_zone_device_page(newpage))
 				put_page(newpage);
-			else
+			else {
+				if (migrate->dst[i] & MIGRATE_PFN_UNPIN)
+					page_ref_sub(newpage, GUP_PIN_COUNTING_BIAS);
+
 				putback_lru_page(newpage);
+			}
 		}
 	}
 }
-- 
2.20.1
