Date: Mon, 6 Dec 2021 12:52:43 -0600
From: Alex Sierra <alex.sierra@....com>
To: <akpm@...ux-foundation.org>, <Felix.Kuehling@....com>, <linux-mm@...ck.org>,
	<rcampbell@...dia.com>, <linux-ext4@...r.kernel.org>, <linux-xfs@...r.kernel.org>
CC: <amd-gfx@...ts.freedesktop.org>, <dri-devel@...ts.freedesktop.org>, <hch@....de>,
	<jgg@...dia.com>, <jglisse@...hat.com>, <apopple@...dia.com>, <willy@...radead.org>
Subject: [PATCH v2 03/11] mm/gup: migrate PIN_LONGTERM dev coherent pages to system

Avoid long-term pinning of coherent device type pages, since this could
interfere with the device's own memory manager. If a caller tries to get
user device coherent pages with the PIN_LONGTERM flag set, those pages
will be migrated back to system memory.

Signed-off-by: Alex Sierra <alex.sierra@....com>
---
 mm/gup.c | 32 ++++++++++++++++++++++++++++++--
 1 file changed, 30 insertions(+), 2 deletions(-)

diff --git a/mm/gup.c b/mm/gup.c
index 886d6148d3d0..1572eacf07f4 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1689,17 +1689,37 @@ struct page *get_dump_page(unsigned long addr)
 #endif /* CONFIG_ELF_CORE */
 
 #ifdef CONFIG_MIGRATION
+static int migrate_device_page(unsigned long address,
+			struct page *page)
+{
+	struct vm_area_struct *vma = find_vma(current->mm, address);
+	struct vm_fault vmf = {
+		.vma = vma,
+		.address = address & PAGE_MASK,
+		.flags = FAULT_FLAG_USER,
+		.pgoff = linear_page_index(vma, address),
+		.gfp_mask = GFP_KERNEL,
+		.page = page,
+	};
+	if (page->pgmap && page->pgmap->ops->migrate_to_ram)
+		return page->pgmap->ops->migrate_to_ram(&vmf);
+
+	return -EBUSY;
+}
+
 /*
  * Check whether all pages are pinnable, if so return number of pages. If some
  * pages are not pinnable, migrate them, and unpin all pages. Return zero if
  * pages were migrated, or if some pages were not successfully isolated.
  * Return negative error if migration fails.
  */
-static long check_and_migrate_movable_pages(unsigned long nr_pages,
+static long check_and_migrate_movable_pages(unsigned long start,
+					    unsigned long nr_pages,
 					    struct page **pages,
 					    unsigned int gup_flags)
 {
 	unsigned long i;
+	unsigned long page_index;
 	unsigned long isolation_error_count = 0;
 	bool drain_allow = true;
 	LIST_HEAD(movable_page_list);
@@ -1720,6 +1740,10 @@ static long check_and_migrate_movable_pages(unsigned long nr_pages,
 		 * If we get a movable page, since we are going to be pinning
 		 * these entries, try to move them out if possible.
 		 */
+		if (is_device_page(head)) {
+			page_index = i;
+			goto unpin_pages;
+		}
 		if (!is_pinnable_page(head)) {
 			if (PageHuge(head)) {
 				if (!isolate_huge_page(head, &movable_page_list))
@@ -1750,12 +1774,16 @@ static long check_and_migrate_movable_pages(unsigned long nr_pages,
 	if (list_empty(&movable_page_list) && !isolation_error_count)
 		return nr_pages;
 
+unpin_pages:
 	if (gup_flags & FOLL_PIN) {
 		unpin_user_pages(pages, nr_pages);
 	} else {
 		for (i = 0; i < nr_pages; i++)
 			put_page(pages[i]);
 	}
+	if (is_device_page(head))
+		return migrate_device_page(start + page_index * PAGE_SIZE, head);
+
 	if (!list_empty(&movable_page_list)) {
 		ret = migrate_pages(&movable_page_list, alloc_migration_target,
 				    NULL, (unsigned long)&mtc, MIGRATE_SYNC,
@@ -1798,7 +1826,7 @@ static long __gup_longterm_locked(struct mm_struct *mm,
 						  NULL, gup_flags);
 		if (rc <= 0)
 			break;
-		rc = check_and_migrate_movable_pages(rc, pages, gup_flags);
+		rc = check_and_migrate_movable_pages(start, rc, pages, gup_flags);
 	} while (!rc);
 	memalloc_pin_restore(flags);
 
-- 
2.32.0
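For context, here is a minimal sketch (not part of the posted patch) of how an
in-kernel caller doing a long-term pin would reach the new migration path. It
assumes the v5.16-era pin_user_pages() signature; the helper name
pin_user_range_longterm() and its error handling are illustrative only.

#include <linux/mm.h>
#include <linux/sched.h>

/*
 * Illustrative sketch: pin a user range for long-term use. With the patch
 * above, any device coherent pages in the range are migrated back to
 * system memory by check_and_migrate_movable_pages() before the pin
 * succeeds.
 */
static long pin_user_range_longterm(unsigned long uaddr, unsigned long npages,
				    struct page **pages)
{
	long pinned;

	mmap_read_lock(current->mm);
	/* FOLL_LONGTERM is what routes GUP through the migration check. */
	pinned = pin_user_pages(uaddr, npages,
				FOLL_WRITE | FOLL_LONGTERM, pages, NULL);
	mmap_read_unlock(current->mm);

	if (pinned < 0)
		return pinned;			/* hard error from GUP */
	if (pinned != npages) {
		unpin_user_pages(pages, pinned);	/* partial pin: release */
		return -EFAULT;
	}
	return pinned;
}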