Date:   Thu, 18 Aug 2022 17:23:23 +1000
From:   Alistair Popple <apopple@...dia.com>
To:     linux-mm@...ck.org, akpm@...ux-foundation.org
Cc:     linux-kernel@...r.kernel.org,
        "Sierra Guiza, Alejandro (Alex)" <alex.sierra@....com>,
        Dan Williams <dan.j.williams@...el.com>,
        Felix Kuehling <Felix.Kuehling@....com>,
        Jason Gunthorpe <jgg@...dia.com>,
        John Hubbard <jhubbard@...dia.com>,
        Logan Gunthorpe <logang@...tatee.com>,
        Miaohe Lin <linmiaohe@...wei.com>,
        Muchun Song <songmuchun@...edance.com>,
        Ralph Campbell <rcampbell@...dia.com>,
        David Hildenbrand <david@...hat.com>,
        Matthew Wilcox <willy@...radead.org>,
        Alistair Popple <apopple@...dia.com>
Subject: Re: [PATCH v5 2/2] mm/gup.c: Refactor
 check_and_migrate_movable_pages()


Sorry Jason, I didn't see your previous reply before sending this. Based
on that I assume your preference is to have
migrate_longterm_unpinnable_pages() return -EAGAIN directly. Happy to
respin with the below changes if that's the case.

Alistair Popple <apopple@...dia.com> writes:

[...]

> +/*
> + * Unpins all pages and migrates device coherent pages and movable_page_list.
> + * Returns zero if all pages were successfully migrated or -errno for failure

Returns -EAGAIN if all pages were successfully migrated or -errno for failure

> + * (or partial success).
> + */
> +static int migrate_longterm_unpinnable_pages(
> +					struct list_head *movable_page_list,
> +					unsigned long nr_pages,
> +					struct page **pages)
> +{
> +	int ret;
> +	unsigned long i;
>
> -unpin_pages:
> -	/*
> -	 * pages[i] might be NULL if any device coherent pages were found.
> -	 */
>  	for (i = 0; i < nr_pages; i++) {
> -		if (!pages[i])
> +		struct folio *folio = page_folio(pages[i]);
> +
> +		if (folio_is_device_coherent(folio)) {
> +			/*
> +			 * Migration will fail if the page is pinned, so convert
> +			 * the pin on the source page to a normal reference.
> +			 */
> +			pages[i] = NULL;
> +			folio_get(folio);
> +			gup_put_folio(folio, 1, FOLL_PIN);
> +
> +			if (migrate_device_coherent_page(&folio->page)) {
> +				ret = -EBUSY;
> +				goto err;
> +			}
> +
>  			continue;
> +		}
>
> +		/*
> +		 * We can't migrate pages with unexpected references, so drop
> +		 * the reference obtained by __get_user_pages_locked().
> +		 * Migrating pages have been added to movable_page_list after
> +		 * calling folio_isolate_lru() which takes a reference so the
> +		 * page won't be freed if it's migrating.
> +		 */
>  		unpin_user_page(pages[i]);
> +		pages[i] = NULL;
>  	}
>
> -	if (!list_empty(&movable_page_list)) {
> +	if (!list_empty(movable_page_list)) {
>  		struct migration_target_control mtc = {
>  			.nid = NUMA_NO_NODE,
>  			.gfp_mask = GFP_USER | __GFP_NOWARN,
>  		};
>
> -		ret = migrate_pages(&movable_page_list, alloc_migration_target,
> -				    NULL, (unsigned long)&mtc, MIGRATE_SYNC,
> -				    MR_LONGTERM_PIN, NULL);
> -		if (ret > 0) /* number of pages not migrated */
> +		if (migrate_pages(movable_page_list, alloc_migration_target,
> +				  NULL, (unsigned long)&mtc, MIGRATE_SYNC,
> +				  MR_LONGTERM_PIN, NULL)) {
>  			ret = -ENOMEM;
> +			goto err;
> +		}
>  	}
>
> -	if (ret && !list_empty(&movable_page_list))
> -		putback_movable_pages(&movable_page_list);
> +	putback_movable_pages(movable_page_list);
> +
> +	return 0;

return -EAGAIN;

> +
> +err:
> +	for (i = 0; i < nr_pages; i++)
> +		if (pages[i])
> +			unpin_user_page(pages[i]);
> +	putback_movable_pages(movable_page_list);
> +
>  	return ret;
>  }
> +
> +/*
> + * Check whether all pages are *allowed* to be pinned. Rather confusingly, all
> + * pages in the range are required to be pinned via FOLL_PIN, before calling
> + * this routine.
> + *
> + * If any pages in the range are not allowed to be pinned, then this routine
> + * will migrate those pages away, unpin all the pages in the range and return
> + * -EAGAIN. The caller should re-pin the entire range with FOLL_PIN and then
> + * call this routine again.
> + *
> + * If an error other than -EAGAIN occurs, this indicates a migration failure.
> + * The caller should give up, and propagate the error back up the call stack.
> + *
> + * If everything is OK and all pages in the range are allowed to be pinned, then
> + * this routine leaves all pages pinned and returns zero for success.
> + */
> +static long check_and_migrate_movable_pages(unsigned long nr_pages,
> +					    struct page **pages)
> +{
> +	int ret;
> +	unsigned long collected;
> +	LIST_HEAD(movable_page_list);
> +
> +	collected = collect_longterm_unpinnable_pages(&movable_page_list,
> +						nr_pages, pages);
> +	if (!collected)
> +		return 0;
> +
> +	ret = migrate_longterm_unpinnable_pages(&movable_page_list, nr_pages,
> +						pages);

	return migrate_longterm_unpinnable_pages(&movable_page_list, nr_pages,
						pages);
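
Just to confirm I've understood the suggestion correctly, the success
path would then end up looking something like this (untested sketch,
unchanged parts elided):

	static int migrate_longterm_unpinnable_pages(
						struct list_head *movable_page_list,
						unsigned long nr_pages,
						struct page **pages)
	{
		...

		putback_movable_pages(movable_page_list);

		/*
		 * Every page in the range has been unpinned (and anything on
		 * movable_page_list migrated), so tell the caller to re-pin
		 * the range with FOLL_PIN and retry.
		 */
		return -EAGAIN;

	err:
		...
	}

with check_and_migrate_movable_pages() just propagating that return value
as above, which matches the "re-pin the entire range and call this routine
again" behaviour documented in the comment.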

 - Alistair
