[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <d4d0ec2b-114f-33c0-4d13-bba425fde4bb@suse.cz>
Date: Fri, 3 Jun 2016 12:10:10 +0200
From: Vlastimil Babka <vbabka@...e.cz>
To: js1304@...il.com, Andrew Morton <akpm@...ux-foundation.org>
Cc: mgorman@...hsingularity.net, Minchan Kim <minchan@...nel.org>,
Alexander Potapenko <glider@...gle.com>,
Hugh Dickins <hughd@...gle.com>,
Michal Hocko <mhocko@...nel.org>, linux-kernel@...r.kernel.org,
linux-mm@...ck.org, Joonsoo Kim <iamjoonsoo.kim@....com>
Subject: Re: [PATCH v2 1/7] mm/compaction: split freepages without holding the
zone lock
On 05/26/2016 04:37 AM, js1304@...il.com wrote:
> From: Joonsoo Kim <iamjoonsoo.kim@....com>
>
> We don't need to split freepages while holding the zone lock. Doing so
> causes more contention on the zone lock, which is not desirable.
>
> Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@....com>
So it wasn't possible to at least move this code from compaction.c to
page_alloc.c? Or better, reuse prep_new_page() with some forged gfp/alloc_flags?
As we discussed in v1...
> ---
> include/linux/mm.h | 1 -
> mm/compaction.c | 42 ++++++++++++++++++++++++++++++------------
> mm/page_alloc.c | 27 ---------------------------
> 3 files changed, 30 insertions(+), 40 deletions(-)
>
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index a00ec81..1a1782c 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -537,7 +537,6 @@ void __put_page(struct page *page);
> void put_pages_list(struct list_head *pages);
>
> void split_page(struct page *page, unsigned int order);
> -int split_free_page(struct page *page);
>
> /*
> * Compound pages have a destructor function. Provide a
> diff --git a/mm/compaction.c b/mm/compaction.c
> index 1427366..8e013eb 100644
> --- a/mm/compaction.c
> +++ b/mm/compaction.c
> @@ -65,13 +65,31 @@ static unsigned long release_freepages(struct list_head *freelist)
>
> static void map_pages(struct list_head *list)
> {
> - struct page *page;
> + unsigned int i, order, nr_pages;
> + struct page *page, *next;
> + LIST_HEAD(tmp_list);
> +
> + list_for_each_entry_safe(page, next, list, lru) {
> + list_del(&page->lru);
> +
> + order = page_private(page);
> + nr_pages = 1 << order;
> + set_page_private(page, 0);
> + set_page_refcounted(page);
> +
> + arch_alloc_page(page, order);
> + kernel_map_pages(page, nr_pages, 1);
> + kasan_alloc_pages(page, order);
> + if (order)
> + split_page(page, order);
>
> - list_for_each_entry(page, list, lru) {
> - arch_alloc_page(page, 0);
> - kernel_map_pages(page, 1, 1);
> - kasan_alloc_pages(page, 0);
> + for (i = 0; i < nr_pages; i++) {
> + list_add(&page->lru, &tmp_list);
> + page++;
> + }
> }
> +
> + list_splice(&tmp_list, list);
> }
>
> static inline bool migrate_async_suitable(int migratetype)
> @@ -368,12 +386,13 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
> unsigned long flags = 0;
> bool locked = false;
> unsigned long blockpfn = *start_pfn;
> + unsigned int order;
>
> cursor = pfn_to_page(blockpfn);
>
> /* Isolate free pages. */
> for (; blockpfn < end_pfn; blockpfn++, cursor++) {
> - int isolated, i;
> + int isolated;
> struct page *page = cursor;
>
> /*
> @@ -439,13 +458,12 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
> goto isolate_fail;
> }
>
> - /* Found a free page, break it into order-0 pages */
> - isolated = split_free_page(page);
> + /* Found a free page, will break it into order-0 pages */
> + order = page_order(page);
> + isolated = __isolate_free_page(page, page_order(page));
> + set_page_private(page, order);
> total_isolated += isolated;
> - for (i = 0; i < isolated; i++) {
> - list_add(&page->lru, freelist);
> - page++;
> - }
> + list_add_tail(&page->lru, freelist);
>
> /* If a page was split, advance to the end of it */
> if (isolated) {
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index d27e8b9..5134f46 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -2525,33 +2525,6 @@ int __isolate_free_page(struct page *page, unsigned int order)
> }
>
> /*
> - * Similar to split_page except the page is already free. As this is only
> - * being used for migration, the migratetype of the block also changes.
> - * As this is called with interrupts disabled, the caller is responsible
> - * for calling arch_alloc_page() and kernel_map_page() after interrupts
> - * are enabled.
> - *
> - * Note: this is probably too low level an operation for use in drivers.
> - * Please consult with lkml before using this in your driver.
> - */
> -int split_free_page(struct page *page)
> -{
> - unsigned int order;
> - int nr_pages;
> -
> - order = page_order(page);
> -
> - nr_pages = __isolate_free_page(page, order);
> - if (!nr_pages)
> - return 0;
> -
> - /* Split into individual pages */
> - set_page_refcounted(page);
> - split_page(page, order);
> - return nr_pages;
> -}
> -
> -/*
> * Update NUMA hit/miss statistics
> *
> * Must be called with interrupts disabled.
>
Powered by blists - more mailing lists