Message-ID: <20160810081453.GB573@swordfish>
Date: Wed, 10 Aug 2016 17:14:53 +0900
From: Sergey Senozhatsky <sergey.senozhatsky@...il.com>
To: js1304@...il.com
Cc: Andrew Morton <akpm@...ux-foundation.org>,
Vlastimil Babka <vbabka@...e.cz>,
Minchan Kim <minchan@...nel.org>,
Michal Hocko <mhocko@...nel.org>,
Sergey Senozhatsky <sergey.senozhatsky@...il.com>,
linux-kernel@...r.kernel.org, linux-mm@...ck.org,
Joonsoo Kim <iamjoonsoo.kim@....com>
Subject: Re: [PATCH 1/5] mm/debug_pagealloc: clean-up guard page handling code

Hello,

On (08/10/16 15:16), js1304@...il.com wrote:
[..]
> -static inline void set_page_guard(struct zone *zone, struct page *page,
> +static inline bool set_page_guard(struct zone *zone, struct page *page,
> unsigned int order, int migratetype)
> {
> struct page_ext *page_ext;
>
> if (!debug_guardpage_enabled())
> - return;
> + return false;
> +
> + if (order >= debug_guardpage_minorder())
> + return false;
>
> page_ext = lookup_page_ext(page);
> if (unlikely(!page_ext))
> - return;
> + return false;
>
> __set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
>
> @@ -656,6 +659,8 @@ static inline void set_page_guard(struct zone *zone, struct page *page,
> set_page_private(page, order);
> /* Guard pages are not available for any usage */
> __mod_zone_freepage_state(zone, -(1 << order), migratetype);
> +
> + return true;
> }
>
> static inline void clear_page_guard(struct zone *zone, struct page *page,
> @@ -678,8 +683,8 @@ static inline void clear_page_guard(struct zone *zone, struct page *page,
> }
> #else
> struct page_ext_operations debug_guardpage_ops = { NULL, };
> -static inline void set_page_guard(struct zone *zone, struct page *page,
> - unsigned int order, int migratetype) {}
> +static inline bool set_page_guard(struct zone *zone, struct page *page,
> + unsigned int order, int migratetype) { return false; }
> static inline void clear_page_guard(struct zone *zone, struct page *page,
> unsigned int order, int migratetype) {}
> #endif
> @@ -1650,18 +1655,15 @@ static inline void expand(struct zone *zone, struct page *page,
> size >>= 1;
> VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
>
> - if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
> - debug_guardpage_enabled() &&
> - high < debug_guardpage_minorder()) {
> - /*
> - * Mark as guard pages (or page), that will allow to
> - * merge back to allocator when buddy will be freed.
> - * Corresponding page table entries will not be touched,
> - * pages will stay not present in virtual address space
> - */
> - set_page_guard(zone, &page[size], high, migratetype);
> + /*
> + * Mark as guard pages (or page), that will allow to
> + * merge back to allocator when buddy will be freed.
> + * Corresponding page table entries will not be touched,
> + * pages will stay not present in virtual address space
> + */
> + if (set_page_guard(zone, &page[size], high, migratetype))
> continue;
> - }

so previously, with CONFIG_DEBUG_PAGEALLOC=n, IS_ENABLED(CONFIG_DEBUG_PAGEALLOC)
could have optimized out the entire branch at compile time -- no
set_page_guard() invocation and no runtime checks, right? but now we would
call set_page_guard() every time expand() splits off a buddy?
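
to illustrate what I'm comparing, a minimal user-space sketch of the two
shapes (hypothetical names, not the actual kernel code), assuming
set_page_guard() stays a static inline next to its caller:

#include <stdbool.h>
#include <stdio.h>

/* toggle to model CONFIG_DEBUG_PAGEALLOC=y/n (hypothetical stand-in) */
#define DEBUG_PAGEALLOC 1

#if DEBUG_PAGEALLOC
static bool guard_enabled;	/* models _debug_guardpage_enabled */

static inline bool set_guard(unsigned int order)
{
	/* the runtime check now lives in the callee */
	if (!guard_enabled)
		return false;
	printf("guarding order-%u page\n", order);
	return true;
}
#else
/* constant false: the caller's branch is still dead code to the compiler */
static inline bool set_guard(unsigned int order) { return false; }
#endif

static void expand(unsigned int high)
{
	/*
	 * old shape, for comparison:
	 *	if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
	 *	    guard_enabled && ...) {
	 *		set_guard(high);
	 *		continue;
	 *	}
	 */
	while (high--) {
		/* new shape: unconditional call, branch on the return value */
		if (set_guard(high))
			continue;
		/* ... add the buddy half to the free list ... */
	}
}

int main(void)
{
	expand(3);
	return 0;
}
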
-ss
> +
> list_add(&page[size].lru, &area->free_list[migratetype]);
> area->nr_free++;
> set_page_order(&page[size], high);