Message-ID: <5369FE33.2020908@suse.cz>
Date:	Wed, 07 May 2014 11:34:43 +0200
From:	Vlastimil Babka <vbabka@...e.cz>
To:	David Rientjes <rientjes@...gle.com>,
	Andrew Morton <akpm@...ux-foundation.org>
CC:	Mel Gorman <mgorman@...e.de>, Rik van Riel <riel@...hat.com>,
	Joonsoo Kim <iamjoonsoo.kim@....com>,
	Greg Thelen <gthelen@...gle.com>,
	Hugh Dickins <hughd@...gle.com>, linux-kernel@...r.kernel.org,
	linux-mm@...ck.org
Subject: Re: [patch v3 3/6] mm, compaction: add per-zone migration pfn cache
 for async compaction

On 05/07/2014 04:22 AM, David Rientjes wrote:
> Each zone has a cached migration scanner pfn for memory compaction so that
> subsequent calls to memory compaction can start where the previous call left
> off.
>
> Currently, the compaction migration scanner only updates the per-zone cached
> pfn when pageblocks were not skipped for async compaction.  This creates a
> dependency on calling sync compaction to prevent subsequent calls to async
> compaction from scanning an enormous number of non-MOVABLE pageblocks each
> time it is called.  On large machines, this can be very expensive.
>
> This patch adds a separate per-zone cached migration scanner pfn for async
> compaction.  It is updated every time a pageblock has been scanned in its
> entirety and no pages from it were successfully isolated.  The cached
> migration scanner pfn for sync compaction is updated only when called for
> sync compaction.
>
> Signed-off-by: David Rientjes <rientjes@...gle.com>
> ---
>   v3: do not update pageblock skip metadata when a pageblock is skipped
>       due to async compaction, per Vlastimil.

Great.

Acked-by: Vlastimil Babka <vbabka@...e.cz>
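
For anyone skimming the thread, the core trick is that cc->sync (0 for
async, 1 for sync) now indexes a two-element cache, so async compaction
records its own restart pfn while the sync cache only advances during
sync passes. A minimal userspace sketch of that scheme (a simplified
model with made-up names, not the kernel code itself):

	#include <stdbool.h>
	#include <stdio.h>

	/* Stand-in for the per-zone cache: [0] = async, [1] = sync */
	static unsigned long cached_migrate_pfn[2];

	static void record_progress(unsigned long pfn, bool sync)
	{
		/* Async progress is always recorded... */
		if (pfn > cached_migrate_pfn[0])
			cached_migrate_pfn[0] = pfn;
		/* ...but the sync cache advances only for sync compaction,
		   so a later sync pass can rescan what async skipped. */
		if (sync && pfn > cached_migrate_pfn[1])
			cached_migrate_pfn[1] = pfn;
	}

	int main(void)
	{
		record_progress(0x2000, false);	/* async scanned further */
		record_progress(0x1000, true);	/* sync pass */
		/* each mode restarts from cached_migrate_pfn[sync] */
		printf("async restart %#lx, sync restart %#lx\n",
		       cached_migrate_pfn[false], cached_migrate_pfn[true]);
		return 0;
	}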


>   include/linux/mmzone.h |  5 ++--
>   mm/compaction.c        | 66 ++++++++++++++++++++++++++++++--------------------
>   2 files changed, 43 insertions(+), 28 deletions(-)
>
> diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
> --- a/include/linux/mmzone.h
> +++ b/include/linux/mmzone.h
> @@ -360,9 +360,10 @@ struct zone {
>   	/* Set to true when the PG_migrate_skip bits should be cleared */
>   	bool			compact_blockskip_flush;
>
> -	/* pfns where compaction scanners should start */
> +	/* pfn where compaction free scanner should start */
>   	unsigned long		compact_cached_free_pfn;
> -	unsigned long		compact_cached_migrate_pfn;
> +	/* pfn where async and sync compaction migration scanner should start */
> +	unsigned long		compact_cached_migrate_pfn[2];
>   #endif
>   #ifdef CONFIG_MEMORY_HOTPLUG
>   	/* see spanned/present_pages for more description */
> diff --git a/mm/compaction.c b/mm/compaction.c
> --- a/mm/compaction.c
> +++ b/mm/compaction.c
> @@ -89,7 +89,8 @@ static void __reset_isolation_suitable(struct zone *zone)
>   	unsigned long end_pfn = zone_end_pfn(zone);
>   	unsigned long pfn;
>
> -	zone->compact_cached_migrate_pfn = start_pfn;
> +	zone->compact_cached_migrate_pfn[0] = start_pfn;
> +	zone->compact_cached_migrate_pfn[1] = start_pfn;
>   	zone->compact_cached_free_pfn = end_pfn;
>   	zone->compact_blockskip_flush = false;
>
> @@ -131,9 +132,10 @@ void reset_isolation_suitable(pg_data_t *pgdat)
>    */
>   static void update_pageblock_skip(struct compact_control *cc,
>   			struct page *page, unsigned long nr_isolated,
> -			bool migrate_scanner)
> +			bool set_unsuitable, bool migrate_scanner)
>   {
>   	struct zone *zone = cc->zone;
> +	unsigned long pfn;
>
>   	if (cc->ignore_skip_hint)
>   		return;
> @@ -141,20 +143,31 @@ static void update_pageblock_skip(struct compact_control *cc,
>   	if (!page)
>   		return;
>
> -	if (!nr_isolated) {
> -		unsigned long pfn = page_to_pfn(page);
> +	if (nr_isolated)
> +		return;
> +
> +	/*
> +	 * Only skip pageblocks when all forms of compaction will be known to
> +	 * fail in the near future.
> +	 */
> +	if (set_unsuitable)
>   		set_pageblock_skip(page);
>
> -		/* Update where compaction should restart */
> -		if (migrate_scanner) {
> -			if (!cc->finished_update_migrate &&
> -			    pfn > zone->compact_cached_migrate_pfn)
> -				zone->compact_cached_migrate_pfn = pfn;
> -		} else {
> -			if (!cc->finished_update_free &&
> -			    pfn < zone->compact_cached_free_pfn)
> -				zone->compact_cached_free_pfn = pfn;
> -		}
> +	pfn = page_to_pfn(page);
> +
> +	/* Update where async and sync compaction should restart */
> +	if (migrate_scanner) {
> +		if (cc->finished_update_migrate)
> +			return;
> +		if (pfn > zone->compact_cached_migrate_pfn[0])
> +			zone->compact_cached_migrate_pfn[0] = pfn;
> +		if (cc->sync && pfn > zone->compact_cached_migrate_pfn[1])
> +			zone->compact_cached_migrate_pfn[1] = pfn;
> +	} else {
> +		if (cc->finished_update_free)
> +			return;
> +		if (pfn < zone->compact_cached_free_pfn)
> +			zone->compact_cached_free_pfn = pfn;
>   	}
>   }
>   #else
> @@ -166,7 +179,7 @@ static inline bool isolation_suitable(struct compact_control *cc,
>
>   static void update_pageblock_skip(struct compact_control *cc,
>   			struct page *page, unsigned long nr_isolated,
> -			bool migrate_scanner)
> +			bool set_unsuitable, bool migrate_scanner)
>   {
>   }
>   #endif /* CONFIG_COMPACTION */
> @@ -329,7 +342,8 @@ isolate_fail:
>
>   	/* Update the pageblock-skip if the whole pageblock was scanned */
>   	if (blockpfn == end_pfn)
> -		update_pageblock_skip(cc, valid_page, total_isolated, false);
> +		update_pageblock_skip(cc, valid_page, total_isolated, true,
> +				      false);
>
>   	count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
>   	if (total_isolated)
> @@ -464,7 +478,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
>   	unsigned long flags;
>   	bool locked = false;
>   	struct page *page = NULL, *valid_page = NULL;
> -	bool skipped_async_unsuitable = false;
> +	bool set_unsuitable = true;
>   	const isolate_mode_t mode = (!cc->sync ? ISOLATE_ASYNC_MIGRATE : 0) |
>   				    (unevictable ? ISOLATE_UNEVICTABLE : 0);
>
> @@ -541,8 +555,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
>   			 */
>   			mt = get_pageblock_migratetype(page);
>   			if (!cc->sync && !migrate_async_suitable(mt)) {
> -				cc->finished_update_migrate = true;
> -				skipped_async_unsuitable = true;
> +				set_unsuitable = false;
>   				goto next_pageblock;
>   			}
>   		}
> @@ -646,11 +659,10 @@ next_pageblock:
>   	/*
>   	 * Update the pageblock-skip information and cached scanner pfn,
>   	 * if the whole pageblock was scanned without isolating any page.
> -	 * This is not done when pageblock was skipped due to being unsuitable
> -	 * for async compaction, so that eventual sync compaction can try.
>   	 */
> -	if (low_pfn == end_pfn && !skipped_async_unsuitable)
> -		update_pageblock_skip(cc, valid_page, nr_isolated, true);
> +	if (low_pfn == end_pfn)
> +		update_pageblock_skip(cc, valid_page, nr_isolated,
> +				      set_unsuitable, true);
>
>   	trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);
>
> @@ -877,7 +889,8 @@ static int compact_finished(struct zone *zone,
>   	/* Compaction run completes if the migrate and free scanner meet */
>   	if (cc->free_pfn <= cc->migrate_pfn) {
>   		/* Let the next compaction start anew. */
> -		zone->compact_cached_migrate_pfn = zone->zone_start_pfn;
> +		zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
> +		zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
>   		zone->compact_cached_free_pfn = zone_end_pfn(zone);
>
>   		/*
> @@ -1002,7 +1015,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
>   	 * information on where the scanners should start but check that it
>   	 * is initialised by ensuring the values are within zone boundaries.
>   	 */
> -	cc->migrate_pfn = zone->compact_cached_migrate_pfn;
> +	cc->migrate_pfn = zone->compact_cached_migrate_pfn[cc->sync];
>   	cc->free_pfn = zone->compact_cached_free_pfn;
>   	if (cc->free_pfn < start_pfn || cc->free_pfn > end_pfn) {
>   		cc->free_pfn = end_pfn & ~(pageblock_nr_pages-1);
> @@ -1010,7 +1023,8 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
>   	}
>   	if (cc->migrate_pfn < start_pfn || cc->migrate_pfn > end_pfn) {
>   		cc->migrate_pfn = start_pfn;
> -		zone->compact_cached_migrate_pfn = cc->migrate_pfn;
> +		zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
> +		zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
>   	}
>
>   	trace_mm_compaction_begin(start_pfn, cc->migrate_pfn, cc->free_pfn, end_pfn);
>
