lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date:	Mon, 7 Dec 2015 16:59:56 +0800
From:	Aaron Lu <aaron.lu@...el.com>
To:	Joonsoo Kim <iamjoonsoo.kim@....com>
Cc:	Vlastimil Babka <vbabka@...e.cz>, linux-mm@...ck.org,
	linux-kernel@...r.kernel.org, Rik van Riel <riel@...hat.com>,
	David Rientjes <rientjes@...gle.com>,
	Mel Gorman <mgorman@...e.de>, Minchan Kim <minchan@...nel.org>
Subject: Re: [RFC 0/3] reduce latency of direct async compaction

On Mon, Dec 07, 2015 at 04:35:24PM +0900, Joonsoo Kim wrote:
> It looks like the overhead still remains. I guess that the migration scanner
> would call pageblock_pfn_to_page() for a more extended range, so the
> overhead still remains.
> 
> I have an idea to solve this problem. Aaron, could you test the following patch
> on top of base? It tries to skip calling pageblock_pfn_to_page()

It doesn't apply on top of 25364a9e54fb8296837061bf684b76d20eec01fb
cleanly, so I made some changes to make it apply and the result is:
https://github.com/aaronlu/linux/commit/cb8d05829190b806ad3948ff9b9e08c8ba1daf63

A problem occurred right after the test started:
[   58.080962] BUG: unable to handle kernel paging request at ffffea0082000018
[   58.089124] IP: [<ffffffff81193f29>] compaction_alloc+0xf9/0x270
[   58.096109] PGD 107ffd6067 PUD 207f7d5067 PMD 0
[   58.101569] Oops: 0000 [#1] SMP 

The full dmesg is attached.

Regards,
Aaron

> if we check that zone is contiguous at initialization stage.
> 
> Thanks.
> 
> ---->8----
> From 9c4fbf8f8ed37eb88a04a97908e76ba2437404a2 Mon Sep 17 00:00:00 2001
> From: Joonsoo Kim <iamjoonsoo.kim@....com>
> Date: Mon, 7 Dec 2015 14:51:42 +0900
> Subject: [PATCH] mm/compaction: Optimize pageblock_pfn_to_page() for
>  contiguous zone
> 
> Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@....com>
> ---
>  include/linux/mmzone.h |  1 +
>  mm/compaction.c        | 35 ++++++++++++++++++++++++++++++++++-
>  2 files changed, 35 insertions(+), 1 deletion(-)
> 
> diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
> index e23a9e7..573f9a9 100644
> --- a/include/linux/mmzone.h
> +++ b/include/linux/mmzone.h
> @@ -521,6 +521,7 @@ struct zone {
>  #endif
>  
>  #if defined CONFIG_COMPACTION || defined CONFIG_CMA
> +       int                     contiguous;
>         /* Set to true when the PG_migrate_skip bits should be cleared */
>         bool                    compact_blockskip_flush;
>  #endif
> diff --git a/mm/compaction.c b/mm/compaction.c
> index 67b8d90..f4e8c89 100644
> --- a/mm/compaction.c
> +++ b/mm/compaction.c
> @@ -88,7 +88,7 @@ static inline bool migrate_async_suitable(int migratetype)
>   * the first and last page of a pageblock and avoid checking each individual
>   * page in a pageblock.
>   */
> -static struct page *pageblock_pfn_to_page(unsigned long start_pfn,
> +static struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
>                                 unsigned long end_pfn, struct zone *zone)
>  {
>         struct page *start_page;
> @@ -114,6 +114,37 @@ static struct page *pageblock_pfn_to_page(unsigned long start_pfn,
>         return start_page;
>  }
>  
> +static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
> +                               unsigned long end_pfn, struct zone *zone)
> +{
> +       if (zone->contiguous == 1)
> +               return pfn_to_page(start_pfn);
> +
> +       return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
> +}
> +
> +static void check_zone_contiguous(struct zone *zone)
> +{
> +       unsigned long pfn = zone->zone_start_pfn;
> +       unsigned long end_pfn = zone_end_pfn(zone);
> +
> +       /* Already checked */
> +       if (zone->contiguous)
> +               return;
> +
> +       pfn = ALIGN(pfn + 1, pageblock_nr_pages);
> +       for (; pfn < end_pfn; pfn += pageblock_nr_pages) {
> +               if (!__pageblock_pfn_to_page(pfn, end_pfn, zone)) {
> +                       /* We have hole */
> +                       zone->contiguous = -1;
> +                       return;
> +               }
> +       }
> +
> +       /* We don't have hole */
> +       zone->contiguous = 1;
> +}
> +
>  #ifdef CONFIG_COMPACTION
>  
>  /* Do not skip compaction more than 64 times */
> @@ -1353,6 +1384,8 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
>                 ;
>         }
>  
> +       check_zone_contiguous(zone);
> +
>         /*
>          * Clear pageblock skip if there were failures recently and compaction
>          * is about to be retried after being deferred. kswapd does not do
> -- 
> 1.9.1
> 
> 

View attachment "dmesg" of type "text/plain" (166534 bytes)

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ