Message-ID: <20200219193529.GD11847@dhcp22.suse.cz>
Date:   Wed, 19 Feb 2020 20:35:29 +0100
From:   Michal Hocko <mhocko@...nel.org>
To:     Sultan Alsawaf <sultan@...neltoast.com>
Cc:     Andrew Morton <akpm@...ux-foundation.org>, linux-mm@...ck.org,
        linux-kernel@...r.kernel.org, Mel Gorman <mgorman@...e.de>,
        Johannes Weiner <hannes@...xchg.org>
Subject: Re: [PATCH] mm: Stop kswapd early when nothing's waiting for it to
 free pages

[Cc Mel and Johannes]

On Wed 19-02-20 10:25:22, Sultan Alsawaf wrote:
> From: Sultan Alsawaf <sultan@...neltoast.com>
> 
> Keeping kswapd running when all the failed allocations that invoked it
> are satisfied incurs a high overhead due to unnecessary page eviction
> and writeback, as well as spurious VM pressure events to various
> registered shrinkers. When kswapd doesn't need to work to make an
> allocation succeed anymore, stop it prematurely to save resources.

I do not think this patch is correct. kswapd is supposed to balance a
node and bring it up to the high watermark. The number of contexts that
woke it up is not really relevant, if for no other reason than that each
allocation request might be of a different size.

Could you be more specific about the problem you are trying to address,
please?

> Signed-off-by: Sultan Alsawaf <sultan@...neltoast.com>
> ---
>  include/linux/mmzone.h |  2 ++
>  mm/page_alloc.c        | 17 ++++++++++++++---
>  mm/vmscan.c            |  3 ++-
>  3 files changed, 18 insertions(+), 4 deletions(-)
> 
> diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
> index 462f6873905a..49c922abfb90 100644
> --- a/include/linux/mmzone.h
> +++ b/include/linux/mmzone.h
> @@ -20,6 +20,7 @@
>  #include <linux/atomic.h>
>  #include <linux/mm_types.h>
>  #include <linux/page-flags.h>
> +#include <linux/refcount.h>
>  #include <asm/page.h>
>  
>  /* Free memory management - zoned buddy allocator.  */
> @@ -735,6 +736,7 @@ typedef struct pglist_data {
>  	unsigned long node_spanned_pages; /* total size of physical page
>  					     range, including holes */
>  	int node_id;
> +	refcount_t kswapd_waiters;
>  	wait_queue_head_t kswapd_wait;
>  	wait_queue_head_t pfmemalloc_wait;
>  	struct task_struct *kswapd;	/* Protected by
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index 3c4eb750a199..2d4caacfd2fc 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -4401,6 +4401,8 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
>  	int no_progress_loops;
>  	unsigned int cpuset_mems_cookie;
>  	int reserve_flags;
> +	pg_data_t *pgdat = ac->preferred_zoneref->zone->zone_pgdat;
> +	bool woke_kswapd = false;
>  
>  	/*
>  	 * We also sanity check to catch abuse of atomic reserves being used by
> @@ -4434,8 +4436,13 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
>  	if (!ac->preferred_zoneref->zone)
>  		goto nopage;
>  
> -	if (alloc_flags & ALLOC_KSWAPD)
> +	if (alloc_flags & ALLOC_KSWAPD) {
> +		if (!woke_kswapd) {
> +			refcount_inc(&pgdat->kswapd_waiters);
> +			woke_kswapd = true;
> +		}
>  		wake_all_kswapds(order, gfp_mask, ac);
> +	}
>  
>  	/*
>  	 * The adjusted alloc_flags might result in immediate success, so try
> @@ -4640,9 +4647,12 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
>  		goto retry;
>  	}
>  fail:
> -	warn_alloc(gfp_mask, ac->nodemask,
> -			"page allocation failure: order:%u", order);
>  got_pg:
> +	if (woke_kswapd)
> +		refcount_dec(&pgdat->kswapd_waiters);
> +	if (!page)
> +		warn_alloc(gfp_mask, ac->nodemask,
> +				"page allocation failure: order:%u", order);
>  	return page;
>  }
>  
> @@ -6711,6 +6721,7 @@ static void __meminit pgdat_init_internals(struct pglist_data *pgdat)
>  	pgdat_page_ext_init(pgdat);
>  	spin_lock_init(&pgdat->lru_lock);
>  	lruvec_init(&pgdat->__lruvec);
> +	pgdat->kswapd_waiters = (refcount_t)REFCOUNT_INIT(0);
>  }
>  
>  static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid,
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index c05eb9efec07..e795add372d1 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -3694,7 +3694,8 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
>  		__fs_reclaim_release();
>  		ret = try_to_freeze();
>  		__fs_reclaim_acquire();
> -		if (ret || kthread_should_stop())
> +		if (ret || kthread_should_stop() ||
> +		    !refcount_read(&pgdat->kswapd_waiters))
>  			break;
>  
>  		/*
> -- 
> 2.25.1
> 

-- 
Michal Hocko
SUSE Labs
