Message-ID: <20160113164713.GF17512@dhcp22.suse.cz>
Date:	Wed, 13 Jan 2016 17:47:14 +0100
From:	Michal Hocko <mhocko@...nel.org>
To:	Vladimir Davydov <vdavydov@...tuozzo.com>
Cc:	Andrew Morton <akpm@...ux-foundation.org>,
	Johannes Weiner <hannes@...xchg.org>, linux-mm@...ck.org,
	cgroups@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH v2 2/7] mm: vmscan: pass memcg to get_scan_count()

On Thu 17-12-15 15:29:55, Vladimir Davydov wrote:
> memcg will come in handy in get_scan_count(). It can already be used
> there to obtain swappiness directly, instead of having callers pass it
> around. The following patches will add more memcg-related values, which
> will be used there.

OK, the downside is that every user (even outside of memcg proper) has
to be aware that the memcg might be NULL, but this makes the code a bit
simpler, so...
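
To make that concrete: the contract this relies on is that
mem_cgroup_swappiness() is safe to call with a NULL memcg. A simplified
sketch of that contract (not the verbatim in-tree helper, which also
special-cases the root cgroup) would look like:

	static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
	{
		/*
		 * Sketch only: a NULL memcg (e.g. memcg disabled) must
		 * fall back to the global vm_swappiness knob rather than
		 * be dereferenced.
		 */
		if (mem_cgroup_disabled() || !memcg)
			return vm_swappiness;

		return memcg->swappiness;
	}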

> Signed-off-by: Vladimir Davydov <vdavydov@...tuozzo.com>
> Acked-by: Johannes Weiner <hannes@...xchg.org>

Acked-by: Michal Hocko <mhocko@...e.com>

> ---
>  mm/vmscan.c | 20 ++++++++------------
>  1 file changed, 8 insertions(+), 12 deletions(-)
> 
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index bb01b04154ad..acc6bff84e26 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -1957,10 +1957,11 @@ enum scan_balance {
>   * nr[0] = anon inactive pages to scan; nr[1] = anon active pages to scan
>   * nr[2] = file inactive pages to scan; nr[3] = file active pages to scan
>   */
> -static void get_scan_count(struct lruvec *lruvec, int swappiness,
> +static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
>  			   struct scan_control *sc, unsigned long *nr,
>  			   unsigned long *lru_pages)
>  {
> +	int swappiness = mem_cgroup_swappiness(memcg);
>  	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
>  	u64 fraction[2];
>  	u64 denominator = 0;	/* gcc */
> @@ -2184,9 +2185,10 @@ static inline void init_tlb_ubc(void)
>  /*
>   * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
>   */
> -static void shrink_lruvec(struct lruvec *lruvec, int swappiness,
> -			  struct scan_control *sc, unsigned long *lru_pages)
> +static void shrink_zone_memcg(struct zone *zone, struct mem_cgroup *memcg,
> +			      struct scan_control *sc, unsigned long *lru_pages)
>  {
> +	struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);
>  	unsigned long nr[NR_LRU_LISTS];
>  	unsigned long targets[NR_LRU_LISTS];
>  	unsigned long nr_to_scan;
> @@ -2196,7 +2198,7 @@ static void shrink_lruvec(struct lruvec *lruvec, int swappiness,
>  	struct blk_plug plug;
>  	bool scan_adjusted;
>  
> -	get_scan_count(lruvec, swappiness, sc, nr, lru_pages);
> +	get_scan_count(lruvec, memcg, sc, nr, lru_pages);
>  
>  	/* Record the original scan target for proportional adjustments later */
>  	memcpy(targets, nr, sizeof(nr));
> @@ -2400,8 +2402,6 @@ static bool shrink_zone(struct zone *zone, struct scan_control *sc,
>  			unsigned long lru_pages;
>  			unsigned long reclaimed;
>  			unsigned long scanned;
> -			struct lruvec *lruvec;
> -			int swappiness;
>  
>  			if (mem_cgroup_low(root, memcg)) {
>  				if (!sc->may_thrash)
> @@ -2409,12 +2409,10 @@ static bool shrink_zone(struct zone *zone, struct scan_control *sc,
>  				mem_cgroup_events(memcg, MEMCG_LOW, 1);
>  			}
>  
> -			lruvec = mem_cgroup_zone_lruvec(zone, memcg);
> -			swappiness = mem_cgroup_swappiness(memcg);
>  			reclaimed = sc->nr_reclaimed;
>  			scanned = sc->nr_scanned;
>  
> -			shrink_lruvec(lruvec, swappiness, sc, &lru_pages);
> +			shrink_zone_memcg(zone, memcg, sc, &lru_pages);
>  			zone_lru_pages += lru_pages;
>  
>  			if (memcg && is_classzone)
> @@ -2884,8 +2882,6 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *memcg,
>  		.may_unmap = 1,
>  		.may_swap = !noswap,
>  	};
> -	struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);
> -	int swappiness = mem_cgroup_swappiness(memcg);
>  	unsigned long lru_pages;
>  
>  	sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
> @@ -2902,7 +2898,7 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *memcg,
>  	 * will pick up pages from other mem cgroup's as well. We hack
>  	 * the priority and make it zero.
>  	 */
> -	shrink_lruvec(lruvec, swappiness, &sc, &lru_pages);
> +	shrink_zone_memcg(zone, memcg, &sc, &lru_pages);
>  
>  	trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
>  
> -- 
> 2.1.4
> 
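As an aside, the shape of the refactor boils down to "pass the context
object and derive dependent values at the point of use". A toy userspace
model of that pattern (all names below are stand-ins mimicking the
kernel ones, not the real implementations):

	#include <stdio.h>
	#include <stddef.h>

	static int vm_swappiness = 60;		/* global default knob */

	struct zone { const char *name; };
	struct mem_cgroup { int swappiness; };

	/* Stand-in for mem_cgroup_swappiness(): NULL falls back to the
	 * global knob instead of being dereferenced.
	 */
	static int mem_cgroup_swappiness(const struct mem_cgroup *memcg)
	{
		return memcg ? memcg->swappiness : vm_swappiness;
	}

	/* After the patch: the callee derives the per-memcg values
	 * itself instead of taking a precomputed swappiness argument.
	 */
	static void shrink_zone_memcg(const struct zone *zone,
				      const struct mem_cgroup *memcg)
	{
		int swappiness = mem_cgroup_swappiness(memcg);

		printf("reclaiming %s with swappiness=%d\n",
		       zone->name, swappiness);
	}

	int main(void)
	{
		struct zone dma32 = { "DMA32" };
		struct mem_cgroup cg = { .swappiness = 10 };

		shrink_zone_memcg(&dma32, &cg);	/* per-memcg value: 10 */
		shrink_zone_memcg(&dma32, NULL);	/* global fallback: 60 */
		return 0;
	}

Running it prints the per-memcg value for a real group and the global
fallback for NULL, which is exactly the invariant get_scan_count() now
depends on.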

-- 
Michal Hocko
SUSE Labs
