Message-ID: <d4eb6520-e1ee-9202-49d0-a9216a96a488@linux.alibaba.com>
Date: Thu, 6 Aug 2020 11:47:53 +0800
From: Alex Shi <alex.shi@...ux.alibaba.com>
To: akpm@...ux-foundation.org, mgorman@...hsingularity.net,
tj@...nel.org, hughd@...gle.com, khlebnikov@...dex-team.ru,
daniel.m.jordan@...cle.com, yang.shi@...ux.alibaba.com,
willy@...radead.org, hannes@...xchg.org, lkp@...el.com,
linux-mm@...ck.org, linux-kernel@...r.kernel.org,
cgroups@...r.kernel.org, shakeelb@...gle.com,
iamjoonsoo.kim@....com, richard.weiyang@...il.com,
kirill@...temov.name, alexander.duyck@...il.com,
rong.a.chen@...el.com
Subject: Re: [PATCH v17 01/21] mm/vmscan: remove unnecessary lruvec adding

On 2020/7/25 8:59 PM, Alex Shi wrote:
> We don't have to add a freeable page to the lru and then remove it
> again. This change saves a couple of actions and makes the move clearer.
>
> The SetPageLRU needs to be kept here for list integrity.
> Otherwise:
>  #0 move_pages_to_lru               #1 release_pages
>  if (put_page_testzero())
>                                     if !put_page_testzero
>                                        !PageLRU //skip lru_lock
>                                          list_add(&page->lru,)
>  list_add(&page->lru,)               //corrupt
The race comment should be corrected to this:
/*
 * The SetPageLRU needs to be kept here for list integrity.
 * Otherwise:
 *   #0 move_pages_to_lru              #1 release_pages
 *                                     if !put_page_testzero
 *   if (put_page_testzero())
 *                                        !PageLRU //skip lru_lock
 *   SetPageLRU()
 *   list_add(&page->lru,)
 *                                        list_add(&page->lru,)
 */
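
To make the ordering requirement concrete, here is a heavily trimmed sketch
of the release_pages() side (mm/swap.c, around v5.8). It is not a verbatim
copy -- batching, compound-page and zone-device handling are dropped, and the
function name is only for illustration. The point is that release_pages()
only takes lru_lock and unlinks the page when it already sees PageLRU; if
move_pages_to_lru() did put_page_testzero() before SetPageLRU(), a racing
release_pages() would skip that branch and both sides would list_add() the
same dead page.

	static void release_pages_sketch(struct page **pages, int nr)
	{
		LIST_HEAD(pages_to_free);
		int i;

		for (i = 0; i < nr; i++) {
			struct page *page = pages[i];

			/* last reference gone? otherwise leave the page alone */
			if (!put_page_testzero(page))
				continue;

			if (PageLRU(page)) {
				/*
				 * Only when PageLRU is already visible do we
				 * take lru_lock and unlink the page.
				 */
				struct pglist_data *pgdat = page_pgdat(page);
				struct lruvec *lruvec;
				unsigned long flags;

				spin_lock_irqsave(&pgdat->lru_lock, flags);
				lruvec = mem_cgroup_page_lruvec(page, pgdat);
				VM_BUG_ON_PAGE(!PageLRU(page), page);
				__ClearPageLRU(page);
				del_page_from_lru_list(page, lruvec,
						       page_off_lru(page));
				spin_unlock_irqrestore(&pgdat->lru_lock, flags);
			}
			/*
			 * If #0 has not done SetPageLRU() yet, the branch
			 * above is skipped and the page goes straight to the
			 * free list -- while #0 may still list_add() it.
			 */
			list_add(&page->lru, &pages_to_free);
		}

		free_unref_page_list(&pages_to_free);
	}
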
>
> [akpm@...ux-foundation.org: coding style fixes]
> Signed-off-by: Alex Shi <alex.shi@...ux.alibaba.com>
> Cc: Andrew Morton <akpm@...ux-foundation.org>
> Cc: Johannes Weiner <hannes@...xchg.org>
> Cc: Tejun Heo <tj@...nel.org>
> Cc: Matthew Wilcox <willy@...radead.org>
> Cc: Hugh Dickins <hughd@...gle.com>
> Cc: linux-mm@...ck.org
> Cc: linux-kernel@...r.kernel.org
> ---
> mm/vmscan.c | 37 ++++++++++++++++++++++++-------------
> 1 file changed, 24 insertions(+), 13 deletions(-)
>
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index 749d239c62b2..ddb29d813d77 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -1856,26 +1856,29 @@ static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
> while (!list_empty(list)) {
> page = lru_to_page(list);
> VM_BUG_ON_PAGE(PageLRU(page), page);
> + list_del(&page->lru);
> if (unlikely(!page_evictable(page))) {
> - list_del(&page->lru);
> spin_unlock_irq(&pgdat->lru_lock);
> putback_lru_page(page);
> spin_lock_irq(&pgdat->lru_lock);
> continue;
> }
> - lruvec = mem_cgroup_page_lruvec(page, pgdat);
>
> + /*
> + * The SetPageLRU needs to be kept here for list integrity.
> + * Otherwise:
> + *   #0 move_pages_to_lru              #1 release_pages
> + *   if (put_page_testzero())
> + *                                     if !put_page_testzero
> + *                                        !PageLRU //skip lru_lock
> + *                                          list_add(&page->lru,)
> + *   list_add(&page->lru,)              //corrupt
> + */
/*
 * The SetPageLRU needs to be kept here for list integrity.
 * Otherwise:
 *   #0 move_pages_to_lru              #1 release_pages
 *                                     if !put_page_testzero
 *   if (put_page_testzero())
 *                                        !PageLRU //skip lru_lock
 *   SetPageLRU()
 *   list_add(&page->lru,)
 *                                        list_add(&page->lru,)
 */
> SetPageLRU(page);
> - lru = page_lru(page);
>
> - nr_pages = hpage_nr_pages(page);
> - update_lru_size(lruvec, lru, page_zonenum(page), nr_pages);
> - list_move(&page->lru, &lruvec->lists[lru]);
> -
> - if (put_page_testzero(page)) {
> + if (unlikely(put_page_testzero(page))) {
> __ClearPageLRU(page);
> __ClearPageActive(page);
> - del_page_from_lru_list(page, lruvec, lru);
>
> if (unlikely(PageCompound(page))) {
> spin_unlock_irq(&pgdat->lru_lock);
> @@ -1883,11 +1886,19 @@ static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
> spin_lock_irq(&pgdat->lru_lock);
> } else
> list_add(&page->lru, &pages_to_free);
> - } else {
> - nr_moved += nr_pages;
> - if (PageActive(page))
> - workingset_age_nonresident(lruvec, nr_pages);
> +
> + continue;
> }
> +
> + lruvec = mem_cgroup_page_lruvec(page, pgdat);
> + lru = page_lru(page);
> + nr_pages = hpage_nr_pages(page);
> +
> + update_lru_size(lruvec, lru, page_zonenum(page), nr_pages);
> + list_add(&page->lru, &lruvec->lists[lru]);
> + nr_moved += nr_pages;
> + if (PageActive(page))
> + workingset_age_nonresident(lruvec, nr_pages);
> }
>
> /*
>
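For easier reading, this is roughly how the loop body of move_pages_to_lru()
ends up after this patch, assembled from the hunks above with the corrected
race comment dropped in. It is a reconstruction, not a verbatim copy of the
tree; the context between the two quoted hunks is elided here as in the quote.

	while (!list_empty(list)) {
		page = lru_to_page(list);
		VM_BUG_ON_PAGE(PageLRU(page), page);
		list_del(&page->lru);
		if (unlikely(!page_evictable(page))) {
			spin_unlock_irq(&pgdat->lru_lock);
			putback_lru_page(page);
			spin_lock_irq(&pgdat->lru_lock);
			continue;
		}

		/*
		 * The SetPageLRU needs to be kept here for list integrity.
		 * Otherwise:
		 *   #0 move_pages_to_lru              #1 release_pages
		 *                                     if !put_page_testzero
		 *   if (put_page_testzero())
		 *                                        !PageLRU //skip lru_lock
		 *   SetPageLRU()
		 *   list_add(&page->lru,)
		 *                                        list_add(&page->lru,)
		 */
		SetPageLRU(page);

		if (unlikely(put_page_testzero(page))) {
			__ClearPageLRU(page);
			__ClearPageActive(page);

			if (unlikely(PageCompound(page))) {
				spin_unlock_irq(&pgdat->lru_lock);
				/* ... compound-page handling elided, as
				 *     between the two quoted hunks ... */
				spin_lock_irq(&pgdat->lru_lock);
			} else
				list_add(&page->lru, &pages_to_free);

			continue;
		}

		lruvec = mem_cgroup_page_lruvec(page, pgdat);
		lru = page_lru(page);
		nr_pages = hpage_nr_pages(page);

		update_lru_size(lruvec, lru, page_zonenum(page), nr_pages);
		list_add(&page->lru, &lruvec->lists[lru]);
		nr_moved += nr_pages;
		if (PageActive(page))
			workingset_age_nonresident(lruvec, nr_pages);
	}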