Message-ID: <20200802110925.GW23808@casper.infradead.org>
Date: Sun, 2 Aug 2020 12:09:25 +0100
From: Matthew Wilcox <willy@...radead.org>
To: Alex Shi <alex.shi@...ux.alibaba.com>
Cc: Andrew Morton <akpm@...ux-foundation.org>,
Wei Yang <richard.weiyang@...il.com>,
Hugh Dickins <hughd@...gle.com>,
"Kirill A. Shutemov" <kirill@...temov.name>,
Andrea Arcangeli <aarcange@...hat.com>,
Johannes Weiner <hannes@...xchg.org>, linux-mm@...ck.org,
linux-kernel@...r.kernel.org, Thomas Gleixner <tglx@...utronix.de>,
Sebastian Andrzej Siewior <bigeasy@...utronix.de>,
Peter Zijlstra <peterz@...radead.org>
Subject: Re: [PATCH 4/4] mm/thp: narrow lru locking
You're playing games with separating interrupt disabling from spinlocks.
Have you got any feedback from -rt people on this?
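For anyone following along, the difference is between the combined idiom

	spin_lock_irqsave(&pgdat->lru_lock, flags);
	/* ... critical section ... */
	spin_unlock_irqrestore(&pgdat->lru_lock, flags);

and the split idiom this patch introduces (a sketch, not verbatim from
the patch):

	local_irq_disable();		/* irqs off for the whole span */
	/* ... */
	spin_lock(&pgdat->lru_lock);	/* taken with irqs already off */
	/* ... */
	spin_unlock(&pgdat->lru_lock);
	/* ... */
	local_irq_enable();

On PREEMPT_RT, spinlock_t is a sleeping lock and spin_lock_irqsave()
does not actually disable hardware interrupts, so a bare
local_irq_disable() followed by spin_lock() is precisely the sort of
combination the -rt tree has to audit.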
On Sat, Aug 01, 2020 at 12:09:17PM +0800, Alex Shi wrote:
> lru_lock and the page cache xa_lock have no dependency on each other in
> the current sequence, so there is no need to take them together. Let's
> narrow the lru locking, but keep the local_irq_disable() to block
> interrupt re-entry and freeze the statistics updates.
>
> Hugh Dickins points out that split_huge_page_to_list() was already silly
> to be using the _irqsave variant: it has just been taking sleeping locks,
> so it would already be broken if entered with interrupts disabled.
> Thus we can avoid passing the flags argument down to __split_huge_page().
>
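[The resulting lock nesting, as a sketch reconstructed from the diff
below (file-backed case; the swap cache path takes its own xa_lock in
__split_huge_page() instead):

	/* split_huge_page_to_list() */
	local_irq_disable();
	xa_lock(&mapping->i_pages);

	/* __split_huge_page(): lru_lock now only spans the tail split */
	spin_lock(&pgdat->lru_lock);
	/* ... __split_huge_page_tail() for each tail page ... */
	spin_unlock(&pgdat->lru_lock);

	xa_unlock(&mapping->i_pages);
	local_irq_enable();
]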
> Signed-off-by: Alex Shi <alex.shi@...ux.alibaba.com>
> Signed-off-by: Wei Yang <richard.weiyang@...il.com>
> Reviewed-by: Kirill A. Shutemov <kirill.shutemov@...ux.intel.com>
> Cc: Hugh Dickins <hughd@...gle.com>
> Cc: Kirill A. Shutemov <kirill@...temov.name>
> Cc: Andrea Arcangeli <aarcange@...hat.com>
> Cc: Johannes Weiner <hannes@...xchg.org>
> Cc: Matthew Wilcox <willy@...radead.org>
> Cc: Andrew Morton <akpm@...ux-foundation.org>
> Cc: linux-mm@...ck.org
> Cc: linux-kernel@...r.kernel.org
> ---
> mm/huge_memory.c | 25 +++++++++++++------------
> 1 file changed, 13 insertions(+), 12 deletions(-)
>
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> index dfe7378ab4be..070480e66bf4 100644
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -2398,7 +2398,7 @@ static void __split_huge_page_tail(struct page *head, int tail,
> }
>
> static void __split_huge_page(struct page *page, struct list_head *list,
> - pgoff_t end, unsigned long flags)
> + pgoff_t end)
> {
> struct page *head = compound_head(page);
> pg_data_t *pgdat = page_pgdat(head);
> @@ -2407,8 +2407,6 @@ static void __split_huge_page(struct page *page, struct list_head *list,
> unsigned long offset = 0;
> int i;
>
> - lruvec = mem_cgroup_page_lruvec(head, pgdat);
> -
> /* complete memcg works before add pages to LRU */
> mem_cgroup_split_huge_fixup(head);
>
> @@ -2420,6 +2418,11 @@ static void __split_huge_page(struct page *page, struct list_head *list,
> xa_lock(&swap_cache->i_pages);
> }
>
> + /* prevent PageLRU from going away from under us, and freeze lru stats */
> + spin_lock(&pgdat->lru_lock);
> +
> + lruvec = mem_cgroup_page_lruvec(head, pgdat);
> +
> for (i = HPAGE_PMD_NR - 1; i >= 1; i--) {
> __split_huge_page_tail(head, i, lruvec, list);
> /* Some pages can be beyond i_size: drop them from page cache */
> @@ -2439,6 +2442,8 @@ static void __split_huge_page(struct page *page, struct list_head *list,
> }
>
> ClearPageCompound(head);
> + spin_unlock(&pgdat->lru_lock);
> + /* Caller disabled irqs, so they are still disabled here */
>
> split_page_owner(head, HPAGE_PMD_ORDER);
>
> @@ -2456,8 +2461,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
> page_ref_add(head, 2);
> xa_unlock(&head->mapping->i_pages);
> }
> -
> - spin_unlock_irqrestore(&pgdat->lru_lock, flags);
> + local_irq_enable();
>
> remap_page(head);
>
> @@ -2596,12 +2600,10 @@ bool can_split_huge_page(struct page *page, int *pextra_pins)
> int split_huge_page_to_list(struct page *page, struct list_head *list)
> {
> struct page *head = compound_head(page);
> - struct pglist_data *pgdata = NODE_DATA(page_to_nid(head));
> struct deferred_split *ds_queue = get_deferred_split_queue(head);
> struct anon_vma *anon_vma = NULL;
> struct address_space *mapping = NULL;
> int count, mapcount, extra_pins, ret;
> - unsigned long flags;
> pgoff_t end;
>
> VM_BUG_ON_PAGE(is_huge_zero_page(head), head);
> @@ -2662,9 +2664,8 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
> unmap_page(head);
> VM_BUG_ON_PAGE(compound_mapcount(head), head);
>
> - /* prevent PageLRU to go away from under us, and freeze lru stats */
> - spin_lock_irqsave(&pgdata->lru_lock, flags);
> -
> + /* block interrupt re-entry while xa_lock and lru_lock are held */
> + local_irq_disable();
> if (mapping) {
> XA_STATE(xas, &mapping->i_pages, page_index(head));
>
> @@ -2694,7 +2695,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
> __dec_node_page_state(head, NR_FILE_THPS);
> }
>
> - __split_huge_page(page, list, end, flags);
> + __split_huge_page(page, list, end);
> if (PageSwapCache(head)) {
> swp_entry_t entry = { .val = page_private(head) };
>
> @@ -2713,7 +2714,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
> spin_unlock(&ds_queue->split_queue_lock);
> fail: if (mapping)
> xa_unlock(&mapping->i_pages);
> - spin_unlock_irqrestore(&pgdata->lru_lock, flags);
> + local_irq_enable();
> remap_page(head);
> ret = -EBUSY;
> }
> --
> 1.8.3.1
>