Message-ID: <d976abae-9d3f-4c79-64a7-8802552202fe@suse.cz>
Date: Wed, 14 Apr 2021 19:10:44 +0200
From: Vlastimil Babka <vbabka@...e.cz>
To: Mel Gorman <mgorman@...hsingularity.net>,
Linux-MM <linux-mm@...ck.org>,
Linux-RT-Users <linux-rt-users@...r.kernel.org>
Cc: LKML <linux-kernel@...r.kernel.org>,
Chuck Lever <chuck.lever@...cle.com>,
Jesper Dangaard Brouer <brouer@...hat.com>,
Thomas Gleixner <tglx@...utronix.de>,
Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...nel.org>,
Michal Hocko <mhocko@...nel.org>
Subject: Re: [PATCH 06/11] mm/page_alloc: Reduce duration that IRQs are
disabled for VM counters
On 4/14/21 3:39 PM, Mel Gorman wrote:
> IRQs are left disabled for the zone and node VM event counters. This is
> unnecessary as the affected counters are allowed to race with preemption
> and IRQs.
>
> This patch reduces the scope within which IRQs are disabled
> via local_[lock|unlock]_irq on !PREEMPT_RT kernels. One call to
> __mod_zone_freepage_state is still made with IRQs disabled. While this
> could be moved out, it's not free on all architectures as some require
> IRQs to be disabled for mod_zone_page_state on !PREEMPT_RT kernels.
>
> Signed-off-by: Mel Gorman <mgorman@...hsingularity.net>
Nice!
Acked-by: Vlastimil Babka <vbabka@...e.cz>
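
For anyone following along, the pattern is easy to demonstrate outside the
kernel. Below is a minimal standalone sketch (userspace C with pthreads;
all names are made up for illustration, nothing here is kernel code) of the
same idea: drop the lock as soon as the list manipulation is done, and only
afterwards update a statistic that is allowed to race.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static int free_items = 1024;		/* protected by list_lock */
static _Atomic long alloc_events;	/* statistic that tolerates races */

static void *alloc_item(void)
{
	void *item = NULL;

	pthread_mutex_lock(&list_lock);
	if (free_items > 0) {
		free_items--;
		item = malloc(64);	/* stand-in for unlinking from a list */
	}
	pthread_mutex_unlock(&list_lock);	/* critical section ends here */

	/*
	 * The event count only needs to be approximately accurate, so it
	 * is updated after the unlock -- the same reasoning that lets the
	 * patch move __count_zid_vm_events() past local_unlock_irqrestore().
	 */
	if (item)
		atomic_fetch_add_explicit(&alloc_events, 1,
					  memory_order_relaxed);
	return item;
}

int main(void)
{
	void *p = alloc_item();

	printf("allocated=%p events=%ld\n", p, atomic_load(&alloc_events));
	free(p);
	return 0;
}

The shorter the section with IRQs disabled, the less latency is injected
for everything else on the CPU, which is what this series is after.
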
> ---
> mm/page_alloc.c | 12 ++++++------
> 1 file changed, 6 insertions(+), 6 deletions(-)
>
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index cff0f1c98b28..295624fe293b 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -3474,11 +3474,11 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
> pcp = this_cpu_ptr(zone->per_cpu_pageset);
> list = &pcp->lists[migratetype];
> page = __rmqueue_pcplist(zone, migratetype, alloc_flags, pcp, list);
> + local_unlock_irqrestore(&pagesets.lock, flags);
> if (page) {
> __count_zid_vm_events(PGALLOC, page_zonenum(page), 1);
> zone_statistics(preferred_zone, zone, 1);
> }
> - local_unlock_irqrestore(&pagesets.lock, flags);
> return page;
> }
>
> @@ -3530,15 +3530,15 @@ struct page *rmqueue(struct zone *preferred_zone,
> if (!page)
> page = __rmqueue(zone, order, migratetype, alloc_flags);
> } while (page && check_new_pages(page, order));
> - spin_unlock(&zone->lock);
> if (!page)
> goto failed;
> +
> __mod_zone_freepage_state(zone, -(1 << order),
> get_pcppage_migratetype(page));
> + spin_unlock_irqrestore(&zone->lock, flags);
>
> __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
> zone_statistics(preferred_zone, zone, 1);
> - local_irq_restore(flags);
>
> out:
> /* Separate test+clear to avoid unnecessary atomics */
> @@ -3551,7 +3551,7 @@ struct page *rmqueue(struct zone *preferred_zone,
> return page;
>
> failed:
> - local_irq_restore(flags);
> + spin_unlock_irqrestore(&zone->lock, flags);
> return NULL;
> }
>
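
The part that has to stay inside the IRQ-disabled section above makes sense
too: where per-cpu ops are not IRQ-safe, the counter update behind
mod_zone_page_state() is a plain read-modify-write, and an interrupt landing
in the middle of it would lose updates. A standalone demo of that failure
mode (userspace C, with threads standing in for the IRQ-vs-task
interleaving; the iteration counts are arbitrary):

#include <pthread.h>
#include <stdio.h>

static long counter;			/* no lock, no atomics: racy RMW */

static void *hammer(void *arg)
{
	for (int i = 0; i < 1000000; i++)
		counter++;		/* load, add, store: can be torn apart */
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, hammer, NULL);
	pthread_create(&b, NULL, hammer, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	/* usually prints well under 2000000: updates were lost */
	printf("counter=%ld (expected 2000000)\n", counter);
	return 0;
}
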
> @@ -5103,11 +5103,11 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
> nr_populated++;
> }
>
> + local_unlock_irqrestore(&pagesets.lock, flags);
> +
> __count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account);
> zone_statistics(ac.preferred_zoneref->zone, zone, nr_account);
>
> - local_unlock_irqrestore(&pagesets.lock, flags);
> -
> return nr_populated;
>
> failed_irq:
>