Message-ID: <20201008123129.GC4967@dhcp22.suse.cz>
Date: Thu, 8 Oct 2020 14:31:29 +0200
From: Michal Hocko <mhocko@...e.com>
To: Vlastimil Babka <vbabka@...e.cz>
Cc: linux-mm@...ck.org, linux-kernel@...r.kernel.org,
Pavel Tatashin <pasha.tatashin@...een.com>,
David Hildenbrand <david@...hat.com>,
Oscar Salvador <osalvador@...e.de>,
Joonsoo Kim <iamjoonsoo.kim@....com>
Subject: Re: [PATCH v2 5/7] mm, page_alloc: cache pageset high and batch in
struct zone
On Thu 08-10-20 13:41:59, Vlastimil Babka wrote:
> All per-cpu pagesets for a zone use the same high and batch values, which are
> duplicated there purely for performance (locality) reasons. This patch adds the
> same variables also to struct zone as a shared copy.
>
> This will be useful later for making it possible to disable pcplists temporarily
> by setting the high value to 0, while remembering the values for restoring them
> later. But we can also benefit immediately from not updating the pagesets of all
> possible cpus in case the newly recalculated values (after a sysctl change or
> memory online/offline) are unchanged from the previous ones.
>
> Signed-off-by: Vlastimil Babka <vbabka@...e.cz>
Acked-by: Michal Hocko <mhocko@...e.com>
I would find the check flipped into an early return more pleasing to
the eye, but nothing to lose sleep over.
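
Ie. something like this (untested sketch, just to illustrate what I
mean):

	if (zone->pageset_high == new_high &&
	    zone->pageset_batch == new_batch)
		return;

	zone->pageset_high = new_high;
	zone->pageset_batch = new_batch;

That way the update path isn't buried in an if/else.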
> ---
> include/linux/mmzone.h | 6 ++++++
> mm/page_alloc.c | 17 +++++++++++++++--
> 2 files changed, 21 insertions(+), 2 deletions(-)
>
> diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
> index fb3bf696c05e..c63863794afc 100644
> --- a/include/linux/mmzone.h
> +++ b/include/linux/mmzone.h
> @@ -470,6 +470,12 @@ struct zone {
> #endif
> struct pglist_data *zone_pgdat;
> struct per_cpu_pageset __percpu *pageset;
> + /*
> + * the high and batch values are copied to individual pagesets for
> + * faster access
> + */
> + int pageset_high;
> + int pageset_batch;
>
> #ifndef CONFIG_SPARSEMEM
> /*
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index f33c36312eb5..5b98dd5ab006 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -5900,6 +5900,9 @@ static void build_zonelists(pg_data_t *pgdat)
> * Other parts of the kernel may not check if the zone is available.
> */
> static void pageset_init(struct per_cpu_pageset *p);
> +/* These effectively disable the pcplists in the boot pageset completely */
> +#define BOOT_PAGESET_HIGH 0
> +#define BOOT_PAGESET_BATCH 1
> static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
> static DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats);
>
> @@ -6289,8 +6292,8 @@ static void pageset_init(struct per_cpu_pageset *p)
> * need to be as careful as pageset_update() as nobody can access the
> * pageset yet.
> */
> - pcp->high = 0;
> - pcp->batch = 1;
> + pcp->high = BOOT_PAGESET_HIGH;
> + pcp->batch = BOOT_PAGESET_BATCH;
> }
>
> /*
> @@ -6314,6 +6317,14 @@ static void zone_set_pageset_high_and_batch(struct zone *zone)
> new_batch = max(1UL, 1 * new_batch);
> }
>
> + if (zone->pageset_high != new_high ||
> + zone->pageset_batch != new_batch) {
> + zone->pageset_high = new_high;
> + zone->pageset_batch = new_batch;
> + } else {
> + return;
> + }
> +
> for_each_possible_cpu(cpu) {
> p = per_cpu_ptr(zone->pageset, cpu);
> pageset_update(&p->pcp, new_high, new_batch);
> @@ -6374,6 +6385,8 @@ static __meminit void zone_pcp_init(struct zone *zone)
> * offset of a (static) per cpu variable into the per cpu area.
> */
> zone->pageset = &boot_pageset;
> + zone->pageset_high = BOOT_PAGESET_HIGH;
> + zone->pageset_batch = BOOT_PAGESET_BATCH;
>
> if (populated_zone(zone))
> printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%u\n",
> --
> 2.28.0
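
Btw. with the cached values in place, I assume the temporary disabling
mentioned in the changelog will boil down to something like this
(hypothetical sketch, not necessarily what the follow-up patch does):

	/*
	 * Effectively disable the pcplists for the zone: high = 0
	 * means no pages are kept cached, batch = 1 keeps
	 * pageset_update() happy.
	 */
	zone->pageset_high = 0;
	zone->pageset_batch = 1;
	for_each_possible_cpu(cpu)
		pageset_update(&per_cpu_ptr(zone->pageset, cpu)->pcp, 0, 1);

with the previous high/batch remembered somewhere so they can be
restored afterwards.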
--
Michal Hocko
SUSE Labs