Message-ID: <CALvZod7_fhgV39HXmmMApubW-39CjJ5t+WjmkyA_DNGF7b5O+w@mail.gmail.com>
Date: Fri, 8 Oct 2021 10:35:22 -0700
From: Shakeel Butt <shakeelb@...gle.com>
To: Vasily Averin <vvs@...tuozzo.com>, Roman Gushchin <guro@...com>
Cc: Michal Hocko <mhocko@...nel.org>,
Johannes Weiner <hannes@...xchg.org>,
Vladimir Davydov <vdavydov.dev@...il.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Cgroups <cgroups@...r.kernel.org>, Linux MM <linux-mm@...ck.org>,
LKML <linux-kernel@...r.kernel.org>, kernel@...nvz.org,
Mel Gorman <mgorman@...hsingularity.net>,
Uladzislau Rezki <urezki@...il.com>,
Vlastimil Babka <vbabka@...e.cz>
Subject: Re: [PATCH memcg] memcg: enable memory accounting in __alloc_pages_bulk
+Roman
On Fri, Oct 8, 2021 at 2:23 AM Vasily Averin <vvs@...tuozzo.com> wrote:
>
> Enable memory accounting for the bulk page allocator.
>
> Fixes: 387ba26fb1cb ("mm/page_alloc: add a bulk page allocator")
> Cc: <stable@...r.kernel.org>
> Signed-off-by: Vasily Averin <vvs@...tuozzo.com>
> ---
> mm/page_alloc.c | 64 +++++++++++++++++++++++++++++++++++++++++++++++--
> 1 file changed, 62 insertions(+), 2 deletions(-)
>
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index b37435c274cf..602819a232e5 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -5172,6 +5172,55 @@ static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
> return true;
> }
>
Please move the following memcg functions to the memcontrol.[h|c] files.
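Something along these lines for memcontrol.h (a rough, untested sketch;
the CONFIG_MEMCG_KMEM versions would live in mm/memcontrol.c):

/* Charge hooks for the bulk page allocator. */
#ifdef CONFIG_MEMCG_KMEM
bool memcg_bulk_pre_charge_hook(struct obj_cgroup **objcgp, gfp_t gfp,
				unsigned int nr_pages);
void memcg_bulk_charge_hook(struct obj_cgroup *objcg, struct page *page);
void memcg_bulk_post_charge_hook(struct obj_cgroup *objcg,
				 unsigned int nr_pages);
#else
static inline bool memcg_bulk_pre_charge_hook(struct obj_cgroup **objcgp,
					      gfp_t gfp, unsigned int nr_pages)
{
	return true;
}
static inline void memcg_bulk_charge_hook(struct obj_cgroup *objcg,
					  struct page *page)
{
}
static inline void memcg_bulk_post_charge_hook(struct obj_cgroup *objcg,
					       unsigned int nr_pages)
{
}
#endif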
> +#ifdef CONFIG_MEMCG_KMEM
> +static bool memcg_bulk_pre_charge_hook(struct obj_cgroup **objcgp, gfp_t gfp,
> + unsigned int nr_pages)
> +{
> + struct obj_cgroup *objcg = NULL;
> +
> + if (!memcg_kmem_enabled() || !(gfp & __GFP_ACCOUNT))
> + return true;
> +
> + objcg = get_obj_cgroup_from_current();
> +
> + if (objcg && obj_cgroup_charge(objcg, gfp, nr_pages << PAGE_SHIFT)) {
Please use obj_cgroup_charge_pages() when you move this code to memcontrol.c.
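That helper is static in memcontrol.c and takes a page count rather than
a byte count, so the shift goes away as well. Roughly (untested):

	if (objcg && obj_cgroup_charge_pages(objcg, gfp, nr_pages)) {
		/* Charge failed: drop the ref from get_obj_cgroup_from_current(). */
		obj_cgroup_put(objcg);
		return false;
	}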
> + obj_cgroup_put(objcg);
> + return false;
> + }
> + obj_cgroup_get_many(objcg, nr_pages);
> + *objcgp = objcg;
> + return true;
> +}
> +
> +static void memcg_bulk_charge_hook(struct obj_cgroup *objcg,
> + struct page *page)
> +{
> + page->memcg_data = (unsigned long)objcg | MEMCG_DATA_KMEM;
> +}
> +
> +static void memcg_bulk_post_charge_hook(struct obj_cgroup *objcg,
> + unsigned int nr_pages)
> +{
> + obj_cgroup_uncharge(objcg, nr_pages << PAGE_SHIFT);
> + percpu_ref_put_many(&objcg->refcnt, nr_pages + 1);
Introduce obj_cgroup_put_many(), and then you don't need to carry the
extra reference from get_obj_cgroup_from_current() this far, i.e. put
that single ref in the pre hook itself.
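Mirroring the existing obj_cgroup_put() in memcontrol.h, the helper could
look like this (untested sketch):

static inline void obj_cgroup_put_many(struct obj_cgroup *objcg,
				       unsigned long nr)
{
	/* Drop nr references at once instead of looping obj_cgroup_put(). */
	percpu_ref_put_many(&objcg->refcnt, nr);
}

With that, this post hook becomes a plain obj_cgroup_put_many(objcg, nr_pages).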
> +}
> +#else
> +static bool memcg_bulk_pre_charge_hook(struct obj_cgroup **objcgp, gfp_t gfp,
> + unsigned int nr_pages)
> +{
> + return true;
> +}
> +
> +static void memcg_bulk_charge_hook(struct obj_cgroup *objcgp,
> + struct page *page)
> +{
> +}
> +
> +static void memcg_bulk_post_charge_hook(struct obj_cgroup *objcg,
> + unsigned int nr_pages)
> +{
> +}
> +#endif
> /*
> * __alloc_pages_bulk - Allocate a number of order-0 pages to a list or array
> * @gfp: GFP flags for the allocation
> @@ -5207,6 +5256,8 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
> gfp_t alloc_gfp;
> unsigned int alloc_flags = ALLOC_WMARK_LOW;
> int nr_populated = 0, nr_account = 0;
> + unsigned int nr_pre_charge = 0;
> + struct obj_cgroup *objcg = NULL;
>
> /*
> * Skip populated array elements to determine if any pages need
> @@ -5275,6 +5326,10 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
> if (unlikely(!zone))
> goto failed;
>
> + nr_pre_charge = nr_pages - nr_populated;
> + if (!memcg_bulk_pre_charge_hook(&objcg, gfp, nr_pre_charge))
> + goto failed;
> +
> /* Attempt the batch allocation */
> local_lock_irqsave(&pagesets.lock, flags);
> pcp = this_cpu_ptr(zone->per_cpu_pageset);
> @@ -5287,9 +5342,9 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
> nr_populated++;
> continue;
> }
> -
> page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags,
> pcp, pcp_list);
> +
> if (unlikely(!page)) {
> /* Try and get at least one page */
> if (!nr_populated)
> @@ -5297,6 +5352,8 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
> break;
> }
> nr_account++;
> + if (objcg)
> + memcg_bulk_charge_hook(objcg, page);
Logically, the hunk above should go after prep_new_page().
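I.e. set page->memcg_data only once the page has gone through
prep_new_page(), something like:

		nr_account++;

		prep_new_page(page, 0, gfp, 0);
		if (objcg)
			memcg_bulk_charge_hook(objcg, page);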
>
> prep_new_page(page, 0, gfp, 0);
> if (page_list)
> @@ -5310,13 +5367,16 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
>
> __count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account);
> zone_statistics(ac.preferred_zoneref->zone, zone, nr_account);
> + if (objcg)
> + memcg_bulk_post_charge_hook(objcg, nr_pre_charge - nr_account);
>
> out:
> return nr_populated;
>
> failed_irq:
> local_unlock_irqrestore(&pagesets.lock, flags);
> -
> + if (objcg)
> + memcg_bulk_post_charge_hook(objcg, nr_pre_charge);
> failed:
> page = __alloc_pages(gfp, 0, preferred_nid, nodemask);
> if (page) {
> --
> 2.31.1
>