Message-ID: <YOvsijKufJzjHuvd@dhcp22.suse.cz>
Date: Mon, 12 Jul 2021 09:17:30 +0200
From: Michal Hocko <mhocko@...e.com>
To: Suren Baghdasaryan <surenb@...gle.com>
Cc: tj@...nel.org, hannes@...xchg.org, vdavydov.dev@...il.com,
akpm@...ux-foundation.org, shakeelb@...gle.com, guro@...com,
songmuchun@...edance.com, shy828301@...il.com, alexs@...nel.org,
richard.weiyang@...il.com, vbabka@...e.cz, axboe@...nel.dk,
iamjoonsoo.kim@....com, david@...hat.com, willy@...radead.org,
apopple@...dia.com, minchan@...nel.org, linmiaohe@...wei.com,
linux-kernel@...r.kernel.org, cgroups@...r.kernel.org,
linux-mm@...ck.org, kernel-team@...roid.com
Subject: Re: [PATCH v3 3/3] mm, memcg: inline swap-related functions to
improve disabled memcg config
On Fri 09-07-21 17:36:26, Suren Baghdasaryan wrote:
> Inline the mem_cgroup_try_charge_swap, mem_cgroup_uncharge_swap and
> cgroup_throttle_swaprate functions so that the mem_cgroup_disabled()
> static key check is performed inline before calling the main body of
> each function. This minimizes the memcg overhead in the pagefault and
> exit_mmap paths when memcgs are disabled via the cgroup_disable=memory
> command-line option.
> This change yields a ~1% overhead reduction when running the PFT test,
> comparing the {CONFIG_MEMCG=n} and {CONFIG_MEMCG=y, cgroup_disable=memory}
> configurations on an 8-core ARM64 Android device.
>
> Signed-off-by: Suren Baghdasaryan <surenb@...gle.com>
> Reviewed-by: Shakeel Butt <shakeelb@...gle.com>
> Acked-by: Johannes Weiner <hannes@...xchg.org>
I find it a bit surprising to see such a big difference from a single function
call in a slow path like swap in/out.
Anyway, the change makes sense.
Acked-by: Michal Hocko <mhocko@...e.com>
Thanks!
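
As an illustration of the pattern the patch applies, here is a minimal
userspace sketch (a simplification: the kernel's static key is modelled with a
plain bool, and the *_sketch names are hypothetical). The mem_cgroup_disabled()
test lives in the inline wrapper, so when memcg is disabled the caller only
pays for the branch and never makes the out-of-line call:

/*
 * Userspace sketch of the wrapper pattern from the patch: the fast-path
 * check is inlined into the caller, the heavyweight body stays out of
 * line.  The bool below stands in for the kernel's static key, and the
 * *_sketch names are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

static bool memcg_disabled = true;	/* models cgroup_disable=memory */

static inline bool mem_cgroup_disabled(void)
{
	return memcg_disabled;
}

/* Out-of-line body, only reached when memcg is enabled. */
static void __mem_cgroup_uncharge_swap_sketch(unsigned int nr_pages)
{
	printf("uncharging %u swap page(s)\n", nr_pages);
}

/* Inline wrapper, mirroring what the patch adds to swap.h. */
static inline void mem_cgroup_uncharge_swap_sketch(unsigned int nr_pages)
{
	if (mem_cgroup_disabled())
		return;		/* disabled: no call, no memcg work */
	__mem_cgroup_uncharge_swap_sketch(nr_pages);
}

int main(void)
{
	mem_cgroup_uncharge_swap_sketch(1);	/* prints nothing: memcg disabled */

	memcg_disabled = false;			/* "enable" memcg */
	mem_cgroup_uncharge_swap_sketch(1);	/* now calls the out-of-line body */
	return 0;
}

In the kernel the check compiles down to a static branch patched at boot when
cgroup_disable=memory is passed, so the disabled case is close to free; the
sketch only models that with a runtime bool.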
> ---
>  include/linux/swap.h | 26 +++++++++++++++++++++++---
>  mm/memcontrol.c      | 14 ++++----------
>  mm/swapfile.c        |  5 +----
>  3 files changed, 28 insertions(+), 17 deletions(-)
>
> diff --git a/include/linux/swap.h b/include/linux/swap.h
> index 6f5a43251593..f30d26b0f71d 100644
> --- a/include/linux/swap.h
> +++ b/include/linux/swap.h
> @@ -721,7 +721,13 @@ static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
>  #endif
> 
>  #if defined(CONFIG_SWAP) && defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
> -extern void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask);
> +extern void __cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask);
> +static inline void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask)
> +{
> +	if (mem_cgroup_disabled())
> +		return;
> +	__cgroup_throttle_swaprate(page, gfp_mask);
> +}
>  #else
>  static inline void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask)
>  {
> @@ -730,8 +736,22 @@ static inline void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask)
> 
>  #ifdef CONFIG_MEMCG_SWAP
>  extern void mem_cgroup_swapout(struct page *page, swp_entry_t entry);
> -extern int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry);
> -extern void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages);
> +extern int __mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry);
> +static inline int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
> +{
> +	if (mem_cgroup_disabled())
> +		return 0;
> +	return __mem_cgroup_try_charge_swap(page, entry);
> +}
> +
> +extern void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages);
> +static inline void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
> +{
> +	if (mem_cgroup_disabled())
> +		return;
> +	__mem_cgroup_uncharge_swap(entry, nr_pages);
> +}
> +
>  extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
>  extern bool mem_cgroup_swap_full(struct page *page);
>  #else
> diff --git a/mm/memcontrol.c b/mm/memcontrol.c
> index cdaf7003b43d..0b05322836ec 100644
> --- a/mm/memcontrol.c
> +++ b/mm/memcontrol.c
> @@ -7234,7 +7234,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
>  }
> 
>  /**
> - * mem_cgroup_try_charge_swap - try charging swap space for a page
> + * __mem_cgroup_try_charge_swap - try charging swap space for a page
>   * @page: page being added to swap
>   * @entry: swap entry to charge
>   *
> @@ -7242,16 +7242,13 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
>   *
>   * Returns 0 on success, -ENOMEM on failure.
>   */
> -int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
> +int __mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
>  {
>  	unsigned int nr_pages = thp_nr_pages(page);
>  	struct page_counter *counter;
>  	struct mem_cgroup *memcg;
>  	unsigned short oldid;
> 
> -	if (mem_cgroup_disabled())
> -		return 0;
> -
>  	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
>  		return 0;
> 
> @@ -7287,18 +7284,15 @@ int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
>  }
> 
>  /**
> - * mem_cgroup_uncharge_swap - uncharge swap space
> + * __mem_cgroup_uncharge_swap - uncharge swap space
>   * @entry: swap entry to uncharge
>   * @nr_pages: the amount of swap space to uncharge
>   */
> -void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
> +void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
>  {
>  	struct mem_cgroup *memcg;
>  	unsigned short id;
> 
> -	if (mem_cgroup_disabled())
> -		return;
> -
>  	id = swap_cgroup_record(entry, 0, nr_pages);
>  	rcu_read_lock();
>  	memcg = mem_cgroup_from_id(id);
> diff --git a/mm/swapfile.c b/mm/swapfile.c
> index 707fa0481bb4..04a0c83f1313 100644
> --- a/mm/swapfile.c
> +++ b/mm/swapfile.c
> @@ -3773,14 +3773,11 @@ static void free_swap_count_continuations(struct swap_info_struct *si)
>  }
> 
>  #if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
> -void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask)
> +void __cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask)
>  {
>  	struct swap_info_struct *si, *next;
>  	int nid = page_to_nid(page);
> 
> -	if (mem_cgroup_disabled())
> -		return;
> -
>  	if (!(gfp_mask & __GFP_IO))
>  		return;
> 
> --
> 2.32.0.93.g670b81a890-goog
--
Michal Hocko
SUSE Labs