Message-ID: <fdb2bd2e-eaac-482b-bf32-641f9df7a5fc@suse.cz>
Date: Tue, 12 Nov 2024 11:07:20 +0100
From: Vlastimil Babka <vbabka@...e.cz>
To: Suren Baghdasaryan <surenb@...gle.com>, akpm@...ux-foundation.org
Cc: willy@...radead.org, liam.howlett@...cle.com, lorenzo.stoakes@...cle.com,
mhocko@...e.com, hannes@...xchg.org, mjguzik@...il.com,
oliver.sang@...el.com, mgorman@...hsingularity.net, david@...hat.com,
peterx@...hat.com, oleg@...hat.com, dave@...olabs.net, paulmck@...nel.org,
brauner@...nel.org, dhowells@...hat.com, hdanton@...a.com, hughd@...gle.com,
minchan@...gle.com, jannh@...gle.com, shakeel.butt@...ux.dev,
souravpanda@...gle.com, pasha.tatashin@...een.com, linux-mm@...ck.org,
linux-kernel@...r.kernel.org, kernel-team@...roid.com
Subject: Re: [PATCH 4/4] mm: move lesser used vm_area_struct members into the
last cacheline
On 11/11/24 21:55, Suren Baghdasaryan wrote:
> Move several vm_area_struct members which are rarely or never used
> during page fault handling into the last cacheline, to better pack
> vm_area_struct. As a result, vm_area_struct fits into 3 cachelines
> instead of the previous 4. New vm_area_struct layout:
>
> struct vm_area_struct {
> union {
> struct {
> long unsigned int vm_start; /* 0 8 */
> long unsigned int vm_end; /* 8 8 */
> }; /* 0 16 */
> struct callback_head vm_rcu; /* 0 16 */
> } __attribute__((__aligned__(8))); /* 0 16 */
> struct mm_struct * vm_mm; /* 16 8 */
> pgprot_t vm_page_prot; /* 24 8 */
> union {
> const vm_flags_t vm_flags; /* 32 8 */
> vm_flags_t __vm_flags; /* 32 8 */
> }; /* 32 8 */
> bool detached; /* 40 1 */
>
> /* XXX 3 bytes hole, try to pack */
>
> unsigned int vm_lock_seq; /* 44 4 */
> struct list_head anon_vma_chain; /* 48 16 */
> /* --- cacheline 1 boundary (64 bytes) --- */
> struct anon_vma * anon_vma; /* 64 8 */
> const struct vm_operations_struct * vm_ops; /* 72 8 */
> long unsigned int vm_pgoff; /* 80 8 */
> struct file * vm_file; /* 88 8 */
> void * vm_private_data; /* 96 8 */
> atomic_long_t swap_readahead_info; /* 104 8 */
> struct mempolicy * vm_policy; /* 112 8 */
>
> /* XXX 8 bytes hole, try to pack */
>
> /* --- cacheline 2 boundary (128 bytes) --- */
> struct vma_lock vm_lock __attribute__((__aligned__(64))); /* 128 4 */
>
> /* XXX 4 bytes hole, try to pack */
>
> struct {
> struct rb_node rb __attribute__((__aligned__(8))); /* 136 24 */
> long unsigned int rb_subtree_last; /* 160 8 */
> } __attribute__((__aligned__(8))) shared; /* 136 32 */
> struct vm_userfaultfd_ctx vm_userfaultfd_ctx; /* 168 0 */
I don't see anon_name in the output; I thought it was added for Android? :)
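(On a config with CONFIG_ANON_VMA_NAME=y, "pahole -C vm_area_struct vmlinux"
on the built kernel should show it in the tail cacheline, so that would be
an easy way to double-check.)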
>
> /* size: 192, cachelines: 3, members: 17 */
> /* sum members: 153, holes: 3, sum holes: 15 */
> /* padding: 24 */
Instead you seem to have padding, so an attempt to use SLAB_TYPESAFE_BY_RCU
should reuse that for the freelist pointer and not grow the object further,
which the 64-byte alignment would round up to 256 bytes.
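
Roughly something like this with the new kmem_cache_args API (a sketch
only; vm_freeptr is a hypothetical field that would have to be placed in
one of the existing holes or the padding):

	struct kmem_cache_args args = {
		/* Stash the freelist pointer inside the object, in space
		 * that is currently padding, instead of growing it. */
		.use_freeptr_offset = true,
		.freeptr_offset = offsetof(struct vm_area_struct, vm_freeptr),
	};

	vm_area_cachep = kmem_cache_create("vm_area_struct",
					   sizeof(struct vm_area_struct),
					   &args,
					   SLAB_HWCACHE_ALIGN | SLAB_PANIC |
					   SLAB_TYPESAFE_BY_RCU | SLAB_ACCOUNT);
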
Perhaps this pahole output wasn't generated with a fully representative config?
> /* forced alignments: 3, forced holes: 2, sum forced holes: 12 */
> } __attribute__((__aligned__(64)));
>
>
> Memory consumption per 1000 VMAs becomes 48 pages:
>
> slabinfo after vm_area_struct changes:
> <name> ... <objsize> <objperslab> <pagesperslab> : ...
> vm_area_struct ... 192 42 2 : ...
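
The 48 pages check out: at 192 bytes per object that's 42 objects per
order-1 (2-page) slab, and 1000 VMAs need DIV_ROUND_UP(1000, 42) = 24
slabs, i.e. 48 pages. At the previous 4 cachelines (256 bytes) it would
have been 32 objects per slab, thus 32 slabs and 64 pages, so this saves
a quarter.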
>
>
> Signed-off-by: Suren Baghdasaryan <surenb@...gle.com>
> ---
> include/linux/mm_types.h | 37 ++++++++++++++++++-------------------
> 1 file changed, 18 insertions(+), 19 deletions(-)
>
> diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
> index 789bccc05520..c3755b680911 100644
> --- a/include/linux/mm_types.h
> +++ b/include/linux/mm_types.h
> @@ -733,16 +733,6 @@ struct vm_area_struct {
> unsigned int vm_lock_seq;
> #endif
>
> - /*
> - * For areas with an address space and backing store,
> - * linkage into the address_space->i_mmap interval tree.
> - *
> - */
> - struct {
> - struct rb_node rb;
> - unsigned long rb_subtree_last;
> - } shared;
> -
> /*
> * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
> * list, after a COW of one of the file pages. A MAP_SHARED vma
> @@ -762,14 +752,6 @@ struct vm_area_struct {
> struct file * vm_file; /* File we map to (can be NULL). */
> void * vm_private_data; /* was vm_pte (shared mem) */
>
> -#ifdef CONFIG_ANON_VMA_NAME
> - /*
> - * For private and shared anonymous mappings, a pointer to a null
> - * terminated string containing the name given to the vma, or NULL if
> - * unnamed. Serialized by mmap_lock. Use anon_vma_name to access.
> - */
> - struct anon_vma_name *anon_name;
> -#endif
> #ifdef CONFIG_SWAP
> atomic_long_t swap_readahead_info;
> #endif
> @@ -782,11 +764,28 @@ struct vm_area_struct {
> #ifdef CONFIG_NUMA_BALANCING
> struct vma_numab_state *numab_state; /* NUMA Balancing state */
> #endif
> - struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
> #ifdef CONFIG_PER_VMA_LOCK
> /* Unstable RCU readers are allowed to read this. */
> struct vma_lock vm_lock ____cacheline_aligned_in_smp;
> #endif
> + /*
> + * For areas with an address space and backing store,
> + * linkage into the address_space->i_mmap interval tree.
> + *
> + */
> + struct {
> + struct rb_node rb;
> + unsigned long rb_subtree_last;
> + } shared;
> +#ifdef CONFIG_ANON_VMA_NAME
> + /*
> + * For private and shared anonymous mappings, a pointer to a null
> + * terminated string containing the name given to the vma, or NULL if
> + * unnamed. Serialized by mmap_lock. Use anon_vma_name to access.
> + */
> + struct anon_vma_name *anon_name;
> +#endif
> + struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
> } __randomize_layout;
>
> #ifdef CONFIG_NUMA
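
Also, to avoid regressing this later it might be worth a compile-time
check, something along these lines (a sketch, not part of the patch):

	#ifdef CONFIG_64BIT
	/* Hypothetical guard: keep the fault-path members within the
	 * first three cachelines. */
	static_assert(sizeof(struct vm_area_struct) <= 192);
	#endif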