Message-ID: <CAGudoHE73o5oK77iOFKqH4D4Cz6t9TAu4882+_F9vHH7muNH-A@mail.gmail.com>
Date: Wed, 28 Aug 2024 22:13:49 +0200
From: Mateusz Guzik <mjguzik@...il.com>
To: Andrew Morton <akpm@...ux-foundation.org>
Cc: muchun.song@...ux.dev, dave@...olabs.net, linux-kernel@...r.kernel.org,
linux-mm@...ck.org
Subject: Re: [PATCH] mm/hugetlb: sort out global lock annotations
On Wed, Aug 28, 2024 at 9:49 PM Andrew Morton <akpm@...ux-foundation.org> wrote:
>
> On Wed, 28 Aug 2024 18:07:04 +0200 Mateusz Guzik <mjguzik@...il.com> wrote:
>
> > The mutex array pointer shares a cacheline with the spinlock:
> > ffffffff84187480 B hugetlb_fault_mutex_table
> > ffffffff84187488 B hugetlb_lock
>
> Fair enough. My x86_64 defconfig now has
>
> num_fault_mutexes:
> .zero 4
> .globl hugetlb_lock
> .section .data..cacheline_aligned,"aw"
> .align 64
> .type hugetlb_lock, @object
> .size hugetlb_lock, 4
> hugetlb_lock:
> .zero 4
> .section .init.data
> .align 32
> .type default_hugepages_in_node, @object
> .size default_hugepages_in_node, 256
> default_hugepages_in_node:
> .zero 256
> .type parsed_default_hugepagesz, @object
> .size parsed_default_hugepagesz, 1
>
> which looks good.
>
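For reference, the symbol lands in .data..cacheline_aligned because
__cacheline_aligned_in_smp carries a section attribute on top of the
alignment. A minimal sketch, assuming the definitions in
include/linux/cache.h (also visible in the build error below):

/* with CONFIG_SMP, __cacheline_aligned_in_smp is __cacheline_aligned: */
#define __cacheline_aligned					\
	__attribute__((__aligned__(SMP_CACHE_BYTES),		\
		       __section__(".data..cacheline_aligned")))

/*
 * so the annotated lock is both padded out to a cacheline boundary
 * and moved into its own section, away from
 * hugetlb_fault_mutex_table:
 */
__cacheline_aligned_in_smp DEFINE_SPINLOCK(hugetlb_lock);
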
> > --- a/mm/hugetlb.c
> > +++ b/mm/hugetlb.c
> > @@ -72,14 +72,14 @@ static unsigned int default_hugepages_in_node[MAX_NUMNODES] __initdata;
> > * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
> > * free_huge_pages, and surplus_huge_pages.
> > */
> > -DEFINE_SPINLOCK(hugetlb_lock);
> > +__cacheline_aligned_in_smp DEFINE_SPINLOCK(hugetlb_lock);
> >
> > /*
> > * Serializes faults on the same logical page. This is used to
> > * prevent spurious OOMs when the hugepage pool is fully utilized.
> > */
> > -static int num_fault_mutexes;
> > -struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
> > +static __ro_after_init int num_fault_mutexes;
> > +__ro_after_init struct mutex *hugetlb_fault_mutex_table;
>
> It's conventional (within MM, at least) to put the section thing at the
> end of the definition, so tweak:
>
> --- a/mm/hugetlb.c~mm-hugetlb-sort-out-global-lock-annotations-fix
> +++ a/mm/hugetlb.c
> @@ -72,14 +72,14 @@ static unsigned int default_hugepages_in
> * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
> * free_huge_pages, and surplus_huge_pages.
> */
> -__cacheline_aligned_in_smp DEFINE_SPINLOCK(hugetlb_lock);
> +DEFINE_SPINLOCK(hugetlb_lock) __cacheline_aligned_in_smp;
>
I tried the suggested ordering and it does not compile for me:
In file included from ./arch/x86/include/asm/current.h:10,
from ./arch/x86/include/asm/preempt.h:7,
from ./include/linux/preempt.h:79,
from ./include/linux/spinlock.h:56,
from ./include/linux/mmzone.h:8,
from ./include/linux/gfp.h:7,
from ./include/linux/mm.h:7,
from mm/hugetlb.c:8:
./include/linux/cache.h:80:3: error: expected ‘,’ or ‘;’ before ‘__attribute__’
80 | __attribute__((__aligned__(SMP_CACHE_BYTES), \
| ^~~~~~~~~~~~~
./include/linux/cache.h:86:36: note: in expansion of macro ‘__cacheline_aligned’
86 | #define __cacheline_aligned_in_smp __cacheline_aligned
| ^~~~~~~~~~~~~~~~~~~
mm/hugetlb.c:75:31: note: in expansion of macro ‘__cacheline_aligned_in_smp’
75 | DEFINE_SPINLOCK(hugetlb_lock) __cacheline_aligned_in_smp;
| ^~~~~~~~~~~~~~~~~~~~~~~~~~
I'm at next-20240828 with gcc 13.2.0
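
The problem is that DEFINE_SPINLOCK expands to an initialized
definition, so the trailing annotation ends up after the initializer.
A minimal sketch of what goes wrong, assuming the expansion in
include/linux/spinlock_types.h:

/* DEFINE_SPINLOCK(x) expands to an initialized definition: */
spinlock_t hugetlb_lock = __SPIN_LOCK_UNLOCKED(hugetlb_lock);

/*
 * so the tweaked line places the attribute after the initializer,
 * which is the "expected ',' or ';'" error above:
 */
spinlock_t hugetlb_lock = __SPIN_LOCK_UNLOCKED(hugetlb_lock) __cacheline_aligned_in_smp;

/*
 * gcc does accept an attribute before the '=', or trailing an
 * uninitialized definition, which is why the __ro_after_init hunk
 * below compiles fine:
 */
spinlock_t hugetlb_lock __cacheline_aligned_in_smp = __SPIN_LOCK_UNLOCKED(hugetlb_lock);
struct mutex *hugetlb_fault_mutex_table __ro_after_init;
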
> /*
> * Serializes faults on the same logical page. This is used to
> * prevent spurious OOMs when the hugepage pool is fully utilized.
> */
> -static __ro_after_init int num_fault_mutexes;
> -__ro_after_init struct mutex *hugetlb_fault_mutex_table;
> +static int num_fault_mutexes __ro_after_init;
> +struct mutex *hugetlb_fault_mutex_table __ro_after_init;
>
> /* Forward declaration */
> static int hugetlb_acct_memory(struct hstate *h, long delta);
> _
>
>
--
Mateusz Guzik <mjguzik gmail.com>