Message-Id: <20240828124929.db332259c2afad1e9e545b1f@linux-foundation.org>
Date: Wed, 28 Aug 2024 12:49:29 -0700
From: Andrew Morton <akpm@...ux-foundation.org>
To: Mateusz Guzik <mjguzik@...il.com>
Cc: muchun.song@...ux.dev, dave@...olabs.net, linux-kernel@...r.kernel.org,
 linux-mm@...ck.org
Subject: Re: [PATCH] mm/hugetlb: sort out global lock annotations

On Wed, 28 Aug 2024 18:07:04 +0200 Mateusz Guzik <mjguzik@...il.com> wrote:

> The mutex array pointer shares a cacheline with the spinlock:
> ffffffff84187480 B hugetlb_fault_mutex_table
> ffffffff84187488 B hugetlb_lock
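
The two addresses are 8 bytes apart and ffffffff84187480 is 64-byte
aligned, so on x86_64 (64-byte cachelines) the effective layout is as if
(hypothetical equivalent, for illustration only):

struct {
	struct mutex *hugetlb_fault_mutex_table;	/* read-mostly */
	spinlock_t hugetlb_lock;			/* write-hot   */
	/* ...the rest of the 64-byte line is unrelated .bss... */
};

/*
 * Every acquisition of hugetlb_lock dirties the line, forcing other
 * CPUs to re-fetch it merely to reload the read-mostly
 * hugetlb_fault_mutex_table pointer (classic false sharing).
 */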

Fair enough.  My x86_64 defconfig now has

num_fault_mutexes:
	.zero	4
	.globl	hugetlb_lock
	.section	.data..cacheline_aligned,"aw"
	.align 64
	.type	hugetlb_lock, @object
	.size	hugetlb_lock, 4
hugetlb_lock:
	.zero	4
	.section	.init.data
	.align 32
	.type	default_hugepages_in_node, @object
	.size	default_hugepages_in_node, 256
default_hugepages_in_node:
	.zero	256
	.type	parsed_default_hugepagesz, @object
	.size	parsed_default_hugepagesz, 1

which looks good.
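
For reference, this is roughly what those annotations expand to, abridged
from include/linux/cache.h (CONFIG_SMP case; quoted from memory, so treat
it as a sketch).  It shows why hugetlb_lock lands in
.data..cacheline_aligned above, and why the old four-underscore annotation
on the pointer only aligned it without giving it a dedicated section:

#define __ro_after_init	__section(".data..ro_after_init")

#define __cacheline_aligned					\
	__attribute__((__aligned__(SMP_CACHE_BYTES),		\
		       __section__(".data..cacheline_aligned")))
#define __cacheline_aligned_in_smp	__cacheline_aligned

/* Four underscores: alignment only, no section placement. */
#define ____cacheline_aligned	__attribute__((__aligned__(SMP_CACHE_BYTES)))
#define ____cacheline_aligned_in_smp	____cacheline_aligned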

> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -72,14 +72,14 @@ static unsigned int default_hugepages_in_node[MAX_NUMNODES] __initdata;
>   * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
>   * free_huge_pages, and surplus_huge_pages.
>   */
> -DEFINE_SPINLOCK(hugetlb_lock);
> +__cacheline_aligned_in_smp DEFINE_SPINLOCK(hugetlb_lock);
>  
>  /*
>   * Serializes faults on the same logical page.  This is used to
>   * prevent spurious OOMs when the hugepage pool is fully utilized.
>   */
> -static int num_fault_mutexes;
> -struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
> +static __ro_after_init int num_fault_mutexes;
> +__ro_after_init struct mutex *hugetlb_fault_mutex_table;

It's conventional (within MM, at least) to put the section/alignment
annotation at the end of the definition, so tweak:

--- a/mm/hugetlb.c~mm-hugetlb-sort-out-global-lock-annotations-fix
+++ a/mm/hugetlb.c
@@ -72,14 +72,14 @@ static unsigned int default_hugepages_in
  * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
  * free_huge_pages, and surplus_huge_pages.
  */
-__cacheline_aligned_in_smp DEFINE_SPINLOCK(hugetlb_lock);
+DEFINE_SPINLOCK(hugetlb_lock) __cacheline_aligned_in_smp;
 
 /*
  * Serializes faults on the same logical page.  This is used to
  * prevent spurious OOMs when the hugepage pool is fully utilized.
  */
-static __ro_after_init int num_fault_mutexes;
-__ro_after_init struct mutex *hugetlb_fault_mutex_table;
+static int num_fault_mutexes __ro_after_init;
+struct mutex *hugetlb_fault_mutex_table __ro_after_init;
 
 /* Forward declaration */
 static int hugetlb_acct_memory(struct hstate *h, long delta);
_
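
One thing worth double-checking with the end-of-definition placement (an
observation, not from the patch): gcc accepts an attribute before the
declaration or immediately before the '=' or ';', but not after an
initializer, and DEFINE_SPINLOCK(x) expands to a definition with an
initializer (spinlock_t x = __SPIN_LOCK_UNLOCKED(x)).  So the spinlock
hunk above may need the annotation kept in front.  A sketch with made-up
names:

#include <linux/cache.h>
#include <linux/spinlock.h>

/* Plain definitions: both placements compile. */
static int example_count __ro_after_init;	/* annotation at the end */
static __ro_after_init int example_count2;	/* annotation up front   */

/* With an initializer, the attribute must precede the '=': */
spinlock_t example_lock __cacheline_aligned_in_smp =
	__SPIN_LOCK_UNLOCKED(example_lock);

/*
 * Rejected by gcc (attribute after the initializer), which is what
 * "DEFINE_SPINLOCK(x) __cacheline_aligned_in_smp;" expands to:
 *
 *	spinlock_t bad = __SPIN_LOCK_UNLOCKED(bad) __cacheline_aligned_in_smp;
 */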


