Message-ID: <20171127085951.4hoxh6bp6dcb4gnx@hirez.programming.kicks-ass.net>
Date: Mon, 27 Nov 2017 09:59:51 +0100
From: Peter Zijlstra <peterz@...radead.org>
To: Ingo Molnar <mingo@...nel.org>
Cc: linux-kernel@...r.kernel.org,
Dave Hansen <dave.hansen@...ux.intel.com>,
Andy Lutomirski <luto@...capital.net>,
Thomas Gleixner <tglx@...utronix.de>,
"H . Peter Anvin" <hpa@...or.com>, Borislav Petkov <bp@...en8.de>,
Linus Torvalds <torvalds@...ux-foundation.org>
Subject: [PATCH] mm: Unify page_table_lock allocation pattern

Subject: mm: Unify page_table_lock allocation pattern
From: Peter Zijlstra <peterz@...radead.org>
Date: Mon Nov 27 09:35:04 CET 2017

There are two different patterns wrt page_table_lock and allocating new
page-table pages. Get rid of this diversity.

I picked this variant because it does less work under the lock and
makes page_table_lock a leaf lock.
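
For reference, the shape all three call sites now share is roughly the
following; this is a purely illustrative userspace sketch, not the
mm/memory.c code itself (table_alloc, shared_table and the pthread
mutex are made-up stand-ins for the kernel helpers):

/*
 * Illustrative userspace sketch of the unified pattern: allocate
 * speculatively, publish under the lock only if the slot is still
 * empty, and free the losing allocation outside the lock.
 */
#include <pthread.h>
#include <stdlib.h>

struct table {
	void *slots[512];
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct table *shared_table;	/* plays the role of *pgd / *p4d / *pud */

static int table_alloc(struct table **slot)
{
	struct table *new = calloc(1, sizeof(*new));	/* allocate before taking the lock */

	if (!new)
		return -1;

	pthread_mutex_lock(&table_lock);
	if (!*slot) {		/* still unpopulated: publish our allocation */
		*slot = new;
		new = NULL;
	}
	pthread_mutex_unlock(&table_lock);

	if (new)		/* someone else populated it; free outside the lock */
		free(new);

	return 0;
}

int main(void)
{
	return table_alloc(&shared_table);
}

The point is that only the present/populate check remains inside the
critical section; the free runs after the unlock, so no allocator locks
are taken while page_table_lock (or the ptl in the __pmd_alloc case) is
held on this path, which is what keeps it a leaf lock.
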
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
---
mm/memory.c | 33 ++++++++++++++++++++-------------
1 file changed, 20 insertions(+), 13 deletions(-)
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4100,11 +4100,14 @@ int __p4d_alloc(struct mm_struct *mm, pg
 	smp_wmb(); /* See comment in __pte_alloc */
 
 	spin_lock(&mm->page_table_lock);
-	if (pgd_present(*pgd))		/* Another has populated it */
-		p4d_free(mm, new);
-	else
+	if (!pgd_present(*pgd)) {
 		pgd_populate(mm, pgd, new);
+		new = NULL;
+	}
 	spin_unlock(&mm->page_table_lock);
+	if (new)
+		p4d_free(mm, new);
+
 	return 0;
 }
 #endif /* __PAGETABLE_P4D_FOLDED */
@@ -4124,17 +4127,19 @@ int __pud_alloc(struct mm_struct *mm, p4
 
 	spin_lock(&mm->page_table_lock);
 #ifndef __ARCH_HAS_5LEVEL_HACK
-	if (p4d_present(*p4d))		/* Another has populated it */
-		pud_free(mm, new);
-	else
+	if (!p4d_present(*p4d)) {
 		p4d_populate(mm, p4d, new);
+		new = NULL;
+	}
 #else
-	if (pgd_present(*p4d))		/* Another has populated it */
-		pud_free(mm, new);
-	else
+	if (!pgd_present(*p4d)) {
 		pgd_populate(mm, p4d, new);
+		new = NULL;
+	}
 #endif /* __ARCH_HAS_5LEVEL_HACK */
 	spin_unlock(&mm->page_table_lock);
+	if (new)
+		pud_free(mm, new);
 	return 0;
 }
 #endif /* __PAGETABLE_PUD_FOLDED */
@@ -4158,16 +4163,18 @@ int __pmd_alloc(struct mm_struct *mm, pu
 	if (!pud_present(*pud)) {
 		mm_inc_nr_pmds(mm);
 		pud_populate(mm, pud, new);
-	} else	/* Another has populated it */
-		pmd_free(mm, new);
+		new = NULL;
+	}
 #else
 	if (!pgd_present(*pud)) {
 		mm_inc_nr_pmds(mm);
 		pgd_populate(mm, pud, new);
-	} else	/* Another has populated it */
-		pmd_free(mm, new);
+		new = NULL;
+	}
 #endif /* __ARCH_HAS_4LEVEL_HACK */
 	spin_unlock(ptl);
+	if (new)
+		pmd_free(mm, new);
 	return 0;
 }
 #endif /* __PAGETABLE_PMD_FOLDED */