Message-Id: <20190920195047.7703-7-leonardo@linux.ibm.com>
Date: Fri, 20 Sep 2019 16:50:42 -0300
From: Leonardo Bras <leonardo@...ux.ibm.com>
To: linuxppc-dev@...ts.ozlabs.org, linux-kernel@...r.kernel.org
Cc: Leonardo Bras <leonardo@...ux.ibm.com>,
Benjamin Herrenschmidt <benh@...nel.crashing.org>,
Paul Mackerras <paulus@...ba.org>,
Michael Ellerman <mpe@...erman.id.au>,
Arnd Bergmann <arnd@...db.de>,
"Aneesh Kumar K.V" <aneesh.kumar@...ux.ibm.com>,
Christophe Leroy <christophe.leroy@....fr>,
Andrew Morton <akpm@...ux-foundation.org>,
Dan Williams <dan.j.williams@...el.com>,
Nicholas Piggin <npiggin@...il.com>,
Mahesh Salgaonkar <mahesh@...ux.vnet.ibm.com>,
Thomas Gleixner <tglx@...utronix.de>,
Richard Fontana <rfontana@...hat.com>,
Ganesh Goudar <ganeshgr@...ux.ibm.com>,
Allison Randal <allison@...utok.net>,
Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
Mike Rapoport <rppt@...ux.ibm.com>,
YueHaibing <yuehaibing@...wei.com>,
Ira Weiny <ira.weiny@...el.com>,
Jason Gunthorpe <jgg@...pe.ca>,
John Hubbard <jhubbard@...dia.com>,
Keith Busch <keith.busch@...el.com>
Subject: [PATCH v2 06/11] powerpc/mm/book3s64/hash: Apply counting method to monitor lockless pgtbl walks

Apply the counting-based method to monitor all hash-related functions
that do lockless page table walks, so that code that changes the page
tables can tell when such walks are in flight.

Signed-off-by: Leonardo Bras <leonardo@...ux.ibm.com>
---
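[ Note, not part of the commit message: a minimal sketch of the counting
  helpers this patch calls, assuming they keep a per-mm atomic counter as
  introduced earlier in this series. The mm_context_t field name below is
  illustrative, not necessarily the one the series defines. ]

#include <linux/atomic.h>
#include <linux/mm_types.h>

static inline void start_lockless_pgtbl_walk(struct mm_struct *mm)
{
	/* Account one more lockless walk in flight on this mm. */
	atomic_inc(&mm->context.lockless_pgtbl_walk_count);
	/* Order the increment before the walk's page table reads. */
	smp_mb__after_atomic();
}

static inline void end_lockless_pgtbl_walk(struct mm_struct *mm)
{
	/* Order the walk's page table reads before the decrement. */
	smp_mb__before_atomic();
	atomic_dec(&mm->context.lockless_pgtbl_walk_count);
}
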
 arch/powerpc/mm/book3s64/hash_tlb.c   | 2 ++
 arch/powerpc/mm/book3s64/hash_utils.c | 7 +++++++
 2 files changed, 9 insertions(+)

diff --git a/arch/powerpc/mm/book3s64/hash_tlb.c b/arch/powerpc/mm/book3s64/hash_tlb.c
index 4a70d8dd39cd..5e5213c3f7c4 100644
--- a/arch/powerpc/mm/book3s64/hash_tlb.c
+++ b/arch/powerpc/mm/book3s64/hash_tlb.c
@@ -209,6 +209,7 @@ void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
 	 * to being hashed). This is not the most performance oriented
 	 * way to do things but is fine for our needs here.
 	 */
+	start_lockless_pgtbl_walk(mm);
 	local_irq_save(flags);
 	arch_enter_lazy_mmu_mode();
 	for (; start < end; start += PAGE_SIZE) {
@@ -230,6 +231,7 @@ void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
 	}
 	arch_leave_lazy_mmu_mode();
 	local_irq_restore(flags);
+	end_lockless_pgtbl_walk(mm);
 }
 
 void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr)

diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index b8ad14bb1170..299946cedc3a 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -1322,6 +1322,7 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea,
 #endif /* CONFIG_PPC_64K_PAGES */
 
 	/* Get PTE and page size from page tables */
+	start_lockless_pgtbl_walk(mm);
 	ptep = find_linux_pte(pgdir, ea, &is_thp, &hugeshift);
 	if (ptep == NULL || !pte_present(*ptep)) {
 		DBG_LOW(" no PTE !\n");
@@ -1438,6 +1439,7 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea,
 	DBG_LOW(" -> rc=%d\n", rc);
 
 bail:
+	end_lockless_pgtbl_walk(mm);
 	exception_exit(prev_state);
 	return rc;
 }
@@ -1547,10 +1549,12 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
 	vsid = get_user_vsid(&mm->context, ea, ssize);
 	if (!vsid)
 		return;
+
 	/*
 	 * Hash doesn't like irqs. Walking linux page table with irq disabled
 	 * saves us from holding multiple locks.
 	 */
+	start_lockless_pgtbl_walk(mm);
 	local_irq_save(flags);
 
 	/*
@@ -1597,6 +1601,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
 				   pte_val(*ptep));
 out_exit:
 	local_irq_restore(flags);
+	end_lockless_pgtbl_walk(mm);
 }
 
 #ifdef CONFIG_PPC_MEM_KEYS
@@ -1613,11 +1618,13 @@ u16 get_mm_addr_key(struct mm_struct *mm, unsigned long address)
 	if (!mm || !mm->pgd)
 		return 0;
 
+	start_lockless_pgtbl_walk(mm);
 	local_irq_save(flags);
 	ptep = find_linux_pte(mm->pgd, address, NULL, NULL);
 	if (ptep)
 		pkey = pte_to_pkey_bits(pte_val(READ_ONCE(*ptep)));
 	local_irq_restore(flags);
+	end_lockless_pgtbl_walk(mm);
 
 	return pkey;
 }
--
2.20.1