Message-Id: <1432813957-46874-5-git-send-email-dingel@linux.vnet.ibm.com>
Date: Thu, 28 May 2015 13:52:36 +0200
From: Dominik Dingel <dingel@...ux.vnet.ibm.com>
To: linux-kernel@...r.kernel.org
Cc: Russell King <linux@....linux.org.uk>,
Catalin Marinas <catalin.marinas@....com>,
Will Deacon <will.deacon@....com>,
Tony Luck <tony.luck@...el.com>,
Fenghua Yu <fenghua.yu@...el.com>,
James Hogan <james.hogan@...tec.com>,
Ralf Baechle <ralf@...ux-mips.org>,
Benjamin Herrenschmidt <benh@...nel.crashing.org>,
Paul Mackerras <paulus@...ba.org>,
Michael Ellerman <mpe@...erman.id.au>,
Martin Schwidefsky <schwidefsky@...ibm.com>,
Heiko Carstens <heiko.carstens@...ibm.com>,
linux390@...ibm.com, "David S. Miller" <davem@...emloft.net>,
Chris Metcalf <cmetcalf@...hip.com>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>,
"H. Peter Anvin" <hpa@...or.com>, x86@...nel.org,
Andrew Morton <akpm@...ux-foundation.org>,
Zhang Zhen <zhenzhang.zhang@...wei.com>,
Dominik Dingel <dingel@...ux.vnet.ibm.com>,
David Rientjes <rientjes@...gle.com>,
"Aneesh Kumar K.V" <aneesh.kumar@...ux.vnet.ibm.com>,
Nathan Lynch <nathan_lynch@...tor.com>,
Christian Borntraeger <borntraeger@...ibm.com>,
Andy Lutomirski <luto@...capital.net>,
Michael Holzheu <holzheu@...ux.vnet.ibm.com>,
Hugh Dickins <hughd@...gle.com>,
Naoya Horiguchi <n-horiguchi@...jp.nec.com>,
Paolo Bonzini <pbonzini@...hat.com>,
"Jason J. Herne" <jjherne@...ux.vnet.ibm.com>,
Davidlohr Bueso <dave@...olabs.net>,
"Kirill A. Shutemov" <kirill.shutemov@...ux.intel.com>,
Luiz Capitulino <lcapitulino@...hat.com>,
Mike Kravetz <mike.kravetz@...cle.com>,
linux-arm-kernel@...ts.infradead.org, linux-ia64@...r.kernel.org,
linux-metag@...r.kernel.org, linux-mips@...ux-mips.org,
linuxppc-dev@...ts.ozlabs.org, linux-s390@...r.kernel.org,
linux-sh@...r.kernel.org, sparclinux@...r.kernel.org,
linux-mm@...ck.org
Subject: [PATCH 4/5] s390/hugetlb: remove dead code for sw emulated huge pages

We now support hugepages only on hardware with EDAT1 support, so remove
the arch_prepare_hugepage/arch_release_hugepage hooks and simplify
set_huge_pte_at() and huge_ptep_get().
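
For reference, with the software-emulation path gone the two helpers
reduce to roughly the following (a sketch of the post-patch state,
mirroring the hunks below):

    void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                         pte_t *ptep, pte_t pte)
    {
            /* A huge pte is really a segment table entry on s390:
             * convert it and mark it as a large segment before storing.
             */
            pmd_t pmd = __pte_to_pmd(pte);

            pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
            *(pmd_t *) ptep = pmd;
    }

    pte_t huge_ptep_get(pte_t *ptep)
    {
            /* Read the segment table entry and convert it back to a
             * pte for the generic hugetlb code.
             */
            pmd_t pmd = *(pmd_t *) ptep;

            return __pmd_to_pte(pmd);
    }

The removed branches only mattered on machines without hardware large
page support, where a page table stashed in page[1].index emulated the
huge mapping and the dirty/young state could not be tracked.
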
Acked-by: Martin Schwidefsky <schwidefsky@...ibm.com>
Signed-off-by: Dominik Dingel <dingel@...ux.vnet.ibm.com>
---
 arch/s390/include/asm/hugetlb.h |  3 ---
 arch/s390/mm/hugetlbpage.c      | 60 +++--------------------------------------
 2 files changed, 3 insertions(+), 60 deletions(-)

diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h
index dfb542a..0130d03 100644
--- a/arch/s390/include/asm/hugetlb.h
+++ b/arch/s390/include/asm/hugetlb.h
@@ -37,9 +37,6 @@ static inline int prepare_hugepage_range(struct file *file,
 
 #define arch_clear_hugepage_flags(page)	do { } while (0)
 
-int arch_prepare_hugepage(struct page *page);
-void arch_release_hugepage(struct page *page);
-
 static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
 				  pte_t *ptep)
 {
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index fa6e1bc..999616b 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -80,31 +80,16 @@ static inline pte_t __pmd_to_pte(pmd_t pmd)
 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 		     pte_t *ptep, pte_t pte)
 {
-	pmd_t pmd;
+	pmd_t pmd = __pte_to_pmd(pte);
 
-	pmd = __pte_to_pmd(pte);
-	if (!MACHINE_HAS_HPAGE) {
-		/* Emulated huge ptes loose the dirty and young bit */
-		pmd_val(pmd) &= ~_SEGMENT_ENTRY_ORIGIN;
-		pmd_val(pmd) |= pte_page(pte)[1].index;
-	} else
-		pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
+	pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
 	*(pmd_t *) ptep = pmd;
 }
 
 pte_t huge_ptep_get(pte_t *ptep)
 {
-	unsigned long origin;
-	pmd_t pmd;
+	pmd_t pmd = *(pmd_t *) ptep;
 
-	pmd = *(pmd_t *) ptep;
-	if (!MACHINE_HAS_HPAGE && pmd_present(pmd)) {
-		origin = pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN;
-		pmd_val(pmd) &= ~_SEGMENT_ENTRY_ORIGIN;
-		pmd_val(pmd) |= *(unsigned long *) origin;
-		/* Emulated huge ptes are young and dirty by definition */
-		pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG | _SEGMENT_ENTRY_DIRTY;
-	}
 	return __pmd_to_pte(pmd);
 }
 
@@ -119,45 +104,6 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
 	return pte;
 }
 
-int arch_prepare_hugepage(struct page *page)
-{
-	unsigned long addr = page_to_phys(page);
-	pte_t pte;
-	pte_t *ptep;
-	int i;
-
-	if (MACHINE_HAS_HPAGE)
-		return 0;
-
-	ptep = (pte_t *) pte_alloc_one(&init_mm, addr);
-	if (!ptep)
-		return -ENOMEM;
-
-	pte_val(pte) = addr;
-	for (i = 0; i < PTRS_PER_PTE; i++) {
-		set_pte_at(&init_mm, addr + i * PAGE_SIZE, ptep + i, pte);
-		pte_val(pte) += PAGE_SIZE;
-	}
-	page[1].index = (unsigned long) ptep;
-	return 0;
-}
-
-void arch_release_hugepage(struct page *page)
-{
-	pte_t *ptep;
-
-	if (MACHINE_HAS_HPAGE)
-		return;
-
-	ptep = (pte_t *) page[1].index;
-	if (!ptep)
-		return;
-	clear_table((unsigned long *) ptep, _PAGE_INVALID,
-		    PTRS_PER_PTE * sizeof(pte_t));
-	page_table_free(&init_mm, (unsigned long *) ptep);
-	page[1].index = 0;
-}
-
 pte_t *huge_pte_alloc(struct mm_struct *mm,
 		      unsigned long addr, unsigned long sz)
 {
--
2.3.7