[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20210211000322.159437-2-mike.kravetz@oracle.com>
Date: Wed, 10 Feb 2021 16:03:18 -0800
From: Mike Kravetz <mike.kravetz@...cle.com>
To: linux-mm@...ck.org, linux-kernel@...r.kernel.org,
linux-s390@...r.kernel.org
Cc: shu wang <malate_wangshu@...mail.com>,
Axel Rasmussen <axelrasmussen@...gle.com>,
Peter Xu <peterx@...hat.com>,
Andrea Arcangeli <aarcange@...hat.com>,
Heiko Carstens <hca@...ux.ibm.com>,
Alexey Dobriyan <adobriyan@...il.com>,
Matthew Wilcox <willy@...radead.org>,
Michel Lespinasse <walken@...gle.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Mike Kravetz <mike.kravetz@...cle.com>
Subject: [RFC PATCH 1/5] hugetlb: add hugetlb helpers for soft dirty support
Add interfaces to set and clear soft dirty in hugetlb ptes. Make
hugetlb interfaces needed for /proc clear_refs available outside
hugetlb.c.
arch/s390 has its own version of most routines in asm-generic/hugetlb.h,
so add new routines there as well.
Signed-off-by: Mike Kravetz <mike.kravetz@...cle.com>
---
arch/s390/include/asm/hugetlb.h | 30 ++++++++++++++++++++++++++++++
include/asm-generic/hugetlb.h | 30 ++++++++++++++++++++++++++++++
include/linux/hugetlb.h | 1 +
mm/hugetlb.c | 10 +---------
4 files changed, 62 insertions(+), 9 deletions(-)
diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h
index 60f9241e5e4a..b7d26248fb1c 100644
--- a/arch/s390/include/asm/hugetlb.h
+++ b/arch/s390/include/asm/hugetlb.h
@@ -105,6 +105,11 @@ static inline pte_t huge_pte_mkdirty(pte_t pte)
return pte_mkdirty(pte);
}
+static inline pte_t huge_pte_mkyoung(pte_t pte)
+{
+ return pte_mkyoung(pte);
+}
+
static inline pte_t huge_pte_wrprotect(pte_t pte)
{
return pte_wrprotect(pte);
@@ -115,9 +120,34 @@ static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot)
return pte_modify(pte, newprot);
}
+static inline bool huge_pte_soft_dirty(pte_t pte)
+{
+ return pte_soft_dirty(pte);
+}
+
+static inline pte_t huge_pte_clear_soft_dirty(pte_t pte)
+{
+ return pte_clear_soft_dirty(pte);
+}
+
+static inline pte_t huge_pte_swp_clear_soft_dirty(pte_t pte)
+{
+ return pte_swp_clear_soft_dirty(pte);
+}
+
static inline bool gigantic_page_runtime_supported(void)
{
return true;
}
+#if !defined(__HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE) && !defined(MODULE)
+#include <asm/tlbflush.h>
+
+static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ flush_tlb_range(vma, start, end);
+}
+#endif
+
#endif /* _ASM_S390_HUGETLB_H */
diff --git a/include/asm-generic/hugetlb.h b/include/asm-generic/hugetlb.h
index 8e1e6244a89d..d8a78dab63bf 100644
--- a/include/asm-generic/hugetlb.h
+++ b/include/asm-generic/hugetlb.h
@@ -27,11 +27,31 @@ static inline pte_t huge_pte_mkdirty(pte_t pte)
return pte_mkdirty(pte);
}
+static inline pte_t huge_pte_mkyoung(pte_t pte)
+{
+ return pte_mkyoung(pte);
+}
+
static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot)
{
return pte_modify(pte, newprot);
}
+static inline bool huge_pte_soft_dirty(pte_t pte)
+{
+ return pte_soft_dirty(pte);
+}
+
+static inline pte_t huge_pte_clear_soft_dirty(pte_t pte)
+{
+ return pte_clear_soft_dirty(pte);
+}
+
+static inline pte_t huge_pte_swp_clear_soft_dirty(pte_t pte)
+{
+ return pte_swp_clear_soft_dirty(pte);
+}
+
#ifndef __HAVE_ARCH_HUGE_PTE_CLEAR
static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, unsigned long sz)
@@ -133,4 +153,14 @@ static inline bool gigantic_page_runtime_supported(void)
}
#endif /* __HAVE_ARCH_GIGANTIC_PAGE_RUNTIME_SUPPORTED */
+#if !defined(__HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE) && !defined(MODULE)
+#include <asm/tlbflush.h>
+
+static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ flush_tlb_range(vma, start, end);
+}
+#endif
+
#endif /* _ASM_GENERIC_HUGETLB_H */
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index b5807f23caf8..7b6c35c5df99 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -187,6 +187,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
unsigned long address, unsigned long end, pgprot_t newprot);
bool is_hugetlb_entry_migration(pte_t pte);
+bool is_hugetlb_entry_hwpoisoned(pte_t pte);
#else /* !CONFIG_HUGETLB_PAGE */
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 4bdb58ab14cb..47f3123afd1a 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3759,7 +3759,7 @@ bool is_hugetlb_entry_migration(pte_t pte)
return false;
}
-static bool is_hugetlb_entry_hwpoisoned(pte_t pte)
+bool is_hugetlb_entry_hwpoisoned(pte_t pte)
{
swp_entry_t swp;
@@ -4965,14 +4965,6 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
return i ? i : err;
}
-#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
-/*
- * ARCHes with special requirements for evicting HUGETLB backing TLB entries can
- * implement this.
- */
-#define flush_hugetlb_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
-#endif
-
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
unsigned long address, unsigned long end, pgprot_t newprot)
{
--
2.29.2
Powered by blists - more mailing lists