Date: Thu, 17 Mar 2022 16:48:26 -0700
From: Yang Shi <shy828301@...il.com>
To: vbabka@...e.cz, kirill.shutemov@...ux.intel.com, linmiaohe@...wei.com,
	songliubraving@...com, riel@...riel.com, willy@...radead.org,
	ziy@...dia.com, akpm@...ux-foundation.org, tytso@....edu,
	adilger.kernel@...ger.ca, darrick.wong@...cle.com
Cc: shy828301@...il.com, linux-mm@...ck.org, linux-fsdevel@...r.kernel.org,
	linux-ext4@...r.kernel.org, linux-xfs@...r.kernel.org,
	linux-kernel@...r.kernel.org
Subject: [v2 PATCH 7/8] mm: khugepaged: introduce khugepaged_enter_file() helper

The following patch will have filesystem code call khugepaged_enter()
to make read-only FS THP collapse more consistent.  Extract the
implementation currently used by shmem into a khugepaged_enter_file()
helper so that it can be reused by other filesystems, and export the
symbol for modules.

Signed-off-by: Yang Shi <shy828301@...il.com>
---
 include/linux/khugepaged.h |  6 ++++++
 mm/khugepaged.c            | 11 +++++++++++
 mm/shmem.c                 | 14 ++++----------
 3 files changed, 21 insertions(+), 10 deletions(-)

diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h
index 54e169116d49..06464e9a1f91 100644
--- a/include/linux/khugepaged.h
+++ b/include/linux/khugepaged.h
@@ -21,6 +21,8 @@ extern void khugepaged_fork(struct mm_struct *mm,
 extern void khugepaged_exit(struct mm_struct *mm);
 extern void khugepaged_enter(struct vm_area_struct *vma,
 			     unsigned long vm_flags);
+extern void khugepaged_enter_file(struct vm_area_struct *vma,
+				  unsigned long vm_flags);
 extern void khugepaged_min_free_kbytes_update(void);
 
 #ifdef CONFIG_SHMEM
@@ -53,6 +55,10 @@ static inline void khugepaged_enter(struct vm_area_struct *vma,
 				    unsigned long vm_flags)
 {
 }
+static inline void khugepaged_enter_file(struct vm_area_struct *vma,
+					 unsigned long vm_flags)
+{
+}
 static inline void khugepaged_enter_vma_merge(struct vm_area_struct *vma,
 					      unsigned long vm_flags)
 {
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 4cb4379ecf25..93c9072983e2 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -577,6 +577,17 @@ void khugepaged_enter(struct vm_area_struct *vma, unsigned long vm_flags)
 		__khugepaged_enter(vma->vm_mm);
 }
 
+void khugepaged_enter_file(struct vm_area_struct *vma, unsigned long vm_flags)
+{
+	if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
+	    khugepaged_enabled() &&
+	    (((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
+	     (vma->vm_end & HPAGE_PMD_MASK)))
+		if (hugepage_vma_check(vma, vm_flags))
+			__khugepaged_enter(vma->vm_mm);
+}
+EXPORT_SYMBOL_GPL(khugepaged_enter_file);
+
 static void release_pte_page(struct page *page)
 {
 	mod_node_page_state(page_pgdat(page),
diff --git a/mm/shmem.c b/mm/shmem.c
index a09b29ec2b45..c2346e5d2b24 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2233,11 +2233,9 @@ static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
 
 	file_accessed(file);
 	vma->vm_ops = &shmem_vm_ops;
-	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
-			((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
-			(vma->vm_end & HPAGE_PMD_MASK)) {
-		khugepaged_enter(vma, vma->vm_flags);
-	}
+
+	khugepaged_enter_file(vma, vma->vm_flags);
+
 	return 0;
 }
 
@@ -4132,11 +4130,7 @@ int shmem_zero_setup(struct vm_area_struct *vma)
 	vma->vm_file = file;
 	vma->vm_ops = &shmem_vm_ops;
 
-	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
-			((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
-			(vma->vm_end & HPAGE_PMD_MASK)) {
-		khugepaged_enter(vma, vma->vm_flags);
-	}
+	khugepaged_enter_file(vma, vma->vm_flags);
 
 	return 0;
 }
-- 
2.26.3
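
For context, a filesystem that wants its file-backed VMAs considered for
THP collapse would call the new exported helper from its ->mmap handler,
much as shmem_mmap() does after this patch. A minimal sketch, assuming a
hypothetical filesystem "foofs" with its own foofs_vm_ops (both names
invented for illustration, not part of this patch):

static int foofs_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &foofs_vm_ops;

	/*
	 * Register vma->vm_mm with khugepaged.  The helper itself
	 * verifies that the VMA spans at least one PMD-aligned,
	 * PMD-sized range and that hugepage_vma_check() passes, so
	 * the caller needs no open-coded eligibility test.
	 */
	khugepaged_enter_file(vma, vma->vm_flags);

	return 0;
}

Keeping the alignment check inside the helper (rather than duplicating
the IS_ENABLED/HPAGE_PMD_MASK test at every call site, as shmem did
before) is what makes the call sites this small.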