Date: Mon, 17 Feb 2020 10:33:51 +0530
From: Anshuman Khandual <anshuman.khandual@....com>
To: linux-mm@...ck.org, linux-kernel@...r.kernel.org
Cc: Anshuman Khandual <anshuman.khandual@....com>,
	Paul Mackerras <paulus@...abs.org>,
	Benjamin Herrenschmidt <benh@...nel.crashing.org>,
	Michael Ellerman <mpe@...erman.id.au>,
	Alexander Viro <viro@...iv.linux.org.uk>,
	Will Deacon <will@...nel.org>,
	"Aneesh Kumar K.V" <aneesh.kumar@...ux.ibm.com>,
	Andrew Morton <akpm@...ux-foundation.org>,
	Nick Piggin <npiggin@...il.com>,
	Peter Zijlstra <peterz@...radead.org>,
	Arnd Bergmann <arnd@...db.de>,
	Ingo Molnar <mingo@...hat.com>,
	Arnaldo Carvalho de Melo <acme@...nel.org>,
	kvm-ppc@...r.kernel.org, linuxppc-dev@...ts.ozlabs.org,
	linux-fsdevel@...r.kernel.org, linux-arch@...r.kernel.org
Subject: [PATCH 3/5] mm/vma: Replace all remaining open encodings with is_vm_hugetlb_page()

This replaces all remaining open encodings of the VM_HUGETLB flag check
with is_vm_hugetlb_page().

Cc: Paul Mackerras <paulus@...abs.org>
Cc: Benjamin Herrenschmidt <benh@...nel.crashing.org>
Cc: Michael Ellerman <mpe@...erman.id.au>
Cc: Alexander Viro <viro@...iv.linux.org.uk>
Cc: Will Deacon <will@...nel.org>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@...ux.ibm.com>
Cc: Andrew Morton <akpm@...ux-foundation.org>
Cc: Nick Piggin <npiggin@...il.com>
Cc: Peter Zijlstra <peterz@...radead.org>
Cc: Arnd Bergmann <arnd@...db.de>
Cc: Ingo Molnar <mingo@...hat.com>
Cc: Arnaldo Carvalho de Melo <acme@...nel.org>
Cc: kvm-ppc@...r.kernel.org
Cc: linuxppc-dev@...ts.ozlabs.org
Cc: linux-kernel@...r.kernel.org
Cc: linux-fsdevel@...r.kernel.org
Cc: linux-arch@...r.kernel.org
Cc: linux-mm@...ck.org
Signed-off-by: Anshuman Khandual <anshuman.khandual@....com>
---
 arch/powerpc/kvm/e500_mmu_host.c | 2 +-
 fs/binfmt_elf.c                  | 2 +-
 include/asm-generic/tlb.h        | 2 +-
 kernel/events/core.c             | 3 ++-
 4 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index 425d13806645..3922575a1c31 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -422,7 +422,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 			break;
 		}
 	} else if (vma && hva >= vma->vm_start &&
-		   (vma->vm_flags & VM_HUGETLB)) {
+		   (is_vm_hugetlb_page(vma))) {
 		unsigned long psize = vma_kernel_pagesize(vma);

 		tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index f4713ea76e82..6bc97ede10ba 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -1317,7 +1317,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
 	}

 	/* Hugetlb memory check */
-	if (vma->vm_flags & VM_HUGETLB) {
+	if (is_vm_hugetlb_page(vma)) {
 		if ((vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_SHARED))
 			goto whole;
 		if (!(vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_PRIVATE))
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index f391f6b500b4..d42c236d4965 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -398,7 +398,7 @@ tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
	 * We rely on tlb_end_vma() to issue a flush, such that when we reset
	 * these values the batch is empty.
	 */
-	tlb->vma_huge = !!(vma->vm_flags & VM_HUGETLB);
+	tlb->vma_huge = is_vm_hugetlb_page(vma);
 	tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
 }

diff --git a/kernel/events/core.c b/kernel/events/core.c
index e453589da97c..eb0ee3c5f322 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -28,6 +28,7 @@
 #include <linux/export.h>
 #include <linux/vmalloc.h>
 #include <linux/hardirq.h>
+#include <linux/hugetlb_inline.h>
 #include <linux/rculist.h>
 #include <linux/uaccess.h>
 #include <linux/syscalls.h>
@@ -7693,7 +7694,7 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
 		flags |= MAP_EXECUTABLE;
 	if (vma->vm_flags & VM_LOCKED)
 		flags |= MAP_LOCKED;
-	if (vma->vm_flags & VM_HUGETLB)
+	if (is_vm_hugetlb_page(vma))
 		flags |= MAP_HUGETLB;

 	if (file) {
--
2.20.1
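
For readers unfamiliar with the helper: is_vm_hugetlb_page() is declared in
include/linux/hugetlb_inline.h (which is why the kernel/events/core.c hunk adds
that include), and it is essentially a readability wrapper around the same
VM_HUGETLB flag test. A minimal sketch of its shape, paraphrased from that
header around this kernel version and not part of this patch:

/* Paraphrased sketch of include/linux/hugetlb_inline.h, not part of this patch. */
#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mm.h>

static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma)
{
	/* The same test the call sites above used to open-code. */
	return !!(vma->vm_flags & VM_HUGETLB);
}

#else

static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma)
{
	/* With hugetlb support compiled out, no VMA is a hugetlb mapping. */
	return false;
}

#endif /* CONFIG_HUGETLB_PAGE */

Using the helper keeps the intent obvious at each call site and, because the
!CONFIG_HUGETLB_PAGE stub is a constant false, allows the compiler to drop
hugetlb-only branches in configurations built without hugetlb support.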