Message-ID: <20220621204632.3370049-32-Liam.Howlett@oracle.com>
Date: Tue, 21 Jun 2022 20:47:02 +0000
From: Liam Howlett <liam.howlett@...cle.com>
To: "maple-tree@...ts.infradead.org" <maple-tree@...ts.infradead.org>,
"linux-mm@...ck.org" <linux-mm@...ck.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
Andrew Morton <akpm@...ux-foundation.org>,
"damon @ lists . linux . dev" <damon@...ts.linux.dev>,
SeongJae Park <sj@...nel.org>,
David Hildenbrand <david@...hat.com>
Subject: [PATCH v10 31/69] arm64: Change elfcore for_each_mte_vma() to use VMA
iterator
From: "Liam R. Howlett" <Liam.Howlett@...cle.com>
Rework for_each_mte_vma() to use a VMA iterator instead of an explicit
linked-list.
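
For reference, the iteration pattern this switches to looks roughly like the
sketch below (illustration only, not part of the patch; count_mte_vmas() is a
hypothetical helper, and the caller is assumed to already hold the mmap lock,
as the coredump path does):

	/* Walk the mm's VMAs with the maple-tree backed VMA iterator
	 * instead of following the old vma->vm_next linked list.
	 */
	static int count_mte_vmas(struct mm_struct *mm)
	{
		struct vm_area_struct *vma;
		int count = 0;
		VMA_ITERATOR(vmi, mm, 0);	/* start the walk at address 0 */

		if (!system_supports_mte())
			return 0;

		for_each_vma(vmi, vma)
			if (vma->vm_flags & VM_MTE)
				count++;

		return count;
	}

This mirrors what elf_core_extra_phdrs() does after the change: the
VMA_ITERATOR() declaration replaces the explicit tsk->mm->mmap starting point,
and for_each_vma() replaces the vm_next walk.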
Link: https://lkml.kernel.org/r/20220504011345.662299-16-Liam.Howlett@oracle.com
Signed-off-by: Liam R. Howlett <Liam.Howlett@...cle.com>
Acked-by: Catalin Marinas <catalin.marinas@....com>
Link: https://lore.kernel.org/r/20220218023650.672072-1-Liam.Howlett@oracle.com
Signed-off-by: Will Deacon <will@...nel.org>
Cc: David Howells <dhowells@...hat.com>
Cc: "Matthew Wilcox (Oracle)" <willy@...radead.org>
Cc: SeongJae Park <sj@...nel.org>
Cc: Vlastimil Babka <vbabka@...e.cz>
Cc: Davidlohr Bueso <dave@...olabs.net>
Signed-off-by: Andrew Morton <akpm@...ux-foundation.org>
---
arch/arm64/kernel/elfcore.c | 16 ++++++++++------
1 file changed, 10 insertions(+), 6 deletions(-)
diff --git a/arch/arm64/kernel/elfcore.c b/arch/arm64/kernel/elfcore.c
index 98d67444a5b6..27ef7ad3ffd2 100644
--- a/arch/arm64/kernel/elfcore.c
+++ b/arch/arm64/kernel/elfcore.c
@@ -8,9 +8,9 @@
 #include <asm/cpufeature.h>
 #include <asm/mte.h>
 
-#define for_each_mte_vma(tsk, vma)					\
+#define for_each_mte_vma(vmi, vma)					\
 	if (system_supports_mte())					\
-		for (vma = tsk->mm->mmap; vma; vma = vma->vm_next)	\
+		for_each_vma(vmi, vma)					\
 			if (vma->vm_flags & VM_MTE)
 
 static unsigned long mte_vma_tag_dump_size(struct vm_area_struct *vma)
@@ -81,8 +81,9 @@ Elf_Half elf_core_extra_phdrs(void)
 {
 	struct vm_area_struct *vma;
 	int vma_count = 0;
+	VMA_ITERATOR(vmi, current->mm, 0);
 
-	for_each_mte_vma(current, vma)
+	for_each_mte_vma(vmi, vma)
 		vma_count++;
 
 	return vma_count;
@@ -91,8 +92,9 @@ Elf_Half elf_core_extra_phdrs(void)
 int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset)
 {
 	struct vm_area_struct *vma;
+	VMA_ITERATOR(vmi, current->mm, 0);
 
-	for_each_mte_vma(current, vma) {
+	for_each_mte_vma(vmi, vma) {
 		struct elf_phdr phdr;
 
 		phdr.p_type = PT_AARCH64_MEMTAG_MTE;
@@ -116,8 +118,9 @@ size_t elf_core_extra_data_size(void)
 {
 	struct vm_area_struct *vma;
 	size_t data_size = 0;
+	VMA_ITERATOR(vmi, current->mm, 0);
 
-	for_each_mte_vma(current, vma)
+	for_each_mte_vma(vmi, vma)
 		data_size += mte_vma_tag_dump_size(vma);
 
 	return data_size;
@@ -126,8 +129,9 @@ size_t elf_core_extra_data_size(void)
 int elf_core_write_extra_data(struct coredump_params *cprm)
 {
 	struct vm_area_struct *vma;
+	VMA_ITERATOR(vmi, current->mm, 0);
 
-	for_each_mte_vma(current, vma) {
+	for_each_mte_vma(vmi, vma) {
 		if (vma->vm_flags & VM_DONTDUMP)
 			continue;
 
--
2.35.1