Message-ID: <1601865614-4918-3-git-send-email-chinwen.chang@mediatek.com>
Date: Mon, 5 Oct 2020 10:40:13 +0800
From: Chinwen Chang <chinwen.chang@...iatek.com>
To: Andrew Morton <akpm@...ux-foundation.org>
CC: <linux-mm@...ck.org>, <linux-kernel@...r.kernel.org>,
Chinwen Chang <chinwen.chang@...iatek.com>,
Michel Lespinasse <walken@...gle.com>
Subject: [RESEND, PATCH v4 2/3] mm: smaps*: extend smap_gather_stats to support specified beginning

Extend smap_gather_stats to support a specified beginning address from
which it should start gathering. To achieve this, add a new parameter
@start assigned by the caller and refactor the function for simplicity.

If @start is 0, the whole range of @vma is used for gathering.
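
For illustration only (not part of this patch), a caller that wants to
resume gathering part-way through a VMA would use the new parameter
roughly as in the sketch below; the variable restart_addr is
hypothetical, and the actual in-kernel user is expected to come with the
smaps_rollup change later in this series:

	/*
	 * Hypothetical caller: resume gathering from a saved address
	 * inside @vma instead of from vma->vm_start.
	 */
	if (restart_addr > vma->vm_start && restart_addr < vma->vm_end)
		smap_gather_stats(vma, &mss, restart_addr);
	else
		/* 0 means: gather over the whole range of @vma */
		smap_gather_stats(vma, &mss, 0);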

Change since v2:
- This is a new change to make the retry behavior of smaps_rollup
  more complete as suggested by Michel [1]

[1] https://lore.kernel.org/lkml/CANN689FtCsC71cjAjs0GPspOhgo_HRj+diWsoU1wr98YPktgWg@mail.gmail.com/
Change-Id: I8652e0ee6c5e93fb56376a68d71ed6cdd8ac10e8
Signed-off-by: Chinwen Chang <chinwen.chang@...iatek.com>
CC: Michel Lespinasse <walken@...gle.com>
Reviewed-by: Steven Price <steven.price@....com>
---
fs/proc/task_mmu.c | 30 ++++++++++++++++++++++--------
1 file changed, 22 insertions(+), 8 deletions(-)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index dbda449..76e623a 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -723,9 +723,21 @@ static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
.pte_hole = smaps_pte_hole,
};
+/*
+ * Gather mem stats from @vma with the indicated beginning
+ * address @start, and keep them in @mss.
+ *
+ * Use vm_start of @vma as the beginning address if @start is 0.
+ */
static void smap_gather_stats(struct vm_area_struct *vma,
- struct mem_size_stats *mss)
+ struct mem_size_stats *mss, unsigned long start)
{
+ const struct mm_walk_ops *ops = &smaps_walk_ops;
+
+ /* Invalid start */
+ if (start >= vma->vm_end)
+ return;
+
#ifdef CONFIG_SHMEM
/* In case of smaps_rollup, reset the value from previous vma */
mss->check_shmem_swap = false;
@@ -742,18 +754,20 @@ static void smap_gather_stats(struct vm_area_struct *vma,
*/
unsigned long shmem_swapped = shmem_swap_usage(vma);
- if (!shmem_swapped || (vma->vm_flags & VM_SHARED) ||
- !(vma->vm_flags & VM_WRITE)) {
+ if (!start && (!shmem_swapped || (vma->vm_flags & VM_SHARED) ||
+ !(vma->vm_flags & VM_WRITE))) {
mss->swap += shmem_swapped;
} else {
mss->check_shmem_swap = true;
- walk_page_vma(vma, &smaps_shmem_walk_ops, mss);
- return;
+ ops = &smaps_shmem_walk_ops;
}
}
#endif
/* mmap_lock is held in m_start */
- walk_page_vma(vma, &smaps_walk_ops, mss);
+ if (!start)
+ walk_page_vma(vma, ops, mss);
+ else
+ walk_page_range(vma->vm_mm, start, vma->vm_end, ops, mss);
}
#define SEQ_PUT_DEC(str, val) \
@@ -805,7 +819,7 @@ static int show_smap(struct seq_file *m, void *v)
memset(&mss, 0, sizeof(mss));
- smap_gather_stats(vma, &mss);
+ smap_gather_stats(vma, &mss, 0);
show_map_vma(m, vma);
@@ -854,7 +868,7 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
hold_task_mempolicy(priv);
for (vma = priv->mm->mmap; vma; vma = vma->vm_next) {
- smap_gather_stats(vma, &mss);
+ smap_gather_stats(vma, &mss, 0);
last_vma_end = vma->vm_end;
}
--
1.9.1